```
import pandas as pd
import numpy as np
import pickle as pk
file_name = '1_min'
df = pd.read_csv(file_name + '.csv')
df['behavior'] = np.zeros(len(df), dtype=int)
intention_2_action_delay = 3000
acc_threshold = 1
# 0 for changing to left
# 1 for changing to right
# 2 for following
next_lane_change_time = dict()
next_lane_change_direct = dict()
next_vel_change_time = dict()
next_vel_change_direct = dict()
def classify_behavior(v_id, cur_time):
if next_lane_change_time[v_id] > -1 and next_lane_change_time[v_id] - cur_time < intention_2_action_delay:
return next_lane_change_direct[v_id]
return 2
# if next_vel_change_time[v_id] > -1 and next_vel_change_time[v_id] - r.Global_Time < intention_2_action_delay:
# return next_vel_change_direct[v_id] + 2
# return 4
lane_id = dict()
behavior_seq = dict()
behavior_seq_id = dict()
change_point = list()
cnt = np.zeros((5, 5))
for i in reversed(range(len(df))):
r = df.iloc[i]
v_id = r.Vehicle_ID
if v_id not in lane_id.keys():
lane_id[v_id] = r.Lane_ID
next_lane_change_time[v_id] = -1
next_vel_change_time[v_id] = -1
behavior_seq[v_id] = list()
if r.Lane_ID != lane_id[v_id]:
next_lane_change_time[v_id] = r.Global_Time
next_lane_change_direct[v_id] = int(r.Lane_ID < lane_id[v_id])
lane_id[v_id] = r.Lane_ID
# if abs(r.v_Acc) > acc_threshold:
# next_vel_change_time[v_id] = r.Global_Time
# next_vel_change_direct[v_id] = int(r.v_Acc > 0)
bhv = classify_behavior(v_id, r.Global_Time)
if len(behavior_seq[v_id])>0 and behavior_seq[v_id][-1] < 2 and bhv != behavior_seq[v_id][-1]:
change_point.append((v_id, behavior_seq_id[v_id]))
behavior_seq[v_id].append(bhv)
behavior_seq_id[v_id] = i
df.at[i,'behavior']= bhv
dT = 0.1
x = list()
y = list()
vehicles = dict()
show_up = set()
vel_sum = 0
for i in range(len(df)):
r = df.iloc[i]
v_id = r.Vehicle_ID
show_up.add(v_id)
if v_id not in vehicles.keys():
df.at[i,'lateral_acc'] = 0
df.at[i,'lateral_vel'] = 0
vehicles[v_id] = df.iloc[i].copy()
vel_sum += vehicles[v_id].v_Vel
else:
lateral_V = (r.Local_X - vehicles[v_id].Local_X) / dT
vel_sum -= vehicles[v_id].v_Vel
df.at[i,'lateral_acc'] = (lateral_V - vehicles[v_id]['lateral_vel']) /dT
df.at[i,'lateral_vel'] = lateral_V
vehicles[v_id] = df.iloc[i].copy()
vel_sum += vehicles[v_id].v_Vel
v_mean = vel_sum / len(vehicles)
df.at[i,'mean_vel'] = v_mean
#remove exited car after every moment
if i == len(df)-1 or r.Global_Time != df.iloc[i+1].Global_Time:
v_ids = list(vehicles.keys())
for v_id in v_ids:
if v_id not in show_up:
vel_sum -= vehicles[v_id].v_Vel
vehicles.pop(v_id)
show_up = set()
df[:10]
df.to_csv(file_name + '_labeled.csv')
for i in range((len(change_point)-1) // 10):
v_id, idx = change_point[i*10]
df[max(0,idx-5000):min(idx+15000,len(df))].to_csv('lane_changing_data/'+str(v_id)+'_'+str(idx)+'.csv')
len(change_point)
```
|
github_jupyter
|
### Testing accuracy of the RF classifier for the lightly loaded case, training and testing with all rotational speeds
```
from jupyterthemes import get_themes
import jupyterthemes as jt
from jupyterthemes.stylefx import set_nb_theme
set_nb_theme('chesterish')
import pandas as pd
data_10=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Train10hz.csv')
data_20=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Train20hz.csv')
data_30=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Train30Hz.csv')
data_15=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Train15hz.csv')
data_25=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Train25hz.csv')
data_35=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Train35Hz.csv')
data_40=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Train40Hz.csv')
data_10=data_10.head(44990)
data_15=data_15.head(44990)
data_20=data_20.head(44990)
data_25=data_25.head(44990)
data_30=data_30.head(44990)
data_35=data_35.head(44990)
data_40=data_40.head(44990)
data_25.head()
#shuffling
data_20=data_20.sample(frac=1)
data_30=data_30.sample(frac=1)
data_40=data_40.sample(frac=1)
data_10=data_10.sample(frac=1)
data_15=data_15.sample(frac=1)
data_25=data_25.sample(frac=1)
data_35=data_35.sample(frac=1)
data_25.head()
import sklearn as sk
```
### Assigning X and y for training
```
dataset_10=data_10.values
X_10= dataset_10[:,0:9]
print(X_10)
y_10=dataset_10[:,9]
print(y_10)
dataset_15=data_15.values
X_15= dataset_15[:,0:9]
print(X_15)
y_15=dataset_15[:,9]
print(y_15)
dataset_20=data_20.values
X_20= dataset_20[:,0:9]
print(X_20)
y_20=dataset_20[:,9]
print(y_20)
dataset_25=data_25.values
X_25= dataset_25[:,0:9]
print(X_25)
y_25=dataset_25[:,9]
print(y_25)
dataset_30=data_30.values
X_30= dataset_30[:,0:9]
print(X_30)
y_30=dataset_30[:,9]
print(y_30)
dataset_35=data_35.values
X_35= dataset_35[:,0:9]
print(X_35)
y_35=dataset_35[:,9]
print(y_35)
dataset_40=data_40.values
X_40= dataset_40[:,0:9]
print(X_40)
y_40=dataset_40[:,9]
print(y_40)
```
### Training Random Forest Classifier
```
from sklearn.ensemble import RandomForestClassifier
rf_10 = RandomForestClassifier(n_estimators = 1000, random_state = 42)
rf_10.fit(X_10, y_10);
from sklearn.ensemble import RandomForestClassifier
rf_15 = RandomForestClassifier(n_estimators = 1000, random_state = 42)
rf_15.fit(X_15, y_15);
from sklearn.ensemble import RandomForestClassifier
rf_20 = RandomForestClassifier(n_estimators = 1000, random_state = 42)
rf_20.fit(X_20, y_20);
from sklearn.ensemble import RandomForestClassifier
rf_25 = RandomForestClassifier(n_estimators = 1000, random_state = 42)
rf_25.fit(X_25, y_25);
from sklearn.ensemble import RandomForestClassifier
rf_30 = RandomForestClassifier(n_estimators = 1000, random_state = 42)
rf_30.fit(X_30, y_30);
from sklearn.ensemble import RandomForestClassifier
rf_35 = RandomForestClassifier(n_estimators = 1000, random_state = 42)
rf_35.fit(X_35, y_35);
from sklearn.ensemble import RandomForestClassifier
rf_40 = RandomForestClassifier(n_estimators = 1000, random_state = 42)
rf_40.fit(X_40, y_40);
```
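The seven training cells above differ only in the speed suffix. As a sketch (assuming the `X_*` and `y_*` arrays defined above), the per-speed models could equally be built in a loop and kept in a dictionary:
```
from sklearn.ensemble import RandomForestClassifier

# Sketch: one RandomForestClassifier per rotational speed, keyed by speed.
speeds = [10, 15, 20, 25, 30, 35, 40]
X_train = {10: X_10, 15: X_15, 20: X_20, 25: X_25, 30: X_30, 35: X_35, 40: X_40}
y_train = {10: y_10, 15: y_15, 20: y_20, 25: y_25, 30: y_30, 35: y_35, 40: y_40}

rf_models = {}
for s in speeds:
    rf_models[s] = RandomForestClassifier(n_estimators=1000, random_state=42)
    rf_models[s].fit(X_train[s], y_train[s])
```
The `rf_10` ... `rf_40` variables used below would then simply be `rf_models[10]` ... `rf_models[40]`.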
### Importing Testing data
```
test_10=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Test10hz.csv')
test_20=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Test20hz.csv')
test_30=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Test30Hz.csv')
test_15=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Test15hz.csv')
test_25=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Test25hz.csv')
test_35=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Test35Hz.csv')
test_40=pd.read_csv(r'D:\Acads\BTP\Lightly Loaded\Test40Hz.csv')
test_10=test_10.head(99990)
test_15=test_15.head(99990)
test_20=test_20.head(99990)
test_25=test_25.head(99990)
test_30=test_30.head(99990)
test_35=test_35.head(99990)
test_40=test_40.head(99990)
#shuffling
test_20=test_20.sample(frac=1)
test_30=test_30.sample(frac=1)
test_40=test_40.sample(frac=1)
test_10=test_10.sample(frac=1)
test_15=test_15.sample(frac=1)
test_25=test_25.sample(frac=1)
test_35=test_35.sample(frac=1)
```
### Assigning X and y for testing
```
dataset_test_10 = test_10.values
X_test_10 = dataset_test_10[:,0:9]
print(X_test_10)
y_test_10= dataset_test_10[:,9]
print(y_test_10)
dataset_test_15 = test_15.values
X_test_15 = dataset_test_15[:,0:9]
print(X_test_15)
y_test_15= dataset_test_15[:,9]
print(y_test_15)
dataset_test_20 = test_20.values
X_test_20 = dataset_test_20[:,0:9]
print(X_test_20)
y_test_20= dataset_test_20[:,9]
print(y_test_20)
dataset_test_25 = test_25.values
X_test_25 = dataset_test_25[:,0:9]
print(X_test_25)
y_test_25= dataset_test_25[:,9]
print(y_test_25)
dataset_test_30 = test_30.values
X_test_30 = dataset_test_30[:,0:9]
print(X_test_30)
y_test_30= dataset_test_30[:,9]
print(y_test_30)
dataset_test_35 = test_35.values
X_test_35 = dataset_test_35[:,0:9]
print(X_test_35)
y_test_35= dataset_test_35[:,9]
print(y_test_35)
dataset_test_40 = test_40.values
X_test_40 = dataset_test_40[:,0:9]
print(X_test_40)
y_test_40= dataset_test_40[:,9]
print(y_test_40)
```
### Predictions with 10Hz Trained Model
```
import numpy as np
predictions_10 = rf_10.predict(X_test_10)
errors_10 = abs(predictions_10 - y_test_10)
print('Mean Absolute Error 10Hz with 10Hz:', round(np.mean(errors_10), 3), 'degrees.')
accuracy = 100 - np.mean(errors_10)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_15 = rf_10.predict(X_test_15)
errors_15 = abs(predictions_15 - y_test_15)
print('Mean Absolute Error 15Hz with 10Hz:', round(np.mean(errors_15), 3), 'degrees.')
accuracy = 100 - np.mean(errors_15)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_20 = rf_10.predict(X_test_20)
errors_20 = abs(predictions_20 - y_test_20)
print('Mean Absolute Error 20Hz with 10Hz:', round(np.mean(errors_20), 3), 'degrees.')
accuracy = 100 - np.mean(errors_20)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_25 = rf_10.predict(X_test_25)
errors_25 = abs(predictions_25 - y_test_25)
print('Mean Absolute Error 25Hz with 10Hz:', round(np.mean(errors_25), 3), 'degrees.')
accuracy = 100 - np.mean(errors_25)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_30 = rf_10.predict(X_test_30)
errors_30 = abs(predictions_30 - y_test_30)
print('Mean Absolute Error 30Hz with 10Hz:', round(np.mean(errors_30), 3), 'degrees.')
accuracy = 100 - np.mean(errors_30)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_35 = rf_10.predict(X_test_35)
errors_35 = abs(predictions_35 - y_test_35)
print('Mean Absolute Error 35Hz with 10Hz:', round(np.mean(errors_35), 3), 'degrees.')
accuracy = 100 - np.mean(errors_35)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_40 = rf_10.predict(X_test_40)
errors_40 = abs(predictions_40 - y_test_40)
print('Mean Absolute Error 40Hz with 10Hz:', round(np.mean(errors_40), 3), 'degrees.')
accuracy = 100 - np.mean(errors_40)
print('Accuracy:', round(accuracy, 3), '%.')
```
### Predictions with 15Hz model
```
predictions_10 = rf_15.predict(X_test_10)
errors_10 = abs(predictions_10 - y_test_10)
print('Mean Absolute Error 10Hz with 15Hz:', round(np.mean(errors_10), 3), 'degrees.')
accuracy = 100 - np.mean(errors_10)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_15 = rf_15.predict(X_test_15)
errors_15 = abs(predictions_15 - y_test_15)
print('Mean Absolute Error 15Hz with 15Hz:', round(np.mean(errors_15), 3), 'degrees.')
accuracy = 100 - np.mean(errors_15)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_20 = rf_15.predict(X_test_20)
errors_20 = abs(predictions_20 - y_test_20)
print('Mean Absolute Error 20Hz with 15Hz:', round(np.mean(errors_20), 3), 'degrees.')
accuracy = 100 - np.mean(errors_20)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_25 = rf_15.predict(X_test_25)
errors_25 = abs(predictions_25 - y_test_25)
print('Mean Absolute Error 25Hz with 15Hz:', round(np.mean(errors_25), 3), 'degrees.')
accuracy = 100 - np.mean(errors_25)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_30 = rf_15.predict(X_test_30)
errors_30 = abs(predictions_30 - y_test_30)
print('Mean Absolute Error 30Hz with 15Hz:', round(np.mean(errors_30), 3), 'degrees.')
accuracy = 100 - np.mean(errors_30)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_35 = rf_15.predict(X_test_35)
errors_35 = abs(predictions_35 - y_test_35)
print('Mean Absolute Error 35Hz with 15Hz:', round(np.mean(errors_35), 3), 'degrees.')
accuracy = 100 - np.mean(errors_35)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_40 = rf_15.predict(X_test_40)
errors_40 = abs(predictions_40 - y_test_40)
print('Mean Absolute Error 40Hz with 15Hz:', round(np.mean(errors_40), 3), 'degrees.')
accuracy = 100 - np.mean(errors_40)
print('Accuracy:', round(accuracy, 3), '%.')
```
### Predictions with 20Hz model
```
predictions_10 = rf_20.predict(X_test_10)
errors_10 = abs(predictions_10 - y_test_10)
print('Mean Absolute Error 10Hz with 20Hz:', round(np.mean(errors_10), 3), 'degrees.')
accuracy = 100 - np.mean(errors_10)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_15 = rf_20.predict(X_test_15)
errors_15 = abs(predictions_15 - y_test_15)
print('Mean Absolute Error 15Hz with 20Hz:', round(np.mean(errors_15), 3), 'degrees.')
accuracy = 100 - np.mean(errors_15)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_20 = rf_20.predict(X_test_20)
errors_20 = abs(predictions_20 - y_test_20)
print('Mean Absolute Error 20Hz with 20Hz:', round(np.mean(errors_20), 3), 'degrees.')
accuracy = 100 - np.mean(errors_20)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_25 = rf_20.predict(X_test_25)
errors_25 = abs(predictions_25 - y_test_25)
print('Mean Absolute Error 25Hz with 20Hz:', round(np.mean(errors_25), 3), 'degrees.')
accuracy = 100 - np.mean(errors_25)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_30 = rf_20.predict(X_test_30)
errors_30 = abs(predictions_30 - y_test_30)
print('Mean Absolute Error 30Hz with 20Hz:', round(np.mean(errors_30), 3), 'degrees.')
accuracy = 100 - np.mean(errors_30)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_35 = rf_20.predict(X_test_35)
errors_35 = abs(predictions_35 - y_test_35)
print('Mean Absolute Error 35Hz with 20Hz:', round(np.mean(errors_35), 3), 'degrees.')
accuracy = 100 - np.mean(errors_35)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_40 = rf_20.predict(X_test_40)
errors_40 = abs(predictions_40 - y_test_40)
print('Mean Absolute Error 40Hz with 20Hz:', round(np.mean(errors_40), 3), 'degrees.')
accuracy = 100 - np.mean(errors_40)
print('Accuracy:', round(accuracy, 3), '%.')
```
### Predictions with 25Hz model
```
predictions_10 = rf_25.predict(X_test_10)
errors_10 = abs(predictions_10 - y_test_10)
print('Mean Absolute Error 10Hz with 25Hz:', round(np.mean(errors_10), 3), 'degrees.')
accuracy = 100 - np.mean(errors_10)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_15 = rf_25.predict(X_test_15)
errors_15 = abs(predictions_15 - y_test_15)
print('Mean Absolute Error 15Hz with 25Hz:', round(np.mean(errors_15), 3), 'degrees.')
accuracy = 100 - np.mean(errors_15)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_20 = rf_25.predict(X_test_20)
errors_20 = abs(predictions_20 - y_test_20)
print('Mean Absolute Error 20Hz with 25Hz:', round(np.mean(errors_20), 3), 'degrees.')
accuracy = 100 - np.mean(errors_20)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_25 = rf_25.predict(X_test_25)
errors_25 = abs(predictions_25 - y_test_25)
print('Mean Absolute Error 25Hz with 25Hz:', round(np.mean(errors_25), 3), 'degrees.')
accuracy = 100 - np.mean(errors_25)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_30 = rf_25.predict(X_test_30)
errors_30 = abs(predictions_30 - y_test_30)
print('Mean Absolute Error 30Hz with 25Hz:', round(np.mean(errors_30), 3), 'degrees.')
accuracy = 100 - np.mean(errors_30)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_35 = rf_25.predict(X_test_35)
errors_35 = abs(predictions_35 - y_test_35)
print('Mean Absolute Error 35Hz with 25Hz:', round(np.mean(errors_35), 3), 'degrees.')
accuracy = 100 - np.mean(errors_35)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_40 = rf_25.predict(X_test_40)
errors_40 = abs(predictions_40 - y_test_40)
print('Mean Absolute Error 40Hz with 25Hz:', round(np.mean(errors_40), 3), 'degrees.')
accuracy = 100 - np.mean(errors_40)
print('Accuracy:', round(accuracy, 3), '%.')
```
### Predictions with 30Hz model
```
predictions_10 = rf_30.predict(X_test_10)
errors_10 = abs(predictions_10 - y_test_10)
print('Mean Absolute Error 10Hz with 30Hz:', round(np.mean(errors_10), 3), 'degrees.')
accuracy = 100 - np.mean(errors_10)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_15 = rf_30.predict(X_test_15)
errors_15 = abs(predictions_15 - y_test_15)
print('Mean Absolute Error 15Hz with 30Hz:', round(np.mean(errors_15), 3), 'degrees.')
accuracy = 100 - np.mean(errors_15)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_20 = rf_30.predict(X_test_20)
errors_20 = abs(predictions_20 - y_test_20)
print('Mean Absolute Error 20Hz with 30Hz:', round(np.mean(errors_20), 3), 'degrees.')
accuracy = 100 - np.mean(errors_20)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_25 = rf_30.predict(X_test_25)
errors_25 = abs(predictions_25 - y_test_25)
print('Mean Absolute Error 25Hz with 30Hz:', round(np.mean(errors_25), 3), 'degrees.')
accuracy = 100 - np.mean(errors_25)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_30 = rf_30.predict(X_test_30)
errors_30 = abs(predictions_30 - y_test_30)
print('Mean Absolute Error 30Hz with 30Hz:', round(np.mean(errors_30), 3), 'degrees.')
accuracy = 100 - np.mean(errors_30)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_35 = rf_30.predict(X_test_35)
errors_35 = abs(predictions_35 - y_test_35)
print('Mean Absolute Error 35Hz with 30Hz:', round(np.mean(errors_35), 3), 'degrees.')
accuracy = 100 - np.mean(errors_35)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_40 = rf_30.predict(X_test_40)
errors_40 = abs(predictions_40 - y_test_40)
print('Mean Absolute Error 40Hz with 30Hz:', round(np.mean(errors_40), 3), 'degrees.')
accuracy = 100 - np.mean(errors_40)
print('Accuracy:', round(accuracy, 3), '%.')
```
### Predictions with 35Hz model
```
predictions_10 = rf_35.predict(X_test_10)
errors_10 = abs(predictions_10 - y_test_10)
print('Mean Absolute Error 10Hz with 35Hz:', round(np.mean(errors_10), 3), 'degrees.')
accuracy = 100 - np.mean(errors_10)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_15 = rf_35.predict(X_test_15)
errors_15 = abs(predictions_15 - y_test_15)
print('Mean Absolute Error 15Hz with 35Hz:', round(np.mean(errors_15), 3), 'degrees.')
accuracy = 100 - np.mean(errors_15)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_20 = rf_35.predict(X_test_20)
errors_20 = abs(predictions_20 - y_test_20)
print('Mean Absolute Error 20Hz with 35Hz:', round(np.mean(errors_20), 3), 'degrees.')
accuracy = 100 - np.mean(errors_20)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_25 = rf_35.predict(X_test_25)
errors_25 = abs(predictions_25 - y_test_25)
print('Mean Absolute Error 25Hz with 35Hz:', round(np.mean(errors_25), 3), 'degrees.')
accuracy = 100 - np.mean(errors_25)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_30 = rf_35.predict(X_test_30)
errors_30 = abs(predictions_30 - y_test_30)
print('Mean Absolute Error 30Hz with 35Hz:', round(np.mean(errors_30), 3), 'degrees.')
accuracy = 100 - np.mean(errors_30)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_35 = rf_35.predict(X_test_35)
errors_35 = abs(predictions_35 - y_test_35)
print('Mean Absolute Error 35Hz with 35Hz:', round(np.mean(errors_35), 3), 'degrees.')
accuracy = 100 - np.mean(errors_35)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_40 = rf_35.predict(X_test_40)
errors_40 = abs(predictions_40 - y_test_40)
print('Mean Absolute Error 40Hz with 35Hz:', round(np.mean(errors_40), 3), 'degrees.')
accuracy = 100 - np.mean(errors_40)
print('Accuracy:', round(accuracy, 3), '%.')
```
### Predictions with 40Hz model
```
predictions_10 = rf_40.predict(X_test_10)
errors_10 = abs(predictions_10 - y_test_10)
print('Mean Absolute Error 10Hz with 40Hz:', round(np.mean(errors_10), 3), 'degrees.')
accuracy = 100 - np.mean(errors_10)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_15 = rf_40.predict(X_test_15)
errors_15 = abs(predictions_15 - y_test_15)
print('Mean Absolute Error 15Hz with 40Hz:', round(np.mean(errors_15), 3), 'degrees.')
accuracy = 100 - np.mean(errors_15)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_20 = rf_40.predict(X_test_20)
errors_20 = abs(predictions_20 - y_test_20)
print('Mean Absolute Error 20Hz with 40Hz:', round(np.mean(errors_20), 3), 'degrees.')
accuracy = 100 - np.mean(errors_20)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_25 = rf_40.predict(X_test_25)
errors_25 = abs(predictions_25 - y_test_25)
print('Mean Absolute Error 25Hz with 40Hz:', round(np.mean(errors_25), 3), 'degrees.')
accuracy = 100 - np.mean(errors_25)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_30 = rf_40.predict(X_test_30)
errors_30 = abs(predictions_30 - y_test_30)
print('Mean Absolute Error 30Hz with 40Hz:', round(np.mean(errors_30), 3), 'degrees.')
accuracy = 100 - np.mean(errors_30)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_35 = rf_40.predict(X_test_35)
errors_35 = abs(predictions_35 - y_test_35)
print('Mean Absolute Error 35Hz with 40Hz:', round(np.mean(errors_35), 3), 'degrees.')
accuracy = 100 - np.mean(errors_35)
print('Accuracy:', round(accuracy, 3), '%.')
predictions_40 = rf_40.predict(X_test_40)
errors_40 = abs(predictions_40 - y_test_40)
print('Mean Absolute Error 40Hz with 40Hz:', round(np.mean(errors_40), 3), 'degrees.')
accuracy = 100 - np.mean(errors_40)
print('Accuracy:', round(accuracy, 3), '%.')
```
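Since every model is evaluated on every test speed, the seven prediction sections above can be summarized in a single pass. The following is a sketch (assuming the `rf_*` models and `X_test_*`/`y_test_*` arrays defined earlier) that builds a training-speed by test-speed matrix using scikit-learn's `accuracy_score` rather than the MAE-based figure printed above:
```
from sklearn.metrics import accuracy_score
import pandas as pd

speeds = [10, 15, 20, 25, 30, 35, 40]
models = {10: rf_10, 15: rf_15, 20: rf_20, 25: rf_25, 30: rf_30, 35: rf_35, 40: rf_40}
X_tests = {10: X_test_10, 15: X_test_15, 20: X_test_20, 25: X_test_25,
           30: X_test_30, 35: X_test_35, 40: X_test_40}
y_tests = {10: y_test_10, 15: y_test_15, 20: y_test_20, 25: y_test_25,
           30: y_test_30, 35: y_test_35, 40: y_test_40}

# Rows: training speed of the model, columns: speed of the test set.
acc = pd.DataFrame(index=speeds, columns=speeds, dtype=float)
for train_s, model in models.items():
    for test_s in speeds:
        acc.loc[train_s, test_s] = accuracy_score(y_tests[test_s],
                                                  model.predict(X_tests[test_s]))
print(acc)
```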
|
github_jupyter
|
```
%matplotlib inline
```
# Demo Axes Grid
A 2x2 grid of images, with either a single shared colorbar or one colorbar per image.
```
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
plt.rcParams["mpl_toolkits.legacy_colorbar"] = False
def get_demo_image():
import numpy as np
from matplotlib.cbook import get_sample_data
f = get_sample_data("axes_grid/bivariate_normal.npy", asfileobj=False)
z = np.load(f)
# z is a numpy array of 15x15
return z, (-3, 4, -4, 3)
def demo_simple_grid(fig):
"""
A grid of 2x2 images with 0.05 inch pad between images and only
the lower-left axes is labeled.
"""
grid = ImageGrid(fig, 141, # similar to subplot(141)
nrows_ncols=(2, 2),
axes_pad=0.05,
label_mode="1",
)
Z, extent = get_demo_image()
for ax in grid:
ax.imshow(Z, extent=extent, interpolation="nearest")
# This only affects axes in first column and second row as share_all=False.
grid.axes_llc.set_xticks([-2, 0, 2])
grid.axes_llc.set_yticks([-2, 0, 2])
def demo_grid_with_single_cbar(fig):
"""
A grid of 2x2 images with a single colorbar
"""
grid = ImageGrid(fig, 142, # similar to subplot(142)
nrows_ncols=(2, 2),
axes_pad=0.0,
share_all=True,
label_mode="L",
cbar_location="top",
cbar_mode="single",
)
Z, extent = get_demo_image()
for ax in grid:
im = ax.imshow(Z, extent=extent, interpolation="nearest")
grid.cbar_axes[0].colorbar(im)
for cax in grid.cbar_axes:
cax.toggle_label(False)
# This affects all axes as share_all = True.
grid.axes_llc.set_xticks([-2, 0, 2])
grid.axes_llc.set_yticks([-2, 0, 2])
def demo_grid_with_each_cbar(fig):
"""
A grid of 2x2 images. Each image has its own colorbar.
"""
grid = ImageGrid(fig, 143, # similar to subplot(143)
nrows_ncols=(2, 2),
axes_pad=0.1,
label_mode="1",
share_all=True,
cbar_location="top",
cbar_mode="each",
cbar_size="7%",
cbar_pad="2%",
)
Z, extent = get_demo_image()
for ax, cax in zip(grid, grid.cbar_axes):
im = ax.imshow(Z, extent=extent, interpolation="nearest")
cax.colorbar(im)
cax.toggle_label(False)
# This affects all axes because we set share_all = True.
grid.axes_llc.set_xticks([-2, 0, 2])
grid.axes_llc.set_yticks([-2, 0, 2])
def demo_grid_with_each_cbar_labelled(fig):
"""
A grid of 2x2 images. Each image has its own colorbar.
"""
grid = ImageGrid(fig, 144, # similar to subplot(144)
nrows_ncols=(2, 2),
axes_pad=(0.45, 0.15),
label_mode="1",
share_all=True,
cbar_location="right",
cbar_mode="each",
cbar_size="7%",
cbar_pad="2%",
)
Z, extent = get_demo_image()
# Use a different colorbar range every time
limits = ((0, 1), (-2, 2), (-1.7, 1.4), (-1.5, 1))
for ax, cax, vlim in zip(grid, grid.cbar_axes, limits):
im = ax.imshow(Z, extent=extent, interpolation="nearest",
vmin=vlim[0], vmax=vlim[1])
cb = cax.colorbar(im)
cb.set_ticks((vlim[0], vlim[1]))
# This affects all axes because we set share_all = True.
grid.axes_llc.set_xticks([-2, 0, 2])
grid.axes_llc.set_yticks([-2, 0, 2])
fig = plt.figure(figsize=(10.5, 2.5))
fig.subplots_adjust(left=0.05, right=0.95)
demo_simple_grid(fig)
demo_grid_with_single_cbar(fig)
demo_grid_with_each_cbar(fig)
demo_grid_with_each_cbar_labelled(fig)
plt.show()
```
|
github_jupyter
|
### Generator States
Let's look at a simple generator function:
```
def gen(s):
for c in s:
yield c
```
We create a generator object by calling the generator function:
```
g = gen('abc')
```
At this point the generator object is **created**, but we have not actually started running it. To do so, we call `next()`, which then starts running the function body until the first `yield` is encountered:
```
next(g)
```
Now the generator is **suspended**, waiting for us to call next again:
```
next(g)
```
Every time we call `next`, the generator function runs (it is in a **running** state) until the next `yield` is encountered, or until no more values are yielded and the function actually returns:
```
next(g)
next(g)
```
Once we exhaust the generator, we get a `StopIteration` exception, and we can think of the generator as being **closed**.
As we can see, a generator can be in one of four states:
* created
* running
* suspended
* closed
We can actually request the state of a generator programmatically by using the `inspect` module's `getgeneratorstate()` function:
```
from inspect import getgeneratorstate
g = gen('abc')
getgeneratorstate(g)
```
We can start running the generator by calling `next`:
```
next(g)
```
And the state is now:
```
getgeneratorstate(g)
```
Once we exhaust the generator:
```
next(g), next(g), next(g)
```
The generator is now in a closed state:
```
getgeneratorstate(g)
```
Now we haven't seen the running state - to do that we just need to print the state from inside the generator - but to do that we need to have a reference to the generator object itself. This is not that easy to do, so I'm going to cheat and assume that the generator object will be referenced by a global variable `global_gen`:
```
def gen(s):
for c in s:
print(getgeneratorstate(global_gen))
yield c
global_gen = gen('abc')
next(global_gen)
```
So a generator can be in these four very distinct states.
When the generator is created, it is not in a running or suspended state - it is simply in a **created** state.
We have to kick-off, or prime, the generator by calling `next` on it.
After the generator has yielded a value, it is in a **suspended** state.
Finally, once the generator **returns** (not yields), i.e. the StopIteration is raised, the generator is **closed**.
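To tie this together, here is a small consolidated sketch (not from the original notebook, so it redefines `gen` without the print inside) that walks a fresh generator through the created, suspended and closed states; the running state can only be observed from inside the generator, as shown above:
```
from inspect import getgeneratorstate

def gen(s):
    for c in s:
        yield c

g = gen('ab')
print(getgeneratorstate(g))   # GEN_CREATED
next(g)
print(getgeneratorstate(g))   # GEN_SUSPENDED
next(g)
try:
    next(g)                   # exhausts the generator -> StopIteration
except StopIteration:
    pass
print(getgeneratorstate(g))   # GEN_CLOSED
```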
It is also really important to understand that when a `yield` is encountered, the generator is suspended **exactly** at that point, but not before it has evaluated the expression to the right of the `yield` statement, so it can produce that value as the return value of the `next()` call.
To see this, let's write a simple function and a generator function as follows:
```
def square(i):
print(f'squaring {i}')
return i ** 2
def squares(n):
for i in range(n):
yield square(i)
print ('right after yield')
sq = squares(5)
next(sq)
```
As you can see, `square(i)` was evaluated, **then** the value was yielded, and the generator was suspended exactly at the point the `yield` statement was encountered:
```
next(sq)
```
As you can see, only now does the `right after yield` string get printed from our generator.
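As a side note (not covered in the original text), a generator does not have to be exhausted to reach the closed state: calling its `close()` method raises `GeneratorExit` at the point where it is suspended and moves it to `GEN_CLOSED`. A quick sketch, assuming the `squares` generator and `getgeneratorstate` from above:
```
sq = squares(5)
next(sq)                      # prime it; the generator is now GEN_SUSPENDED
sq.close()                    # raises GeneratorExit inside the generator
print(getgeneratorstate(sq))  # GEN_CLOSED
```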
|
github_jupyter
|
```
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('.')
import utils
def f(x):
return x * np.cos(np.pi*x)
utils.set_fig_size(mpl, (4.5, 2.5))
x = np.arange(-1.0, 2.0, 0.1)
fig = plt.figure()
subplot = fig.add_subplot(111)
subplot.annotate('local minimum', xy=(-0.3, -0.25), xytext=(-0.77, -1.0), arrowprops=dict(facecolor='black', shrink=0.05))
subplot.annotate('global minimum', xy=(1.1, -0.9), xytext=(0.6, 0.8), arrowprops=dict(facecolor='black', shrink=0.05))
plt.plot(x, f(x))
plt.xlabel('x')
plt.ylabel('f(x)')
plt.show()
x = np.arange(-2.0, 2.0, 0.1)
fig = plt.figure()
subplt = fig.add_subplot(111)
subplt.annotate('saddle point', xy=(0, -0.2), xytext=(-0.52, -5.0),
arrowprops=dict(facecolor='black', shrink=0.05))
plt.plot(x, x**3)
plt.xlabel('x')
plt.ylabel('f(x)')
plt.show()
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x, y = np.mgrid[-1:1:31j, -1:1:31j]
z = x**2 - y**2
ax.plot_surface(x, y, z, **{'rstride':1, 'cstride':1, 'cmap':'Greens_r'})
ax.plot([0], [0], [0], 'ro')
ax.view_init(azim=50, elev=20)
plt.xticks([-1, -0.5, 0, 0.5, 1])
plt.yticks([-1, -0.5, 0, 0.5, 1])
ax.set_zticks([-1, -0.5, 0, 0.5, 1])
plt.xlabel('x')
plt.ylabel('y')
plt.show()
# mini-batch SGD
def sgd(params, lr, batch_size):
for param in params:
param[:] = param - lr * param.grad/batch_size
%config InlineBackend.figure_format='retina'
%matplotlib inline
import mxnet as mx
from mxnet import autograd
from mxnet import gluon
from mxnet import nd
import numpy as np
import random
import sys
sys.path.append('..')
import utils
# Generate the dataset.
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
X = nd.random_normal(scale=1, shape=(num_examples, num_inputs))
y = true_w[0] * X[:, 0] + true_w[1] * X[:, 1] + true_b
y += .01 * nd.random_normal(scale=1, shape=y.shape)
def init_params():
w = nd.random_normal(scale=1, shape=(num_inputs, 1))
b = nd.zeros(shape=(1,))
params = [w, b]
for param in params:
param.attach_grad()
return params
def linreg(X, w, b):
return nd.dot(X, w) + b
def squared_loss(yhat, y):
return (yhat - y.reshape(yhat.shape))**2/2
def data_iter(batch_size, num_examples, X, y):
idx = list(range(num_examples))
random.shuffle(idx)
for i in range(0, num_examples, batch_size):
j = nd.array(idx[i:min(i+batch_size, num_examples)])
yield X.take(j), y.take(j)
net = linreg
squared_loss = squared_loss
# With mini-batch SGD, once the epoch number exceeds 2, lr is multiplied by 0.1 at the start of each epoch (learning rate decay)
def optimize(batch_size, lr, num_epochs, log_interval, decay_epoch):
w, b = init_params()
y_vals = [squared_loss(net(X, w, b), y).mean().asnumpy()]
print('batch_size', batch_size)
for epoch in range(1, num_epochs+1):
        # Learning rate self-decay
if decay_epoch and epoch > decay_epoch:
lr *= 0.1
for batch_i, (features, label) in enumerate(data_iter(batch_size, num_examples, X, y)):
with autograd.record():
output = net(features, w, b)
loss = squared_loss(output, label)
loss.backward()
sgd([w, b], lr, batch_size)
if batch_i*batch_size % log_interval == 0:
y_vals.append(squared_loss(net(X, w, b), y).mean().asnumpy())
        print('epoch %d, learning rate %f, loss %.4e' %(epoch, lr, y_vals[-1]))
        # Reshape the output and convert it to a NumPy array for easier printing.
print('w', w.reshape((1, -1)).asnumpy(), 'b', b.asscalar(), '\n')
x_vals = np.linspace(0, num_epochs, len(y_vals), endpoint=True)
utils.semilogy(x_vals, y_vals, 'epoch', 'loss')
optimize(batch_size=2, lr=0.2, num_epochs=3, decay_epoch=2, log_interval=10)
```
|
github_jupyter
|
```
!pip install d2l==0.17.2
# implement several utility functions to facilitate data downloading
import hashlib
import os
import tarfile
import zipfile
import requests
DATA_HUB = dict()
DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'
# download function to download a dataset
def download(name, cache_dir=os.path.join('..', 'data')):
"""Download a file inserted into DATA_HUB, return the local filename."""
assert name in DATA_HUB, f"{name} does not exist in {DATA_HUB}."
url, sha1_hash = DATA_HUB[name]
os.makedirs(cache_dir, exist_ok=True)
fname = os.path.join(cache_dir, url.split('/')[-1])
if os.path.exists(fname):
sha1 = hashlib.sha1()
with open(fname, 'rb') as f:
while True:
data = f.read(1048576)
if not data:
break
sha1.update(data)
if sha1.hexdigest() == sha1_hash:
return fname # Hit cache
print(f'Downloading {fname} from {url}...')
r = requests.get(url, stream=True, verify=True)
with open(fname, 'wb') as f:
f.write(r.content)
return fname
# implement two additional utility functions: one is to download and extract a zip or tar file and the other to download all the datasets used in this book from DATA_HUB into the cache directory
def download_extract(name, folder=None):
"""Download and extract a zip/tar file."""
fname = download(name)
base_dir = os.path.dirname(fname)
data_dir, ext = os.path.splitext(fname)
if ext == '.zip':
fp = zipfile.ZipFile(fname, 'r')
elif ext in ('.tar', '.gz'):
fp = tarfile.open(fname, 'r')
else:
assert False, 'Only zip/tar files can be extracted.'
fp.extractall(base_dir)
return os.path.join(base_dir, folder) if folder else data_dir
def download_all():
"""Download all files in the DATA_HUB."""
for name in DATA_HUB:
download(name)
```
Accessing and Reading the Dataset
```
# If pandas is not installed, please uncomment the following line:
# !pip install pandas
%matplotlib inline
import numpy as np
import pandas as pd
import tensorflow as tf
from d2l import tensorflow as d2l
# download and cache the Kaggle housing dataset
DATA_HUB['kaggle_house_train'] = (
DATA_URL + 'kaggle_house_pred_train.csv',
'585e9cc93e70b39160e7921475f9bcd7d31219ce')
DATA_HUB['kaggle_house_test'] = (
DATA_URL + 'kaggle_house_pred_test.csv',
'fa19780a7b011d9b009e8bff8e99922a8ee2eb90')
# use pandas to load the two csv files containing training and test data respectively
train_data = pd.read_csv(download('kaggle_house_train'))
test_data = pd.read_csv(download('kaggle_house_test'))
# training dataset includes 1460 examples, 80 features, and 1 label, while the test data contains 1459 examples and 80 features
print(train_data.shape)
print(test_data.shape)
# take a look at the first four and last two features as well as the label (SalePrice)
print(train_data.iloc[0:4, [0, 1, 2, 3, -3, -2, -1]])
all_features = pd.concat((train_data.iloc[:, 1:-1], test_data.iloc[:, 1:]))
```
Data Preprocessing
```
# If test data were inaccessible, mean and standard deviation could be
# calculated from training data
numeric_features = all_features.dtypes[all_features.dtypes != 'object'].index
all_features[numeric_features] = all_features[numeric_features].apply(
lambda x: (x - x.mean()) / (x.std()))
# After standardizing the data all means vanish, hence we can set missing
# values to 0
all_features[numeric_features] = all_features[numeric_features].fillna(0)
# `dummy_na=True` considers "na" (missing value) as a valid feature value, and
# creates an indicator feature for it
all_features = pd.get_dummies(all_features, dummy_na=True)
all_features.shape
# extract the NumPy format from the pandas format and convert it into the tensor
n_train = train_data.shape[0]
train_features = tf.constant(all_features[:n_train].values, dtype=tf.float32)
test_features = tf.constant(all_features[n_train:].values, dtype=tf.float32)
train_labels = tf.constant(
train_data.SalePrice.values.reshape(-1, 1), dtype=tf.float32)
```
Training
```
loss = tf.keras.losses.MeanSquaredError()
def get_net():
net = tf.keras.models.Sequential()
net.add(tf.keras.layers.Dense(
1, kernel_regularizer=tf.keras.regularizers.l2(weight_decay)))
return net
def log_rmse(y_true, y_pred):
    # To further stabilize the value when the logarithm is taken, set
    # values less than 1 to 1
clipped_preds = tf.clip_by_value(y_pred, 1, float('inf'))
return tf.sqrt(tf.reduce_mean(loss(
tf.math.log(y_true), tf.math.log(clipped_preds))))
def train(net, train_features, train_labels, test_features, test_labels,
num_epochs, learning_rate, weight_decay, batch_size):
train_ls, test_ls = [], []
train_iter = d2l.load_array((train_features, train_labels), batch_size)
# The Adam optimization algorithm is used here
optimizer = tf.keras.optimizers.Adam(learning_rate)
net.compile(loss=loss, optimizer=optimizer)
for epoch in range(num_epochs):
for X, y in train_iter:
with tf.GradientTape() as tape:
y_hat = net(X)
l = loss(y, y_hat)
params = net.trainable_variables
grads = tape.gradient(l, params)
optimizer.apply_gradients(zip(grads, params))
train_ls.append(log_rmse(train_labels, net(train_features)))
if test_labels is not None:
test_ls.append(log_rmse(test_labels, net(test_features)))
return train_ls, test_ls
```
K-Fold Cross-Validation
```
# function that returns the ith fold of the data in a K-fold cross-validation procedure
def get_k_fold_data(k, i, X, y):
assert k > 1
fold_size = X.shape[0] // k
X_train, y_train = None, None
for j in range(k):
idx = slice(j * fold_size, (j + 1) * fold_size)
X_part, y_part = X[idx, :], y[idx]
if j == i:
X_valid, y_valid = X_part, y_part
elif X_train is None:
X_train, y_train = X_part, y_part
else:
X_train = tf.concat([X_train, X_part], 0)
y_train = tf.concat([y_train, y_part], 0)
return X_train, y_train, X_valid, y_valid
# The training and verification error averages are returned when we train K times in the K-fold cross-validation
def k_fold(k, X_train, y_train, num_epochs, learning_rate, weight_decay,
batch_size):
train_l_sum, valid_l_sum = 0, 0
for i in range(k):
data = get_k_fold_data(k, i, X_train, y_train)
net = get_net()
train_ls, valid_ls = train(net, *data, num_epochs, learning_rate,
weight_decay, batch_size)
train_l_sum += train_ls[-1]
valid_l_sum += valid_ls[-1]
if i == 0:
d2l.plot(list(range(1, num_epochs + 1)), [train_ls, valid_ls],
xlabel='epoch', ylabel='rmse', xlim=[1, num_epochs],
legend=['train', 'valid'], yscale='log')
print(f'fold {i + 1}, train log rmse {float(train_ls[-1]):f}, '
f'valid log rmse {float(valid_ls[-1]):f}')
return train_l_sum / k, valid_l_sum / k
```
Model Selection
```
!pip uninstall -y matplotlib
!pip install --upgrade matplotlib
k, num_epochs, lr, weight_decay, batch_size = 5, 100, 5, 0, 64
train_l, valid_l = k_fold(k, train_features, train_labels, num_epochs, lr,
weight_decay, batch_size)
print(f'{k}-fold validation: avg train log rmse: {float(train_l):f}, '
f'avg valid log rmse: {float(valid_l):f}')
```
|
github_jupyter
|
# Filling in Missing Values in Tabular Records
You can select Run->Run All Cells from the menu to run all cells in Studio (or Cell->Run All in a SageMaker Notebook Instance).
## Introduction
Missing data values are common due to omissions during manual entry or optional input. Simple data imputation such as using the median/mode/average may not be satisfactory. When there are many features, we can sometimes train a model to use the existing features to predict the desired feature.
This solution provides an end-to-end example that takes a tabular data set with a target column, trains and deploys an endpoint, and calls that endpoint to make predictions.
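As a purely conceptual illustration of that idea (independent of the SageMaker workflow below, with hypothetical column names and assuming the feature columns are already numeric), model-based imputation can be sketched with scikit-learn as follows:
```
import pandas as pd
from sklearn.ensemble import RandomForestClassifier

def impute_with_model(df, target, feature_cols):
    """Fill missing values of `target` using a model trained on rows where it is present."""
    known = df[df[target].notna()]
    missing_mask = df[target].isna()
    model = RandomForestClassifier()
    model.fit(known[feature_cols], known[target])
    if missing_mask.any():
        df.loc[missing_mask, target] = model.predict(df.loc[missing_mask, feature_cols])
    return df
```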
## Architecture
As part of the solution, the following services are used:
* Amazon S3: Used to store datasets.
* Amazon SageMaker Notebook: Used to preprocess the data and to train the deep learning model.
* Amazon SageMaker Endpoint: Used to deploy the trained model.

## Data Set
We will use public data from the City of Cincinnati Public Services describing Fleet Inventory. We will train a model to predict missing values of a 'target' column based on the other columns.
Please see:
https://www.cincinnati-oh.gov/public-services/about-public-services/fleet-services/
https://data.cincinnati-oh.gov/Thriving-Neighborhoods/Fleet-Inventory/m8ba-xmjz
## Acknowledgements
AutoPilot code based on
https://github.com/aws/amazon-sagemaker-examples/blob/master/autopilot/sagemaker_autopilot_direct_marketing.ipynb
```
# Replace these with your train/test CSV data and target columns.
# If left empty, the sample data set will be used.
data_location = '' # Ex. s3://your_bucket/your_file.csv
target = '' # Specify target column name
if data_location == '':
# Use sample dataset.
dataset_file = 'data/dataset.csv'
target = 'ASSET_TYPE'
else:
# Download custom dataset.
!aws s3 cp $data_location data/custom_dataset.csv
print('Downloaded custom dataset')
dataset_file = 'data/custom_dataset.csv'
```
## Inspect the Data
```
import pandas as pd
data = pd.read_csv(dataset_file)
data
```
## Preprocess Data
Some of the entries in the target column are null. We will remove those entries for training/testing.
```
import numpy as np
def remove_null_rows(data, target):
idx = data[target].notna()
return data.loc[idx]
def split_train_test(data, p=.9):
    idx = np.random.choice([True, False], replace=True, size=len(data), p=[p, 1 - p])
train_df = data.iloc[idx]
test_df = data.iloc[[not i for i in idx]]
return train_df, test_df
non_null_data = remove_null_rows(data, target)
train, test = split_train_test(non_null_data)
train_file = 'data/train.csv'
test_file = 'data/test.csv'
train.to_csv(train_file, index=False, header=True)
test.to_csv(test_file, index=False, header=True)
```
## Store Processed Data on S3
Now that we have our data in files, we store this data to S3 so we can use SageMaker AutoPilot.
```
import sagemaker
from sagemaker.s3 import S3Uploader
import json
with open('stack_outputs.json') as f:
sagemaker_configs = json.load(f)
s3_bucket = sagemaker_configs['S3Bucket']
train_data_s3_path = S3Uploader.upload(train_file, 's3://{}/data'.format(s3_bucket))
print('Train data uploaded to: ' + train_data_s3_path)
test_data_s3_path = S3Uploader.upload(test_file, 's3://{}/data'.format(s3_bucket))
print('Test data uploaded to: ' + test_data_s3_path)
```
### Configure AutoPilot
For the purposes of a demo, we will use only 2 candidates. Remove this parameter to run AutoPilot with its defaults (note: for this data set a full run will take several hours).
```
input_data_config = [{
'DataSource': {
'S3DataSource': {
'S3DataType': 'S3Prefix',
'S3Uri': 's3://{}/data/train'.format(s3_bucket)
}
},
'TargetAttributeName': target
}]
output_data_config = {
'S3OutputPath': 's3://{}/data/output'.format(s3_bucket)
}
automl_job_config ={
'CompletionCriteria': {
'MaxCandidates': 2 # Remove this option for the default run.
}
}
import boto3
from time import gmtime, strftime, sleep
role = sagemaker_configs['SageMakerIamRole']
solution_prefix = sagemaker_configs['SolutionPrefix']
auto_ml_job_name = solution_prefix + strftime('%d-%H-%M-%S', gmtime())
print('AutoMLJobName: ' + auto_ml_job_name)
sm = boto3.Session().client(service_name='sagemaker',region_name='us-west-2')
sm.create_auto_ml_job(AutoMLJobName=auto_ml_job_name,
InputDataConfig=input_data_config,
OutputDataConfig=output_data_config,
AutoMLJobConfig=automl_job_config,
RoleArn=role)
# This will take approximately 20 minutes to run.
secondary_status = ''
while True:
describe_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name)
job_run_status = describe_response['AutoMLJobStatus']
if job_run_status in ('Failed', 'Completed', 'Stopped'):
print('\n{}: {}'.format(describe_response['AutoMLJobSecondaryStatus'], job_run_status))
break
if secondary_status == describe_response['AutoMLJobSecondaryStatus']:
print('.', end='')
else:
secondary_status = describe_response['AutoMLJobSecondaryStatus']
print('\n{}: {}'.format(secondary_status, job_run_status), end='')
sleep(60)
best_candidate = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name)['BestCandidate']
best_candidate_name = best_candidate['CandidateName']
print(best_candidate)
print('\n')
print("CandidateName: " + best_candidate_name)
print("FinalAutoMLJobObjectiveMetricName: " + best_candidate['FinalAutoMLJobObjectiveMetric']['MetricName'])
print("FinalAutoMLJobObjectiveMetricValue: " + str(best_candidate['FinalAutoMLJobObjectiveMetric']['Value']))
model_name = sagemaker_configs['SageMakerModelName']
model = sm.create_model(Containers=best_candidate['InferenceContainers'],
ModelName=model_name,
ExecutionRoleArn=role)
```
## Deploy an Endpoint
```
print("Building endpoint with model {}".format(model))
endpoint_config_name = sagemaker_configs['SageMakerEndpointName'] + '-config'
create_endpoint_config_response = sm.create_endpoint_config(
EndpointConfigName = endpoint_config_name,
ProductionVariants=[{
'InstanceType':'ml.m5.xlarge',
'InitialVariantWeight':1,
'InitialInstanceCount':1,
'ModelName':model_name,
'VariantName':'AllTraffic'}])
endpoint_name = sagemaker_configs['SageMakerEndpointName']
create_endpoint_response = sm.create_endpoint(
EndpointName=endpoint_name,
EndpointConfigName=endpoint_config_name,
)
print(create_endpoint_response['EndpointArn'])
resp = sm.describe_endpoint(EndpointName=endpoint_name)
status = resp['EndpointStatus']
print("Status: " + status)
import time
print('Creating Endpoint... this may take several minutes')
while status=='Creating':
resp = sm.describe_endpoint(EndpointName=endpoint_name)
status = resp['EndpointStatus']
print('.', end='')
time.sleep(15)
print("\nStatus: " + status)
```
## Test the Endpoint
```
runtime_client = boto3.client('runtime.sagemaker')
test_input = test.drop(columns=[target])[0:10]
test_input_csv = test_input.to_csv(index=False, header=False).split('\n')
test_labels = test[target][0:10]
for i, (single_test, single_label) in enumerate(zip(test_input_csv, test_labels)):
print('=== Test {} ===\nInput: {}\n'.format(i, single_test))
response = runtime_client.invoke_endpoint(EndpointName = endpoint_name,
ContentType = 'text/csv',
Body = single_test)
result = response['Body'].read().decode('ascii')
print('Predicted label is {}\nCorrect label is {}\n'.format(result.rstrip(), single_label.rstrip()))
```
## Clean up
Stack deletion will clean up all created resources including S3 buckets, Endpoint configurations, Endpoints and Models.
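If you prefer to remove the SageMaker resources by hand instead of deleting the stack, here is a minimal sketch using the same `sm` client and resource names defined above:
```
# Manual cleanup of the resources created in this notebook.
sm.delete_endpoint(EndpointName=endpoint_name)
sm.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
sm.delete_model(ModelName=model_name)
```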
|
github_jupyter
|
<a href="https://colab.research.google.com/github/mrklees/pgmpy/blob/feature%2Fcausalmodel/examples/Causal_Games.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Causal Games
Causal Inference is a new feature for pgmpy, so I wanted to develop a few examples which show off the features that we're developing!
This particular notebook walks through the 5 games used as examples for building intuition about backdoor paths in *The Book of Why* by Judea Pearl. I have consistently been using them to test different implementations of backdoor adjustment from different libraries and include them as unit tests in pgmpy, so I wanted to walk through them and a few other related games as a resource both to understand the implementation of CausalInference in pgmpy and to develop some useful intuitions about backdoor paths.
## Objective of the Games
For each game we get a causal graph, and our goal is to identify the set of deconfounders (often denoted $Z$) which will close all backdoor paths from nodes $X$ to $Y$. For the time being, I'll assume that you're familiar with the concept of backdoor paths, though I may expand this portion to explain it.
```
import sys
!pip3 install -q daft
import matplotlib.pyplot as plt
%matplotlib inline
import daft
from daft import PGM
# We can now import the development version of pgmpy
from pgmpy.models.BayesianModel import BayesianModel
from pgmpy.inference.CausalInference import CausalInference
def convert_pgm_to_pgmpy(pgm):
"""Takes a Daft PGM object and converts it to a pgmpy BayesianModel"""
edges = [(edge.node1.name, edge.node2.name) for edge in pgm._edges]
model = BayesianModel(edges)
return model
#@title # Game 1
#@markdown While this is a "trivial" example, many statisticians would consider including either or both A and B in their models "just for good measure". Notice though how controlling for A would close off the path of causal information from X to Y, actually *impeding* your effort to measure that effect.
pgm = PGM(shape=[4, 3])
pgm.add_node(daft.Node('X', r"X", 1, 2))
pgm.add_node(daft.Node('Y', r"Y", 3, 2))
pgm.add_node(daft.Node('A', r"A", 2, 2))
pgm.add_node(daft.Node('B', r"B", 2, 1))
pgm.add_edge('X', 'A')
pgm.add_edge('A', 'Y')
pgm.add_edge('A', 'B')
pgm.render()
plt.show()
#@markdown Notice how there are no nodes with arrows pointing into X. Said another way, X has no parents. Therefore, there can't be any backdoor paths confounding X and Y. pgmpy will confirm this in the following way:
game1 = convert_pgm_to_pgmpy(pgm)
inference1 = CausalInference(game1)
print(f"Are there are active backdoor paths? {not inference1.is_valid_backdoor_adjustment_set('X', 'Y')}")
adj_sets = inference1.get_all_backdoor_adjustment_sets("X", "Y")
print(f"If so, what's the possible backdoor adjustment sets? {adj_sets}")
#@title # Game 2
#@markdown This graph looks harder, but is actually also trivial to solve. The key is noticing that the one backdoor path, which goes from X <- A -> B <- D -> E -> Y, has a collider at B (or a 'V structure'), and therefore the backdoor path is closed.
pgm = PGM(shape=[4, 4])
pgm.add_node(daft.Node('X', r"X", 1, 1))
pgm.add_node(daft.Node('Y', r"Y", 3, 1))
pgm.add_node(daft.Node('A', r"A", 1, 3))
pgm.add_node(daft.Node('B', r"B", 2, 3))
pgm.add_node(daft.Node('C', r"C", 3, 3))
pgm.add_node(daft.Node('D', r"D", 2, 2))
pgm.add_node(daft.Node('E', r"E", 2, 1))
pgm.add_edge('X', 'E')
pgm.add_edge('A', 'X')
pgm.add_edge('A', 'B')
pgm.add_edge('B', 'C')
pgm.add_edge('D', 'B')
pgm.add_edge('D', 'E')
pgm.add_edge('E', 'Y')
pgm.render()
plt.show()
graph = convert_pgm_to_pgmpy(pgm)
inference = CausalInference(graph)
print(f"Are there are active backdoor paths? {not inference.is_valid_backdoor_adjustment_set('X', 'Y')}")
adj_sets = inference.get_all_backdoor_adjustment_sets("X", "Y")
print(f"If so, what's the possible backdoor adjustment sets? {adj_sets}")
#@title # Game 3
#@markdown This game actually requires some action. Notice the backdoor path X <- B -> Y. This is a confounding pattern and is one of the clearest signs that we'll need to control for something, in this case B.
pgm = PGM(shape=[4, 4])
pgm.add_node(daft.Node('X', r"X", 1, 1))
pgm.add_node(daft.Node('Y', r"Y", 3, 1))
pgm.add_node(daft.Node('A', r"A", 2, 1.75))
pgm.add_node(daft.Node('B', r"B", 2, 3))
pgm.add_edge('X', 'Y')
pgm.add_edge('X', 'A')
pgm.add_edge('B', 'A')
pgm.add_edge('B', 'X')
pgm.add_edge('B', 'Y')
pgm.render()
plt.show()
graph = convert_pgm_to_pgmpy(pgm)
inference = CausalInference(graph)
print(f"Are there are active backdoor paths? {not inference.is_valid_backdoor_adjustment_set('X', 'Y')}")
adj_sets = inference.get_all_backdoor_adjustment_sets("X", "Y")
print(f"If so, what's the possible backdoor adjustment sets? {adj_sets}")
#@title # Game 4
#@markdown Pearl named this particular configuration "M Bias", not only because of its shape, but also because of the common practice of statisticians to want to control for B in many situations. However, notice how in this configuration X and Y start out as *not confounded* and how by controlling for B we would actually introduce confounding by opening the path at the collider, B.
pgm = PGM(shape=[4, 4])
pgm.add_node(daft.Node('X', r"X", 1, 1))
pgm.add_node(daft.Node('Y', r"Y", 3, 1))
pgm.add_node(daft.Node('A', r"A", 1, 3))
pgm.add_node(daft.Node('B', r"B", 2, 2))
pgm.add_node(daft.Node('C', r"C", 3, 3))
pgm.add_edge('A', 'X')
pgm.add_edge('A', 'B')
pgm.add_edge('C', 'B')
pgm.add_edge('C', 'Y')
pgm.render()
plt.show()
graph = convert_pgm_to_pgmpy(pgm)
inference = CausalInference(graph)
print(f"Are there are active backdoor paths? {not inference.is_valid_backdoor_adjustment_set('X', 'Y')}")
adj_sets = inference.get_all_backdoor_adjustment_sets("X", "Y")
print(f"If so, what's the possible backdoor adjustment sets? {adj_sets}")
#@title # Game 5
#@markdown This last game in The Book of Why is the most complex. In this case we have two backdoor paths, one going through A and the other through B, and it's important to notice that if we only control for B, the path X <- A -> B <- C -> Y (which starts out closed because B is a collider) is actually opened. Therefore we have to either close both A and B or, as astute observers will notice, we can also just close C and completely close both backdoor paths. pgmpy will nicely confirm these results for us.
pgm = PGM(shape=[4, 4])
pgm.add_node(daft.Node('X', r"X", 1, 1))
pgm.add_node(daft.Node('Y', r"Y", 3, 1))
pgm.add_node(daft.Node('A', r"A", 1, 3))
pgm.add_node(daft.Node('B', r"B", 2, 2))
pgm.add_node(daft.Node('C', r"C", 3, 3))
pgm.add_edge('A', 'X')
pgm.add_edge('A', 'B')
pgm.add_edge('C', 'B')
pgm.add_edge('C', 'Y')
pgm.add_edge("X", "Y")
pgm.add_edge("B", "X")
pgm.render()
plt.show()
graph = convert_pgm_to_pgmpy(pgm)
inference = CausalInference(graph)
print(f"Are there are active backdoor paths? {not inference.is_valid_backdoor_adjustment_set('X', 'Y')}")
adj_sets = inference.get_all_backdoor_adjustment_sets("X", "Y")
print(f"If so, what's the possible backdoor adjustment sets? {adj_sets}")
#@title # Game 6
#@markdown These games are no longer drawn from The Book of Why; they were either drawn from another source (which I will reference) or developed to try to induce a specific bug.
#@markdown This example is drawn from Causality by Pearl, p. 80. It is kind of interesting because there are many possible combinations of nodes which will close the two backdoor paths which exist in this graph. It turns out that D plus any other node in {A, B, C, E} will deconfound X and Y.
pgm = PGM(shape=[4, 4])
pgm.add_node(daft.Node('X', r"X", 1, 1))
pgm.add_node(daft.Node('Y', r"Y", 3, 1))
pgm.add_node(daft.Node('A', r"A", 1, 3))
pgm.add_node(daft.Node('B', r"B", 3, 3))
pgm.add_node(daft.Node('C', r"C", 1, 2))
pgm.add_node(daft.Node('D', r"D", 2, 2))
pgm.add_node(daft.Node('E', r"E", 3, 2))
pgm.add_node(daft.Node('F', r"F", 2, 1))
pgm.add_edge('X', 'F')
pgm.add_edge('F', 'Y')
pgm.add_edge('C', 'X')
pgm.add_edge('A', 'C')
pgm.add_edge('A', 'D')
pgm.add_edge('D', 'X')
pgm.add_edge('D', 'Y')
pgm.add_edge('B', 'D')
pgm.add_edge('B', 'E')
pgm.add_edge('E', 'Y')
pgm.render()
plt.show()
graph = convert_pgm_to_pgmpy(pgm)
inference = CausalInference(graph)
print(f"Are there are active backdoor paths? {not inference.is_valid_backdoor_adjustment_set('X', 'Y')}")
bd_adj_sets = inference.get_all_backdoor_adjustment_sets("X", "Y")
print(f"If so, what's the possible backdoor adjustment sets? {bd_adj_sets}")
fd_adj_sets = inference.get_all_frontdoor_adjustment_sets("X", "Y")
print(f"Ehat's the possible front adjustment sets? {fd_adj_sets}")
#@title # Game 7
#@markdown This game tests the front door adjustment. B is taken to be unobserved, and therefore we cannot close the backdoor path X <- B -> Y.
pgm = PGM(shape=[4, 3])
pgm.add_node(daft.Node('X', r"X", 1, 1))
pgm.add_node(daft.Node('Y', r"Y", 3, 1))
pgm.add_node(daft.Node('A', r"A", 2, 1))
pgm.add_node(daft.Node('B', r"B", 2, 2))
pgm.add_edge('X', 'A')
pgm.add_edge('A', 'Y')
pgm.add_edge('B', 'X')
pgm.add_edge('B', 'Y')
pgm.render()
plt.show()
graph = convert_pgm_to_pgmpy(pgm)
inference = CausalInference(graph)
print(f"Are there are active backdoor paths? {not inference.is_valid_backdoor_adjustment_set('X', 'Y')}")
bd_adj_sets = inference.get_all_backdoor_adjustment_sets("X", "Y")
print(f"If so, what's the possible backdoor adjustment sets? {bd_adj_sets}")
fd_adj_sets = inference.get_all_frontdoor_adjustment_sets("X", "Y")
print(f"Ehat's the possible front adjustment sets? {fd_adj_sets}")
```
|
github_jupyter
|
# Bounding Box Visualizer
```
try:
import cv2
except ImportError:
cv2 = None
COLORS = [
"#6793be", "#990000", "#00ff00", "#ffbcc9", "#ffb9c7", "#fdc6d1",
"#fdc9d3", "#6793be", "#73a4d4", "#9abde0", "#9abde0", "#8fff8f", "#ffcfd8", "#808080", "#808080",
"#ffba00", "#6699ff", "#009933", "#1c1c1c", "#08375f", "#116ebf", "#e61d35", "#106bff", "#8f8fff",
"#8fff8f", "#dbdbff", "#dbffdb", "#dbffff", "#ffdbdb", "#ffc2c2", "#ffa8a8", "#ff8f8f", "#e85e68",
"#123456", "#5cd38c", "#1d1f5f", "#4e4b04", "#495a5b", "#489d73", "#9d4872", "#d49ea6", "#ff0080",
"#6793be", "#990000", "#fececf", "#ffbcc9", "#ffb9c7", "#fdc6d1",
"#fdc9d3", "#6793be", "#73a4d4", "#9abde0", "#9abde0", "#8fff8f", "#ffcfd8", "#808080", "#808080",
"#ffba00", "#6699ff", "#009933", "#1c1c1c", "#08375f", "#116ebf", "#e61d35", "#106bff", "#8f8fff",
"#8fff8f", "#dbdbff", "#dbffdb", "#dbffff", "#ffdbdb", "#ffc2c2", "#ffa8a8", "#ff8f8f", "#e85e68",
"#123456", "#5cd38c", "#1d1f5f", "#4e4b04", "#495a5b", "#489d73", "#9d4872", "#d49ea6", "#ff0080"
]
def hex_to_rgb(color_hex):
color_hex = color_hex.lstrip('#')
color_rgb = tuple(int(color_hex[i:i+2], 16) for i in (0, 2, 4))
return color_rgb
def annotate_image(image, detection):
""" Annotate images with object detection results
# Arguments:
image: numpy array representing the image used for detection
detection: `DetectionResult` result from SKIL on the same image
# Return value:
annotated image as numpy array
"""
if cv2 is None:
raise Exception("OpenCV is not installed.")
objects = detection.get('objects')
if objects:
for detect in objects:
confs = detect.get('confidences')
max_conf = max(confs)
max_index = confs.index(max_conf)
classes = detect.get('predictedClasses')
max_class = classes[max_index]
class_number = detect.get('predictedClassNumbers')[max_index]
h = detect.get('height')
w = detect.get('width')
center_x = detect.get('centerX')
center_y = detect.get('centerY')
color_hex = COLORS[class_number]
            # hex_to_rgb returns (R, G, B); OpenCV expects BGR, so reverse the order
            r, g, b = hex_to_rgb(color_hex)
            color_rgb = (b, g, r)
# bounding box
xmin, ymin = int(center_x - w/2), int(center_y - h/2)
xmax, ymax = int(center_x + w/2), int(center_y + h/2)
upper = (xmin, ymin)
lower = (xmax, ymax)
cv2.rectangle(image, lower, upper, color_rgb, thickness=3)
# bounding box label: class_name: confidence
text = max_class + ": " + str(int(100*max(confs)))+"%"
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 0.7
# get text size
size = cv2.getTextSize(text, font, fontScale+0.1, thickness=2)
text_width = size[0][0]
text_height = size[0][1]
# text-box background
            cv2.rectangle(image,
                          (xmin-2, ymin),
                          (xmin+text_width, ymin-35), color_bgr, thickness=-1)
cv2.putText(image, text, (xmin, ymin-10), font, fontScale, color=0, thickness=2)
return image
import json
import matplotlib.pyplot as plt
%matplotlib inline
with open('detections/img-5.json') as FILE:
detections = json.load(FILE)
print(json.dumps(detections['objects'][0], indent=4))
image = annotate_image(cv2.imread("images/img-5.jpg"), detections)
cv2.imwrite('images/annotated.jpg', image)
plt.figure(figsize=(8,8))
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.show()
image.shape
for k, detection in enumerate(detections['objects']):
predicted = detection['predictedClasses'][0]
confidence = detection['confidences'][0]
print('{}: [{}, {:.5}]'.format(k+1, predicted, confidence))
len(COLORS)
```
|
github_jupyter
|
# Analyze Data Quality with SageMaker Processing Jobs and Spark
Typically a machine learning (ML) process consists of a few steps: first, gathering data with various ETL jobs; then pre-processing the data; featurizing the dataset by incorporating standard techniques or prior knowledge; and finally training an ML model using an algorithm.
Often, distributed data processing frameworks such as Spark are used to process and analyze data sets in order to detect data quality issues and prepare them for model training.
In this notebook we'll use Amazon SageMaker Processing with a library called [**Deequ**](https://github.com/awslabs/deequ), and leverage the power of Spark with a managed SageMaker Processing Job to run our data processing workloads.
Here are some great resources on Deequ:
* Blog Post: https://aws.amazon.com/blogs/big-data/test-data-quality-at-scale-with-deequ/
* Research Paper: https://assets.amazon.science/4a/75/57047bd343fabc46ec14b34cdb3b/towards-automated-data-quality-management-for-machine-learning.pdf


# Amazon Customer Reviews Dataset
https://s3.amazonaws.com/amazon-reviews-pds/readme.html
### Dataset Columns:
- `marketplace`: 2-letter country code (in this case all "US").
- `customer_id`: Random identifier that can be used to aggregate reviews written by a single author.
- `review_id`: A unique ID for the review.
- `product_id`: The Amazon Standard Identification Number (ASIN). `http://www.amazon.com/dp/<ASIN>` links to the product's detail page.
- `product_parent`: The parent of that ASIN. Multiple ASINs (color or format variations of the same product) can roll up into a single parent.
- `product_title`: Title description of the product.
- `product_category`: Broad product category that can be used to group reviews (in this case digital videos).
- `star_rating`: The review's rating (1 to 5 stars).
- `helpful_votes`: Number of helpful votes for the review.
- `total_votes`: Number of total votes the review received.
- `vine`: Was the review written as part of the [Vine](https://www.amazon.com/gp/vine/help) program?
- `verified_purchase`: Was the review from a verified purchase?
- `review_headline`: The title of the review itself.
- `review_body`: The text of the review.
- `review_date`: The date the review was written.
```
ingest_create_athena_table_tsv = False
%store -r ingest_create_athena_table_tsv
if not ingest_create_athena_table_tsv:
print('+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
print('[ERROR] YOU HAVE TO RUN THE NOTEBOOKS IN THE INGEST FOLDER FIRST.')
print('+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
else:
print('[OK]')
import sagemaker
sagemaker_session = sagemaker.Session()
role = sagemaker.get_execution_role()
bucket = sagemaker_session.default_bucket()
```
# Pull the Spark-Deequ Docker Image
```
public_image_uri='docker.io/datascienceonaws/spark-deequ:1.0.0'
!docker pull $public_image_uri
```
# Push the Image to a Private Docker Repo
```
private_docker_repo = 'spark-deequ'
private_docker_tag = '1.0.0'
import boto3
account_id = boto3.client('sts').get_caller_identity().get('Account')
region = boto3.session.Session().region_name
private_image_uri = '{}.dkr.ecr.{}.amazonaws.com/{}:{}'.format(account_id, region, private_docker_repo, private_docker_tag)
print(private_image_uri)
!docker tag $public_image_uri $private_image_uri
!$(aws ecr get-login --region $region --registry-ids $account_id --no-include-email)
```
# Ignore `spark-deequ does not exist` error below
```
!aws ecr describe-repositories --repository-names $private_docker_repo || aws ecr create-repository --repository-name $private_docker_repo
```
# Ignore ^^ `spark-deequ does not exist` ^^ error above
```
!docker push $private_image_uri
```
# Run the Analysis Job using a SageMaker Processing Job
Next, use the Amazon SageMaker Python SDK to submit a processing job. Use the Spark container that was just built with our Spark script.
# Review the Spark preprocessing script.
```
!pygmentize preprocess-deequ.py
!pygmentize preprocess-deequ.scala
from sagemaker.processing import ScriptProcessor
processor = ScriptProcessor(base_job_name='spark-amazon-reviews-analyzer',
image_uri=private_image_uri,
command=['/opt/program/submit'],
role=role,
instance_count=2, # instance_count needs to be > 1 or you will see the following error: "INFO yarn.Client: Application report for application_ (state: ACCEPTED)"
instance_type='ml.r5.2xlarge',
env={
'mode': 'jar',
'main_class': 'Main'
})
s3_input_data = 's3://{}/amazon-reviews-pds/tsv/'.format(bucket)
print(s3_input_data)
!aws s3 ls $s3_input_data
```
## Setup Output Data
```
from time import gmtime, strftime
timestamp_prefix = strftime("%Y-%m-%d-%H-%M-%S", gmtime())
output_prefix = 'amazon-reviews-spark-analyzer-{}'.format(timestamp_prefix)
processing_job_name = 'amazon-reviews-spark-analyzer-{}'.format(timestamp_prefix)
print('Processing job name: {}'.format(processing_job_name))
s3_output_analyze_data = 's3://{}/{}/output'.format(bucket, output_prefix)
print(s3_output_analyze_data)
```
## Start the Spark Processing Job
_Notes on Invoking from Lambda:_
* However, if we use the boto3 SDK (i.e. with a Lambda), we need to copy the `preprocess.py` file to S3 and specify everything explicitly, including --py-files, etc.
* We would need to do the following before invoking the Lambda:
!aws s3 cp preprocess.py s3://<location>/sagemaker/spark-preprocess-reviews-demo/code/preprocess.py
!aws s3 cp preprocess.py s3://<location>/sagemaker/spark-preprocess-reviews-demo/py_files/preprocess.py
* Then reference the s3://<location> above in the --py-files, etc.
* See Lambda example code in this same project for more details.
_Notes on not using ProcessingInput and Output:_
* Since Spark natively reads/writes from/to S3 using s3a://, we can avoid the copy required by ProcessingInput and ProcessingOutput (FullyReplicated or ShardedByS3Key) and just specify the S3 input and output buckets/prefixes.
* See https://github.com/awslabs/amazon-sagemaker-examples/issues/994 for issues related to using /opt/ml/processing/input/ and output/
* If we use ProcessingInput, the data will be copied to each node (which we don't want in this case since Spark already handles this)
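As a rough, untested sketch of the Lambda/boto3 path described above, a processing job can also be created directly with the low-level `create_processing_job` API. All names, ARNs, S3 URIs and the image URI below are placeholders, not values produced by this notebook:
```
import boto3

sm = boto3.client('sagemaker')

# Placeholder values -- substitute your own role ARN, image URI and S3 locations.
sm.create_processing_job(
    ProcessingJobName='spark-amazon-reviews-analyzer-from-lambda',
    RoleArn='arn:aws:iam::123456789012:role/YourSageMakerExecutionRole',
    AppSpecification={
        'ImageUri': '123456789012.dkr.ecr.us-east-1.amazonaws.com/spark-deequ:1.0.0',
        'ContainerEntrypoint': ['/opt/program/submit'],
        'ContainerArguments': ['s3_input_data', 's3://your-bucket/amazon-reviews-pds/tsv/',
                               's3_output_analyze_data', 's3://your-bucket/analyzer/output'],
    },
    Environment={'mode': 'jar', 'main_class': 'Main'},
    ProcessingResources={
        'ClusterConfig': {
            'InstanceCount': 2,
            'InstanceType': 'ml.r5.2xlarge',
            'VolumeSizeInGB': 30,
        }
    },
)
```
The higher-level `ScriptProcessor` call in the next cell achieves the same result from within this notebook.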
```
from sagemaker.processing import ProcessingOutput
processor.run(code='preprocess-deequ.py',
arguments=['s3_input_data', s3_input_data,
's3_output_analyze_data', s3_output_analyze_data,
],
# See https://github.com/aws/sagemaker-python-sdk/issues/1341
# for why we need to specify a null-output
outputs=[
ProcessingOutput(s3_upload_mode='EndOfJob',
output_name='null-output',
source='/opt/ml/processing/output')
],
logs=True,
wait=False
)
from IPython.core.display import display, HTML
processing_job_name = processor.jobs[-1].describe()['ProcessingJobName']
display(HTML('<b>Review <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/processing-jobs/{}">Processing Job</a></b>'.format(region, processing_job_name)))
from IPython.core.display import display, HTML
processing_job_name = processor.jobs[-1].describe()['ProcessingJobName']
display(HTML('<b>Review <a target="blank" href="https://console.aws.amazon.com/cloudwatch/home?region={}#logStream:group=/aws/sagemaker/ProcessingJobs;prefix={};streamFilter=typeLogStreamPrefix">CloudWatch Logs</a> After a Few Minutes</b>'.format(region, processing_job_name)))
from IPython.core.display import display, HTML
s3_job_output_prefix = output_prefix
display(HTML('<b>Review <a target="blank" href="https://s3.console.aws.amazon.com/s3/buckets/{}/{}/?region={}&tab=overview">S3 Output Data</a> After The Spark Job Has Completed</b>'.format(bucket, s3_job_output_prefix, region)))
```
# Monitor the Processing Job
```
running_processor = sagemaker.processing.ProcessingJob.from_processing_name(processing_job_name=processing_job_name,
sagemaker_session=sagemaker_session)
processing_job_description = running_processor.describe()
print(processing_job_description)
running_processor.wait()
```
# _Please Wait Until the ^^ Processing Job ^^ Completes Above._
# Inspect the Processed Output
## These are the quality checks on our dataset.
## _The next cells will not work properly until the job completes above._
```
!aws s3 ls --recursive $s3_output_analyze_data/
```
## Copy the Output from S3 to Local
* dataset-metrics/
* constraint-checks/
* success-metrics/
* constraint-suggestions/
```
!aws s3 cp --recursive $s3_output_analyze_data ./amazon-reviews-spark-analyzer/ --exclude="*" --include="*.csv"
```
## Analyze Constraint Checks
```
import glob
import pandas as pd
import os
def load_dataset(path, sep, header):
data = pd.concat([pd.read_csv(f, sep=sep, header=header) for f in glob.glob('{}/*.csv'.format(path))], ignore_index = True)
return data
df_constraint_checks = load_dataset(path='./amazon-reviews-spark-analyzer/constraint-checks/', sep='\t', header=0)
df_constraint_checks[['check', 'constraint', 'constraint_status', 'constraint_message']]
```
## Analyze Dataset Metrics
```
df_dataset_metrics = load_dataset(path='./amazon-reviews-spark-analyzer/dataset-metrics/', sep='\t', header=0)
df_dataset_metrics
```
## Analyze Success Metrics
```
df_success_metrics = load_dataset(path='./amazon-reviews-spark-analyzer/success-metrics/', sep='\t', header=0)
df_success_metrics
```
## Analyze Constraint Suggestions
```
df_constraint_suggestions = load_dataset(path='./amazon-reviews-spark-analyzer/constraint-suggestions/', sep='\t', header=0)
df_constraint_suggestions.columns=['column_name', 'description', 'code']
df_constraint_suggestions
```
# Save for the Next Notebook(s)
```
%store df_dataset_metrics
%%javascript
Jupyter.notebook.save_checkpoint();
Jupyter.notebook.session.delete();
```
|
github_jupyter
|
<p><font size="6"><b> CASE - Observation data - analysis</b></font></p>
> *© 2021, Joris Van den Bossche and Stijn Van Hoey (<mailto:[email protected]>, <mailto:[email protected]>). Licensed under [CC BY 4.0 Creative Commons](http://creativecommons.org/licenses/by/4.0/)*
---
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-whitegrid')
```
## 1. Reading in the enriched observations data
<div class="alert alert-success">
**EXERCISE**
- Read in the `survey_data_completed.csv` file and save the resulting `DataFrame` as variable `survey_data_processed` (if you did not complete the previous notebook, a version of the csv file is available in the `data` folder).
- Interpret the 'eventDate' column directly as python `datetime` objects and make sure the 'occurrenceID' column is used as the index of the resulting DataFrame (both can be done at once when reading the csv file using parameters of the `read_csv` function)
- Inspect the first five rows of the DataFrame and the data types of each of the data columns. Verify that the 'eventDate' indeed has a datetime data type.
<details><summary>Hints</summary>
- All read functions in Pandas start with `pd.read_...`.
- To check the documentation of a function, use the keystroke combination of SHIFT + TAB when the cursor is on the function.
- Remember `.head()` and `.info()`?
</details>
</div>
```
survey_data_processed = pd.read_csv("data/survey_data_completed.csv",
parse_dates=['eventDate'], index_col="occurrenceID")
survey_data_processed.head()
survey_data_processed.info()
```
## 2. Tackle missing values (NaN) and duplicate values
See [pandas_08_missing_values.ipynb](pandas_08_missing_values.ipynb) for an overview of functionality to work with missing values.
<div class="alert alert-success">
**EXERCISE**
How many records in the data set have no information about the `species`? Use the `isna()` method to find out.
<details><summary>Hints</summary>
- Do NOT use `survey_data_processed['species'] == np.nan`, but use the available method `isna()` to check if a value is NaN
- The result of an (element-wise) condition returns a set of True/False values, corresponding to 1/0 values. The amount of True values is equal to the sum.
</details>
```
survey_data_processed['species'].isna().sum()
```
<div class="alert alert-success">
**EXERCISE**
How many duplicate records are present in the dataset? Use the method `duplicated()` to check if a row is a duplicate.
<details><summary>Hints</summary>
- The result of an (element-wise) condition returns a set of True/False values, corresponding to 1/0 values. The amount of True values is equal to the sum.
</details>
```
survey_data_processed.duplicated().sum()
```
<div class="alert alert-success">
**EXERCISE**
- Select all duplicate data by filtering the `observations` data and assign the result to a new variable `duplicate_observations`. The `duplicated()` method provides a `keep` argument to define which duplicates (if any) to mark.
- Sort the `duplicate_observations` data on both the columns `eventDate` and `verbatimLocality` and show the first 9 records.
<details><summary>Hints</summary>
- Check the documentation of the `duplicated` method to find out which value the argument `keep` requires to select all duplicate data.
- `sort_values()` can work with a single columns name as well as a list of names.
</details>
```
duplicate_observations = survey_data_processed[survey_data_processed.duplicated(keep=False)]
duplicate_observations.sort_values(["eventDate", "verbatimLocality"]).head(9)
```
<div class="alert alert-success">
**EXERCISE**
- Exclude the duplicate values (i.e. keep the first occurrence while removing the other ones) from the `observations` data set and save the result as `survey_data_unique`. Use the `drop_duplicates()` method from Pandas.
- How many observations are still left in the data set?
<details><summary>Hints</summary>
- `keep='first'` is the default option for `drop_duplicates`
- The number of rows in a DataFrame is equal to the `len`gth
</details>
```
survey_data_unique = survey_data_processed.drop_duplicates()
len(survey_data_unique)
```
<div class="alert alert-success">
**EXERCISE**
Use the `dropna()` method to find out:
- For how many observations (rows) do we have all the information available (i.e. no NaN values in any of the columns)?
- For how many observations (rows) do we have the `species` data available?
<details><summary>Hints</summary>
- `dropna` by default removes all rows for which _any_ of the columns contains a `NaN` value.
- To specify which specific columns to check, use the `subset` argument
</details>
```
len(survey_data_unique.dropna()), len(survey_data_unique.dropna(subset=['species']))
```
<div class="alert alert-success">
**EXERCISE**
Filter the `survey_data_unique` data and select only those records that do not have a `species` while having information on the `sex`. Store the result as variable `not_identified`.
<details><summary>Hints</summary>
- To combine logical operators element-wise in Pandas, use the `&` operator.
- Pandas provides both a `isna()` and a `notna()` method to check the existence of `NaN` values.
</details>
```
mask = survey_data_unique['species'].isna() & survey_data_unique['sex'].notna()
not_identified = survey_data_unique[mask]
not_identified.head()
```
__NOTE!__
The `DataFrame` we will use in the further analyses contains species information:
```
survey_data = survey_data_unique.dropna(subset=['species']).copy()
survey_data['name'] = survey_data['genus'] + ' ' + survey_data['species']
```
<div class="alert alert-info">
**INFO**
For biodiversity studies, absence values (knowing that something is not present) are useful as well to normalize the observations, but this is out of scope for these exercises.
</div>
## 3. Select subsets of the data
```
survey_data['taxa'].value_counts()
#survey_data.groupby('taxa').size()
```
<div class="alert alert-success">
**EXERCISE**
- Select the observations for which the `taxa` is equal to 'Rabbit', 'Bird' or 'Reptile'. Assign the result to a variable `non_rodent_species`. Use the `isin` method for the selection.
<details><summary>Hints</summary>
- You do not have to combine three different conditions, but use the `isin` operator with a list of names.
</details>
```
non_rodent_species = survey_data[survey_data['taxa'].isin(['Rabbit', 'Bird', 'Reptile'])]
non_rodent_species.head()
len(non_rodent_species)
```
<div class="alert alert-success">
**EXERCISE**
Select the observations for which the `name` starts with the character 'r' (make sure it does not matter whether a capital letter is used in the name). Call the resulting variable `r_species`.
<details><summary>Hints</summary>
- Remember the `.str.` construction to provide all kind of string functionalities? You can combine multiple of these after each other.
- If the presence of capital letters should not matter, make everything lowercase first before comparing (`.lower()`)
</details>
```
r_species = survey_data[survey_data['name'].str.lower().str.startswith('r')]
r_species.head()
len(r_species)
r_species["name"].value_counts()
```
<div class="alert alert-success">
**EXERCISE**
Select the observations that are not Birds. Call the resulting variable <code>non_bird_species</code>.
<details><summary>Hints</summary>
- Logical operators like `==`, `!=`, `>`,... can still be used.
</details>
```
non_bird_species = survey_data[survey_data['taxa'] != 'Bird']
non_bird_species.head()
len(non_bird_species)
```
<div class="alert alert-success">
**EXERCISE**
Select the __Bird__ (taxa is Bird) observations from 1985-01 till 1989-12 using the `eventDate` column. Call the resulting variable `birds_85_89`.
<details><summary>Hints</summary>
- No hints, you can do this! (with the help of some `<=` and `&`, and don't forget the put brackets around each comparison that you combine)
</details>
```
birds_85_89 = survey_data[(survey_data["eventDate"] >= "1985-01-01")
& (survey_data["eventDate"] <= "1989-12-31 23:59")
& (survey_data['taxa'] == 'Bird')]
birds_85_89.head()
```
Alternative solution:
```
# alternative solution
birds_85_89 = survey_data[(survey_data["eventDate"].dt.year >= 1985)
& (survey_data["eventDate"].dt.year <= 1989)
& (survey_data['taxa'] == 'Bird')]
birds_85_89.head()
```
<div class="alert alert-success">
**EXERCISE**
- Drop the observations for which no 'weight' (`wgt` column) information is available.
- On the filtered data, compare the median weight for each of the species (use the `name` column)
- Sort the output from high to low median weight (i.e. descending)
__Note__ You can do this all in a single line statement, but don't have to do it as such!
<details><summary>Hints</summary>
- You will need `dropna`, `groupby`, `median` and `sort_values`.
</details>
```
# Multiple lines
obs_with_weight = survey_data.dropna(subset=["wgt"])
median_weight = obs_with_weight.groupby(['name'])["wgt"].median()
median_weight.sort_values(ascending=False)
# Single line statement
(survey_data
.dropna(subset=["wgt"])
.groupby(['name'])["wgt"]
.median()
.sort_values(ascending=False)
)
```
## 4. Species abundance
<div class="alert alert-success">
**EXERCISE**
Which 8 species (use the `name` column to identify the different species) have been observed most over the entire data set?
<details><summary>Hints</summary>
- Pandas provides a function to combine sorting and showing the first n records, see [here](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.nlargest.html)...
</details>
```
survey_data.groupby("name").size().nlargest(8)
survey_data['name'].value_counts()[:8]
```
<div class="alert alert-success">
**EXERCISE**
- What is the number of different species in each of the `verbatimLocality` plots? Use the `nunique` method. Assign the output to a new variable `n_species_per_plot`.
- Define a Matplotlib `Figure` (`fig`) and `Axes` (`ax`) to prepare a plot. Make a horizontal bar chart using the Pandas `plot` function linked to the just-created Matplotlib `ax`. Each bar represents the number of species per plot/`verbatimLocality`. Change the y-label to 'Plot number'.
<details><summary>Hints</summary>
- _...in each of the..._ should provide a hint to use `groupby` for this exercise. The `nunique` is the aggregation function for each of the groups.
- `fig, ax = plt.subplots()` prepares a Matplotlib Figure and Axes.
</details>
```
n_species_per_plot = survey_data.groupby(["verbatimLocality"])["name"].nunique()
fig, ax = plt.subplots(figsize=(6, 6))
n_species_per_plot.plot(kind="barh", ax=ax, color="lightblue")
ax.set_ylabel("plot number")
# Alternative option:
# inspired on the pivot table we already had:
# species_per_plot = survey_data.reset_index().pivot_table(
# index="name", columns="verbatimLocality", values="occurrenceID", aggfunc='count')
# n_species_per_plot = species_per_plot.count()
```
<div class="alert alert-success">
**EXERCISE**
- What is the number of plots (`verbatimLocality`) each of the species have been observed in? Assign the output to a new variable `n_plots_per_species`. Sort the counts from low to high.
- Make a horizontal bar chart using the Pandas `plot` function to show the number of plots in which each of the species was found (using the `n_plots_per_species` variable).
<details><summary>Hints</summary>
- Use the previous exercise to solve this one.
</details>
```
n_plots_per_species = survey_data.groupby(["name"])["verbatimLocality"].nunique().sort_values()
fig, ax = plt.subplots(figsize=(8, 8))
n_plots_per_species.plot(kind="barh", ax=ax, color='0.4')
ax.set_xlabel("Number of plots");
ax.set_ylabel("");
```
<div class="alert alert-success">
**EXERCISE**
- Starting from the `survey_data`, calculate the amount of males and females present in each of the plots (`verbatimLocality`). The result should return the counts for each of the combinations of `sex` and `verbatimLocality`. Assign to a new variable `n_plot_sex` and ensure the counts are in a column named "count".
- Use `pivot` to convert the `n_plot_sex` DataFrame to a new DataFrame with the `verbatimLocality` as index and `male`/`female` as column names. Assign to a new variable `pivoted`.
<details><summary>Hints</summary>
- _...for each of the combinations..._ `groupby` can also be used with multiple columns at the same time.
- If a `groupby` operation gives a Series as result, you can give that Series a name with the `.rename(..)` method.
- `reset_index()` is useful function to convert multiple indices into columns again.
</details>
```
n_plot_sex = survey_data.groupby(["sex", "verbatimLocality"]).size().rename("count").reset_index()
n_plot_sex.head()
pivoted = n_plot_sex.pivot(columns="sex", index="verbatimLocality", values="count")
pivoted.head()
```
To check, we can use the variable `pivoted` to plot the result:
```
pivoted.plot(kind='bar', figsize=(12, 6), rot=0)
```
<div class="alert alert-success">
**EXERCISE**
Recreate the previous plot with the `catplot` function from the Seaborn library starting from `n_plot_sex`.
<details><summary>Hints</summary>
- Check the `kind` argument of the `catplot` function to figure out how to specify that you want a barplot with given x and y values.
- To link a column to different colors, use the `hue` argument
</details>
```
sns.catplot(data=n_plot_sex, x="verbatimLocality", y="count",
hue="sex", kind="bar", height=3, aspect=3)
```
<div class="alert alert-success">
**EXERCISE**
Recreate the previous plot with the `catplot` function from the Seaborn library directly starting from `survey_data`.
<details><summary>Hints</summary>
- Check the `kind` argument of the `catplot` function to find out how to use counts to define the bars instead of a `y` value.
- To link a column to different colors, use the `hue` argument
</details>
```
sns.catplot(data=survey_data, x="verbatimLocality",
hue="sex", kind="count", height=3, aspect=3)
```
<div class="alert alert-success">
**EXERCISE**
- Make a summary table with the number of records of each of the species in each of the plots (also called `verbatimLocality`). Each of the species `name`s is a row index and each of the `verbatimLocality` plots is a column name.
- Using the Seaborn <a href="http://seaborn.pydata.org/generated/seaborn.heatmap.html">documentation</a> to make a heatmap.
<details><summary>Hints</summary>
- Make sure to pass the correct columns to respectively the `index`, `columns`, `values` and `aggfunc` parameters of the `pivot_table` function. You can use the `datasetName` to count the number of observations for each name/locality combination (when counting rows, the exact column doesn't matter).
</details>
```
species_per_plot = survey_data.pivot_table(index="name",
columns="verbatimLocality",
values="datasetName",
aggfunc='count')
# alternative ways to calculate this
#species_per_plot = survey_data.groupby(['name', 'verbatimLocality']).size().unstack(level=-1)
#pecies_per_plot = pd.crosstab(survey_data['name'], survey_data['verbatimLocality'])
fig, ax = plt.subplots(figsize=(8,8))
sns.heatmap(species_per_plot, ax=ax, cmap='Greens')
```
## 5. Observations over time
<div class="alert alert-success">
**EXERCISE**
Make a plot visualizing the evolution of the number of observations for each of the individual __years__ (i.e. annual counts) using the `resample` method.
<details><summary>Hints</summary>
- You want to `resample` the data using the `eventDate` column to create annual counts. If the index is not a datetime-index, you can use the `on=` keyword to specify which datetime column to use.
- `resample` needs an aggregation function on how to combine the values within a single 'group' (in this case data within a year). In this example, we want to know the `size` of each group, i.e. the number of records within each year.
</details>
```
survey_data.resample('A', on='eventDate').size().plot()
```
To evaluate the intensity or number of occurrences during different time spans, a heatmap is an interesting representation.
<div class="alert alert-success">
**EXERCISE**
- Create a table, called `heatmap_prep`, based on the `survey_data` DataFrame with the row index the individual years, in the column the months of the year (1-> 12) and as values of the table, the counts for each of these year/month combinations.
- Using the seaborn <a href="http://seaborn.pydata.org/generated/seaborn.heatmap.html">documentation</a>, make a heatmap starting from the `heatmap_prep` variable.
<details><summary>Hints</summary>
- The `.dt` accessor can be used to get the `year`, `month`,... from a `datetime` column
- Use `pivot_table` and provide the years to `index` and the months to `columns`. Do not forget to `count` the number for each combination (`aggfunc`).
- Seaborn has an `heatmap` function which requires a short-form DataFrame, comparable to giving each element in a table a color value.
</details>
```
heatmap_prep = survey_data.pivot_table(index=survey_data['eventDate'].dt.year,
columns=survey_data['eventDate'].dt.month,
values='species', aggfunc='count')
fig, ax = plt.subplots(figsize=(10, 8))
ax = sns.heatmap(heatmap_prep, cmap='Reds')
```
Remark that we started from a `tidy` data format (also called *long* format) and converted it to a *wide* (short) format, with the years in the row index, the months in the columns, and the counts for each of these year/month combinations as values.
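As a small illustration of that point (using only the `heatmap_prep` table built above), the wide table can be brought back to a tidy/long format with `stack`:
```
heatmap_long = (heatmap_prep
                .rename_axis(index="year", columns="month")  # give the two levels clear names
                .stack()                                      # collapse the month columns back into rows
                .rename("counts")
                .reset_index())
heatmap_long.head()
```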
## (OPTIONAL SECTION) 6. Evolution of species during monitoring period
*In this section, all plots can be made with the embedded Pandas plot function, unless specifically asked otherwise*
<div class="alert alert-success">
**EXERCISE**
Plot using Pandas `plot` function the number of records for `Dipodomys merriami` for each month of the year (January (1) -> December (12)), aggregated over all years.
<details><summary>Hints</summary>
- _...for each month of..._ requires `groupby`.
- `resample` is not useful here, as we do not want to change the time-interval, but look at month of the year (over all years)
</details>
```
merriami = survey_data[survey_data["name"] == "Dipodomys merriami"]
fig, ax = plt.subplots()
merriami.groupby(merriami['eventDate'].dt.month).size().plot(kind="barh", ax=ax)
ax.set_xlabel("number of occurrences");
ax.set_ylabel("Month of the year");
```
<div class="alert alert-success">
**EXERCISE**
Plot, for the species 'Dipodomys merriami', 'Dipodomys ordii', 'Reithrodontomys megalotis' and 'Chaetodipus baileyi', the monthly number of records as a function of time during the monitoring period. Plot each of the individual species in a separate subplot and provide them all with the same y-axis scale
<details><summary>Hints</summary>
- `isin` is useful to select from within a list of elements.
- `groupby` AND `resample` need to be combined. We do want to change the time-interval to represent data as a function of time (`resample`) and we want to do this _for each name/species_ (`groupby`). The order matters!
- `unstack` is a Pandas function a bit similar to `pivot`. Check the [unstack documentation](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.unstack.html) as it might be helpful for this exercise.
</details>
```
subsetspecies = survey_data[survey_data["name"].isin(['Dipodomys merriami', 'Dipodomys ordii',
'Reithrodontomys megalotis', 'Chaetodipus baileyi'])]
month_evolution = subsetspecies.groupby("name").resample('M', on='eventDate').size()
species_evolution = month_evolution.unstack(level=0)
axs = species_evolution.plot(subplots=True, figsize=(14, 8), sharey=True)
```
<div class="alert alert-success">
**EXERCISE**
Recreate the same plot as in the previous exercise using Seaborn `relplot` functon with the `month_evolution` variable.
<details><summary>Hints</summary>
- We want to have the `counts` as a function of `eventDate`, so link these columns to y and x respectively.
- To create subplots in Seaborn, the usage of _facetting_ (splitting data sets to multiple facets) is used by linking a column name to the `row`/`col` parameter.
- Using `height` and `aspect`, the figure size can be adjusted.
</details>
Uncomment the next cell (it calculates `month_evolution`, the intermediate result of the previous exercise):
```
# Given as solution..
subsetspecies = survey_data[survey_data["name"].isin(['Dipodomys merriami', 'Dipodomys ordii',
'Reithrodontomys megalotis', 'Chaetodipus baileyi'])]
month_evolution = subsetspecies.groupby("name").resample('M', on='eventDate').size().rename("counts")
month_evolution = month_evolution.reset_index()
month_evolution.head()
```
Plotting with seaborn:
```
sns.relplot(data=month_evolution, x='eventDate', y="counts",
row="name", kind="line", hue="name", height=2, aspect=5)
```
<div class="alert alert-success">
**EXERCISE**
Plot the annual amount of occurrences for each of the 'taxa' as a function of time using Seaborn. Plot each taxa in a separate subplot and do not share the y-axis among the facets.
<details><summary>Hints</summary>
- Combine `resample` and `groupby`!
- Check out the previous exercise for the plot function.
- Pass the `sharey=False` to the `facet_kws` argument as a dictionary.
</details>
```
year_evolution = survey_data.groupby("taxa").resample('A', on='eventDate').size()
year_evolution.name = "counts"
year_evolution = year_evolution.reset_index()
sns.relplot(data=year_evolution, x='eventDate', y="counts",
col="taxa", col_wrap=2, kind="line", height=2, aspect=5,
facet_kws={"sharey": False})
```
<div class="alert alert-success">
**EXERCISE**
The observations were taken by volunteers. You wonder on which day of the week the most observations were done. Calculate for each day of the week (`dayofweek`) the number of observations and make a bar plot.
<details><summary>Hints</summary>
- Did you know the Python standard Library has a module `calendar` which contains names of week days, month names,...?
</details>
```
fig, ax = plt.subplots()
survey_data.groupby(survey_data["eventDate"].dt.dayofweek).size().plot(kind='barh', color='#66b266', ax=ax)
import calendar
xticks = ax.set_yticklabels(calendar.day_name)
```
Nice work!
|
github_jupyter
|
# Classify Images using Residual Network with 50 layers (ResNet-50)
## Import Turi Create
Please follow the repository README instructions to install the Turi Create package.
**Note**: Turi Create is currently only compatible with Python 2.7
```
import turicreate as turi
```
## Reference the dataset path
```
url = "data/food_images"
```
## Label the dataset
In the following block of code we will label the images in the dataset of **Egg** and **Soup** images. Then we will export the result as an `SFrame` data object to use it for training the image classification model.
1. The first line of code loads the content of the images folder using `image_analysis`.
2. The second line creates a _foodType_ key for each image in the dataset to specify whether it's an **Egg** or **Soup** image, based on which folder it's located in.
3. The third line exports the analyzed data as an `SFrame` object in order to use it while creating our image classifier.
4. The fourth line simply visualises the newly labeled images in an explorable list.
**Note:** You do not have to run the following block of code every time you create a classifier, unless you have changed/edited the dataset.
```
data = turi.image_analysis.load_images(url)
data["foodType"] = data["path"].apply(lambda path: "Eggs" if "eggs" in path else "Soup")
data.save("egg_or_soup.sframe")
data.explore()
```
## Load the labeled SFrame
In the following line of code we are loading the `SFrame` object that contains the images in our dataset with their labels.
```
dataBuffer = turi.SFrame("egg_or_soup.sframe")
```
## Create training and test data using our existing dataset
Here, we're randomly splitting the data.
- 90% of the data in the `SFrame` object will be used for training the image classifier.
- 10% of the data in the `SFrame` object will be used for testing the image classifier.
```
trainingBuffers, testingBuffers = dataBuffer.random_split(0.9)
```
## Train the image classifier
In the following line of code, we will create an image classifier and we'll feed it with the training data we have.
In this example, the image classifier's architecture will be a state-of-the-art Residual Network with 50 layers, also known as **ResNet-50**.
Check out the official paper here: https://arxiv.org/abs/1512.03385.
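For intuition only (this is not part of the Turi Create workflow), the core idea of a residual network is the identity skip connection. A minimal, simplified sketch of one residual block in Keras is shown below; the filter count and input shape are arbitrary choices, and ResNet-50 itself uses deeper bottleneck blocks with batch normalization:
```
import tensorflow as tf
from tensorflow.keras import layers

def residual_block(x, filters=64):
    # main path: two 3x3 convolutions
    y = layers.Conv2D(filters, 3, padding='same', activation='relu')(x)
    y = layers.Conv2D(filters, 3, padding='same')(y)
    # skip connection: add the block input back to the main path, then activate
    return layers.Activation('relu')(layers.Add()([x, y]))

inputs = tf.keras.Input(shape=(32, 32, 64))   # channel count must match `filters` for the add
outputs = residual_block(inputs)
model = tf.keras.Model(inputs, outputs)
model.summary()
```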
```
model = turi.image_classifier.create(trainingBuffers, target="foodType", model="resnet-50")
```
## Evaluate the test data to determine the model accuracy
```
evaluations = model.evaluate(testingBuffers)
print evaluations["accuracy"]
```
## Save the Turi Create model to retrieve it later
```
model.save("egg_or_soup.model")
```
## Export the image classification model for Core ML
```
model.export_coreml("EggSoupClassifier.mlmodel")
```
|
github_jupyter
|
# More To Come. Stay Tuned. !!
If there are any suggestions/changes you would like to see in the Kernel, please let me know :). I appreciate every ounce of help!
**This notebook will always be a work in progress**. Please leave any comments about further improvements to the notebook! Any feedback or constructive criticism is greatly appreciated! **If you like it or it helps you, you can upvote and/or leave a comment :).**
```
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
import IPython.display as ipd # To play sound in the notebook
from tqdm import tqdm_notebook
import wave
from scipy.io import wavfile
SAMPLE_RATE = 44100
import seaborn as sns # for making plots with seaborn
color = sns.color_palette()
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.offline as offline
offline.init_notebook_mode()
import plotly.tools as tls
# Math
import numpy as np
from scipy.fftpack import fft
from scipy import signal
from scipy.io import wavfile
import librosa
import os
print(os.listdir("../input"))
INPUT_LIB = '../input/'
audio_train_files = os.listdir('../input/audio_train')
audio_test_files = os.listdir('../input/audio_test')
train = pd.read_csv('../input/train.csv')
submission = pd.read_csv("../input/sample_submission.csv", index_col='fname')
train_audio_path = '../input/audio_train/'
filename = '/001ca53d.wav' # Hi-hat
sample_rate, samples = wavfile.read(str(train_audio_path) + filename)
#sample_rate = 16000
print(samples)
print("Size of training data",train.shape)
train.head()
submission.head()
def clean_filename(fname, string):
file_name = fname.split('/')[1]
if file_name[:2] == '__':
file_name = string + file_name
return file_name
def load_wav_file(name, path):
_, b = wavfile.read(path + name)
assert _ == SAMPLE_RATE
return b
train_data = pd.DataFrame({'file_name' : train['fname'],
'target' : train['label']})
train_data['time_series'] = train_data['file_name'].apply(load_wav_file,
path=INPUT_LIB + 'audio_train/')
train_data['nframes'] = train_data['time_series'].apply(len)
train_data.head()
print("Size of training data after some preprocessing : ",train_data.shape)
# missing data in training data set
total = train_data.isnull().sum().sort_values(ascending = False)
percent = (train_data.isnull().sum()/train_data.isnull().count()).sort_values(ascending = False)
missing_train_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing_train_data.head()
```
There is no missing data in training dataset
# Manually verified Audio
```
temp = train['manually_verified'].value_counts()
labels = temp.index
sizes = (temp / temp.sum())*100
trace = go.Pie(labels=labels, values=sizes, hoverinfo='label+percent')
layout = go.Layout(title='Manual verification of labels (0 - No, 1 - Yes)')
data = [trace]
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
```
* Approximately 40% of the labels are manually verified.
```
plt.figure(figsize=(12,8))
sns.distplot(train_data.nframes.values, bins=50, kde=False)
plt.xlabel('nframes', fontsize=12)
plt.title("Histogram of #frames")
plt.show()
plt.figure(figsize=(17,8))
boxplot = sns.boxplot(x="target", y="nframes", data=train_data)
boxplot.set(xlabel='', ylabel='')
plt.title('Distribution of audio frames, per label', fontsize=17)
plt.xticks(rotation=80, fontsize=17)
plt.yticks(fontsize=17)
plt.xlabel('Label name')
plt.ylabel('nframes')
plt.show()
print("Total number of labels in training data : ",len(train_data['target'].value_counts()))
print("Labels are : ", train_data['target'].unique())
plt.figure(figsize=(15,8))
audio_type = train_data['target'].value_counts().head(30)
sns.barplot(audio_type.values, audio_type.index)
for i, v in enumerate(audio_type.values):
plt.text(0.8,i,v,color='k',fontsize=12)
plt.xticks(rotation='vertical')
plt.xlabel('Frequency')
plt.ylabel('Label Name')
plt.title("Top 30 labels with their frequencies in training data")
plt.show()
```
### Total number of labels are 41
```
temp = train_data.sort_values(by='target')
temp.head()
```
## Now look at some labels waveform :
1. Acoustic_guitar
2. Applause
3. Bark
## 1. Acoustic_guitar
```
print("Acoustic_guitar : ")
fig, ax = plt.subplots(10, 4, figsize = (12, 16))
for i in range(40):
ax[i//4, i%4].plot(temp['time_series'][i])
ax[i//4, i%4].set_title(temp['file_name'][i][:-4])
ax[i//4, i%4].get_xaxis().set_ticks([])
fig.savefig("AudioWaveform", dpi=900)
```
## 2. Applause
```
print("Applause : ")
fig, ax = plt.subplots(10, 4, figsize = (12, 16))
for i in range(40):
ax[i//4, i%4].plot(temp['time_series'][i+300])
ax[i//4, i%4].set_title(temp['file_name'][i+300][:-4])
ax[i//4, i%4].get_xaxis().set_ticks([])
```
## 3. Bark
```
print("Bark : ")
fig, ax = plt.subplots(10, 4, figsize = (12, 16))
for i in range(40):
ax[i//4, i%4].plot(temp['time_series'][i+600])
ax[i//4, i%4].set_title(temp['file_name'][i+600][:-4])
ax[i//4, i%4].get_xaxis().set_ticks([])
from wordcloud import WordCloud
wordcloud = WordCloud(max_font_size=50, width=600, height=300).generate(' '.join(train_data.target))
plt.figure(figsize=(15,8))
plt.imshow(wordcloud)
plt.title("Wordcloud for Labels", fontsize=35)
plt.axis("off")
plt.show()
#fig.savefig("LabelsWordCloud", dpi=900)
```
# Spectrogram
```
def log_specgram(audio, sample_rate, window_size=20,
step_size=10, eps=1e-10):
nperseg = int(round(window_size * sample_rate / 1e3))
noverlap = int(round(step_size * sample_rate / 1e3))
freqs, times, spec = signal.spectrogram(audio,
fs=sample_rate,
window='hann',
nperseg=nperseg,
noverlap=noverlap,
detrend=False)
return freqs, times, np.log(spec.T.astype(np.float32) + eps)
freqs, times, spectrogram = log_specgram(samples, sample_rate)
fig = plt.figure(figsize=(18, 8))
ax2 = fig.add_subplot(211)
ax2.imshow(spectrogram.T, aspect='auto', origin='lower',
extent=[times.min(), times.max(), freqs.min(), freqs.max()])
ax2.set_yticks(freqs[::40])
ax2.set_xticks(times[::40])
ax2.set_title('Spectrogram of Hi-hat ' + filename)
ax2.set_ylabel('Freqs in Hz')
ax2.set_xlabel('Seconds')
```
# Specgtrogram of "Hi-Hat" in 3d
If we use spectrogram as an input features for NN, we have to remember to normalize features.
```
mean = np.mean(spectrogram, axis=0)
std = np.std(spectrogram, axis=0)
spectrogram = (spectrogram - mean) / std
data = [go.Surface(z=spectrogram.T)]
layout = go.Layout(
    title='Spectrogram of "Hi-Hat" in 3d',
scene = dict(
        yaxis = dict(title='Frequencies', range=[freqs.min(), freqs.max()]),
        xaxis = dict(title='Time', range=[times.min(), times.max()]),
zaxis = dict(title='Log amplitude'),
),
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
```
# More To Come. Stay Tuned !!
|
github_jupyter
|
### DemIntro02:
# Rational Expectations Agricultural Market Model
#### Preliminary task:
Load required modules
```
from compecon.quad import qnwlogn
from compecon.tools import discmoments
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('dark')
%matplotlib notebook
```
Generate yield distribution
```
sigma2 = 0.2 ** 2
y, w = qnwlogn(25, -0.5 * sigma2, sigma2)
```
## Compute rational expectations equilibrium using function iteration, iterating on acreage planted
```
A = lambda aa, pp: 0.5 + 0.5 * np.dot(w, np.maximum(1.5 - 0.5 * aa * y, pp))
ptarg = 1
a = 1
print('{:^6} {:^10} {:^10}\n{}'.format('iter', 'a', "|a' - a|",'-' * 27))
for it in range(50):
aold = a
a = A(a, ptarg)
print('{:^6} {:^10.4f} {:^10.1e}'.format(it, a, np.linalg.norm(a - aold)))
if np.linalg.norm(a - aold) < 1.e-8:
break
```
Intermediate outputs
```
q = a * y # quantity produced in each state
p = 1.5 - 0.5 * a * y # market price in each state
f = np.maximum(p, ptarg) # farm price in each state
r = f * q # farm revenue in each state
g = (f - p) * q #government expenditures
```
Print results
```
varnames = ['Market Price', 'Farm Price', 'Farm Revenue', 'Government Expenditures']
xavg, xstd = discmoments(w, np.vstack((p, f, r, g)))
print('\n{:^24} {:^8} {:^8}\n{}'.format('Variable', 'Expect', 'Std Dev','-'*42))
for varname, av, sd in zip(varnames, xavg, xstd):
print('{:24} {:8.4f} {:8.4f}'.format(varname, av, sd))
```
## Generate fixed-point mapping
```
aeq = a
a = np.linspace(0, 2, 100)
g = np.array([A(k, ptarg) for k in a])
```
Graph rational expectations equilibrium
```
fig1 = plt.figure(figsize=[6, 6])
ax = fig1.add_subplot(111, title='Rational expectations equilibrium', aspect=1,
xlabel='Acreage Planted', xticks=[0, aeq, 2], xticklabels=['0', '$a^{*}$', '2'],
ylabel='Rational Acreage Planted', yticks=[0, aeq, 2],yticklabels=['0', '$a^{*}$', '2'])
ax.plot(a, g, 'b', linewidth=4)
ax.plot(a, a, ':', color='grey', linewidth=2)
ax.plot([0, aeq, aeq], [aeq, aeq, 0], 'r--', linewidth=3)
ax.plot([aeq], [aeq], 'ro', markersize=12)
ax.text(0.05, 0, '45${}^o$', color='grey')
ax.text(1.85, aeq - 0.15,'$g(a)$', color='blue')
fig1.show()
```
## Compute rational expectations equilibrium as a function of the target price
```
nplot = 50
ptarg = np.linspace(0, 2, nplot)
a = 1
Ep, Ef, Er, Eg, Sp, Sf, Sr, Sg = (np.empty(nplot) for k in range(8))
for ip in range(nplot):
for it in range(50):
aold = a
a = A(a, ptarg[ip])
        if np.linalg.norm(a - aold) < 1.e-10:
break
q = a * y # quantity produced
p = 1.5 - 0.5 * a * y # market price
f = np.maximum(p, ptarg[ip]) # farm price
r = f * q # farm revenue
g = (f - p) * q # government expenditures
xavg, xstd = discmoments(w, np.vstack((p, f, r, g)))
Ep[ip], Ef[ip], Er[ip], Eg[ip] = tuple(xavg)
Sp[ip], Sf[ip], Sr[ip], Sg[ip] = tuple(xstd)
zeroline = lambda y: plt.axhline(y[0], linestyle=':', color='gray')
```
Graph expected prices vs target price
```
fig2 = plt.figure(figsize=[8, 6])
ax1 = fig2.add_subplot(121, title='Expected price',
xlabel='Target price', xticks=[0, 1, 2],
ylabel='Expectation', yticks=[0.5, 1, 1.5, 2], ylim=[0.5, 2.0])
zeroline(Ep)
ax1.plot(ptarg, Ep, linewidth=4, label='Market Price')
ax1.plot(ptarg, Ef, linewidth=4, label='Farm Price')
ax1.legend(loc='upper left')
```
Graph expected prices vs target price
```
ax2 = fig2.add_subplot(122, title='Price variabilities',
xlabel='Target price', xticks=[0, 1, 2],
ylabel='Standard deviation', yticks=[0, 0.1, 0.2]) #plt.ylim(0.5, 2.0)
zeroline(Sf)
ax2.plot(ptarg, Sp, linewidth=4, label='Market Price')
ax2.plot(ptarg, Sf, linewidth=4, label='Farm Price')
ax2.legend(loc='upper left')
fig2.show()
```
Graph expected farm revenue vs target price
```
fig3 = plt.figure(figsize=[12, 6])
ax1 = fig3.add_subplot(131, title='Expected revenue',
xlabel='Target price', xticks=[0, 1, 2],
ylabel='Expectation', yticks=[1, 2, 3], ylim=[0.8, 3.0])
zeroline(Er)
ax1.plot(ptarg, Er, linewidth=4)
```
Graph standard deviation of farm revenue vs target price
```
ax2 = fig3.add_subplot(132, title='Farm Revenue Variability',
xlabel='Target price', xticks=[0, 1, 2],
ylabel='Standard deviation', yticks=[0, 0.2, 0.4])
zeroline(Sr)
ax2.plot(ptarg, Sr, linewidth=4)
```
Graph expected government expenditures vs target price
```
ax3 = fig3.add_subplot(133, title='Expected Government Expenditures',
xlabel='Target price', xticks=[0, 1, 2],
ylabel='Expectation', yticks=[0, 1, 2], ylim=[-0.05, 2.0])
zeroline(Eg)
ax3.plot(ptarg, Eg, linewidth=4)
plt.show()
```
|
github_jupyter
|
# Baseline model classification
The purpose of this notebook is to make predictions for all six categories on the given dataset using some set of rules.
<br>Let's assume that the human labellers have labelled these comments based on certain kinds of words present in the comments. It is therefore worth exploring the comments to check which words are used under every category and how many times each word occurs in that category. In this notebook, six datasets are created from the main dataset to make the per-category analysis easier. After this, the most frequently used words under each category are counted and stored. For each category, we then check the presence of the `top n` words from the frequently-used-word dictionary in a comment to make the prediction.
### 1. Import libraries and load data
For preparation lets import the required libraries and the data
```
import os
dir_path = os.path.dirname(os.getcwd())
import numpy as np
import pandas as pd
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import re
import string
import operator
import pickle
import sys
sys.path.append(os.path.join(dir_path, "src"))
from clean_comments import clean
train_path = os.path.join(dir_path, 'data', 'raw', 'train.csv')
## Load dataset
df = pd.read_csv(train_path)
```
### <br>2. Datasets for each category
Dataset with toxic comments
```
#extract dataset with toxic label
df_toxic = df[df['toxic'] == 1]
#Reseting the index
df_toxic.set_index(['id'], inplace = True)
df_toxic.reset_index(level =['id'], inplace = True)
```
Dataset of severe toxic comments
```
#extract dataset with Severe toxic label
df_severe_toxic = df[df['severe_toxic'] == 1]
#Reseting the index
df_severe_toxic.set_index(['id'], inplace = True)
df_severe_toxic.reset_index(level =['id'], inplace = True)
```
Dataset with obscene comment
```
#extract dataset with obscens label
df_obscene = df[df['obscene'] == 1]
#Reseting the index
df_obscene.set_index(['id'], inplace = True)
df_obscene.reset_index(level =['id'], inplace = True)
#df_obscene =df_obscene.drop('comment_text', axis=1)
```
Dataset with comments labeled as "identity_hate"
```
df_identity_hate = df[df['identity_hate'] == 1]
#Reseting the index
df_identity_hate.set_index(['id'], inplace = True)
df_identity_hate.reset_index(level =['id'], inplace = True)
```
Dataset with all the threat comments
```
df_threat = df[df['threat'] == 1]
#Reseting the index
df_threat.set_index(['id'], inplace = True)
df_threat.reset_index(level =['id'], inplace = True)
```
Dataset of comments with "Insult" label
```
df_insult = df[df['insult'] == 1]
#Reseting the index
df_insult.set_index(['id'], inplace = True)
df_insult.reset_index(level =['id'], inplace = True)
```
Dataset with comments which have all six labels
```
df_6 = df[(df['toxic']==1) & (df['severe_toxic']==1) &
(df['obscene']==1) & (df['threat']==1)&
(df['insult']==1)& (df['identity_hate']==1)]
df_6.set_index(['id'], inplace = True)
df_6.reset_index(level =['id'], inplace = True)
# df6 = df_6.drop('comment_text', axis=1)
```
### <br> 3. Preparation of the vocab
```
### The frequent_words function takes a dataset as input and returns two values -
### all_words and counts.
### all_words is a list of all the words occurring in the provided dataset.
### counts is a dictionary whose keys are the words that exist in the entire dataset and
### whose values are the number of times each word occurs in the dataset.
def frequent_words(data):
all_word = []
counts = dict()
for i in range (0,len(data)):
### Load input
input_str = data.comment_text[i]
### Clean input data
processed_text = clean(input_str)
### perform tokenization
tokened_text = word_tokenize(processed_text)
### remove stop words
comment_word = []
for word in tokened_text:
if word not in stopwords.words('english'):
comment_word.append(word)
#print(len(comment_word))
all_word.extend(comment_word)
for word in all_word:
if word in counts:
counts[word] += 1
else:
counts[word] = 1
return all_word, counts
## The descend_order_dict function takes a dataframe as input and outputs the sorted vocab dictionary
## with the values sorted in descending order (keys are words and values are word counts)
def descend_order_dict(data):
all_words, word_count = frequent_words(data)
sorted_dict = dict( sorted(word_count.items(), key=operator.itemgetter(1),reverse=True))
return sorted_dict
label_sequence = df.columns.drop("id")
label_sequence = label_sequence.drop("comment_text").tolist()
label_sequence
```
#### <br>Getting the vocab used in each category in descending order its count
For **`toxic`** category
```
descend_order_toxic_dict = descend_order_dict(df_toxic)
```
These are the words most frequently used in toxic comments
<br>For **`severe_toxic`** category
```
descend_order_severe_toxic_dict =descend_order_dict(df_severe_toxic)
```
These are the words most frequently used in severe toxic comments
<br>For **`obscene`** category
```
descend_order_obscene_dict = descend_order_dict(df_obscene)
```
These are the words most frequently used in obscene comments
<br>For **`threat`** category
```
descend_order_threat_dict = descend_order_dict(df_threat)
```
These are the words most frequently used in threat comments
<br>For **`insult`** category
```
descend_order_insult_dict = descend_order_dict(df_insult)
```
These are the words most frequently used in comments labeled as an insult
<br>For **`identity_hate`** category
```
descend_order_id_hate_dict = descend_order_dict(df_identity_hate)
```
These are the most frequently used words in the comments labeled as identity_hate
<br> For comments when all categories are 1
```
descend_order_all_label_dict = descend_order_dict(df_6)
```
These are the most frequently used words in the comments that carry all six labels
#### <br> Picking up the top n words from the descend vocab dictionary
In this code, top 3 words are considered to make the prediction.
```
# list(descend_order_all_label_dict.keys())[3]
## combining descend vocab dictionaries of all the categories in one dictionary
## with categories as their keys
all_label_descend_vocab = {'toxic':descend_order_toxic_dict,
'severe_toxic':descend_order_severe_toxic_dict,
'obscene':descend_order_obscene_dict,
'threat':descend_order_threat_dict,
'insult':descend_order_insult_dict,
'id_hate':descend_order_id_hate_dict
}
## This function takes two arguments - all_label_descend_vocab and the number of top picks n -
## and outputs a dictionary with categories as keys and the list of their top n words as values.
def dict_top_n_words(all_label_descend_vocab, n):
count = dict()
for label, words in all_label_descend_vocab.items():
word_list = []
for i in range (0,n):
word_list.append(list(words.keys())[i])
count[label] = word_list
return count
### top 3 words from all the vocabs
dict_top_n_words(all_label_descend_vocab,3)
```
### <br>4. Performance check of baseline Model
```
## Check whether any of the top n words of the six categories exists in the comment
def word_intersection(input_str, n, all_words =all_label_descend_vocab):
toxic_pred = []
severe_toxic_pred = []
obscene_pred = []
threat_pred = []
insult_pred = []
id_hate_pred = []
rule_based_pred = [toxic_pred, severe_toxic_pred, obscene_pred, threat_pred,
insult_pred,id_hate_pred ]
# top_n_words = dict_top_n_words[all_label_freq_word,n]
    for count, ele in enumerate(list(dict_top_n_words(all_label_descend_vocab, n).values())):
for word in ele:
if (word in input_str):
rule_based_pred[count].append(word)
#print(rule_based_pred)
for i in range (0,len(rule_based_pred)):
if len(rule_based_pred[i])== 0:
rule_based_pred[i]= 0
else:
rule_based_pred[i]= 1
return rule_based_pred
### Test
word_intersection(df['comment_text'][55], 3)
```
<br>Uncomment the cell below to compute the predictions on the dataset; to save time, they are already saved in `rule_base_pred.pkl` in list form.
```
## store the values of predictions by running the word_intersection function on
## all the comments
# rule_base_pred = df['comment_text'].apply(lambda x: word_intersection(x,3))
```
After running the above cell, we get the predictions on the entire dataset for each category in `rule_base_pred`; the original type of `rule_base_pred` is pandas.core.series.Series. This pandas Series is converted into a list and saved for future use. The saved `.pkl` file can be loaded by running the cell below.
```
### save rule_base_pred
# file_name = "rule_base_pred.pkl"
# open_file = open(file_name, "wb")
# pickle.dump(rule_base_pred, open_file)
# # open_file.close()
# open_file = open("rule_base_pred.pkl", "rb")
# pred_rule = pickle.load(open_file)
# open_file.close()
### Open the saved rule_base_pred.pkl
pkl_file = os.path.join(dir_path, 'model', 'rule_base_pred.pkl')
open_file = open(pkl_file, "rb")
pred_rule = pickle.load(open_file)
open_file.close()
## true prediction
y_true = df.drop(['id', 'comment_text'], axis=1)
## check the type
type(y_true), type(pred_rule)
```
<br>Uncomment the pred_rule line in the cell below to convert the predictions from a pandas Series to a list, if you are not using the saved `rule_base_pred.pkl`.
```
### Change the type to list
pred_true = y_true.values.tolist()
# pred_rule = rule_base_pred.values.tolist()
```
#### Compute accuracy of Baseline Model
```
## Accuracy check for decent and not-decent comments classification
count = 0
for i in range(0, len(df)):
if pred_true[i] == pred_rule[i]:
count = count+1
print("Overall accuracy of rule based classifier : {}".format((count/len(df))*100))
```
Based on the rule implemented here, the baseline classifier reproduces all six labels exactly (and hence the decent / not-decent verdict) for **76.6%** of the comments. Now we have to see whether AI-based models give better performance than this.
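Note that the loop above counts a comment as correct only when all six labels match. As a small sketch (reusing the `pred_true` and `pred_rule` lists already in memory), the purely binary decent vs. not-decent accuracy, where a comment counts as "not decent" when any of its six labels is 1, can be checked like this:
```
## A comment is "not decent" when any of its six labels is 1
true_flag = np.array([max(row) for row in pred_true])
pred_flag = np.array([max(row) for row in pred_rule])
print("Decent / not-decent accuracy: {:.1%}".format((true_flag == pred_flag).mean()))
```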
```
## Category wise accuracy check
mean = []
for j in range(0, len(pred_true[0])):
count = 0
for i in range(0, len(df)):
if pred_true[i][j] == pred_rule[i][j]:
count = count+1
mean.append(count/len(df)*100)
print("Accuracy of rule based classifier in predicting {} comments : {}".format(label_sequence[j],(count/len(df))*100))
print("Mean accuracy : {}".format(np.array(mean).mean()))
```
Mean accuracy of our *rule-based model* is 92.7%.<br>
The minimum per-category accuracy of the Baseline model for predicting the `toxic `, `severe_toxic `, `obscene `, `threat `, `insult `, or `identity_hate ` class is more than 87%.
<br>Accuracies for:
<ol>
<li>`toxic `: 89.4%</li>
<li>`severe_toxic `: 88.2%</li>
<li>`obscene `: 96.3%</li>
<li>`threat `: 87.8%</li>
<li>`insult `: 95.8%</li>
<li>`identity_hate `: 98.3%</li>
</ol>
<br>In my opinion this model is doing quite well. As we know, the dataset has more samples for toxic comments than for the rest of the categories, yet this model still managed to predict with 89.4% accuracy by considering just the top 3 words from a very large vocabulary. It may perform better if we consider more than 3 words from the vocabulary, because the top 3 words are not necessarily a true representation of this category.
<br>On the other hand, `obscene `, `insult `, and `identity_hate ` have very good accuracy rates; it seems the human labellers looked for these top 3 words when labelling comments under these categories.
<br>For the `threat ` category, the model should perform well since the number of samples for this category is just 478, which means it has a smaller vocabulary compared to the other classes; but it seems the human labellers looked at more than these top 3 words of its vocabulary. This could be checked by tweaking the number of top-n words, as sketched below.
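A rough way to check that is to sweep the number of top-n words and recompute the per-category accuracy on a subsample. The sketch below reuses `word_intersection`, `df`, `pred_true`, and `label_sequence` from this notebook; the subsample size and the values of n are arbitrary choices, and it assumes `word_intersection` actually respects its `n` argument. It is slow because the vocabulary is rebuilt on every call.
```
## Sketch: vary the number of top-n words and recompute per-category accuracy
rng = np.random.RandomState(0)
sample_pos = rng.choice(len(df), size=2000, replace=False)   # subsample for speed
for n in [3, 5, 10]:
    preds = {pos: word_intersection(df['comment_text'].iloc[pos], n) for pos in sample_pos}
    for j, label in enumerate(label_sequence):
        acc = np.mean([preds[pos][j] == pred_true[pos][j] for pos in sample_pos])
        print("top-{} words | {:>13}: {:.1f}%".format(n, label, acc * 100))
```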
```
yp=np.array([np.array(xi) for xi in pred_rule])
type(yp)
# type(y[0])
yp.shape
yt=np.array([np.array(xi) for xi in pred_true])
type(yt)
yt.shape
from sklearn.metrics import jaccard_score
print("Jaccard score is : {}".format(jaccard_score(yt,yp, average= 'weighted')))
```
Judging by the Jaccard similarity, our `rule based model` is actually doing quite poorly
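This is not inconsistent with the high per-category accuracies above: the Jaccard score only rewards overlap on the positive labels, so on a heavily imbalanced multilabel dataset a classifier that mostly predicts zeros can have high element-wise accuracy yet a low Jaccard score. A tiny illustration with made-up labels (not taken from this dataset):
```
import numpy as np
from sklearn.metrics import jaccard_score

# 8 samples, 2 labels, mostly zeros (toy data)
y_true_toy = np.array([[0, 0]]*6 + [[1, 0], [1, 1]])
y_pred_toy = np.array([[0, 0]]*6 + [[1, 0], [0, 0]])

print("element-wise accuracy:", (y_true_toy == y_pred_toy).mean())                           # 0.875
print("weighted Jaccard     :", jaccard_score(y_true_toy, y_pred_toy, average='weighted'))   # ~0.33
```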
|
github_jupyter
|
<a href="https://colab.research.google.com/github/hansong0219/Advanced-DeepLearning-Study/blob/master/UNET/UNET_Build.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import numpy as np
import os
import sys
from tensorflow.keras.layers import Input, Dropout, Concatenate
from tensorflow.keras.layers import Conv2DTranspose, Conv2D
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import LeakyReLU, Activation
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import plot_model
from tensorflow.keras.losses import BinaryCrossentropy
import matplotlib.pyplot as plt
import tensorflow as tf
def down_sample(layer_inputs,filters, size, apply_batchnorm=True):
initializer = tf.random_normal_initializer(0.,0.02)
d = Conv2D(filters, size, strides=2,padding='same', kernel_initializer=initializer, use_bias=False)(layer_inputs)
if apply_batchnorm:
d = BatchNormalization()(d)
d = LeakyReLU(alpha=0.2)(d)
return d
def up_sample(layer_inputs, skip_input,filters, size, dropout_rate=0):
initializer = tf.random_normal_initializer(0.,0.02)
u = Conv2DTranspose(filters, size, strides=2,padding='same', kernel_initializer=initializer,use_bias=False)(layer_inputs)
if dropout_rate:
u = Dropout(dropout_rate)(u)
u = tf.keras.layers.ReLU()(u)
u = Concatenate()([u, skip_input])
return u
def Build_UNET():
input_shape = (256,256,3)
output_channel = 3
inputs = Input(shape=input_shape,name="inputs")
    d1 = down_sample(inputs, 64, 4, apply_batchnorm=False) #(128,128,64)
d2 = down_sample(d1, 128, 4) #(64,64,128)
d3 = down_sample(d2, 256, 4)
d4 = down_sample(d3, 512, 4)
d5 = down_sample(d4, 512, 4)
d6 = down_sample(d5, 512, 4)
d7 = down_sample(d6, 512, 4)
d8 = down_sample(d7, 512, 4)
u7 = up_sample(d8, d7, 512, 4, dropout_rate = 0.5)
u6 = up_sample(u7, d6, 512, 4, dropout_rate = 0.5)
u5 = up_sample(u6, d5, 512, 4, dropout_rate = 0.5)
u4 = up_sample(u5, d4, 512, 4)
u3 = up_sample(u4, d3, 256, 4)
u2 = up_sample(u3, d2, 128, 4)
u1 = up_sample(u2, d1, 64, 4)
initializer = tf.random_normal_initializer(0.,0.02)
outputs = Conv2DTranspose(output_channel,
kernel_size=4,
strides=2,
padding='same',
kernel_initializer=initializer,
activation='tanh')(u1)
return Model(inputs, outputs)
unet = Build_UNET()
optimizer = Adam(1e-4, beta_1=0.5)
unet.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
unet.summary()
plot_model(unet, show_shapes=True, dpi=64)
loss = BinaryCrossentropy(from_logits=True)  # defined here but not passed to compile below
optimizer = Adam(1e-4, beta_1=0.5)
unet.compile(optimizer=optimizer, loss='mse', metrics=['accuracy'])  # recompiles the model with MSE; the BCE loss above is unused
```
|
github_jupyter
|
# Bayesian GAN
Bayesian GAN (Saatchi and Wilson, 2017) is a Bayesian formulation of Generative Adversarial Networks (Goodfellow, 2014) where we learn the **distributions** of the generator parameters $\theta_g$ and the discriminator parameters $\theta_d$ instead of optimizing for point estimates. The benefits of the Bayesian approach include the flexibility to model **multimodality** in the parameter space, as well as the ability to **prevent mode collapse** in the maximum likelihood (non-Bayesian) case.
We learn Bayesian GAN via an approximate inference algorithm called **Stochastic Gradient Hamiltonian Monte Carlo (SGHMC)**, which is a gradient-based MCMC method whose samples approximate the true posterior distributions of $\theta_g$ and $\theta_d$.
The Bayesian GAN training process starts by sampling noise $z$ from a fixed distribution (typically a standard d-dimensional normal). The noise is fed to the generator, whose parameters $\theta_g$ are sampled from the posterior distribution $p(\theta_g | D)$. The generated image $G(z|\theta_g)$, together with the real data, is presented to the discriminator, whose parameters are sampled from its posterior distribution $p(\theta_d|D)$. We update the posteriors using the gradients $\frac{\partial \log p(\theta_g|D) }{\partial \theta_g }$ and $\frac{\partial \log p(\theta_d|D) }{\partial \theta_d }$ with Stochastic Gradient Hamiltonian Monte Carlo (SGHMC). The next section explains the intuition behind SGHMC.

<img src="figs/graphics_bayesgan.pdf">
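For intuition, in the unsupervised setting the conditional posteriors we sample from take (up to normalization) roughly the following form in Saatchi and Wilson (2017), with $\alpha_g$ and $\alpha_d$ the hyperparameters of the priors over $\theta_g$ and $\theta_d$:
$$
p(\theta_g \mid \boldsymbol{z}, \theta_d) \propto \Bigg( \prod_{i=1}^{n_g} D\big(G(\boldsymbol{z}^{(i)}; \theta_g); \theta_d\big) \Bigg)\, p(\theta_g \mid \alpha_g)
$$
$$
p(\theta_d \mid \boldsymbol{z}, \boldsymbol{X}, \theta_g) \propto \prod_{i=1}^{n_d} D\big(\boldsymbol{x}^{(i)}; \theta_d\big) \times \prod_{i=1}^{n_g} \Big(1 - D\big(G(\boldsymbol{z}^{(i)}; \theta_g); \theta_d\big)\Big) \times p(\theta_d \mid \alpha_d)
$$
The semi-supervised variant used below extends the discriminator to $K+1$ output classes, with class $0$ reserved for fake samples.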
# Learning Posterior Distributions
There are many approaches to estimating the posterior distribution of model parameters, for example Markov Chain Monte Carlo (MCMC), Variational Inference (VI), Approximate Bayesian Computation (ABC), etc. Bayesian GAN uses SGHMC (Chen, 2014), a stochastic version of HMC (Neal, 2012), which is an MCMC method that (1) uses gradients to perform sampling efficiently and (2) uses stochastic gradients computed on minibatches to handle large amounts of data.
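Concretely, writing $\tilde{U}(\theta)$ for a stochastic (minibatch) estimate of the negative log posterior, the SGHMC update of Chen (2014) can be written in its SGD-with-momentum form roughly as
$$
\Delta\theta = v, \qquad \Delta v = -\eta \nabla \tilde{U}(\theta) - \alpha v + \mathcal{N}\big(0,\; 2(\alpha - \hat{\beta})\eta\big),
$$
where $\eta$ is the step size, $\alpha$ plays the role of friction (momentum decay), and $\hat{\beta}$ estimates the noise of the stochastic gradient (often simply set to zero in practice).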
Below we show a visualization of samples generated by HMC. Once the algorithm has run for a while, we can see that the high-density region has a higher concentration of points. HMC can also handle multimodality (see the second visualization).
```
from IPython.display import HTML
HTML('<iframe width="1000" height="400" src="https://chi-feng.github.io/mcmc-demo/app.html#HamiltonianMC,banana" frameborder="0" allowfullscreen></iframe>')
```
Hamiltonian Monte Carlo allows us to learn arbitrary distributions, including multimodal distributions that other Bayesian approaches such as variational inference cannot model.
```
HTML('<iframe width="1000" height="400" src="https://chi-feng.github.io/mcmc-demo/app.html#HamiltonianMC,multimodal" frameborder="0" allowfullscreen></iframe>')
```
# Training
We show that Bayesian GAN can capture the data distribution by measuring its performance in the semi-supervised setting. We will perform the posterior update as outlined in Algorithm 1 of Saatchi and Wilson (2017). This algorithm can be implemented quite simply by adding noise to standard optimizers such as SGD with momentum and keeping track of the parameters we sample from the posterior.

### SGHMC by Optimizing a Noisy Loss
First, observe that the update rules are similar to momentum SGD except for the noise $\boldsymbol{n}$. In fact, without $\boldsymbol{n}$, this is equivalent to performing momentum SGD where the loss is $- \sum_{i=1}^{J_g} \sum_{k=1}^{J_d} \log \text{posterior}$. We will describe the case where $J_g = J_d = 1$ for simplicity.
We use the main loss $\mathcal{L} = - \log p(\theta | ..)$ and add a noise loss $\mathcal{L}_\text{noise} = \frac{1}{\eta} \theta \cdot \boldsymbol{n}$ where $\boldsymbol{n} \sim \mathcal{N}(0, 2 \alpha \eta I)$ so that optimizing the loss function $\mathcal{L} + \mathcal{L}_\text{noise}$ with momentum SGD is equivalent to performing the SGHMC update step.
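As a concrete illustration, here is a minimal PyTorch sketch of such a noise term. The function name and the values of $\alpha$ and $\eta$ in the usage comment are placeholders; this is not the `NoiseLoss` class imported from `models.bayes` below, though it follows the same idea.
```
import torch

def sghmc_noise_loss(params, alpha, lr):
    """Noise loss (1/eta) * sum(theta . n) with n ~ N(0, 2*alpha*eta*I).
    Its gradient w.r.t. theta is n/eta, which injects the SGHMC noise
    when the total loss is optimized with momentum SGD."""
    total = 0.0
    for p in params:
        n = torch.randn_like(p) * (2.0 * alpha * lr) ** 0.5  # n ~ N(0, 2*alpha*lr)
        total = total + (p * n).sum() / lr
    return total

# Usage sketch (illustrative values):
# loss = neg_log_posterior + sghmc_noise_loss(model.parameters(), alpha=1e-4, lr=2e-4)
# loss.backward(); optimizer.step()   # optimizer = SGD with momentum
```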
Below (Equations 3 and 4) are the posterior probabilities, where each error term corresponds to its negative log probability.

```
!pip install tensorboard_logger
from __future__ import print_function
import os, pickle
import numpy as np
import random, math
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from statsutil import AverageMeter, accuracy
from tensorboard_logger import configure, log_value
# Default Parameters
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='cifar10')
parser.add_argument('--imageSize', type=int, default=32)
parser.add_argument('--batchSize', type=int, default=64, help='input batch size')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--niter', type=int, default=2, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--cuda', type=int, default=1, help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--outf', default='modelfiles/pytorch_demo3', help='folder to output images and model checkpoints')
parser.add_argument('--numz', type=int, default=1, help='The number of set of z to marginalize over.')
parser.add_argument('--num_mcmc', type=int, default=10, help='The number of MCMC chains to run in parallel')
parser.add_argument('--num_semi', type=int, default=4000, help='The number of semi-supervised samples')
parser.add_argument('--gnoise_alpha', type=float, default=0.0001, help='')
parser.add_argument('--dnoise_alpha', type=float, default=0.0001, help='')
parser.add_argument('--d_optim', type=str, default='adam', choices=['adam', 'sgd'], help='')
parser.add_argument('--g_optim', type=str, default='adam', choices=['adam', 'sgd'], help='')
parser.add_argument('--stats_interval', type=int, default=10, help='Calculate test accuracy every interval')
parser.add_argument('--tensorboard', type=int, default=1, help='')
parser.add_argument('--bayes', type=int, default=1, help='Do Bayesian GAN or normal GAN')
import sys; sys.argv=['']; del sys
opt = parser.parse_args()
try:
os.makedirs(opt.outf)
except OSError:
print("Error Making Directory", opt.outf)
pass
if opt.tensorboard: configure(opt.outf)
# First, we construct the data loader for full training set
# as well as the data loader of a partial training set for semi-supervised learning
# transformation operator
normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
transform_opt = transforms.Compose([
transforms.ToTensor(),
normalize,
])
# get training set and test set
dataset = dset.CIFAR10(root=os.environ['CIFAR10_PATH'], download=True,
transform=transform_opt)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
shuffle=True, num_workers=1)
from partial_dataset import PartialDataset
# partial dataset for semi-supervised training
dataset_partial = PartialDataset(dataset, opt.num_semi)
# test set for evaluation
dataset_test = dset.CIFAR10(root=os.environ['CIFAR10_PATH'],
train=False,
transform=transform_opt)
dataloader_test = torch.utils.data.DataLoader(dataset_test,
batch_size=opt.batchSize, shuffle=False, pin_memory=True, num_workers=1)
dataloader_semi = torch.utils.data.DataLoader(dataset_partial, batch_size=opt.batchSize,
shuffle=True, num_workers=1)
# Now we initialize the distributions of G and D
##### Generator ######
# opt.num_mcmc is the number of MCMC chains that we run in parallel
# opt.numz is the number of noise batches that we use. We also use different parameter samples for different batches
# we construct opt.numz * opt.num_mcmc initial generator parameters
# We will keep sampling parameters from the posterior starting from this set
# Keeping track of many MCMC chains can be done quite elegantly in Pytorch
from models.discriminators import _netD
from models.generators import _netG
from statsutil import weights_init
netGs = []
for _idxz in range(opt.numz):
for _idxm in range(opt.num_mcmc):
netG = _netG(opt.ngpu, nz=opt.nz)
netG.apply(weights_init)
netGs.append(netG)
##### Discriminator ######
# We will use 1 chain of MCMCs for the discriminator
# The number of classes for semi-supervised case is 11; that is,
# index 0 for fake data and 1-10 for the 10 classes of CIFAR.
num_classes = 11
netD = _netD(opt.ngpu, num_classes=num_classes)
# In order to calculate errG or errD_real, we need to sum the probabilities over all the classes (1 to K)
# ComplementCrossEntropyLoss is a loss function that performs this task
# We can specify a default except_index that corresponds to a fake label. In this case, we use index=0
from ComplementCrossEntropyLoss import ComplementCrossEntropyLoss
criterion = nn.CrossEntropyLoss()
# use the default index = 0 - equivalent to summing all other probabilities
criterion_comp = ComplementCrossEntropyLoss(except_index=0)
from models.distributions import Normal
from models.bayes import NoiseLoss, PriorLoss
# Finally, initialize the ``optimizers''
# Since we keep track of a set of parameters, we also need a set of
# ``optimizers''
if opt.d_optim == 'adam':
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(0.5, 0.999))
elif opt.d_optim == 'sgd':
optimizerD = torch.optim.SGD(netD.parameters(), lr=opt.lr,
momentum=0.9,
nesterov=True,
weight_decay=1e-4)
optimizerGs = []
for netG in netGs:
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(0.5, 0.999))
optimizerGs.append(optimizerG)
# since the log posterior is the average per sample, we also scale down the prior and the noise
gprior_criterion = PriorLoss(prior_std=1., observed=1000.)
gnoise_criterion = NoiseLoss(params=netGs[0].parameters(), scale=math.sqrt(2*opt.gnoise_alpha/opt.lr), observed=1000.)
dprior_criterion = PriorLoss(prior_std=1., observed=50000.)
dnoise_criterion = NoiseLoss(params=netD.parameters(), scale=math.sqrt(2*opt.dnoise_alpha*opt.lr), observed=50000.)
# Fixed noise for data generation
fixed_noise = torch.FloatTensor(opt.batchSize, opt.nz, 1, 1).normal_(0, 1).cuda()
fixed_noise = Variable(fixed_noise)
# initialize input variables and use CUDA (optional)
input = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
noise = torch.FloatTensor(opt.batchSize, opt.nz, 1, 1)
label = torch.FloatTensor(opt.batchSize)
real_label = 1
fake_label = 0
if opt.cuda:
netD.cuda()
for netG in netGs:
netG.cuda()
criterion.cuda()
criterion_comp.cuda()
input, label = input.cuda(), label.cuda()
noise = noise.cuda()
# fully supervised
netD_fullsup = _netD(opt.ngpu, num_classes=num_classes)
netD_fullsup.apply(weights_init)
criterion_fullsup = nn.CrossEntropyLoss()
if opt.d_optim == 'adam':
optimizerD_fullsup = optim.Adam(netD_fullsup.parameters(), lr=opt.lr, betas=(0.5, 0.999))
else:
optimizerD_fullsup = optim.SGD(netD_fullsup.parameters(), lr=opt.lr,
momentum=0.9,
nesterov=True,
weight_decay=1e-4)
if opt.cuda:
netD_fullsup.cuda()
criterion_fullsup.cuda()
# We define a class to calculate the accuracy on test set
# to test the performance of semi-supervised training
def get_test_accuracy(model_d, iteration, label='semi'):
# don't forget to do model_d.eval() before doing evaluation
top1 = AverageMeter()
for i, (input, target) in enumerate(dataloader_test):
target = target.cuda()
input = input.cuda()
input_var = torch.autograd.Variable(input.cuda(), volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
output = model_d(input_var)
probs = output.data[:, 1:] # discard the zeroth index
prec1 = accuracy(probs, target, topk=(1,))[0]
top1.update(prec1[0], input.size(0))
if i % 50 == 0:
print("{} Test: [{}/{}]\t Prec@1 {top1.val:.3f} ({top1.avg:.3f})"\
.format(label, i, len(dataloader_test), top1=top1))
print('{label} Test Prec@1 {top1.avg:.2f}'.format(label=label, top1=top1))
log_value('test_acc_{}'.format(label), top1.avg, iteration)
iteration = 0
for epoch in range(opt.niter):
top1 = AverageMeter()
top1_weakD = AverageMeter()
for i, data in enumerate(dataloader):
iteration += 1
#######
# 1. real input
netD.zero_grad()
_input, _ = data
batch_size = _input.size(0)
if opt.cuda:
_input = _input.cuda()
input.resize_as_(_input).copy_(_input)
label.resize_(batch_size).fill_(real_label)
inputv = Variable(input)
labelv = Variable(label)
output = netD(inputv)
errD_real = criterion_comp(output)
errD_real.backward()
# calculate D_x, the probability that real data are classified
D_x = 1 - torch.nn.functional.softmax(output).data[:, 0].mean()
#######
# 2. Generated input
fakes = []
for _idxz in range(opt.numz):
noise.resize_(batch_size, opt.nz, 1, 1).normal_(0, 1)
noisev = Variable(noise)
for _idxm in range(opt.num_mcmc):
idx = _idxz*opt.num_mcmc + _idxm
netG = netGs[idx]
_fake = netG(noisev)
fakes.append(_fake)
fake = torch.cat(fakes)
output = netD(fake.detach())
labelv = Variable(torch.LongTensor(fake.data.shape[0]).cuda().fill_(fake_label))
errD_fake = criterion(output, labelv)
errD_fake.backward()
D_G_z1 = 1 - torch.nn.functional.softmax(output).data[:, 0].mean()
#######
# 3. Labeled Data Part (for semi-supervised learning)
for ii, (input_sup, target_sup) in enumerate(dataloader_semi):
input_sup, target_sup = input_sup.cuda(), target_sup.cuda()
break
input_sup_v = Variable(input_sup.cuda())
# convert target indicies from 0 to 9 to 1 to 10
target_sup_v = Variable( (target_sup + 1).cuda())
output_sup = netD(input_sup_v)
err_sup = criterion(output_sup, target_sup_v)
err_sup.backward()
prec1 = accuracy(output_sup.data, target_sup + 1, topk=(1,))[0]
top1.update(prec1[0], input_sup.size(0))
if opt.bayes:
errD_prior = dprior_criterion(netD.parameters())
errD_prior.backward()
errD_noise = dnoise_criterion(netD.parameters())
errD_noise.backward()
errD = errD_real + errD_fake + err_sup + errD_prior + errD_noise
else:
errD = errD_real + errD_fake + err_sup
optimizerD.step()
# 4. Generator
for netG in netGs:
netG.zero_grad()
labelv = Variable(torch.FloatTensor(fake.data.shape[0]).cuda().fill_(real_label))
output = netD(fake)
errG = criterion_comp(output)
if opt.bayes:
for netG in netGs:
errG += gprior_criterion(netG.parameters())
errG += gnoise_criterion(netG.parameters())
errG.backward()
D_G_z2 = 1 - torch.nn.functional.softmax(output).data[:, 0].mean()
for optimizerG in optimizerGs:
optimizerG.step()
# 5. Fully supervised training (running in parallel for comparison)
netD_fullsup.zero_grad()
input_fullsup = Variable(input_sup)
target_fullsup = Variable((target_sup + 1))
output_fullsup = netD_fullsup(input_fullsup)
err_fullsup = criterion_fullsup(output_fullsup, target_fullsup)
optimizerD_fullsup.zero_grad()
err_fullsup.backward()
optimizerD_fullsup.step()
# 6. get test accuracy after every interval
if iteration % opt.stats_interval == 0:
# get test accuracy on train and test
netD.eval()
get_test_accuracy(netD, iteration, label='semi')
get_test_accuracy(netD_fullsup, iteration, label='sup')
netD.train()
# 7. Report for this iteration
cur_val, ave_val = top1.val, top1.avg
log_value('train_acc', top1.avg, iteration)
print('[%d/%d][%d/%d] Loss_D: %.2f Loss_G: %.2f D(x): %.2f D(G(z)): %.2f / %.2f | Acc %.1f / %.1f'
% (epoch, opt.niter, i, len(dataloader),
errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2, cur_val, ave_val))
# after each epoch, save images
vutils.save_image(_input,
'%s/real_samples.png' % opt.outf,
normalize=True)
for _zid in range(opt.numz):
for _mid in range(opt.num_mcmc):
idx = _zid*opt.num_mcmc + _mid
netG = netGs[idx]
fake = netG(fixed_noise)
vutils.save_image(fake.data,
'%s/fake_samples_epoch_%03d_G_z%02d_m%02d.png' % (opt.outf, epoch, _zid, _mid),
normalize=True)
for ii, netG in enumerate(netGs):
torch.save(netG.state_dict(), '%s/netG%d_epoch_%d.pth' % (opt.outf, ii, epoch))
torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, epoch))
torch.save(netD_fullsup.state_dict(), '%s/netD_fullsup_epoch_%d.pth' % (opt.outf, epoch))
from tensorflow.python.summary import event_accumulator
import pandas as pd
from plotnine import *
ea = event_accumulator.EventAccumulator(opt.outf)
ea.Reload()
_df1 = pd.DataFrame(ea.Scalars('test_acc_semi'))
_df2 = pd.DataFrame(ea.Scalars('test_acc_sup'))
df = pd.DataFrame()
df['Iteration'] = pd.concat([_df1['step'], _df2['step']])
df['Accuracy'] = pd.concat([_df1['value'], _df2['value']])
df['Classification'] = ['BayesGAN']*len(_df1['step']) + ['Baseline']*len(_df2['step'])
```
The results show that the Bayesian discriminator trained with the Bayesian generator outperforms the discriminator trained on partial data.
```
%matplotlib inline
p = ggplot(df, aes(x='Iteration', y='Accuracy', color='Classification', label='Classification')) + geom_point(size=0.5)
print(p)
```
After training for 50 epochs, below are the samples generated by four different parameter settings $\theta_g$. Note that different parameters tend to have different artistic styles.




Note: This code is adapted from the implementation by Saatchi and Wilson in TensorFlow (https://github.com/andrewgordonwilson/bayesgan) and the DCGAN code from the PyTorch examples (https://github.com/pytorch/examples/tree/master/dcgan).
|
github_jupyter
|
```
import os
os.environ['CUDA_VISIBLE_DEVICES']='0'
from fasterai.visualize import *
plt.style.use('dark_background')
#Adjust render_factor (int) if image doesn't look quite right (max 64 on 11GB GPU). The default here works for most photos.
#It is literally just a number that gets multiplied by 16 to give the square render resolution.
#Note that this doesn't affect the resolution of the final output; the output is the same resolution as the input.
#Example: render_factor=21 => color is rendered at 16x21 = 336x336 px.
render_factor=35
vis = get_image_colorizer(render_factor=render_factor, artistic=False)
#vis = get_video_colorizer(render_factor=render_factor).vis
vis.plot_transformed_image("test_images/poolparty.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/1852GatekeepersWindsor.jpg", render_factor=44, compare=True)
vis.plot_transformed_image("test_images/Chief.jpg", render_factor=10, compare=True)
vis.plot_transformed_image("test_images/1850SchoolForGirls.jpg", render_factor=42, compare=True)
vis.plot_transformed_image("test_images/AtlanticCityBeach1905.jpg", render_factor=32, compare=True)
vis.plot_transformed_image("test_images/CottonMillWorkers1913.jpg", compare=True)
vis.plot_transformed_image("test_images/BrooklynNavyYardHospital.jpg", compare=True)
vis.plot_transformed_image("test_images/FinnishPeasant1867.jpg", compare=True)
vis.plot_transformed_image("test_images/AtlanticCity1905.png", render_factor=40, compare=True)
vis.plot_transformed_image("test_images/PushingCart.jpg", render_factor=24, compare=True)
vis.plot_transformed_image("test_images/Drive1905.jpg", compare=True)
vis.plot_transformed_image("test_images/IronLung.png", render_factor=26, compare=True)
vis.plot_transformed_image("test_images/FamilyWithDog.jpg", compare=True)
vis.plot_transformed_image("test_images/DayAtSeaBelgium.jpg", compare=True)
vis.plot_transformed_image("test_images/marilyn_woods.jpg", render_factor=16, compare=True)
vis.plot_transformed_image("test_images/OldWomanSweden1904.jpg", render_factor=20, compare=True)
vis.plot_transformed_image("test_images/WomenTapingPlanes.jpg", compare=True)
vis.plot_transformed_image("test_images/overmiller.jpg", render_factor=30, compare=True)
vis.plot_transformed_image("test_images/BritishDispatchRider.jpg", render_factor=16, compare=True)
vis.plot_transformed_image("test_images/MuseauNacionalDosCoches.jpg", render_factor=19, compare=True)
vis.plot_transformed_image("test_images/abe.jpg", render_factor=13, compare=True)
vis.plot_transformed_image("test_images/RossCorbettHouseCork.jpg", render_factor=40, compare=True)
vis.plot_transformed_image("test_images/HPLabelleOfficeMontreal.jpg", render_factor=44, compare=True)
vis.plot_transformed_image("test_images/einstein_beach.jpg", render_factor=32, compare=True)
vis.plot_transformed_image("test_images/airmen1943.jpg", compare=True)
vis.plot_transformed_image("test_images/20sWoman.jpg", render_factor=24, compare=True)
vis.plot_transformed_image("test_images/egypt-1.jpg", render_factor=18, compare=True)
vis.plot_transformed_image("test_images/Rutherford_Hayes.jpg", compare=True)
vis.plot_transformed_image("test_images/einstein_portrait.jpg", render_factor=15, compare=True)
vis.plot_transformed_image("test_images/pinkerton.jpg", render_factor=7, compare=True)
vis.plot_transformed_image("test_images/WaltWhitman.jpg", render_factor=9, compare=True)
vis.plot_transformed_image("test_images/dorothea-lange.jpg", render_factor=18, compare=True)
vis.plot_transformed_image("test_images/Hemmingway2.jpg", render_factor=22, compare=True)
vis.plot_transformed_image("test_images/hemmingway.jpg", render_factor=14, compare=True)
vis.plot_transformed_image("test_images/smoking_kid.jpg", render_factor=35, compare=True)
vis.plot_transformed_image("test_images/teddy_rubble.jpg", render_factor=42, compare=True)
vis.plot_transformed_image("test_images/dustbowl_2.jpg", render_factor=16, compare=True)
vis.plot_transformed_image("test_images/camera_man.jpg", render_factor=25, compare=True)
vis.plot_transformed_image("test_images/migrant_mother.jpg", render_factor=32, compare=True)
vis.plot_transformed_image("test_images/marktwain.jpg", render_factor=14, compare=True)
vis.plot_transformed_image("test_images/HelenKeller.jpg", render_factor=35, compare=True)
vis.plot_transformed_image("test_images/Evelyn_Nesbit.jpg", render_factor=25, compare=True)
vis.plot_transformed_image("test_images/Eddie-Adams.jpg", compare=True)
vis.plot_transformed_image("test_images/soldier_kids.jpg", compare=True)
vis.plot_transformed_image("test_images/AnselAdamsYosemite.jpg", compare=True)
vis.plot_transformed_image("test_images/unnamed.jpg", render_factor=28, compare=True)
vis.plot_transformed_image("test_images/workers_canyon.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/CottonMill.jpg", compare=True)
vis.plot_transformed_image("test_images/JudyGarland.jpeg", compare=True)
vis.plot_transformed_image("test_images/kids_pit.jpg", render_factor=30, compare=True)
vis.plot_transformed_image("test_images/last_samurai.jpg", render_factor=22, compare=True)
vis.plot_transformed_image("test_images/AnselAdamsWhiteChurch.jpg", render_factor=25, compare=True)
vis.plot_transformed_image("test_images/opium.jpg", render_factor=30, compare=True)
vis.plot_transformed_image("test_images/dorothea_lange_2.jpg", render_factor=42, compare=True)
vis.plot_transformed_image("test_images/rgs.jpg", compare=True)
vis.plot_transformed_image("test_images/wh-auden.jpg", compare=True)
vis.plot_transformed_image("test_images/w-b-yeats.jpg", compare=True)
vis.plot_transformed_image("test_images/marilyn_portrait.jpg", compare=True)
vis.plot_transformed_image("test_images/wilson-slaverevivalmeeting.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/ww1_trench.jpg", render_factor=18, compare=True)
vis.plot_transformed_image("test_images/women-bikers.png", render_factor=23, compare=True)
vis.plot_transformed_image("test_images/Unidentified1855.jpg", render_factor=19, compare=True)
vis.plot_transformed_image("test_images/skycrapper_lunch.jpg", render_factor=25, compare=True)
vis.plot_transformed_image("test_images/sioux.jpg", render_factor=28, compare=True)
vis.plot_transformed_image("test_images/school_kids.jpg", render_factor=20, compare=True)
vis.plot_transformed_image("test_images/royal_family.jpg", render_factor=42, compare=True)
vis.plot_transformed_image("test_images/redwood_lumberjacks.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/poverty.jpg", render_factor=40, compare=True)
vis.plot_transformed_image("test_images/paperboy.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/NativeAmericans.jpg", render_factor=21, compare=True)
vis.plot_transformed_image("test_images/helmut_newton-.jpg", compare=True)
vis.plot_transformed_image("test_images/Greece1911.jpg", render_factor=44, compare=True)
vis.plot_transformed_image("test_images/FatMenClub.jpg", render_factor=18, compare=True)
vis.plot_transformed_image("test_images/EgyptColosus.jpg", compare=True)
vis.plot_transformed_image("test_images/egypt-2.jpg", compare=True)
vis.plot_transformed_image("test_images/dustbowl_sd.jpg", compare=True)
vis.plot_transformed_image("test_images/dustbowl_people.jpg", render_factor=24, compare=True)
vis.plot_transformed_image("test_images/dustbowl_5.jpg", compare=True)
vis.plot_transformed_image("test_images/dustbowl_1.jpg", compare=True)
vis.plot_transformed_image("test_images/DriveThroughGiantTree.jpg", render_factor=21, compare=True)
vis.plot_transformed_image("test_images/covered-wagons-traveling.jpg", compare=True)
vis.plot_transformed_image("test_images/civil-war_2.jpg", render_factor=42, compare=True)
vis.plot_transformed_image("test_images/civil_war_4.jpg", compare=True)
vis.plot_transformed_image("test_images/civil_war_3.jpg", render_factor=28, compare=True)
vis.plot_transformed_image("test_images/civil_war.jpg", compare=True)
vis.plot_transformed_image("test_images/BritishSlum.jpg", render_factor=30, compare=True)
vis.plot_transformed_image("test_images/bicycles.jpg", render_factor=27, compare=True)
vis.plot_transformed_image("test_images/brooklyn_girls_1940s.jpg", compare=True)
vis.plot_transformed_image("test_images/40sCouple.jpg", render_factor=21, compare=True)
vis.plot_transformed_image("test_images/1946Wedding.jpg", compare=True)
vis.plot_transformed_image("test_images/Dolores1920s.jpg", render_factor=18, compare=True)
vis.plot_transformed_image("test_images/TitanicGym.jpg", render_factor=26, compare=True)
vis.plot_transformed_image("test_images/FrenchVillage1950s.jpg", render_factor=41, compare=True)
vis.plot_transformed_image("test_images/FrenchVillage1950s.jpg", render_factor=32, compare=True)
vis.plot_transformed_image("test_images/ClassDivide1930sBrittain.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/1870sSphinx.jpg", compare=True)
vis.plot_transformed_image("test_images/1890Surfer.png", render_factor=37, compare=True)
vis.plot_transformed_image("test_images/TV1930s.jpg", render_factor=43, compare=True)
vis.plot_transformed_image("test_images/1864UnionSoldier.jpg", compare=True)
vis.plot_transformed_image("test_images/1890sMedStudents.jpg", render_factor=18, compare=True)
vis.plot_transformed_image("test_images/BellyLaughWWI.jpg", compare=True)
vis.plot_transformed_image("test_images/PiggyBackRide.jpg", compare=True)
vis.plot_transformed_image("test_images/HealingTree.jpg", compare=True)
vis.plot_transformed_image("test_images/ManPile.jpg", compare=True)
vis.plot_transformed_image("test_images/1910Bike.jpg", compare=True)
vis.plot_transformed_image("test_images/FreeportIL.jpg", compare=True)
vis.plot_transformed_image("test_images/DutchBabyCoupleEllis.jpg", compare=True)
vis.plot_transformed_image("test_images/InuitWoman1903.png", compare=True)
vis.plot_transformed_image("test_images/1920sDancing.jpg", compare=True)
vis.plot_transformed_image("test_images/AirmanDad.jpg", render_factor=13, compare=True)
vis.plot_transformed_image("test_images/1910Racket.png", render_factor=30, compare=True)
vis.plot_transformed_image("test_images/1880Paris.jpg", render_factor=16, compare=True)
vis.plot_transformed_image("test_images/Deadwood1860s.jpg", render_factor=13, compare=True)
vis.plot_transformed_image("test_images/1860sSamauris.jpg", render_factor=43, compare=True)
vis.plot_transformed_image("test_images/LondonUnderground1860.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/Mid1800sSisters.jpg", compare=True)
vis.plot_transformed_image("test_images/1860Girls.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/SanFran1851.jpg", render_factor=44, compare=True)
vis.plot_transformed_image("test_images/Kabuki1870s.png", render_factor=8, compare=True)
vis.plot_transformed_image("test_images/Mormons1870s.jpg", render_factor=44, compare=True)
vis.plot_transformed_image("test_images/EgyptianWomenLate1800s.jpg", render_factor=44, compare=True)
vis.plot_transformed_image("test_images/PicadillyLate1800s.jpg", render_factor=26, compare=True)
vis.plot_transformed_image("test_images/SutroBaths1880s.jpg", compare=True)
vis.plot_transformed_image("test_images/1880sBrooklynBridge.jpg", compare=True)
vis.plot_transformed_image("test_images/ChinaOpiumc1880.jpg", render_factor=30, compare=True)
vis.plot_transformed_image("test_images/Locomotive1880s.jpg", render_factor=9, compare=True)
vis.plot_transformed_image("test_images/ViennaBoys1880s.png", compare=True)
vis.plot_transformed_image("test_images/VictorianDragQueen1880s.png", compare=True)
vis.plot_transformed_image("test_images/Sami1880s.jpg", render_factor=44, compare=True)
vis.plot_transformed_image("test_images/ArkansasCowboys1880s.jpg", render_factor=22, compare=True)
vis.plot_transformed_image("test_images/Ballet1890Russia.jpg", render_factor=40, compare=True)
vis.plot_transformed_image("test_images/Rottindean1890s.png", render_factor=20, compare=True)
vis.plot_transformed_image("test_images/1890sPingPong.jpg", compare=True)
vis.plot_transformed_image("test_images/London1937.png", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/Harlem1932.jpg", render_factor=37, compare=True)
vis.plot_transformed_image("test_images/OregonTrail1870s.jpg", render_factor=40, compare=True)
vis.plot_transformed_image("test_images/EasterNyc1911.jpg", render_factor=19, compare=True)
vis.plot_transformed_image("test_images/1899NycBlizzard.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/Edinburgh1920s.jpg", render_factor=17, compare=True)
vis.plot_transformed_image("test_images/1890sShoeShopOhio.jpg", render_factor=46, compare=True)
vis.plot_transformed_image("test_images/1890sTouristsEgypt.png", render_factor=40, compare=True)
vis.plot_transformed_image("test_images/1938Reading.jpg", render_factor=19, compare=True)
vis.plot_transformed_image("test_images/1850Geography.jpg", compare=True)
vis.plot_transformed_image("test_images/1901Electrophone.jpg", render_factor=10, compare=True)
for i in range(8, 47):
vis.plot_transformed_image("test_images/1901Electrophone.jpg", render_factor=i, compare=True)
vis.plot_transformed_image("test_images/Texas1938Woman.png", render_factor=38, compare=True)
vis.plot_transformed_image("test_images/MaioreWoman1895NZ.jpg", compare=True)
vis.plot_transformed_image("test_images/WestVirginiaHouse.jpg", compare=True)
vis.plot_transformed_image("test_images/1920sGuadalope.jpg", compare=True)
vis.plot_transformed_image("test_images/1909Chicago.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/1920sFarmKid.jpg", compare=True)
vis.plot_transformed_image("test_images/ParisLate1800s.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/1900sDaytonaBeach.png", render_factor=23, compare=True)
vis.plot_transformed_image("test_images/1930sGeorgia.jpg", compare=True)
vis.plot_transformed_image("test_images/NorwegianBride1920s.jpg", render_factor=30, compare=True)
vis.plot_transformed_image("test_images/Depression.jpg", compare=True)
vis.plot_transformed_image("test_images/1888Slum.jpg", render_factor=30, compare=True)
vis.plot_transformed_image("test_images/LivingRoom1920Sweden.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/1896NewsBoyGirl.jpg", compare=True)
vis.plot_transformed_image("test_images/PetDucks1927.jpg", compare=True)
vis.plot_transformed_image("test_images/1899SodaFountain.jpg", render_factor=46, compare=True)
vis.plot_transformed_image("test_images/TimesSquare1955.jpg", compare=True)
vis.plot_transformed_image("test_images/PuppyGify.jpg", compare=True)
vis.plot_transformed_image("test_images/1890CliffHouseSF.jpg", render_factor=30, compare=True)
vis.plot_transformed_image("test_images/1908FamilyPhoto.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/1900sSaloon.jpg", render_factor=43, compare=True)
vis.plot_transformed_image("test_images/1890BostonHospital.jpg", render_factor=40, compare=True)
vis.plot_transformed_image("test_images/1870Girl.jpg", compare=True)
vis.plot_transformed_image("test_images/AustriaHungaryWomen1890s.jpg", compare=True)
vis.plot_transformed_image("test_images/Shack.jpg",render_factor=42, compare=True)
vis.plot_transformed_image("test_images/Apsaroke1908.png", render_factor=35, compare=True)
vis.plot_transformed_image("test_images/1948CarsGrandma.jpg", compare=True)
vis.plot_transformed_image("test_images/PlanesManhattan1931.jpg", compare=True)
vis.plot_transformed_image("test_images/WorriedKid1940sNyc.jpg", compare=True)
vis.plot_transformed_image("test_images/1920sFamilyPhoto.jpg", compare=True)
vis.plot_transformed_image("test_images/CatWash1931.jpg", compare=True)
vis.plot_transformed_image("test_images/1940sBeerRiver.jpg", compare=True)
vis.plot_transformed_image("test_images/VictorianLivingRoom.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/1897BlindmansBluff.jpg", compare=True)
vis.plot_transformed_image("test_images/1874Mexico.png", compare=True)
vis.plot_transformed_image("test_images/MadisonSquare1900.jpg", render_factor=46, compare=True)
vis.plot_transformed_image("test_images/1867MusicianConstantinople.jpg", compare=True)
vis.plot_transformed_image("test_images/1925Girl.jpg", render_factor=25, compare=True)
vis.plot_transformed_image("test_images/1907Cowboys.jpg", render_factor=28, compare=True)
vis.plot_transformed_image("test_images/WWIIPeeps.jpg", render_factor=37, compare=True)
vis.plot_transformed_image("test_images/BabyBigBoots.jpg", render_factor=40, compare=True)
vis.plot_transformed_image("test_images/1895BikeMaidens.jpg", render_factor=25, compare=True)
vis.plot_transformed_image("test_images/IrishLate1800s.jpg", render_factor=25, compare=True)
vis.plot_transformed_image("test_images/LibraryOfCongress1910.jpg", render_factor=21, compare=True)
vis.plot_transformed_image("test_images/1875Olds.jpg", render_factor=16, compare=True)
vis.plot_transformed_image("test_images/SenecaNative1908.jpg", render_factor=30, compare=True)
vis.plot_transformed_image("test_images/WWIHospital.jpg", render_factor=40, compare=True)
vis.plot_transformed_image("test_images/1892WaterLillies.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/GreekImmigrants1905.jpg", render_factor=25, compare=True)
vis.plot_transformed_image("test_images/FatMensShop.jpg", render_factor=21, compare=True)
vis.plot_transformed_image("test_images/KidCage1930s.png", compare=True)
vis.plot_transformed_image("test_images/FarmWomen1895.jpg", compare=True)
vis.plot_transformed_image("test_images/NewZealand1860s.jpg", compare=True)
vis.plot_transformed_image("test_images/JerseyShore1905.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/LondonKidsEarly1900s.jpg", compare=True)
vis.plot_transformed_image("test_images/NYStreetClean1906.jpg", compare=True)
vis.plot_transformed_image("test_images/Boston1937.jpg", compare=True)
vis.plot_transformed_image("test_images/Cork1905.jpg", render_factor=28, compare=True)
vis.plot_transformed_image("test_images/BoxedBedEarly1900s.jpg", compare=True)
vis.plot_transformed_image("test_images/ZoologischerGarten1898.jpg", compare=True)
vis.plot_transformed_image("test_images/EmpireState1930.jpg", compare=True)
vis.plot_transformed_image("test_images/Agamemnon1919.jpg", render_factor=40, compare=True)
vis.plot_transformed_image("test_images/AppalachianLoggers1901.jpg", compare=True)
vis.plot_transformed_image("test_images/WWISikhs.jpg", compare=True)
vis.plot_transformed_image("test_images/MementoMori1865.jpg", compare=True)
vis.plot_transformed_image("test_images/RepBrennanRadio1922.jpg", render_factor=43, compare=True)
vis.plot_transformed_image("test_images/Late1800sNative.jpg", render_factor=20, compare=True)
vis.plot_transformed_image("test_images/GasPrices1939.jpg", render_factor=30, compare=True)
vis.plot_transformed_image("test_images/1933RockefellerCenter.jpg", compare=True)
vis.plot_transformed_image("test_images/Scotland1919.jpg", compare=True)
vis.plot_transformed_image("test_images/1920CobblersShopLondon.jpg", compare=True)
vis.plot_transformed_image("test_images/1909ParisFirstFemaleTaxisDriver.jpg", compare=True)
vis.plot_transformed_image("test_images/HoovervilleSeattle1932.jpg", compare=True)
vis.plot_transformed_image("test_images/ElephantLondon1934.png", compare=True)
vis.plot_transformed_image("test_images/Jane_Addams.jpg", compare=True)
vis.plot_transformed_image("test_images/AnselAdamsAdobe.jpg", compare=True)
vis.plot_transformed_image("test_images/CricketLondon1930.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/Donegal1907Yarn.jpg", render_factor=32, compare=True)
vis.plot_transformed_image("test_images/AnselAdamsChurch.jpg", compare=True)
vis.plot_transformed_image("test_images/BreadDelivery1920sIreland.jpg", render_factor=20, compare=True)
vis.plot_transformed_image("test_images/BritishTeaBombay1890s.png", compare=True)
vis.plot_transformed_image("test_images/CafeParis1928.jpg", render_factor=35, compare=True)
vis.plot_transformed_image("test_images/BigManTavern1908NYC.jpg", compare=True)
vis.plot_transformed_image("test_images/Cars1890sIreland.jpg", compare=True)
vis.plot_transformed_image("test_images/GalwayIreland1902.jpg", render_factor=35, compare=True)
vis.plot_transformed_image("test_images/HomeIreland1924.jpg", render_factor=40, compare=True)
vis.plot_transformed_image("test_images/HydeParkLondon1920s.jpg", render_factor=30, compare=True)
vis.plot_transformed_image("test_images/1929LondonOverFleetSt.jpg", render_factor=25, compare=True)
vis.plot_transformed_image("test_images/AccordianKid1900Paris.jpg", compare=True)
vis.plot_transformed_image("test_images/AnselAdamsBuildings.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/AthleticClubParis1913.jpg", render_factor=42, compare=True)
vis.plot_transformed_image("test_images/BombedLibraryLondon1940.jpg", compare=True)
vis.plot_transformed_image("test_images/Boston1937.jpg", render_factor=30, compare=True)
vis.plot_transformed_image("test_images/BoulevardDuTemple1838.jpg", render_factor=25, compare=True)
vis.plot_transformed_image("test_images/BumperCarsParis1930.jpg", render_factor=25, compare=True)
vis.plot_transformed_image("test_images/CafeTerrace1925Paris.jpg", render_factor=24, compare=True)
vis.plot_transformed_image("test_images/CoalDeliveryParis1915.jpg", render_factor=37, compare=True)
vis.plot_transformed_image("test_images/CorkKids1910.jpg", render_factor=32, compare=True)
vis.plot_transformed_image("test_images/DeepSeaDiver1915.png", render_factor=16, compare=True)
vis.plot_transformed_image("test_images/EastEndLondonStreetKids1901.jpg", compare=True)
vis.plot_transformed_image("test_images/FreightTrainTeens1934.jpg", compare=True)
vis.plot_transformed_image("test_images/HarrodsLondon1920.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/HerbSeller1899Paris.jpg", render_factor=17, compare=True)
vis.plot_transformed_image("test_images/CalcuttaPoliceman1920.jpg", render_factor=20, compare=True)
vis.plot_transformed_image("test_images/ElectricScooter1915.jpeg", render_factor=20, compare=True)
vis.plot_transformed_image("test_images/GreatGrandparentsIrelandEarly1900s.jpg", compare=True)
vis.plot_transformed_image("test_images/HalloweenEarly1900s.jpg", render_factor=11, compare=True)
vis.plot_transformed_image("test_images/IceManLondon1919.jpg", compare=True)
vis.plot_transformed_image("test_images/LeBonMarcheParis1875.jpg", compare=True)
vis.plot_transformed_image("test_images/LittleAirplane1934.jpg", render_factor=35, compare=True)
vis.plot_transformed_image("test_images/RoyalUniversityMedStudent1900Ireland.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/LewisTomalinLondon1895.png", render_factor=25, compare=True)
vis.plot_transformed_image("test_images/SunHelmetsLondon1933.jpg", render_factor=40, compare=True)
vis.plot_transformed_image("test_images/Killarney1910.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/LondonSheep1920s.png", compare=True)
vis.plot_transformed_image("test_images/PostOfficeVermont1914.png", compare=True)
vis.plot_transformed_image("test_images/ServantsBessboroughHouse1908Ireland.jpg", compare=True)
vis.plot_transformed_image("test_images/WaterfordIreland1909.jpg", render_factor=35, compare=True)
vis.plot_transformed_image("test_images/Lisbon1919.jpg", compare=True)
vis.plot_transformed_image("test_images/London1918WartimeClothesManufacture.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/LondonHeatWave1935.png", compare=True)
vis.plot_transformed_image("test_images/LondonsSmallestShop1900.jpg", compare=True)
vis.plot_transformed_image("test_images/MetropolitanDistrictRailway1869London.jpg", compare=True)
vis.plot_transformed_image("test_images/NativeWoman1926.jpg", render_factor=21, compare=True)
vis.plot_transformed_image("test_images/PaddysMarketCork1900s.jpg", compare=True)
vis.plot_transformed_image("test_images/Paris1920Cart.jpg", compare=True)
vis.plot_transformed_image("test_images/ParisLadies1910.jpg", render_factor=20, compare=True)
vis.plot_transformed_image("test_images/ParisLadies1930s.jpg", compare=True)
vis.plot_transformed_image("test_images/Sphinx.jpeg", compare=True)
vis.plot_transformed_image("test_images/TheatreGroupBombay1875.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/WorldsFair1900Paris.jpg", compare=True)
vis.plot_transformed_image("test_images/London1850Coach.jpg", render_factor=25, compare=True)
vis.plot_transformed_image("test_images/London1900EastEndBlacksmith.jpg", compare=True)
vis.plot_transformed_image("test_images/London1930sCheetah.jpg", render_factor=42, compare=True)
vis.plot_transformed_image("test_images/LondonFireBrigadeMember1926.jpg", compare=True)
vis.plot_transformed_image("test_images/LondonGarbageTruck1910.jpg", compare=True)
vis.plot_transformed_image("test_images/LondonRailwayWork1931.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/LondonStreets1900.jpg", compare=True)
vis.plot_transformed_image("test_images/MuffinManlLondon1910.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/NativeCouple1912.jpg", render_factor=21, compare=True)
vis.plot_transformed_image("test_images/NewspaperCivilWar1863.jpg", compare=True)
vis.plot_transformed_image("test_images/PaddingtonStationLondon1907.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/Paris1899StreetDig.jpg", compare=True)
vis.plot_transformed_image("test_images/Paris1926.jpg", compare=True)
vis.plot_transformed_image("test_images/ParisWomenFurs1920s.jpg", render_factor=21, compare=True)
vis.plot_transformed_image("test_images/PeddlerParis1899.jpg", compare=True)
vis.plot_transformed_image("test_images/SchoolKidsConnemaraIreland1901.jpg", compare=True)
vis.plot_transformed_image("test_images/SecondHandClothesLondonLate1800s.jpg", render_factor=33, compare=True)
vis.plot_transformed_image("test_images/SoapBoxRacerParis1920s.jpg", render_factor=40, compare=True)
vis.plot_transformed_image("test_images/SoccerMotorcycles1923London.jpg", compare=True)
vis.plot_transformed_image("test_images/WalkingLibraryLondon1930.jpg", compare=True)
vis.plot_transformed_image("test_images/LondonStreetDoctor1877.png", render_factor=38, compare=True)
vis.plot_transformed_image("test_images/jacksonville.jpg", compare=True)
vis.plot_transformed_image("test_images/ZebraCarriageLondon1900.jpg", compare=True)
vis.plot_transformed_image("test_images/StreetGramaphonePlayerLondon1920s.png", compare=True)
vis.plot_transformed_image("test_images/YaleBranchBarnardsExpress.jpg", compare=True)
vis.plot_transformed_image("test_images/SynagogueInterior.PNG", compare=True)
vis.plot_transformed_image("test_images/ArmisticeDay1918.jpg", compare=True)
vis.plot_transformed_image("test_images/FlyingMachinesParis1909.jpg", render_factor=25, compare=True)
vis.plot_transformed_image("test_images/GreatAunt1920.jpg", compare=True)
vis.plot_transformed_image("test_images/NewBrunswick1915.jpg", compare=True)
vis.plot_transformed_image("test_images/ShoeMakerLate1800s.jpg", compare=True)
vis.plot_transformed_image("test_images/SpottedBull1908.jpg", compare=True)
vis.plot_transformed_image("test_images/TouristsGermany1904.jpg", render_factor=35, compare=True)
vis.plot_transformed_image("test_images/TunisianStudents1914.jpg", compare=True)
vis.plot_transformed_image("test_images/Yorktown1862.jpg", compare=True)
vis.plot_transformed_image("test_images/LondonFashion1911.png", compare=True)
vis.plot_transformed_image("test_images/1939GypsyKids.jpg", render_factor=37, compare=True)
vis.plot_transformed_image("test_images/1936OpiumShanghai.jpg", compare=True)
vis.plot_transformed_image("test_images/1923HollandTunnel.jpg", compare=True)
vis.plot_transformed_image("test_images/1939YakimaWAGirl.jpg", compare=True)
vis.plot_transformed_image("test_images/GoldenGateConstruction.jpg", render_factor=35, compare=True)
vis.plot_transformed_image("test_images/PostCivilWarAncestors.jpg", compare=True)
vis.plot_transformed_image("test_images/1939SewingBike.png", compare=True)
vis.plot_transformed_image("test_images/1930MaineSchoolBus.jpg", compare=True)
vis.plot_transformed_image("test_images/1913NewYorkConstruction.jpg", compare=True)
vis.plot_transformed_image("test_images/1945HiroshimaChild.jpg", compare=True)
vis.plot_transformed_image("test_images/1941GeorgiaFarmhouse.jpg", render_factor=43, compare=True)
vis.plot_transformed_image("test_images/1934UmbriaItaly.jpg", render_factor=21)
vis.plot_transformed_image("test_images/1900sLadiesTeaParty.jpg", compare=True)
vis.plot_transformed_image("test_images/1919WWIAviationOxygenMask.jpg", compare=True)
vis.plot_transformed_image("test_images/1900NJThanksgiving.jpg", compare=True)
vis.plot_transformed_image("test_images/1940Connecticut.jpg", render_factor=43, compare=True)
vis.plot_transformed_image("test_images/1940Connecticut.jpg", render_factor=i, compare=True)
vis.plot_transformed_image("test_images/1911ThanksgivingMaskers.jpg", render_factor=35, compare=True)
vis.plot_transformed_image("test_images/1910ThanksgivingMaskersII.jpg", compare=True)
vis.plot_transformed_image("test_images/1936PetToad.jpg", compare=True)
vis.plot_transformed_image("test_images/1908RookeriesLondon.jpg", compare=True)
vis.plot_transformed_image("test_images/1890sChineseImmigrants.jpg", render_factor=25, compare=True)
vis.plot_transformed_image("test_images/1897VancouverAmberlamps.jpg", compare=True)
vis.plot_transformed_image("test_images/1929VictorianCosplayLondon.jpg", render_factor=35, compare=True)
vis.plot_transformed_image("test_images/1959ParisFriends.png", render_factor=40, compare=True)
vis.plot_transformed_image("test_images/1925GypsyCampMaryland.jpg", render_factor=40, compare=True)
vis.plot_transformed_image("test_images/1941PoolTableGeorgia.jpg", render_factor=45, compare=True)
vis.plot_transformed_image("test_images/1900ParkDog.jpg", compare=True)
vis.plot_transformed_image("test_images/1886Hoop.jpg", compare=True)
vis.plot_transformed_image("test_images/1950sLondonPoliceChild.jpg", compare=True)
vis.plot_transformed_image("test_images/1886ProspectPark.jpg", compare=True)
vis.plot_transformed_image("test_images/1930sRooftopPoland.jpg", compare=True)
vis.plot_transformed_image("test_images/1919RevereBeach.jpg", compare=True)
vis.plot_transformed_image("test_images/1936ParisCafe.jpg", render_factor=46, compare=True)
vis.plot_transformed_image("test_images/1902FrenchYellowBellies.jpg", compare=True)
vis.plot_transformed_image("test_images/1940PAFamily.jpg", render_factor=42, compare=True)
vis.plot_transformed_image("test_images/1910Finland.jpg", render_factor=40, compare=True)
vis.plot_transformed_image("test_images/ZebraCarriageLondon1900.jpg", compare=True)
vis.plot_transformed_image("test_images/1904ChineseMan.jpg", compare=True)
vis.plot_transformed_image("test_images/CrystalPalaceLondon1854.PNG", compare=True)
vis.plot_transformed_image("test_images/James1.jpg", render_factor=15, compare=True)
vis.plot_transformed_image("test_images/James2.jpg", render_factor=20, compare=True)
vis.plot_transformed_image("test_images/James3.jpg", render_factor=19, compare=True)
vis.plot_transformed_image("test_images/James4.jpg", render_factor=30, compare=True)
vis.plot_transformed_image("test_images/James5.jpg", render_factor=32, compare=True)
vis.plot_transformed_image("test_images/James6.jpg", render_factor=28, compare=True)
```
|
github_jupyter
|
# AutoRec: Rating Prediction with Autoencoders
Although the matrix factorization model achieves decent performance on the rating prediction task, it is essentially a linear model. Thus, such models are not capable of capturing complex nonlinear and intricate relationships that may be predictive of users' preferences. In this section, we introduce a nonlinear neural network collaborative filtering model, AutoRec :cite:`Sedhain.Menon.Sanner.ea.2015`. It identifies collaborative filtering (CF) with an autoencoder architecture and aims to integrate nonlinear transformations into CF on the basis of explicit feedback. Neural networks have been proven to be capable of approximating any continuous function, making them suitable for addressing this limitation of matrix factorization and enriching its expressiveness.
On one hand, AutoRec has the same structure as an autoencoder which consists of an input layer, a hidden layer, and a reconstruction (output) layer. An autoencoder is a neural network that learns to copy its input to its output in order to code the inputs into the hidden (and usually low-dimensional) representations. In AutoRec, instead of explicitly embedding users/items into low-dimensional space, it uses the column/row of the interaction matrix as the input, then reconstructs the interaction matrix in the output layer.
On the other hand, AutoRec differs from a traditional autoencoder: rather than learning the hidden representations, AutoRec focuses on learning/reconstructing the output layer. It uses a partially observed interaction matrix as the input, aiming to reconstruct a completed rating matrix. In the meantime, the missing entries of the input are filled in the output layer via reconstruction for the purpose of recommendation.
There are two variants of AutoRec: user-based and item-based. For brevity, here we only introduce the item-based AutoRec. User-based AutoRec can be derived accordingly.
## Model
Let $\mathbf{R}_{*i}$ denote the $i^\mathrm{th}$ column of the rating matrix, where unknown ratings are set to zeros by default. The neural architecture is defined as:
$$
h(\mathbf{R}_{*i}) = f(\mathbf{W} \cdot g(\mathbf{V} \mathbf{R}_{*i} + \mu) + b)
$$
where $f(\cdot)$ and $g(\cdot)$ represent activation functions, $\mathbf{W}$ and $\mathbf{V}$ are weight matrices, $\mu$ and $b$ are biases. Let $h( \cdot )$ denote the whole network of AutoRec. The output $h(\mathbf{R}_{*i})$ is the reconstruction of the $i^\mathrm{th}$ column of the rating matrix.
The following objective function aims to minimize the reconstruction error:
$$
\underset{\mathbf{W},\mathbf{V},\mu, b}{\mathrm{argmin}} \sum_{i=1}^M{\parallel \mathbf{R}_{*i} - h(\mathbf{R}_{*i})\parallel_{\mathcal{O}}^2} +\lambda(\| \mathbf{W} \|_F^2 + \| \mathbf{V}\|_F^2)
$$
where $\| \cdot \|_{\mathcal{O}}$ means only the contribution of observed ratings are considered, that is, only weights that are associated with observed inputs are updated during back-propagation.
```
import mxnet as mx
from mxnet import autograd, gluon, np, npx
from mxnet.gluon import nn
from d2l import mxnet as d2l
npx.set_np()
```
## Implementing the Model
A typical autoencoder consists of an encoder and a decoder. The encoder projects the input to hidden representations and the decoder maps the hidden layer to the reconstruction layer. We follow this practice and create the encoder and decoder with dense layers. The activation of encoder is set to `sigmoid` by default and no activation is applied for decoder. Dropout is included after the encoding transformation to reduce over-fitting. The gradients of unobserved inputs are masked out to ensure that only observed ratings contribute to the model learning process.
```
class AutoRec(nn.Block):
def __init__(self, num_hidden, num_users, dropout=0.05):
super(AutoRec, self).__init__()
self.encoder = nn.Dense(num_hidden, activation='sigmoid',
use_bias=True)
self.decoder = nn.Dense(num_users, use_bias=True)
self.dropout = nn.Dropout(dropout)
def forward(self, input):
hidden = self.dropout(self.encoder(input))
pred = self.decoder(hidden)
if autograd.is_training(): # Mask the gradient during training
return pred * np.sign(input)
else:
return pred
```
## Reimplementing the Evaluator
Since the input and output have been changed, we need to reimplement the evaluation function, while we still use RMSE as the accuracy measure.
```
def evaluator(network, inter_matrix, test_data, devices):
scores = []
for values in inter_matrix:
feat = gluon.utils.split_and_load(values, devices, even_split=False)
scores.extend([network(i).asnumpy() for i in feat])
recons = np.array([item for sublist in scores for item in sublist])
# Calculate the test RMSE
rmse = np.sqrt(np.sum(np.square(test_data - np.sign(test_data) * recons))
/ np.sum(np.sign(test_data)))
return float(rmse)
```
## Training and Evaluating the Model
Now, let us train and evaluate AutoRec on the MovieLens dataset. Note that the evaluation feeds the training interaction matrix through the network and compares the reconstruction against the held-out test ratings. We can clearly see that the test RMSE is lower than that of the matrix factorization model, confirming the effectiveness of neural networks in the rating prediction task.
```
devices = d2l.try_all_gpus()
# Load the MovieLens 100K dataset
df, num_users, num_items = d2l.read_data_ml100k()
train_data, test_data = d2l.split_data_ml100k(df, num_users, num_items)
_, _, _, train_inter_mat = d2l.load_data_ml100k(train_data, num_users,
num_items)
_, _, _, test_inter_mat = d2l.load_data_ml100k(test_data, num_users,
num_items)
train_iter = gluon.data.DataLoader(train_inter_mat, shuffle=True,
last_batch="rollover", batch_size=256,
num_workers=d2l.get_dataloader_workers())
test_iter = gluon.data.DataLoader(np.array(train_inter_mat), shuffle=False,
last_batch="keep", batch_size=1024,
num_workers=d2l.get_dataloader_workers())
# Model initialization, training, and evaluation
net = AutoRec(500, num_users)
net.initialize(ctx=devices, force_reinit=True, init=mx.init.Normal(0.01))
lr, num_epochs, wd, optimizer = 0.002, 25, 1e-5, 'adam'
loss = gluon.loss.L2Loss()
trainer = gluon.Trainer(net.collect_params(), optimizer,
{"learning_rate": lr, 'wd': wd})
d2l.train_recsys_rating(net, train_iter, test_iter, loss, trainer, num_epochs,
devices, evaluator, inter_mat=test_inter_mat)
```
## Summary
* We can frame the matrix factorization algorithm with autoencoders, while integrating non-linear layers and dropout regularization.
* Experiments on the MovieLens 100K dataset show that AutoRec achieves better performance than matrix factorization.
## Exercises
* Vary the hidden dimension of AutoRec to see its impact on the model performance.
* Try to add more hidden layers. Is it helpful to improve the model performance?
* Can you find a better combination of decoder and encoder activation functions?
[Discussions](https://discuss.d2l.ai/t/401)
|
github_jupyter
|
# Automatic generation of Notebook using PyCropML
This notebook implements a crop model.
### Model Cumulttfrom
```
model_cumulttfrom <- function (calendarMoments_t1 = c('Sowing'),
calendarCumuls_t1 = c(0.0),
cumulTT = 8.0){
#'- Name: CumulTTFrom -Version: 1.0, -Time step: 1
#'- Description:
#' * Title: CumulTTFrom Model
#' * Author: Pierre Martre
#' * Reference: Modeling development phase in the
#' Wheat Simulation Model SiriusQuality.
#' See documentation at http://www1.clermont.inra.fr/siriusquality/?page_id=427
#' * Institution: INRA Montpellier
#' * Abstract: Calculate CumulTT
#'- inputs:
#' * name: calendarMoments_t1
#' ** description : List containing appearance of each stage at previous day
#' ** variablecategory : state
#' ** datatype : STRINGLIST
#' ** default : ['Sowing']
#' ** unit :
#' ** inputtype : variable
#' * name: calendarCumuls_t1
#' ** description : list containing for each stage occured its cumulated thermal times at previous day
#' ** variablecategory : state
#' ** datatype : DOUBLELIST
#' ** default : [0.0]
#' ** unit : °C d
#' ** inputtype : variable
#' * name: cumulTT
#' ** description : cumul TT at current date
#' ** datatype : DOUBLE
#' ** variablecategory : auxiliary
#' ** min : -200
#' ** max : 10000
#' ** default : 8.0
#' ** unit : °C d
#' ** inputtype : variable
#'- outputs:
#' * name: cumulTTFromZC_65
#' ** description : cumul TT from Anthesis to current date
#' ** variablecategory : auxiliary
#' ** datatype : DOUBLE
#' ** min : 0
#' ** max : 5000
#' ** unit : °C d
#' * name: cumulTTFromZC_39
#' ** description : cumul TT from FlagLeafLiguleJustVisible to current date
#' ** variablecategory : auxiliary
#' ** datatype : DOUBLE
#' ** min : 0
#' ** max : 5000
#' ** unit : °C d
#' * name: cumulTTFromZC_91
#' ** description : cumul TT from EndGrainFilling to current date
#' ** variablecategory : auxiliary
#' ** datatype : DOUBLE
#' ** min : 0
#' ** max : 5000
#' ** unit : °C d
cumulTTFromZC_65 <- 0.0
cumulTTFromZC_39 <- 0.0
cumulTTFromZC_91 <- 0.0
if ('Anthesis' %in% calendarMoments_t1)
{
cumulTTFromZC_65 <- cumulTT - calendarCumuls_t1[which(calendarMoments_t1 %in% 'Anthesis')]
}
if ('FlagLeafLiguleJustVisible' %in% calendarMoments_t1)
{
cumulTTFromZC_39 <- cumulTT - calendarCumuls_t1[which(calendarMoments_t1 %in% 'FlagLeafLiguleJustVisible')]
}
if ('EndGrainFilling' %in% calendarMoments_t1)
{
cumulTTFromZC_91 <- cumulTT - calendarCumuls_t1[which(calendarMoments_t1 %in% 'EndGrainFilling')]
}
return (list ("cumulTTFromZC_65" = cumulTTFromZC_65,"cumulTTFromZC_39" = cumulTTFromZC_39,"cumulTTFromZC_91" = cumulTTFromZC_91))
}
library(assertthat)
test_test_wheat1<-function(){
params= model_cumulttfrom(
calendarMoments_t1 = c("Sowing","Emergence","FloralInitiation","FlagLeafLiguleJustVisible","Heading","Anthesis"),
calendarCumuls_t1 = c(0.0,112.330110409888,354.582294511779,741.510096671757,853.999637026622,954.59002776961),
cumulTT = 972.970888983105
)
cumulTTFromZC_65_estimated = params$cumulTTFromZC_65
cumulTTFromZC_65_computed = 18.38
assert_that(all.equal(cumulTTFromZC_65_estimated, cumulTTFromZC_65_computed, scale=1, tol=0.2)==TRUE)
cumulTTFromZC_39_estimated = params$cumulTTFromZC_39
cumulTTFromZC_39_computed = 231.46
assert_that(all.equal(cumulTTFromZC_39_estimated, cumulTTFromZC_39_computed, scale=1, tol=0.2)==TRUE)
cumulTTFromZC_91_estimated = params$cumulTTFromZC_91
cumulTTFromZC_91_computed = 0
assert_that(all.equal(cumulTTFromZC_91_estimated, cumulTTFromZC_91_computed, scale=1, tol=0.2)==TRUE)
}
test_test_wheat1()
```
|
github_jupyter
|
```
import requests
import requests_cache
requests_cache.install_cache('calrecycle')
import pandas as pd
import time
URL = 'https://www2.calrecycle.ca.gov/LGCentral/DisposalReporting/Destination/CountywideSummary'
params = {'CountyID': 58, 'ReportFormat': 'XLS'}
resp = requests.post(URL, data=params)
resp
import io
def set_columns(df, columns=None, row_idx=None):
df = df.copy()
if row_idx:
columns = df.iloc[row_idx, :].tolist()
df.columns = columns
return df
(pd.read_excel(io.BytesIO(resp.content))
# .iloc[4,:].tolist()
.pipe(set_columns, row_idx=4)
.iloc[5:, :]
.dropna(axis=1, how='all')
.assign(is_data_row=lambda d: d['Destination Facility'].notnull())
.fillna(method='ffill')
.query('is_data_row')
)
def make_throttle_hook(timeout=1):
"""
Returns a response hook function which sleeps for `timeout` seconds if
response is not coming from the cache.
From https://requests-cache.readthedocs.io/en/latest/user_guide.html#usage
"""
def hook(response, *args, **kwargs):
if not getattr(response, 'from_cache', False):
print(f'{response} not found in cache. Timeout for {timeout:.3f} s.')
time.sleep(timeout)
return response
return hook
def get_session(rate_max=.5, timeout=None):
timeout = 1 / rate_max
s = requests_cache.CachedSession()
s.hooks = {'response': make_throttle_hook(timeout)}
return s
def process(df):
return (df
.pipe(set_columns, row_idx=4)
.iloc[5:, :]
.dropna(axis=1, how='all')
.assign(is_data_row=lambda d: d['Destination Facility'].notnull())
.fillna(method='ffill')
.query('is_data_row')
.drop(columns=['is_data_row'])
)
def get_df(resp):
if resp.ok:
return pd.read_excel(io.BytesIO(resp.content))
return pd.DataFrame()
# so ducky...
def get_report(county_id, session=requests):
params = {'CountyID': int(county_id), 'ReportFormat': 'XLS'}
# if "no record found", the server should return 404 instead of a 200 response with an empty XLS
resp = session.post(URL, data=params)
try:
df = get_df(resp).pipe(process).assign(county_id=county_id)
except Exception as e:
print(e)
else:
return df
def get_reports():
dfs = []
# sesh = get_session(rate_max=2)
ids = range(1, 58)
for county_id in ids:
df = get_report(county_id)
if df is not None:
dfs.append(df)
else:
print(f'county_id {county_id} not processed')
# TODO else append to missed ids?
return pd.concat(dfs)
def process_whole(df):
# Destination Facility Diposal Ton Quarter Report Year Total ADC Transformation Ton county_id
names = {
'Destination Facility': 'destination_facility',
'Diposal Ton': 'disposal',
'Report Year': 'report_year',
'Quarter': 'report_quarter',
'Total ADC': 'total_adc',
'Transformation Ton': 'transformation',
}
return (df
.rename(columns=names)
.fillna(0)
.astype({'report_quarter': int})
)
REPORTS = get_reports()
REPORTS = REPORTS.pipe(process_whole)
REPORTS
REPORTS.to_csv('/data/datasets/catdd/clean/calrecycle-disposal-reporting.csv')
```
|
github_jupyter
|
# A tutorial for the whitebox Python package
This notebook demonstrates the usage of the **whitebox** Python package for geospatial analysis, which is built on a stand-alone executable command-line program called [WhiteboxTools](https://github.com/jblindsay/whitebox-tools).
* Authors: Dr. John Lindsay (https://jblindsay.github.io/ghrg/index.html)
* Contributors: Dr. Qiusheng Wu (https://wetlands.io)
* GitHub repo: https://github.com/giswqs/whitebox-python
* WhiteboxTools: https://github.com/jblindsay/whitebox-tools
* User Manual: https://jblindsay.github.io/wbt_book
* PyPI: https://pypi.org/project/whitebox/
* Documentation: https://whitebox.readthedocs.io
* Binder: https://gishub.org/whitebox-cloud
* Free software: [MIT license](https://opensource.org/licenses/MIT)
This tutorial can be accessed in three ways:
* HTML version: https://gishub.org/whitebox-html
* Viewable Notebook: https://gishub.org/whitebox-notebook
* Interactive Notebook: https://gishub.org/whitebox-cloud
**Launch this tutorial as an interactive Jupyter Notebook on the cloud - [MyBinder.org](https://gishub.org/whitebox-cloud).**

## Table of Contents
* [Installation](#Installation)
* [About whitebox](#About-whitebox)
* [Getting data](#Getting-data)
* [Using whitebox](#Using-whitebox)
* [Displaying results](#Displaying-results)
* [whitebox GUI](#whitebox-GUI)
* [Citing whitebox](#Citing-whitebox)
* [Credits](#Credits)
* [Contact](#Contact)
## Installation
**whitebox** supports a variety of platforms, including Microsoft Windows, macOS, and Linux operating systems. Note that you will need to have **Python 3.x** installed. Python 2.x is not supported. The **whitebox** Python package can be installed using the following command:
`pip install whitebox`
If you have installed **whitebox** Python package before and want to upgrade to the latest version, you can use the following command:
`pip install whitebox -U`
If you encounter any installation issues, please check [Troubleshooting](https://github.com/giswqs/whitebox#troubleshooting) on the **whitebox** GitHub page and [Report Bugs](https://github.com/giswqs/whitebox#reporting-bugs).
## About whitebox
**import whitebox and call WhiteboxTools()**
```
import whitebox
wbt = whitebox.WhiteboxTools()
```
**Prints the whitebox-tools help...a listing of available commands**
```
print(wbt.help())
```
**Prints the whitebox-tools license**
```
print(wbt.license())
```
**Prints the whitebox-tools version**
```
print("Version information: {}".format(wbt.version()))
```
**Print the help for a specific tool.**
```
print(wbt.tool_help("ElevPercentile"))
```
**Tool names in the whitebox Python package can be called either using the snake_case or CamelCase convention (e.g. lidar_info or LidarInfo). The example below uses snake_case.**
```
import os, pkg_resources
# identify the sample data directory of the package
data_dir = os.path.dirname(pkg_resources.resource_filename("whitebox", 'testdata/'))
# set whitebox working directory
wbt.set_working_dir(data_dir)
wbt.verbose = False
# call whiteboxtools
wbt.feature_preserving_smoothing("DEM.tif", "smoothed.tif", filter=9)
wbt.breach_depressions("smoothed.tif", "breached.tif")
wbt.d_inf_flow_accumulation("breached.tif", "flow_accum.tif")
```
**You can search tools using keywords. For example, the script below searches and lists tools with 'lidar' or 'LAS' in tool name or description.**
```
lidar_tools = wbt.list_tools(['lidar', 'LAS'])
for index, tool in enumerate(lidar_tools):
print("{} {}: {} ...".format(str(index+1).zfill(3), tool, lidar_tools[tool][:45]))
```
**List all available tools in whitebox-tools**. Currently, **whitebox** contains 372 tools. More tools will be added as they become available.
```
all_tools = wbt.list_tools()
for index, tool in enumerate(all_tools):
print("{} {}: {} ...".format(str(index+1).zfill(3), tool, all_tools[tool][:45]))
```
## Getting data
This section demonstrates two ways to get data into Binder so that you can test **whitebox** on the cloud using your own data.
* [Getting data from direct URLs](#Getting-data-from-direct-URLs)
* [Getting data from Google Drive](#Getting-data-from-Google-Drive)
### Getting data from direct URLs
If you have data hosted on your own HTTP server or GitHub, you should be able to get direct URLs. With a direct URL, users can automatically download the data when the URL is clicked. For example https://github.com/giswqs/whitebox/raw/master/examples/testdata.zip
Import the following Python libraries and start getting data from direct URLs.
```
import os
import zipfile
import tarfile
import shutil
import urllib.request
```
Create a folder named *whitebox* under the user home folder and set it as the working directory.
```
work_dir = os.path.join(os.path.expanduser("~"), 'whitebox')
if not os.path.exists(work_dir):
os.mkdir(work_dir)
os.chdir(work_dir)
print("Working directory: {}".format(work_dir))
```
Replace the following URL with your own direct URL hosting your data.
```
url = "https://github.com/giswqs/whitebox/raw/master/examples/testdata.zip"
```
Download data the from the above URL and unzip the file if needed.
```
# download the file
zip_name = os.path.basename(url)
zip_path = os.path.join(work_dir, zip_name)
print('Downloading {} ...'.format(zip_name))
urllib.request.urlretrieve(url, zip_path)
print('Downloading {} done.'.format(zip_name))
# if it is a zip file
if '.zip' in zip_name:
print("Decompressing {} ...".format(zip_name))
with zipfile.ZipFile(zip_name, "r") as zip_ref:
zip_ref.extractall(work_dir)
print('Decompressing done.')
# if it is a tar file
if '.tar' in zip_name:
print("Decompressing {} ...".format(zip_name))
with tarfile.open(zip_name, "r") as tar_ref:
tar_ref.extractall(work_dir)
print('Decompressing done.')
print('Data directory: {}'.format(os.path.splitext(zip_path)[0]))
```
You have successfully downloaded data to Binder. Therefore, you can skip to [Using whitebox](#Using-whitebox) and start testing whitebox with your own data.
### Getting data from Google Drive
Alternatively, you can upload data to [Google Drive](https://www.google.com/drive/) and then [share files publicly from Google Drive](https://support.google.com/drive/answer/2494822?co=GENIE.Platform%3DDesktop&hl=en). Once the file is shared publicly, you should be able to get a shareable URL. For example, https://drive.google.com/file/d/1xgxMLRh_jOLRNq-f3T_LXAaSuv9g_JnV.
To download files from Google Drive to Binder, you can use the Python package called [google-drive-downloader](https://github.com/ndrplz/google-drive-downloader), which can be installed using the following command:
`pip install googledrivedownloader requests`
**Replace the following URL with your own shareable URL from Google Drive.**
```
gfile_url = 'https://drive.google.com/file/d/1xgxMLRh_jOLRNq-f3T_LXAaSuv9g_JnV'
```
**Extract the file id from the above URL.**
```
file_id = gfile_url.split('/')[5] #'1xgxMLRh_jOLRNq-f3T_LXAaSuv9g_JnV'
print('Google Drive file id: {}'.format(file_id))
```
**Download the shared file from Google Drive.**
```
from google_drive_downloader import GoogleDriveDownloader as gdd
dest_path = './testdata.zip' # choose a name for the downloaded file
gdd.download_file_from_google_drive(file_id, dest_path, unzip=True)
```
You have successfully downloaded data from Google Drive to Binder. You can now continue to [Using whitebox](#Using-whitebox) and start testing whitebox with your own data.
## Using whitebox
Here you can specify where your data are located. In this example, we will use [DEM.tif](https://github.com/giswqs/whitebox/blob/master/examples/testdata/DEM.tif), which has been downloaded to the testdata folder.
**List data under the data folder.**
```
data_dir = './testdata/'
print(os.listdir(data_dir))
```
In this simple example, we smooth [DEM.tif](https://github.com/giswqs/whitebox/blob/master/examples/testdata/DEM.tif) using a [feature preserving denoising](https://github.com/jblindsay/whitebox-tools/blob/master/src/tools/terrain_analysis/feature_preserving_denoise.rs) algorithm. Then, we remove depressions in the DEM using a [depression breaching](https://github.com/jblindsay/whitebox-tools/blob/master/src/tools/hydro_analysis/breach_depressions.rs) algorithm. Finally, we calculate [flow accumulation](https://github.com/jblindsay/whitebox-tools/blob/master/src/tools/hydro_analysis/dinf_flow_accum.rs) based on the depressionless DEM.
```
import whitebox
wbt = whitebox.WhiteboxTools()
# set whitebox working directory
wbt.set_working_dir(data_dir)
wbt.verbose = False
# call whiteboxtool
wbt.feature_preserving_smoothing("DEM.tif", "smoothed.tif", filter=9)
wbt.breach_depressions("smoothed.tif", "breached.tif")
wbt.d_inf_flow_accumulation("breached.tif", "flow_accum.tif")
```
## Displaying results
This section demonstrates how to display images on Jupyter Notebook. Three Python packages are used here, including [matplotlib](https://matplotlib.org/), [imageio](https://imageio.readthedocs.io/en/stable/installation.html), and [tifffile](https://pypi.org/project/tifffile/). These three packages can be installed using the following command:
`pip install matplotlib imageio tifffile`
**Import the libraries.**
```
# comment out the third line (%matplotlib inline) if you run the tutorial in other IDEs other than Jupyter Notebook
import matplotlib.pyplot as plt
import imageio
%matplotlib inline
```
**Display one single image.**
```
raster = imageio.imread(os.path.join(data_dir, 'DEM.tif'))
plt.imshow(raster)
plt.show()
```
**Read images as numpy arrays.**
```
original = imageio.imread(os.path.join(data_dir, 'DEM.tif'))
smoothed = imageio.imread(os.path.join(data_dir, 'smoothed.tif'))
breached = imageio.imread(os.path.join(data_dir, 'breached.tif'))
flow_accum = imageio.imread(os.path.join(data_dir, 'flow_accum.tif'))
```
**Display multiple images in one plot.**
```
fig=plt.figure(figsize=(16,11))
ax1 = fig.add_subplot(2, 2, 1)
ax1.set_title('Original DEM')
plt.imshow(original)
ax2 = fig.add_subplot(2, 2, 2)
ax2.set_title('Smoothed DEM')
plt.imshow(smoothed)
ax3 = fig.add_subplot(2, 2, 3)
ax3.set_title('Breached DEM')
plt.imshow(breached)
ax4 = fig.add_subplot(2, 2, 4)
ax4.set_title('Flow Accumulation')
plt.imshow(flow_accum)
plt.show()
```
## whitebox GUI
WhiteboxTools also provides a Graphical User Interface (GUI) - **WhiteboxTools Runner**, which can be invoked using the following Python script. *__Note that the GUI might not work in Jupyter notebooks deployed on the cloud (e.g., MyBinder.org), but it should work on Jupyter notebooks on local computers.__*
```python
import whitebox
whitebox.Runner()
```

## Citing whitebox
If you use the **whitebox** Python package for your research and publications, please consider citing the following papers to give Prof. [John Lindsay](http://www.uoguelph.ca/~hydrogeo/index.html) credits for his tremendous efforts in developing [Whitebox GAT](https://github.com/jblindsay/whitebox-geospatial-analysis-tools) and [WhiteboxTools](https://github.com/jblindsay/whitebox-tools). Without his work, this **whitebox** Python package would not exist!
* Lindsay, J. B. (2016). Whitebox GAT: A case study in geomorphometric analysis. Computers & Geosciences, 95, 75-84. http://dx.doi.org/10.1016/j.cageo.2016.07.003
## Credits
This interactive notebook is made possible by [MyBinder.org](https://mybinder.org/). Big thanks to [MyBinder.org](https://mybinder.org/) for developing the amazing binder platform, which is extremely valuable for reproducible research!
This tutorial made use of a number of open-source Python packages, including [Cookiecutter](https://github.com/audreyr/cookiecutter), [numpy](http://www.numpy.org/), [matplotlib](https://matplotlib.org/), [imageio](https://imageio.readthedocs.io/en/stable/installation.html), [tifffile](https://pypi.org/project/tifffile/), and [google-drive-downloader](https://github.com/ndrplz/google-drive-downloader). Thanks to all developers of these wonderful Python packages!
## Contact
If you have any questions regarding this tutorial or the **whitebox** Python package, you can contact me (Dr. Qiusheng Wu) at [email protected] or https://wetlands.io/#contact
|
github_jupyter
|
```
import lifelines
import pymc as pm
import pyBMA
import matplotlib.pyplot as plt
import numpy as np
from math import log
from datetime import datetime
import pandas as pd
%matplotlib inline
```
The first step in any data analysis is acquiring and munging the data
An example data set can be found at:
https://jakecoltman.gitlab.io/website/post/pydata/
Download the file output.txt and transform it into a format like below where the event column should be 0 if there's only one entry for an id, and 1 if there are two entries:
End date = datetime.datetime(2016, 5, 3, 20, 36, 8, 92165)
id,time_to_convert,age,male,event,search,brand
```
running_id = 0
output = [[0]]
with open("E:/output.txt") as file_open:
for row in file_open.read().split("\n"):
cols = row.split(",")
if cols[0] == output[-1][0]:
output[-1].append(cols[1])
output[-1].append(True)
else:
output.append(cols)
output = output[1:]
for row in output:
if len(row) == 6:
row += [datetime(2016, 5, 3, 20, 36, 8, 92165), False]
output = output[1:-1]
def convert_to_days(dt):
day_diff = dt / np.timedelta64(1, 'D')
if day_diff == 0:
return 23.0
else:
return day_diff
df = pd.DataFrame(output, columns=["id", "advert_time", "male","age","search","brand","conversion_time","event"])
df["lifetime"] = pd.to_datetime(df["conversion_time"]) - pd.to_datetime(df["advert_time"])
df["lifetime"] = df["lifetime"].apply(convert_to_days)
df["male"] = df["male"].astype(int)
df["search"] = df["search"].astype(int)
df["brand"] = df["brand"].astype(int)
df["age"] = df["age"].astype(int)
df["event"] = df["event"].astype(int)
df = df.drop('advert_time', 1)
df = df.drop('conversion_time', 1)
df = df.set_index("id")
df = df.dropna(thresh=2)
df.median()
df
###Parametric Bayes
#Shout out to Cam Davidson-Pilon
## Example fully worked model using toy data
## Adapted from http://blog.yhat.com/posts/estimating-user-lifetimes-with-pymc.html
## Note that we've made some corrections
censor = np.array(df["event"].apply(lambda x: 0 if x else 1).tolist())
alpha = pm.Uniform("alpha", 0,50)
beta = pm.Uniform("beta", 0,50)
@pm.observed
def survival(value=df["lifetime"], alpha = alpha, beta = beta ):
return sum( (1-censor)*(np.log( alpha/beta) + (alpha-1)*np.log(value/beta)) - (value/beta)**(alpha))
mcmc = pm.MCMC([alpha, beta, survival ] )
mcmc.sample(10000)
pm.Matplot.plot(mcmc)
mcmc.trace("alpha")[:]
```
Problems:
2 - Try to fit your data from section 1
3 - Use the results to plot the distribution of the median
--------
4 - Try adjusting the number of samples, the burn parameter and the amount of thinning to get good answers
5 - Try adjusting the prior and see how it affects the estimate
--------
6 - Try to fit a different distribution to the data
7 - Compare answers
Bonus - test the hypothesis that the true median is greater than a certain amount
For question 3, note that the median of a Weibull is:
$$\beta(\log 2)^{1/\alpha}$$
```
#Solution to question 3:
def weibull_median(alpha, beta):
return beta * ((log(2)) ** ( 1 / alpha))
plt.hist([weibull_median(x[0], x[1]) for x in zip(mcmc.trace("alpha"), mcmc.trace("beta"))])
#Solution to question 4:
### Increasing the burn parameter allows us to discard results before convergence
### Thinning the results removes autocorrelation
mcmc = pm.MCMC([alpha, beta, survival ] )
mcmc.sample(10000, burn = 3000, thin = 20)
pm.Matplot.plot(mcmc)
#Solution to Q5
## Adjusting the priors impacts the overall result
## If we give a looser, less informative prior then we end up with a broader, shorter distribution
## If we give much more informative priors, then we get a tighter, taller distribution
censor = np.array(df["event"].apply(lambda x: 0 if x else 1).tolist())
## Note the narrowing of the prior
alpha = pm.Normal("alpha", 1.7, 10000)
beta = pm.Normal("beta", 18.5, 10000)
####Uncomment this to see the result of looser priors
## Note this ends up pretty much the same as we're already very loose
#alpha = pm.Uniform("alpha", 0, 30)
#beta = pm.Uniform("beta", 0, 30)
@pm.observed
def survival(value=df["lifetime"], alpha = alpha, beta = beta ):
return sum( (1-censor)*(np.log( alpha/beta) + (alpha-1)*np.log(value/beta)) - (value/beta)**(alpha))
mcmc = pm.MCMC([alpha, beta, survival ] )
mcmc.sample(10000, burn = 5000, thin = 20)
pm.Matplot.plot(mcmc)
#plt.hist([weibull_median(x[0], x[1]) for x in zip(mcmc.trace("alpha"), mcmc.trace("beta"))])
## Solution to bonus
## Super easy to do in the Bayesian framework, all we need to do is look at what % of samples
## meet our criteria
medians = [weibull_median(x[0], x[1]) for x in zip(mcmc.trace("alpha"), mcmc.trace("beta"))]
testing_value = 15.6
number_of_greater_samples = sum([x >= testing_value for x in medians])
100 * (number_of_greater_samples / len(medians))
#Cox model
```
If we want to look at covariates, we need a new approach. We'll use Cox proportional hazards. More information here.
```
#Fitting solution
cf = lifelines.CoxPHFitter()
cf.fit(df, 'lifetime', event_col = 'event')
cf.summary
```
Once we've fit the data, we need to do something useful with it. Try to do the following things:
1 - Plot the baseline survival function
2 - Predict the functions for a particular set of features
3 - Plot the survival function for two different set of features
4 - For your results in part 3, calculate how much more likely a death event is for one than for the other over a given period of time
```
#Solution to 1
fig, axis = plt.subplots(nrows=1, ncols=1)
cf.baseline_survival_.plot(ax = axis, title = "Baseline Survival")
# Solution to prediction
regressors = np.array([[1,45,0,0]])
survival = cf.predict_survival_function(regressors)
survival
#Solution to plotting multiple regressors
fig, axis = plt.subplots(nrows=1, ncols=1, sharex=True)
regressor1 = np.array([[1,45,0,1]])
regressor2 = np.array([[1,23,1,1]])
survival_1 = cf.predict_survival_function(regressor1)
survival_2 = cf.predict_survival_function(regressor2)
plt.plot(survival_1,label = "45 year old male")
plt.plot(survival_2,label = "23 year old male")
plt.legend(loc = "lower left")
#Difference in survival
odds = survival_1 / survival_2
plt.plot(odds, c = "red")
```
Model selection
Difficult to do with classic tools (here)
Problem:
1 - Calculate the BMA coefficient values
2 - Compare these results to the lifelines results from earlier
3 - Try running with different priors
```
##Solution to 1
from pyBMA import CoxPHFitter
bmaCox = pyBMA.CoxPHFitter.CoxPHFitter()
bmaCox.fit(df, "lifetime", event_col= "event", priors= [0.5]*4)
print(bmaCox.summary)
#Low probability for everything favours parsimonious models
bmaCox = pyBMA.CoxPHFitter.CoxPHFitter()
bmaCox.fit(df, "lifetime", event_col= "event", priors= [0.1]*4)
print(bmaCox.summary)
#High probability for everything favours including more covariates
bmaCox = pyBMA.CoxPHFitter.CoxPHFitter()
bmaCox.fit(df, "lifetime", event_col= "event", priors= [0.9]*4)
print(bmaCox.summary)
#Mixed priors encode different prior beliefs about each covariate
bmaCox = pyBMA.CoxPHFitter.CoxPHFitter()
bmaCox.fit(df, "lifetime", event_col= "event", priors= [0.3, 0.9, 0.001, 0.3])
print(bmaCox.summary)
```
|
github_jupyter
|
```
import os
import random
import shutil
from shutil import copyfile
import csv
root_dir = "ISAFE MAIN DATABASE FOR PUBLIC/"
data = "Database/"
global_emotion_dir = "emotions_5/"
# global_emotion_dir = "emotions/"
subject_list = os.path.join(root_dir, data)
x = os.listdir(subject_list)
csv_file = r"ISAFE MAIN DATABASE FOR PUBLIC\Annotations\self-annotation.csv"
labels_dictionary = {}
with open(csv_file) as rf:
rows = csv.reader(rf, delimiter=",")
for row in rows:
labels_dictionary[row[0]]=row[1]
def parse_labels(directory, cut_images):
li = os.listdir(directory)
string_directory = str(directory)
label_key = string_directory[-6:]
if not "S" in label_key:
label_key = "S"+label_key
for item in li:
path = os.path.join(directory,item)
if os.path.isdir(path):
parse_labels(path, cut_images)
elif item.endswith(".jpg"):
if cut_images:
                if any(item.endswith("_{}.jpg".format(n)) for n in range(15)):
                    # skip images numbered _0 through _14 in each sequence
                    continue
randint = random.random()
whydoineedtodothisshit = label_key.replace("\\", "/")
emotion = labels_dictionary[whydoineedtodothisshit]
identifier = label_key.replace("\\", "_")
pic_id = identifier+item
# randomizes the images in real time
if randint < 0.8:
train_test_validate = "train"
elif randint >= 0.8 and randint < 0.9:
train_test_validate = "validation"
else:
train_test_validate = "test"
if emotion == "1":
emotion_ = "joy"
copy_files(item, pic_id, directory, emotion_, global_emotion_dir, train_test_validate)
elif emotion == "2":
emotion_ = "sadness"
copy_files(item, pic_id, directory, emotion_, global_emotion_dir, train_test_validate)
elif emotion == "3":
# 3 = surprise
emotion_ = "surprise_fear"
copy_files(item, pic_id, directory, emotion_, global_emotion_dir, train_test_validate)
elif emotion == "4":
# 4 = disgust
emotion_ = "anger_disgust"
copy_files(item, pic_id, directory, emotion_, global_emotion_dir, train_test_validate)
elif emotion == "5":
# 5=fear
emotion_ = "surprise_fear"
copy_files(item, pic_id, directory, emotion_, global_emotion_dir, train_test_validate)
elif emotion == "6":
#6=anger
emotion_ = "anger_disgust"
copy_files(item, pic_id, directory, emotion_, global_emotion_dir, train_test_validate)
# elif emotion == "7":
            # uncertain, I do not have a classification for this
# emotion_ = "joy"
# copy_files(item, pic_id, directory, emotion_, global_emotion_dir, train_test_validate)
elif emotion == "8":
emotion_ = "neutral"
copy_files(item, pic_id, directory, emotion_, global_emotion_dir, train_test_validate)
else:
continue
def copy_files(pic, pic_id, orignal_dir, emotion_, global_emotion_dir, ttv):
file_ = os.path.join(orignal_dir, pic)
ttv_dir = os.path.join(global_emotion_dir, ttv)
emotion_dir = os.path.join(ttv_dir, emotion_)
dest_ = os.path.join(emotion_dir, pic_id)
if os.path.getsize(file_) != 0:
copyfile(file_, dest_)
for root, dirs, files in os.walk("emotions_copy_test_dir"):
for x in files:
os.remove(os.path.join(root, x))
parse_labels(subject_list, True)
```
|
github_jupyter
|
```
import numpy as np
import pandas as pd
import datetime
from pandas.tseries.frequencies import to_offset
import niftyutils
from niftyutils import load_nifty_data
import matplotlib.pyplot as plt
start_date = datetime.datetime(2005,8,1)
end_date = datetime.datetime(2020,9,25)
nifty_data = load_nifty_data(start_date,end_date)
```
## Daily Return Distribution (For 15 years)
```
daily_returns = (nifty_data['Close']/nifty_data['Close'].shift(1) - 1)*100
daily_returns = daily_returns.dropna()
daily_returns.describe()
plt.figure(figsize=[8,7])
plt.style.use("bmh")
plt.hist(daily_returns, density = True, bins=20, color='#2ab0ff',alpha=0.55)
plt.xlabel('% return', fontsize=15)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.tick_params(left = False, bottom = False)
plt.title('NIFTY daily % returns ({} samples)'.format(len(daily_returns)),fontsize=15)
plt.grid(False)
plt.show()
custom_bins = [daily_returns.min(),-2.5,-2,-1.5,-1,-0.75,0.75,1.0,1.5,2.0,2.5,daily_returns.max()]
categorized_daily_returns = pd.cut(daily_returns, bins=custom_bins)
categorized_daily_returns.value_counts(normalize=True,sort=False)
custom_bins_compact = [daily_returns.min(),-3,-1.5,-1.0,1.0,1.5,3.0,daily_returns.max()]
categorized_daily_returns = pd.cut(daily_returns, bins=custom_bins_compact)
categorized_daily_returns.value_counts(normalize=True,sort=False)
```
## Weekly Return Distribution (For 15 years)
```
weekly_nifty_data = nifty_data.resample('W').agg(niftyutils.OHLC_CONVERSION_DICT)
weekly_nifty_data.index = weekly_nifty_data.index - to_offset('6D')
weekly_returns = (weekly_nifty_data['Close']/weekly_nifty_data['Close'].shift(1) - 1)*100
weekly_returns = weekly_returns.dropna().rename('returns')
weekly_returns.describe()
plt.figure(figsize=[8,7])
plt.style.use("bmh")
plt.hist(weekly_returns, density = True, bins=20, color='#2ab0ff',alpha=0.55)
plt.xlabel('% return', fontsize=15)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.tick_params(left = False, bottom = False)
plt.title('NIFTY weekly % returns ({} samples)'.format(len(weekly_returns)),fontsize=15)
plt.grid(False)
plt.show()
custom_bins_compact = [weekly_returns.min(),-5,-2.5,2.5,5,weekly_returns.max()]
categorized_weekly_returns = pd.cut(weekly_returns, bins=custom_bins_compact)
categorized_weekly_returns.value_counts(normalize=True,sort=False)
custom_bins_labels = ['-ve Extreme','-ve','normal','+ve','+ve Extreme']
return_categories = pd.cut(weekly_returns, bins=custom_bins_compact,labels=custom_bins_labels).rename('category')
weekly_returns_categorized = pd.concat([weekly_returns, return_categories], axis=1)
```
|
github_jupyter
|
# Modeling and Simulation in Python
Chapter 3
Copyright 2017 Allen Downey
License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)
```
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim library
from modsim import *
# set the random number generator
np.random.seed(7)
```
## More than one State object
Here's the code from the previous chapter, with two changes:
1. I've added DocStrings that explain what each function does, and what parameters it takes.
2. I've added a parameter named `state` to the functions so they work with whatever `State` object we give them, instead of always using `bikeshare`. That makes it possible to work with more than one `State` object.
```
def step(state, p1, p2):
"""Simulate one minute of time.
state: bikeshare State object
p1: probability of an Olin->Wellesley customer arrival
p2: probability of a Wellesley->Olin customer arrival
"""
if flip(p1):
bike_to_wellesley(state)
if flip(p2):
bike_to_olin(state)
def bike_to_wellesley(state):
"""Move one bike from Olin to Wellesley.
state: bikeshare State object
"""
state.olin -= 1
state.wellesley += 1
def bike_to_olin(state):
"""Move one bike from Wellesley to Olin.
state: bikeshare State object
"""
state.wellesley -= 1
state.olin += 1
def decorate_bikeshare():
"""Add a title and label the axes."""
decorate(title='Olin-Wellesley Bikeshare',
xlabel='Time step (min)',
ylabel='Number of bikes')
```
And here's `run_simulation`, which is a solution to the exercise at the end of the previous notebook.
```
def run_simulation(state, p1, p2, num_steps):
"""Simulate the given number of time steps.
state: State object
p1: probability of an Olin->Wellesley customer arrival
p2: probability of a Wellesley->Olin customer arrival
num_steps: number of time steps
"""
results = TimeSeries()
for i in range(num_steps):
step(state, p1, p2)
results[i] = state.olin
plot(results, label='Olin')
```
Now we can create more than one `State` object:
```
bikeshare1 = State(olin=10, wellesley=2)
bikeshare2 = State(olin=2, wellesley=10)
```
Whenever we call a function, we indicate which `State` object to work with:
```
bike_to_olin(bikeshare1)
bike_to_wellesley(bikeshare2)
```
And you can confirm that the different objects are getting updated independently:
```
bikeshare1
bikeshare2
```
## Negative bikes
In the code we have so far, the number of bikes at one of the locations can go negative, and the number of bikes at the other location can exceed the actual number of bikes in the system.
If you run this simulation a few times, it happens often.
```
bikeshare = State(olin=10, wellesley=2)
run_simulation(bikeshare, 0.4, 0.2, 60)
decorate_bikeshare()
```
We can fix this problem using the `return` statement to exit the function early if an update would cause negative bikes.
```
def bike_to_wellesley(state):
"""Move one bike from Olin to Wellesley.
state: bikeshare State object
"""
if state.olin == 0:
return
state.olin -= 1
state.wellesley += 1
def bike_to_olin(state):
"""Move one bike from Wellesley to Olin.
state: bikeshare State object
"""
if state.wellesley == 0:
return
state.wellesley -= 1
state.olin += 1
```
Now if you run the simulation again, it should behave.
```
bikeshare = State(olin=10, wellesley=2)
run_simulation(bikeshare, 0.4, 0.2, 60)
decorate_bikeshare()
```
## Comparison operators
The `if` statements in the previous section used the comparison operator `==`. The other comparison operators are listed in the book.
It is easy to confuse the comparison operator `==` with the assignment operator `=`.
Remember that `=` creates a variable or gives an existing variable a new value.
```
x = 5
```
Whereas `==` compares two values and returns `True` if they are equal.
```
x == 5
```
You can use `==` in an `if` statement.
```
if x == 5:
print('yes, x is 5')
```
But if you use `=` in an `if` statement, you get an error.
```
# If you remove the # from the if statement and run it, you'll get
# SyntaxError: invalid syntax
#if x = 5:
# print('yes, x is 5')
```
**Exercise:** Add an `else` clause to the `if` statement above, and print an appropriate message.
Replace the `==` operator with one or two of the other comparison operators, and confirm they do what you expect.
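Here is one possible sketch of a solution (not from the original notebook), showing an `else` clause and the remaining comparison operators:
```
# One possible solution sketch: an else clause plus the other comparison operators
x = 5
if x == 5:
    print('yes, x is 5')
else:
    print('no, x is not 5')
print(x != 4)   # not equal: True
print(x > 4)    # greater than: True
print(x >= 5)   # greater than or equal: True
print(x < 10)   # less than: True
print(x <= 5)   # less than or equal: True
```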
## Metrics
Now that we have a working simulation, we'll use it to evaluate alternative designs and see how good or bad they are. The metric we'll use is the number of customers who arrive and find no bikes available, which might indicate a design problem.
First we'll make a new `State` object that creates and initializes additional state variables to keep track of the metrics.
```
bikeshare = State(olin=10, wellesley=2,
olin_empty=0, wellesley_empty=0)
```
Next we need versions of `bike_to_wellesley` and `bike_to_olin` that update the metrics.
```
def bike_to_wellesley(state):
"""Move one bike from Olin to Wellesley.
state: bikeshare State object
"""
if state.olin == 0:
state.olin_empty += 1
return
state.olin -= 1
state.wellesley += 1
def bike_to_olin(state):
"""Move one bike from Wellesley to Olin.
state: bikeshare State object
"""
if state.wellesley == 0:
state.wellesley_empty += 1
return
state.wellesley -= 1
state.olin += 1
```
Now when we run a simulation, it keeps track of unhappy customers.
```
run_simulation(bikeshare, 0.4, 0.2, 60)
decorate_bikeshare()
```
After the simulation, we can print the number of unhappy customers at each location.
```
bikeshare.olin_empty
bikeshare.wellesley_empty
```
## Exercises
**Exercise:** As another metric, we might be interested in the time until the first customer arrives and doesn't find a bike. To make that work, we have to add a "clock" to keep track of how many time steps have elapsed:
1. Create a new `State` object with an additional state variable, `clock`, initialized to 0.
2. Write a modified version of `step` that adds one to the clock each time it is invoked.
Test your code by running the simulation and check the value of `clock` at the end.
```
bikeshare = State(olin=10, wellesley=2,
olin_empty=0, wellesley_empty=0,
clock=0)
# Solution
def step(state, p1, p2):
"""Simulate one minute of time.
state: bikeshare State object
p1: probability of an Olin->Wellesley customer arrival
p2: probability of a Wellesley->Olin customer arrival
"""
state.clock += 1
if flip(p1):
bike_to_wellesley(state)
if flip(p2):
bike_to_olin(state)
# Solution
run_simulation(bikeshare, 0.4, 0.2, 60)
decorate_bikeshare()
# Solution
bikeshare
```
**Exercise:** Continuing the previous exercise, let's record the time when the first customer arrives and doesn't find a bike.
1. Create a new `State` object with an additional state variable, `t_first_empty`, initialized to -1 as a special value to indicate that it has not been set.
2. Write a modified version of `step` that checks whether `olin_empty` and `wellesley_empty` are 0. If not, it should set `t_first_empty` to `clock` (but only if `t_first_empty` has not already been set).
Test your code by running the simulation and printing the values of `olin_empty`, `wellesley_empty`, and `t_first_empty` at the end.
```
# Solution
bikeshare = State(olin=10, wellesley=2,
olin_empty=0, wellesley_empty=0,
clock=0, t_first_empty=-1)
# Solution
def step(state, p1, p2):
"""Simulate one minute of time.
state: bikeshare State object
p1: probability of an Olin->Wellesley customer arrival
p2: probability of a Wellesley->Olin customer arrival
"""
state.clock += 1
if flip(p1):
bike_to_wellesley(state)
if flip(p2):
bike_to_olin(state)
if state.t_first_empty != -1:
return
if state.olin_empty + state.wellesley_empty > 0:
state.t_first_empty = state.clock
# Solution
run_simulation(bikeshare, 0.4, 0.2, 60)
decorate_bikeshare()
# Solution
bikeshare
```
|
github_jupyter
|
# Tutorial: PyTorch
```
__author__ = "Ignacio Cases"
__version__ = "CS224u, Stanford, Spring 2021"
```
## Contents
1. [Motivation](#Motivation)
1. [Importing PyTorch](#Importing-PyTorch)
1. [Tensors](#Tensors)
1. [Tensor creation](#Tensor-creation)
1. [Operations on tensors](#Operations-on-tensors)
1. [GPU computation](#GPU-computation)
1. [Neural network foundations](#Neural-network-foundations)
1. [Automatic differentiation](#Automatic-differentiation)
1. [Modules](#Modules)
1. [Sequential](#Sequential)
1. [Criteria and loss functions](#Criteria-and-loss-functions)
1. [Optimization](#Optimization)
1. [Training a simple model](#Training-a-simple-model)
1. [Reproducibility](#Reproducibility)
1. [References](#References)
## Motivation
PyTorch is a Python package designed to carry out scientific computation. We use PyTorch in a range of different environments: local model development, large-scale deployments on big clusters, and even _inference_ in embedded, low-power systems. While similar in many aspects to NumPy, PyTorch enables us to perform fast and efficient training of deep learning and reinforcement learning models not only on the CPU but also on a GPU or other ASICs (Application Specific Integrated Circuits) for AI, such as Tensor Processing Units (TPU).
## Importing PyTorch
This tutorial assumes a working installation of PyTorch using your `nlu` environment, but the content applies to any regular installation of PyTorch. If you don't have a working installation of PyTorch, please follow the instructions in [the setup notebook](setup.ipynb).
To get started working with PyTorch we simply begin by importing the torch module:
```
import torch
```
**Side note**: why not `import pytorch`? The name of the package is `torch` for historical reasons: `torch` is the original name of the ancestor of the PyTorch library that got started back in 2002 as a C library with Lua scripting. It was only much later that the original `torch` was ported to Python. The PyTorch project decided to prefix the Py to make clear that this library refers to the Python version, as it was confusing back then to know which `torch` one was referring to. All the internal references to the library use just `torch`. It's possible that PyTorch will be renamed at some point, as the original `torch` is no longer maintained and there is no longer confusion.
We can see the version installed and determine whether or not we have a GPU-enabled PyTorch install by issuing
```
print("PyTorch version {}".format(torch.__version__))
print("GPU-enabled installation? {}".format(torch.cuda.is_available()))
```
PyTorch has good [documentation](https://pytorch.org/docs/stable/index.html) but it can take some time to familiarize oneself with the structure of the package; it's worth the effort to do so!
We will also make use of other imports:
```
import numpy as np
```
## Tensors
Tensors are collections of numbers represented as arrays, and they are the basic building blocks in PyTorch.
You are probably already familiar with several types of tensors:
- A scalar, a single number, is a zero-th order tensor.
- A column vector $v$ of dimensionality $d_c \times 1$ is a tensor of order 1.
- A row vector $x$ of dimensionality $1 \times d_r$ is a tensor of order 1.
- A matrix $A$ of dimensionality $d_r \times d_c$ is a tensor of order 2.
- A cube $T$ of dimensionality $d_r \times d_c \times d_d$ is a tensor of order 3.
Tensors are the fundamental blocks that carry information in our mathematical models, and they are composed using several operations to create mathematical graphs in which information can flow (propagate) forward (functional application) and backwards (using the chain rule).
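As a quick, hedged illustration of the orders listed above (this snippet is not part of the original text), we can create one tensor of each order and query it with `dim()` and `size()`; note that `dim()` counts the number of indices, so a column vector stored as a 3x1 matrix would report order 2.
```
# One tensor per order, checking the order with dim() and the shape with size()
import torch
s = torch.tensor(3.14)       # order 0: a scalar, s.dim() == 0
v = torch.randn(3)           # order 1: a vector, v.dim() == 1
A = torch.randn(4, 3)        # order 2: a matrix, A.dim() == 2
T = torch.randn(2, 4, 3)     # order 3: a "cube" of numbers, T.dim() == 3
for name, tensor in [('s', s), ('v', v), ('A', A), ('T', T)]:
    print(name, 'order:', tensor.dim(), 'size:', tuple(tensor.size()))
```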
We have seen multidimensional arrays in NumPy. These NumPy objects are also a representation of tensors.
**Side note**: what is a tensor __really__? Tensors are important mathematical objects with applications in multiple domains in mathematics and physics. The term "tensor" comes from the usage of these mathematical objects to describe the stretching of a volume of matter under *tension*. They are central objects of study in a subfield of mathematics known as differential geometry, which deals with the geometry of continuous vector spaces. As a very high-level summary (and as a first approximation), tensors are defined as multi-linear "machines" that have a number of slots (their order, a.k.a. rank), taking a number of "column" vectors and "row" vectors *to produce a scalar*. For example, a tensor $\mathbf{A}$ (represented by a matrix with rows and columns that you could write on a sheet of paper) can be thought of as having two slots. So when $\mathbf{A}$ acts upon a column vector $\mathbf{v}$ and a row vector $\mathbf{x}$, it returns a scalar:
$$\mathbf{A}(\mathbf{x}, \mathbf{v}) = s$$
If $\mathbf{A}$ only acts on the column vector, for example, the result will be another column tensor $\mathbf{u}$ of one order less than the order of $\mathbf{A}$. Thus, letting $\mathbf{v}$ act is similar to "removing" its slot:
$$\mathbf{u} = \mathbf{A}(\mathbf{v})$$
The resulting $\mathbf{u}$ can later interact with another row vector to produce a scalar or be used in any other way.
This can be a very powerful way of thinking about tensors, as their slots can guide you when writing code, especially given that PyTorch has a _functional_ approach to modules in which this view is very much highlighted. As we will see below, these simple equations above have a completely straightforward representation in the code. In the end, most of what our models will do is to process the input using this type of functional application so that we end up having a tensor output and a scalar value that measures how good our output is with respect to the real output value in the dataset.
### Tensor creation
Let's get started with tensors in PyTorch. The framework supports eight different types ([Lapan 2018](#References)):
- 3 float types (16-bit, 32-bit, 64-bit): `torch.FloatTensor` is the class name for the commonly used 32-bit tensor.
- 5 integer types (signed 8-bit, unsigned 8-bit, 16-bit, 32-bit, 64-bit): common tensors of these types are the 8-bit unsigned tensor `torch.ByteTensor` and the 64-bit `torch.LongTensor`.
There are three fundamental ways to create tensors in PyTorch ([Lapan 2018](#References)):
- Call a tensor constructor of a given type, which will create a non-initialized tensor. So we then need to fill this tensor later to be able to use it.
- Call a built-in method in the `torch` module that returns a tensor that is already initialized.
- Use the PyTorch–NumPy bridge.
#### Calling the constructor
Let's first create a 2 x 3 dimensional tensor of the type `float`:
```
t = torch.FloatTensor(2, 3)
print(t)
print(t.size())
```
Note that we specified the dimensions as the arguments to the constructor by passing the numbers directly – and not a list or a tuple, which would have very different outcomes as we will see below! We can always inspect the size of the tensor using the `size()` method.
The constructor method allocates space in memory for this tensor. However, the tensor is *non-initialized*. In order to initialize it, we need to call any of the tensor initialization methods of the basic tensor types. For example, the tensor we just created has a built-in method `zero_()`:
```
t.zero_()
```
The underscore after the method name is important: it means that the operation happens _in place_: the returned object is the same object but now with different content. A very handy way to construct a tensor using the constructor happens when we have available the content we want to put in the tensor in the form of a Python iterable. In this case, we just pass it as the argument to the constructor:
```
torch.FloatTensor([[1, 2, 3], [4, 5, 6]])
```
#### Calling a method in the torch module
A very convenient way to create tensors, in addition to using the constructor method, is to use one of the multiple methods provided in the `torch` module. In particular, the `tensor` method allows us to pass a number or iterable as the argument to get the appropriately typed tensor:
```
tl = torch.tensor([1, 2, 3])
t = torch.tensor([1., 2., 3.])
print("A 64-bit integer tensor: {}, {}".format(tl, tl.type()))
print("A 32-bit float tensor: {}, {}".format(t, t.type()))
```
We can create a similar 2x3 tensor to the one above by using the `torch.zeros()` method, passing a sequence of dimensions to it:
```
t = torch.zeros(2, 3)
print(t)
```
There are many methods for creating tensors. We list some useful ones:
```
t_zeros = torch.zeros_like(t) # zeros_like returns a new tensor
t_ones = torch.ones(2, 3) # creates a tensor with 1s
t_fives = torch.empty(2, 3).fill_(5) # creates a non-initialized tensor and fills it with 5
t_random = torch.rand(2, 3) # creates a uniform random tensor
t_normal = torch.randn(2, 3) # creates a normal random tensor
print(t_zeros)
print(t_ones)
print(t_fives)
print(t_random)
print(t_normal)
```
We now see two important paradigms emerging in PyTorch. The _imperative_ approach to performing operations, using _inplace_ methods, is in marked contrast with an additional paradigm also used in PyTorch, the _functional_ approach, where the returned object is a copy of the original object. Both paradigms have their specific use cases, as we will see below. The rule of thumb is that _inplace_ methods are faster and in general don't require extra memory allocation, but they can be tricky to reason about (keep this in mind regarding the computational graph that we will see below). _Functional_ methods make the code referentially transparent, which is a highly desirable property that makes it easier to understand the underlying math, but we rely on the efficiency of the implementation:
```
# creates a new copy of the tensor that is still linked to
# the computational graph (see below)
t1 = torch.clone(t)
assert id(t) != id(t1), 'Functional methods create a new copy of the tensor'
# To create a new _independent_ copy, we do need to detach
# from the graph
t1 = torch.clone(t).detach()
```
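To make the contrast concrete, here is a small illustrative sketch (not from the original notebook): `add` follows the functional style and returns a new tensor, while `add_` mutates its tensor in place.
```
# Illustrative sketch: functional vs. in-place addition on the same starting tensor
t = torch.ones(2, 3)
u = t.add(1)   # functional: returns a new tensor, t is left untouched
print(u)       # all 2s
print(t)       # still all 1s
t.add_(1)      # in-place: modifies t itself (note the trailing underscore)
print(t)       # now all 2s, same object as before
```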
#### Using the PyTorch–NumPy bridge
A quite useful feature of PyTorch is its almost seamless integration with NumPy, which allows us to perform operations on NumPy and interact from PyTorch with the large number of NumPy libraries as well. Converting a NumPy multi-dimensional array into a PyTorch tensor is very simple: we only need to call the `tensor` method with NumPy objects as the argument:
```
# Create a new multi-dimensional array in NumPy with the np datatype (np.float32)
a = np.array([1., 2., 3.])
# Convert the array to a torch tensor
t = torch.tensor(a)
print("NumPy array: {}, type: {}".format(a, a.dtype))
print("Torch tensor: {}, type: {}".format(t, t.dtype))
```
We can also seamlessly convert a PyTorch tensor into a NumPy array:
```
t.numpy()
```
**Side note**: why not `torch.from_numpy(a)`? The `from_numpy()` method is deprecated in favor of `tensor()`, which is a more capable method in the torch package. `from_numpy()` is only there for backwards compatibility. It can be a little bit quirky, so I recommend using the newer method in PyTorch >= 0.4.
#### Indexing
Indexing works as expected with NumPy:
```
t = torch.randn(2, 3)
t[ : , 0]
```
PyTorch also supports indexing using long tensors, for example:
```
t = torch.randn(5, 6)
print(t)
i = torch.tensor([1, 3])
j = torch.tensor([4, 5])
print(t[i]) # selects rows 1 and 3
print(t[i, j]) # selects (1, 4) and (3, 5)
```
#### Type conversion
Each tensor has a set of convenient methods to convert types. For example, if we want to convert the tensor above to a 32-bit float tensor, we use the method `.float()`:
```
t = t.float() # converts to 32-bit float
print(t)
t = t.double() # converts to 64-bit float
print(t)
t = t.byte() # converts to unsigned 8-bit integer
print(t)
```
### Operations on tensors
Now that we know how to create tensors, let's create some of the fundamental tensors and see some common operations on them:
```
# Scalars =: creates a tensor with a scalar
# (zero-th order tensor, i.e. just a number)
s = torch.tensor(42)
print(s)
```
**Tip**: a very convenient way to access scalars is with `.item()`:
```
s.item()
```
Let's see higher-order tensors – remember we can always inspect the dimensionality of a tensor using the `.size()` method:
```
# Row vector
x = torch.randn(1,3)
print("Row vector\n{}\nwith size {}".format(x, x.size()))
# Column vector
v = torch.randn(3,1)
print("Column vector\n{}\nwith size {}".format(v, v.size()))
# Matrix
A = torch.randn(3, 3)
print("Matrix\n{}\nwith size {}".format(A, A.size()))
```
A common operation is matrix-vector multiplication (and in general tensor-tensor multiplication). For example, the product $\mathbf{A}\mathbf{v} + \mathbf{b}$ is as follows:
```
u = torch.matmul(A, v)
print(u)
b = torch.randn(3,1)
y = u + b # we can also do torch.add(u, b)
print(y)
```
where we retrieve the expected result (a column vector of dimensions 3x1). We can of course compose operations:
```
s = torch.matmul(x, torch.matmul(A, v))
print(s.item())
```
There are many functions implemented for every tensor, and we encourage you to study the documentation. Some of the most common ones:
```
# common tensor methods (they also have the counterpart in
# the torch package, e.g. as torch.sum(t))
t = torch.randn(2,3)
t.sum(dim=0)
t.t() # transpose
t.numel() # number of elements in tensor
t.nonzero() # indices of non-zero elements
t.view(-1, 2) # reorganizes the tensor to these dimensions
t.squeeze() # removes size 1 dimensions
t.unsqueeze(0) # inserts a dimension
# operations in the package
torch.arange(0, 10) # tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
torch.eye(3, 3) # creates a 3x3 matrix with 1s in the diagonal (identity in this case)
t = torch.arange(0, 3)
torch.cat((t, t)) # tensor([0, 1, 2, 0, 1, 2])
torch.stack((t, t)) # tensor([[0, 1, 2],
# [0, 1, 2]])
```
## GPU computation
Deep learning frameworks take advantage of the powerful computational capabilities of modern graphics processing units (GPUs). GPUs were originally designed to perform the operations that graphics workloads need most frequently, such as linear algebra, very efficiently and fast, which makes them ideal for our purposes. PyTorch makes it very easy to use the GPU: the common scenario is to tell the framework that we want to instantiate a tensor with a type that makes it a GPU tensor, or to move a given CPU tensor to the GPU. All the tensors that we have seen above are CPU tensors, and PyTorch has the counterparts for GPU tensors in the `torch.cuda` module. Let's see how this works.
A common way to explicitly declare the tensor type as a GPU tensor is through the use of the constructor method for tensor creation inside the `torch.cuda` module:
```
try:
t_gpu = torch.cuda.FloatTensor(3, 3) # creation of a GPU tensor
t_gpu.zero_() # initialization to zero
except TypeError as err:
print(err)
```
However, a more common approach that gives us flexibility is through the use of devices. A device in PyTorch refers to either the CPU (indicated by the string "cpu") or one of the possible GPU cards in the machine (indicated by the string "cuda:$n$", where $n$ is the index of the card). Let's create a random gaussian matrix using a method from the `torch` package, and set the computational device to be the GPU by specifying the `device` to be `cuda:0`, the first GPU card in our machine (this code will fail if you don't have a GPU, but we will work around that below):
```
try:
t_gpu = torch.randn(3, 3, device="cuda:0")
except AssertionError as err:
print(err)
t_gpu = None
t_gpu
```
As you can notice, the tensor now has the explicit device set to be a CUDA device, not a CPU device. Let's now create a tensor in the CPU and move it to the GPU:
```
# we could also state explicitly the device to be the
# CPU with torch.randn(3,3,device="cpu")
t = torch.randn(3, 3)
t
```
In this case, the device is the CPU, but PyTorch does not explicitly say that given that this is the default behavior. To copy the tensor to the GPU we use the `.to()` method that every tensor implements, passing the device as an argument. This method creates a copy in the specified device or, if the tensor already resides in that device, it returns the original tensor ([Lapan 2018](#References)):
```
try:
t_gpu = t.to("cuda:0") # copies the tensor from CPU to GPU
# note that if we do now t_to_gpu.to("cuda:0") it will
# return the same tensor without doing anything else
# as this tensor already resides on the GPU
print(t_gpu)
print(t_gpu.device)
except AssertionError as err:
print(err)
```
**Tip**: When we program PyTorch models, we will have to specify the device in several places (not so many, but definitely more than once). A good practice that is consistent across the implementation and makes the code more portable is to declare, early in the code, a device variable by querying the framework whether there is a GPU available that we can use. We can do this by writing
```
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
print(device)
```
We can then use `device` as an argument of the `.to()` method in the rest of our code:
```
# moves t to the device (this code will **not** fail if the
# local machine has not access to a GPU)
t.to(device)
```
**Side note**: having good GPU backend support is a critical aspect of a deep learning framework. Some models depend crucially on performing computations on a GPU. Most frameworks, including PyTorch, only provide good support for GPUs manufactured by Nvidia. This is mostly due to the heavy investment this company made in CUDA (Compute Unified Device Architecture), the underlying parallel computing platform that enables this type of scientific computing (and the reason for the device label), with specific implementations targeted to deep neural networks such as cuDNN. Other GPU manufacturers, most notably AMD, are making efforts towards enabling ML computing on their cards, but their support is still partial.
## Neural network foundations
Computing gradients is a crucial feature in deep learning, given that the training procedure of neural networks relies on optimization techniques that update the parameters of the model by using the gradient information of a scalar magnitude – the loss function. How is it possible to compute the derivatives? There are different methods, namely
- **Symbolic Differentiation**: given a symbolic expression, the software provides the derivative by performing symbolic transformations (e.g. Wolfram Alpha). The benefits are clear, but it is not always possible to compute an analytical expression.
- **Numerical Differentiation**: computes the derivatives using expressions that are suitable to be evaluated numerically, using the finite differences method to several orders of approximation. A big drawback is that these methods are slow.
- **Automatic Differentiation**: a library adds to the set of functional primitives an implementation of the derivative for each of these functions. Thus, if the library contains the function $sin(x)$, it also implements the derivative of this function, $\frac{d}{dx}sin(x) = cos(x)$. Then, given a composition of functions, the library can compute the derivative with respect a variable by successive application of the chain rule, a method that is known in deep learning as backpropagation.
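To make the contrast between the last two approaches concrete, here is a minimal sketch (not from the original text) comparing a central finite-difference estimate with PyTorch's automatic differentiation on $f(x) = \sin(x)$, whose exact derivative is $\cos(x)$:
```
import torch

x0 = 1.5
h = 1e-4

# Numerical differentiation: central finite differences
numerical = (torch.sin(torch.tensor(x0 + h)) - torch.sin(torch.tensor(x0 - h))) / (2 * h)

# Automatic differentiation: the framework records sin() and applies its known derivative
x = torch.tensor(x0, requires_grad=True)
y = torch.sin(x)
y.backward()                                  # computes dy/dx and stores it in x.grad
print(numerical.item(), x.grad.item(), torch.cos(torch.tensor(x0)).item())
```
All three printed values agree up to the finite-difference approximation error.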
### Automatic differentiation
Modern deep learning libraries are capable of performing automatic differentiation. The two main approaches to computing the graph are _static_ and _dynamic_ processing ([Lapan 2018](#References)):
- **Static graphs**: the deep learning framework converts the computational graph into a static representation that cannot be modified. This allows the library developers to do very aggressive optimizations on this static graph ahead of computation time, pruning some areas and transforming others so that the final product is highly optimized and fast. The drawback is that some models can be really hard to implement with this approach. For example, TensorFlow uses static graphs. Having static graphs is part of the reason why TensorFlow has excellent support for sequence processing, which makes it very popular in NLP.
- **Dynamic graphs**: the framework does not create a graph ahead of computation, but records the operations that are performed, which can be quite different for different inputs. When it is time to compute the gradients, it unrolls the graph and perform the computations. A major benefit of this approach is that implementing complex models can be easier in this paradigm. This flexibility comes at the expense of the major drawback of this approach: speed. Dynamic graphs cannot leverage the same level of ahead-of-time optimization as static graphs, which makes them slower. PyTorch uses dynamic graphs as the underlying paradigm for gradient computation.
Here is a simple graph to compute $y = wx + b$ (from [Rao and McMahan 2019](#References)):
<img src="fig/simple_computation_graph.png" width=500 />
PyTorch computes the graph using the Autograd system. Autograd records a graph when performing the forward pass (function application), keeping track of all the tensors defined as inputs. These are the leaves of the graph. The output tensors are the roots of the graph. By navigating this graph from root to leaves, the gradients are automatically computed using the chain rule. In summary,
- Forward pass (the successive function application) goes from leaves to root: in PyTorch we simply apply the module to its input (i.e., we call it).
- Once the forward pass is completed, Autograd has recorded the graph and the backward pass (chain rule) can be performed by calling the `backward()` method on the root of the graph.
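As a minimal sketch of this forward/backward flow on the $y = wx + b$ graph shown above (not part of the original text):
```
import torch

# Leaves of the graph: the parameters we want gradients for
w = torch.tensor(2.0, requires_grad=True)
b = torch.tensor(1.0, requires_grad=True)
x = torch.tensor(3.0)

# Forward pass: Autograd records the operations as they are applied
y = w * x + b        # y is the root of the recorded graph
print(y)             # tensor(7., grad_fn=<AddBackward0>)

# Backward pass: chain rule from the root back to the leaves
y.backward()
print(w.grad)        # dy/dw = x = 3
print(b.grad)        # dy/db = 1
```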
### Modules
The base implementation for all neural network models in PyTorch is the class `Module` in the package `torch.nn`:
```
import torch.nn as nn
```
All our models subclass this base `nn.Module` class, which provides an interface to important methods used for constructing and working with our models, and which contains sensible initializations for our models. Modules can contain other modules (and usually do).
Let's see a simple, custom implementation of a multi-layer feed forward network. In the example below, our simple mathematical model is
$$\mathbf{y} = \mathbf{U}(f(\mathbf{W}(\mathbf{x})))$$
where $f$ is a non-linear function (a `ReLU`). This is directly translated into a similar expression in PyTorch: we simply subclass `nn.Module`, register the two affine transformations and the non-linearity, and implement their composition within the `forward` method:
```
class MyCustomModule(nn.Module):
def __init__(self, n_inputs, n_hidden, n_output_classes):
# call super to initialize the class above in the hierarchy
super(MyCustomModule, self).__init__()
# first affine transformation
self.W = nn.Linear(n_inputs, n_hidden)
# non-linearity (here it is also a layer!)
self.f = nn.ReLU()
# final affine transformation
self.U = nn.Linear(n_hidden, n_output_classes)
def forward(self, x):
y = self.U(self.f(self.W(x)))
return y
```
Then, we can use our new module as follows:
```
# set the network's architectural parameters
n_inputs = 3
n_hidden= 4
n_output_classes = 2
# instantiate the model
model = MyCustomModule(n_inputs, n_hidden, n_output_classes)
# create a simple input tensor
# size is [1,3]: a mini-batch of one example,
# this example having dimension 3
x = torch.FloatTensor([[0.3, 0.8, -0.4]])
# compute the model output by **applying** the input to the module
y = model(x)
# inspect the output
print(y)
```
As we see, the output is a tensor with its gradient function attached – Autograd tracks it for us.
**Tip**: modules override the `__call__()` method, where the framework does some additional work. Thus, instead of calling the `forward()` method directly, we apply the input to the model.
### Sequential
A powerful class in the `nn` package is `Sequential`, which allows us to express the code above more succinctly:
```
class MyCustomModule(nn.Module):
def __init__(self, n_inputs, n_hidden, n_output_classes):
super(MyCustomModule, self).__init__()
self.network = nn.Sequential(
nn.Linear(n_inputs, n_hidden),
nn.ReLU(),
nn.Linear(n_hidden, n_output_classes))
def forward(self, x):
y = self.network(x)
return y
```
As you can imagine, this can be handy when we have a large number of layers for which the actual names are not that meaningful. It also improves readability:
```
class MyCustomModule(nn.Module):
def __init__(self, n_inputs, n_hidden, n_output_classes):
super(MyCustomModule, self).__init__()
self.p_keep = 0.7
self.network = nn.Sequential(
nn.Linear(n_inputs, n_hidden),
nn.ReLU(),
nn.Linear(n_hidden, 2*n_hidden),
nn.ReLU(),
nn.Linear(2*n_hidden, n_output_classes),
# dropout argument is probability of dropping
nn.Dropout(1 - self.p_keep),
# applies softmax in the data dimension
nn.Softmax(dim=1)
)
def forward(self, x):
y = self.network(x)
return y
```
**Side note**: Another important package in `torch.nn` is `Functional`, typically imported as `F`. Functional contains many useful functions, from non-linear activations to convolutional, dropout, and even distance functions. Many of these functions have counterpart implementations as layers in the `nn` package so that they can be easily used in pipelines like the one above implemented using `nn.Sequential`.
```
import torch.nn.functional as F
y = F.relu(torch.FloatTensor([[-5, -1, 0, 5]]))
y
```
### Criteria and loss functions
PyTorch has implementations for the most common criteria in the `torch.nn` package. You may notice that, as with many of the other functions, there are two implementations of loss functions: the reference functions in `torch.nn.functional` and the practical classes in `torch.nn`, which are the ones we typically use. Probably the two most common ones are ([Lapan 2018](#References)):
- `nn.MSELoss` (mean squared error): squared $L_2$ norm used for regression.
- `nn.CrossEntropyLoss`: criterion used for classification as the result of combining `nn.LogSoftmax()` and `nn.NLLLoss()` (negative log likelihood), operating on the input scores directly. When possible, we recommend using this class instead of using a softmax layer plus a log conversion and `nn.NLLLoss`, given that the `LogSoftmax` implementation guards against common numerical errors, resulting in fewer instabilities.
Once our model produces a prediction, we pass it to the criteria to obtain a measure of the loss:
```
# the true label (in this case, class 1) from our dataset wrapped
# as a tensor of minibatch size of 1
y_gold = torch.tensor([1])
# our simple classification criterion for this simple example
criterion = nn.CrossEntropyLoss()
# forward pass of our model (remember, we apply the input to the model instead of calling forward directly)
y = model(x)
# apply the criterion to get the loss corresponding to the pair (x, y)
# with respect to the real y (y_gold)
loss = criterion(y, y_gold)
# the loss contains a gradient function that we can use to compute
# the gradient dL/dw (gradient with respect to the parameters
# for a given fixed input)
print(loss)
```
### Optimization
Once we have computed the loss for a training example or minibatch of examples, we update the parameters of the model guided by the information contained in the gradient. The role of updating the parameters belongs to the optimizer, and PyTorch has a number of implementations available right away – and if you don't find your preferred optimizer as part of the library, chances are that you will find an existing implementation. Also, coding your own optimizer is indeed quite easy in PyTorch.
**Side Note**: The following is a summary of the most common optimizers. It is intended to serve as a reference (I use this table myself quite a lot), and a short usage sketch follows the list below. In practice, most people pick an optimizer that has been proven to behave well on a given domain, but optimizers are also a very active area of research in numerical optimization, so it is a good idea to pay some attention to this subfield. We recommend using second-order dynamics with an adaptive time step:
- First-order dynamics
- Search direction only: `optim.SGD`
- Adaptive: `optim.RMSprop`, `optim.Adagrad`, `optim.Adadelta`
- Second-order dynamics
- Search direction only: Momentum `optim.SGD(momentum=0.9)`, Nesterov, `optim.SGD(nesterov=True)`
- Adaptive: `optim.Adam`, `optim.Adamax` (Adam with $L_\infty$)
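To make the table concrete, here is a minimal usage sketch (the parameter tensor and the loss below are placeholders, not part of the original text). Every optimizer is constructed by handing it the parameters it should update, and the update itself is always the same `zero_grad()` / `backward()` / `step()` pattern:
```
import torch
import torch.optim as optim

params = [torch.randn(3, requires_grad=True)]   # placeholder parameters

sgd      = optim.SGD(params, lr=0.01)                               # search direction only
momentum = optim.SGD(params, lr=0.01, momentum=0.9)                 # second-order dynamics
nesterov = optim.SGD(params, lr=0.01, momentum=0.9, nesterov=True)  # Nesterov momentum
adam     = optim.Adam(params, lr=0.001)                             # adaptive + second-order

loss = (params[0] ** 2).sum()   # placeholder scalar loss
adam.zero_grad()                # clean the gradients
loss.backward()                 # compute the gradients
adam.step()                     # update the parameters
```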
### Training a simple model
In order to illustrate the different concepts and techniques above, let's put them together in a very simple example: our objective will be to fit a very simple non-linear function, a sine wave:
$$y = a \sin(x + \phi)$$
where $a, \phi$ are the given amplitude and phase of the sine function. Our objective is to learn to approximate this function using a feed-forward network, that is:
$$ \hat{y} = f(x)$$
such that the error between $y$ and $\hat{y}$ is minimal according to our criterion. A natural criterion is to minimize the squared distance between the actual value of the sine wave and the value predicted by our function approximator, measured using the $L_2$ norm.
**Side Note**: Although this example is easy, simple variations of this setting can pose a big challenge, and are used currently to illustrate difficult problems in learning, especially in a very active subfield known as meta-learning.
Let's import all the modules that we are going to need:
```
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import numpy as np
import matplotlib.pyplot as plt
import math
```
Early on in the code, we define the device that we want to use:
```
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
```
Let's fix $a=1$, $\phi=0$ and generate training data in the interval $x \in [0,2\pi)$ using NumPy:
```
M = 1200
# sample from the x axis M points
x = np.random.rand(M) * 2*math.pi
# add noise
eta = np.random.rand(M) * 0.01
# compute the function
y = np.sin(x) + eta
# plot
_ = plt.scatter(x,y)
# use the NumPy-PyTorch bridge
x_train = torch.tensor(x[0:1000]).float().view(-1, 1).to(device)
y_train = torch.tensor(y[0:1000]).float().view(-1, 1).to(device)
x_test = torch.tensor(x[1000:]).float().view(-1, 1).to(device)
y_test = torch.tensor(y[1000:]).float().view(-1, 1).to(device)
class SineDataset(data.Dataset):
def __init__(self, x, y):
super(SineDataset, self).__init__()
assert x.shape[0] == y.shape[0]
self.x = x
self.y = y
def __len__(self):
return self.y.shape[0]
def __getitem__(self, index):
return self.x[index], self.y[index]
sine_dataset = SineDataset(x_train, y_train)
sine_dataset_test = SineDataset(x_test, y_test)
sine_loader = torch.utils.data.DataLoader(
sine_dataset, batch_size=32, shuffle=True)
sine_loader_test = torch.utils.data.DataLoader(
sine_dataset_test, batch_size=32)
class SineModel(nn.Module):
def __init__(self):
super(SineModel, self).__init__()
self.network = nn.Sequential(
nn.Linear(1, 5),
nn.ReLU(),
nn.Linear(5, 5),
nn.ReLU(),
nn.Linear(5, 5),
nn.ReLU(),
nn.Linear(5, 1))
def forward(self, x):
return self.network(x)
# declare the model
model = SineModel().to(device)
# define the criterion
criterion = nn.MSELoss()
# select the optimizer and pass to it the parameters of the model it will optimize
optimizer = torch.optim.Adam(model.parameters(), lr = 0.01)
epochs = 1000
# training loop
for epoch in range(epochs):
for i, (x_i, y_i) in enumerate(sine_loader):
y_hat_i = model(x_i) # forward pass
loss = criterion(y_hat_i, y_i) # compute the loss and perform the backward pass
optimizer.zero_grad() # cleans the gradients
loss.backward() # computes the gradients
optimizer.step() # update the parameters
if epoch % 20 == 0:  # plot intermediate predictions every 20 epochs
plt.scatter(x_i.data.cpu().numpy(), y_hat_i.data.cpu().numpy())
# testing
with torch.no_grad():
model.eval()
total_loss = 0.
for k, (x_k, y_k) in enumerate(sine_loader_test):
y_hat_k = model(x_k)
loss_test = criterion(y_hat_k, y_k)
total_loss += float(loss_test)
print(total_loss)
```
## Reproducibility
```
def enforce_reproducibility(seed=42):
# Sets seed manually for both CPU and CUDA
torch.manual_seed(seed)
# For atomic operations there is currently
# no simple way to enforce determinism, as
# the order of parallel operations is not known.
#
# CUDNN
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# System based
np.random.seed(seed)
enforce_reproducibility()
```
The function `utils.fix_random_seeds()` extends the above to the random seeds for NumPy and the Python `random` library.
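The `utils` module itself is not included in this notebook; as a rough sketch (an assumption about what such a helper might look like, not its actual implementation), it could simply combine the seeds for Python's `random` module, NumPy and PyTorch:
```
import random
import numpy as np
import torch

def fix_random_seeds(seed=42):
    # Hypothetical sketch: the real utils.fix_random_seeds() is not shown in this notebook
    random.seed(seed)                              # Python's built-in RNG
    np.random.seed(seed)                           # NumPy
    torch.manual_seed(seed)                        # PyTorch, CPU and CUDA
    torch.backends.cudnn.deterministic = True      # make cuDNN deterministic
    torch.backends.cudnn.benchmark = False
```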
## References
Lapan, Maxim (2018) *Deep Reinforcement Learning Hands-On*. Birmingham: Packt Publishing
Rao, Delip and Brian McMahan (2019) *Natural Language Processing with PyTorch*. Sebastopol, CA: O'Reilly Media
|
github_jupyter
|
# How to create Popups
## Simple popups
You can define your popup at feature creation, but you can also overwrite it afterwards:
```
import folium
m = folium.Map([45, 0], zoom_start=4)
folium.Marker([45, -30], popup="inline implicit popup").add_to(m)
folium.CircleMarker(
location=[45, -10],
radius=25,
fill=True,
popup=folium.Popup("inline explicit Popup"),
).add_to(m)
ls = folium.PolyLine(
locations=[[43, 7], [43, 13], [47, 13], [47, 7], [43, 7]], color="red"
)
ls.add_child(folium.Popup("outline Popup on Polyline"))
ls.add_to(m)
gj = folium.GeoJson(
data={"type": "Polygon", "coordinates": [[[27, 43], [33, 43], [33, 47], [27, 47]]]}
)
gj.add_child(folium.Popup("outline Popup on GeoJSON"))
gj.add_to(m)
m
m = folium.Map([45, 0], zoom_start=2)
folium.Marker(
location=[45, -10],
popup=folium.Popup("Let's try quotes", parse_html=True, max_width=100),
).add_to(m)
folium.Marker(
location=[45, -30],
popup=folium.Popup(u"Ça c'est chouette", parse_html=True, max_width="100%"),
).add_to(m)
m
```
## Vega Popup
You may know that it's possible to create awesome Vega charts with (or without) `vincent`. If you're willing to put one inside a popup, it's possible thanks to `folium.Vega`.
```
import json
import numpy as np
import vincent
scatter_points = {
"x": np.random.uniform(size=(100,)),
"y": np.random.uniform(size=(100,)),
}
# Let's create the vincent chart.
scatter_chart = vincent.Scatter(scatter_points, iter_idx="x", width=600, height=300)
# Let's convert it to JSON.
scatter_json = scatter_chart.to_json()
# Let's convert it to dict.
scatter_dict = json.loads(scatter_json)
m = folium.Map([43, -100], zoom_start=4)
popup = folium.Popup()
folium.Vega(scatter_chart, height=350, width=650).add_to(popup)
folium.Marker([30, -120], popup=popup).add_to(m)
# Let's create a Vega popup based on scatter_json.
popup = folium.Popup(max_width=0)
folium.Vega(scatter_json, height=350, width=650).add_to(popup)
folium.Marker([30, -100], popup=popup).add_to(m)
# Let's create a Vega popup based on scatter_dict.
popup = folium.Popup(max_width=650)
folium.Vega(scatter_dict, height=350, width=650).add_to(popup)
folium.Marker([30, -80], popup=popup).add_to(m)
m
```
## Fancy HTML popup
```
import branca
m = folium.Map([43, -100], zoom_start=4)
html = """
<h1> This is a big popup</h1><br>
With a few lines of code...
<p>
<code>
from numpy import *<br>
exp(-2*pi)
</code>
</p>
"""
folium.Marker([30, -100], popup=html).add_to(m)
m
```
You can also put any HTML code inside of a Popup, thanks to the `IFrame` object.
```
m = folium.Map([43, -100], zoom_start=4)
html = """
<h1> This popup is an Iframe</h1><br>
With a few lines of code...
<p>
<code>
from numpy import *<br>
exp(-2*pi)
</code>
</p>
"""
iframe = branca.element.IFrame(html=html, width=500, height=300)
popup = folium.Popup(iframe, max_width=500)
folium.Marker([30, -100], popup=popup).add_to(m)
m
import pandas as pd
df = pd.DataFrame(
data=[["apple", "oranges"], ["other", "stuff"]], columns=["cats", "dogs"]
)
m = folium.Map([43, -100], zoom_start=4)
html = df.to_html(
classes="table table-striped table-hover table-condensed table-responsive"
)
popup = folium.Popup(html)
folium.Marker([30, -100], popup=popup).add_to(m)
m
```
Note that you can put another `Figure` into an `IFrame`; this should let you do strange things...
```
# Let's create a Figure, with a map inside.
f = branca.element.Figure()
folium.Map([-25, 150], zoom_start=3).add_to(f)
# Let's put the figure into an IFrame.
iframe = branca.element.IFrame(width=500, height=300)
f.add_to(iframe)
# Let's put the IFrame in a Popup
popup = folium.Popup(iframe, max_width=2650)
# Let's create another map.
m = folium.Map([43, -100], zoom_start=4)
# Let's put the Popup on a marker, in the second map.
folium.Marker([30, -100], popup=popup).add_to(m)
# We get a map in a Popup. Not really useful, but powerful.
m
```
|
github_jupyter
|
##### Copyright 2018 The TensorFlow Probability Authors.
Licensed under the Apache License, Version 2.0 (the "License");
```
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Eight schools
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/probability/examples/Eight_Schools"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Eight_Schools.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Eight_Schools.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/probability/tensorflow_probability/examples/jupyter_notebooks/Eight_Schools.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
The eight schools problem ([Rubin 1981](https://www.jstor.org/stable/1164617)) considers the effectiveness of SAT coaching programs conducted in parallel at eight schools. It has become a classic problem ([Bayesian Data Analysis](http://www.stat.columbia.edu/~gelman/book/), [Stan](https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started)) that illustrates the usefulness of hierarchical modeling for sharing information between exchangeable groups.
The implementation below is an adaptation of an Edward 1.0 [tutorial](https://github.com/blei-lab/edward/blob/master/notebooks/eight_schools.ipynb).
# Imports
```
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
import warnings
tf.enable_v2_behavior()
plt.style.use("ggplot")
warnings.filterwarnings('ignore')
```
# The Data
From Bayesian Data Analysis, section 5.5 (Gelman et al. 2013):
> *A study was performed for the Educational Testing Service to analyze the effects of special coaching programs for SAT-V (Scholastic Aptitude Test-Verbal) in each of eight high schools. The outcome variable in each study was the score on a special administration of the SAT-V, a standardized multiple choice test administered by the Educational Testing Service and used to help colleges make admissions decisions; the scores can vary between 200 and 800, with mean about 500 and standard deviation about 100. The SAT examinations are designed to be resistant to short-term efforts directed specifically toward improving performance on the test; instead they are designed to reflect knowledge acquired and abilities developed over many years of education. Nevertheless, each of the eight schools in this study considered its short-term coaching program to be very successful at increasing SAT scores. Also, there was no prior reason to believe that any of the eight programs was more effective than any other or that some were more similar in effect to each other than to any other.*
For each of the eight schools ($J = 8$), we have an estimated treatment effect $y_j$ and a standard error of the effect estimate $\sigma_j$. The treatment effects in the study were obtained by a linear regression on the treatment group using PSAT-M and PSAT-V scores as control variables. As there was no prior belief that any of the schools were more or less similar or that any of the coaching programs would be more effective, we can consider the treatment effects as [exchangeable](https://en.wikipedia.org/wiki/Exchangeable_random_variables).
```
num_schools = 8 # number of schools
treatment_effects = np.array(
[28, 8, -3, 7, -1, 1, 18, 12], dtype=np.float32) # treatment effects
treatment_stddevs = np.array(
[15, 10, 16, 11, 9, 11, 10, 18], dtype=np.float32) # treatment SE
fig, ax = plt.subplots()
plt.bar(range(num_schools), treatment_effects, yerr=treatment_stddevs)
plt.title("8 Schools treatment effects")
plt.xlabel("School")
plt.ylabel("Treatment effect")
fig.set_size_inches(10, 8)
plt.show()
```
# Model
To capture the data, we use a hierarchical normal model. It follows the generative process,
$$
\begin{align*}
\mu &\sim \text{Normal}(\text{loc}{=}0,\ \text{scale}{=}10) \\
\log\tau &\sim \text{Normal}(\text{loc}{=}5,\ \text{scale}{=}1) \\
\text{for } & i=1\ldots 8:\\
& \theta_i \sim \text{Normal}\left(\text{loc}{=}\mu,\ \text{scale}{=}\tau \right) \\
& y_i \sim \text{Normal}\left(\text{loc}{=}\theta_i,\ \text{scale}{=}\sigma_i \right)
\end{align*}
$$
where $\mu$ represents the prior average treatment effect and $\tau$ controls how much variance there is between schools. The $y_i$ and $\sigma_i$ are observed. As $\tau \rightarrow \infty$, the model approaches the no-pooling model, i.e., each of the school treatment effect estimates is allowed to be more independent. As $\tau \rightarrow 0$, the model approaches the complete-pooling model, i.e., all of the school treatment effects are closer to the group average $\mu$. To restrict the standard deviation to be positive, we draw $\tau$ from a lognormal distribution (which is equivalent to drawing $\log(\tau)$ from a normal distribution).
Following [Diagnosing Biased Inference with Divergences](http://mc-stan.org/users/documentation/case-studies/divergences_and_bias.html), we transform the model above into an equivalent non-centered model:
$$
\begin{align*}
\mu &\sim \text{Normal}(\text{loc}{=}0,\ \text{scale}{=}10) \\
\log\tau &\sim \text{Normal}(\text{loc}{=}5,\ \text{scale}{=}1) \\
\text{for } & i=1\ldots 8:\\
& \theta_i' \sim \text{Normal}\left(\text{loc}{=}0,\ \text{scale}{=}1 \right) \\
& \theta_i = \mu + \tau \theta_i' \\
& y_i \sim \text{Normal}\left(\text{loc}{=}\theta_i,\ \text{scale}{=}\sigma_i \right)
\end{align*}
$$
We reify this model as a [JointDistributionSequential](https://www.tensorflow.org/probability/api_docs/python/tfp/distributions/JointDistributionSequential) instance:
```
model = tfd.JointDistributionSequential([
tfd.Normal(loc=0., scale=10., name="avg_effect"), # `mu` above
tfd.Normal(loc=5., scale=1., name="avg_stddev"), # `log(tau)` above
tfd.Independent(tfd.Normal(loc=tf.zeros(num_schools),
scale=tf.ones(num_schools),
name="school_effects_standard"), # `theta_prime`
reinterpreted_batch_ndims=1),
lambda school_effects_standard, avg_stddev, avg_effect: (
tfd.Independent(tfd.Normal(loc=(avg_effect[..., tf.newaxis] +
tf.exp(avg_stddev[..., tf.newaxis]) *
school_effects_standard), # `theta` above
scale=treatment_stddevs),
name="treatment_effects", # `y` above
reinterpreted_batch_ndims=1))
])
def target_log_prob_fn(avg_effect, avg_stddev, school_effects_standard):
"""Unnormalized target density as a function of states."""
return model.log_prob((
avg_effect, avg_stddev, school_effects_standard, treatment_effects))
```
# Bayesian Inference
Given data, we perform Hamiltonian Monte Carlo (HMC) to calculate the posterior distribution over the model's parameters.
```
num_results = 5000
num_burnin_steps = 3000
# Improve performance by tracing the sampler using `tf.function`
# and compiling it using XLA.
@tf.function(autograph=False, experimental_compile=True)
def do_sampling():
return tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=[
tf.zeros([], name='init_avg_effect'),
tf.zeros([], name='init_avg_stddev'),
tf.ones([num_schools], name='init_school_effects_standard'),
],
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
step_size=0.4,
num_leapfrog_steps=3))
states, kernel_results = do_sampling()
avg_effect, avg_stddev, school_effects_standard = states
school_effects_samples = (
avg_effect[:, np.newaxis] +
np.exp(avg_stddev)[:, np.newaxis] * school_effects_standard)
num_accepted = np.sum(kernel_results.is_accepted)
print('Acceptance rate: {}'.format(num_accepted / num_results))
fig, axes = plt.subplots(8, 2, sharex='col', sharey='col')
fig.set_size_inches(12, 10)
for i in range(num_schools):
axes[i][0].plot(school_effects_samples[:,i].numpy())
axes[i][0].title.set_text("School {} treatment effect chain".format(i))
sns.kdeplot(school_effects_samples[:,i].numpy(), ax=axes[i][1], shade=True)
axes[i][1].title.set_text("School {} treatment effect distribution".format(i))
axes[num_schools - 1][0].set_xlabel("Iteration")
axes[num_schools - 1][1].set_xlabel("School effect")
fig.tight_layout()
plt.show()
print("E[avg_effect] = {}".format(np.mean(avg_effect)))
print("E[avg_stddev] = {}".format(np.mean(avg_stddev)))
print("E[school_effects_standard] =")
print(np.mean(school_effects_standard[:, ]))
print("E[school_effects] =")
print(np.mean(school_effects_samples[:, ], axis=0))
# Compute the 95% interval for school_effects
school_effects_low = np.array([
np.percentile(school_effects_samples[:, i], 2.5) for i in range(num_schools)
])
school_effects_med = np.array([
np.percentile(school_effects_samples[:, i], 50) for i in range(num_schools)
])
school_effects_hi = np.array([
np.percentile(school_effects_samples[:, i], 97.5)
for i in range(num_schools)
])
fig, ax = plt.subplots(nrows=1, ncols=1, sharex=True)
ax.scatter(np.array(range(num_schools)), school_effects_med, color='red', s=60)
ax.scatter(
np.array(range(num_schools)) + 0.1, treatment_effects, color='blue', s=60)
plt.plot([-0.2, 7.4], [np.mean(avg_effect),
np.mean(avg_effect)], 'k', linestyle='--')
ax.errorbar(
np.array(range(8)),
school_effects_med,
yerr=[
school_effects_med - school_effects_low,
school_effects_hi - school_effects_med
],
fmt='none')
ax.legend(('avg_effect', 'HMC', 'Observed effect'), fontsize=14)
plt.xlabel('School')
plt.ylabel('Treatment effect')
plt.title('HMC estimated school treatment effects vs. observed data')
fig.set_size_inches(10, 8)
plt.show()
```
We can observe the shrinkage toward the group `avg_effect` above.
```
print("Inferred posterior mean: {0:.2f}".format(
np.mean(school_effects_samples[:,])))
print("Inferred posterior mean se: {0:.2f}".format(
np.std(school_effects_samples[:,])))
```
# Criticism
To get the posterior predictive distribution, i.e., a model of new data $y^*$ given the observed data $y$:
$$ p(y^*|y) \propto \int_\theta p(y^* | \theta)p(\theta |y)d\theta$$
we override the values of the random variables in the model to set them to the mean of the posterior distribution, and sample from that model to generate new data $y^*$.
```
sample_shape = [5000]
_, _, _, predictive_treatment_effects = model.sample(
value=(tf.broadcast_to(np.mean(avg_effect, 0), sample_shape),
tf.broadcast_to(np.mean(avg_stddev, 0), sample_shape),
tf.broadcast_to(np.mean(school_effects_standard, 0),
sample_shape + [num_schools]),
None))
fig, axes = plt.subplots(4, 2, sharex=True, sharey=True)
fig.set_size_inches(12, 10)
fig.tight_layout()
for i, ax in enumerate(axes):
sns.kdeplot(predictive_treatment_effects[:, 2*i].numpy(),
ax=ax[0], shade=True)
ax[0].title.set_text(
"School {} treatment effect posterior predictive".format(2*i))
sns.kdeplot(predictive_treatment_effects[:, 2*i + 1].numpy(),
ax=ax[1], shade=True)
ax[1].title.set_text(
"School {} treatment effect posterior predictive".format(2*i + 1))
plt.show()
# The mean predicted treatment effects for each of the eight schools.
prediction = np.mean(predictive_treatment_effects, axis=0)
```
We can look at the residuals between the treatment effects data and the predictions of the model posterior. These correspond with the plot above which shows the shrinkage of the estimated effects toward the population average.
```
treatment_effects - prediction
```
Because we have a distribution of predictions for each school, we can consider the distribution of residuals as well.
```
residuals = treatment_effects - predictive_treatment_effects
fig, axes = plt.subplots(4, 2, sharex=True, sharey=True)
fig.set_size_inches(12, 10)
fig.tight_layout()
for i, ax in enumerate(axes):
sns.kdeplot(residuals[:, 2*i].numpy(), ax=ax[0], shade=True)
ax[0].title.set_text(
"School {} treatment effect residuals".format(2*i))
sns.kdeplot(residuals[:, 2*i + 1].numpy(), ax=ax[1], shade=True)
ax[1].title.set_text(
"School {} treatment effect residuals".format(2*i + 1))
plt.show()
```
# Acknowledgements
This tutorial was originally written in Edward 1.0 ([source](https://github.com/blei-lab/edward/blob/master/notebooks/eight_schools.ipynb)). We thank all contributors to writing and revising that version.
# References
1. Donald B. Rubin. Estimation in parallel randomized experiments. Journal of Educational Statistics, 6(4):377-401, 1981.
2. Andrew Gelman, John Carlin, Hal Stern, David Dunson, Aki Vehtari, and Donald Rubin. Bayesian Data Analysis, Third Edition. Chapman and Hall/CRC, 2013.
|
github_jupyter
|
# CHAPTER 14 - Probabilistic Reasoning over Time
### George Tzanetakis, University of Victoria
## WORKPLAN
The section numbers are based on the 4th edition of the AIMA textbook and indicate the suggested
reading for this week. Each list entry provides just the additional sections; for example, the Expected reading includes the sections listed under Basic as well as the sections listed under Expected. Some additional readings are suggested for Advanced.
1. Basic: Sections **14.1**, **14.3**, and **Summary**
2. Expected: Same as Basic plus 14.2
3. Advanced: The whole chapter, including the bibliographical and historical notes
## Time and Uncertainty
Agents operate over time. They need to maintain a **belief state** (a set of variables (or random variables) indexed by time) that represents which states of the world are currently possible. From the **belief** state and a transition model, the agent can predict how the world might evolve in the next time step. From the percepts observed and a **sensor** model, the agent can update the **belief state**.
* CSP: belief states are variables with domains
* Logic: logical formulas describe which belief states are possible
* Probabilities: probabilities describe which belief states are likely
* **Transition model:** describe the probability distribution of the variables at time $t$ given the state of the world at past time
* **Sensor model:** the probability of each percept at time $t$, given the current state of the world
* Dynamic Bayesian Networks
* Hidden Markov Models
* Kalman Filters
### States and Observations
In **discrete-time** models, the world is viewed as a series of **time slices**.
Each time slice contains a set of **random variables**, some observable and some not.
*Example scenario:* you are the security guard stationed at a secret underground installation.
You want to know whether it is raining today, but your only access to the outside world
occurs each morning when you see the director coming in with, or without an umbrella.
For each day $t$, the evidence set $E_t$ contains a single evidence variable $Umbrella_{t}$ or $U_t$.
The state set $S_t$ contains a single state variable $Rain_{t}$ or $R_t$.
<img src="images/rain_umbrella_hmm.png" width="75%"/>
### Transition and Sensor Models
**TRANSITION MODEL**
* General form: $P(X_t | X_{0:t-1})$
**Markov Assumption** (Andrei Markov, 1856-1922): the current state depends only on a fixed number of previous states.
* First-order markov process: $P(X_t | X_{0:t-1}) = P(X_t | X_{t-1})$
Time-homogeneous process: the conditional transition probabilities are the same for all time steps $t$.
A Markov chain is a sequence of random variables
$X_1, X_2, X_3, . . .$ with the Markov property, namely that the probability of moving to the next state depends only on the present state and not on the previous states:
* $P(X_{n+1} = x|X_{1} = x_1,X_2 = x_2,...,X_n = x_n) = P(X_{n+1} = x|X_n = x_n)$
<img src="images/markov.png" width="30%"/>
The possible values of $X_i$ form a countable set $S$ called the state space of the chain. A **Markov Chain** can be specified by a transition matrix with the probabilities of going from a particular state to another state at every time step.
## Sensor model/observations
There are many application areas, for example speech recognition, in which we are interested in modeling probability distributions over sequences of observations. We will denote the observation at time $t$ by the variable $Y_t$. The variable can be a symbol from a discrete alphabet or a continuous variable, and we assume that the observations are sampled at discrete, equally-spaced time intervals, so $t$ can be an integer-valued time index.
## Inference in Temporal Models
* **Filtering:** we want to compute the posterior distribution over the current state, given all evidence to date. $P(X_t|e_{1:t})$. An almost identical calculation provides the likelihood of the evidence sequence $P(e_{1:T})$.
* **Prediction:** we want to compute the posterior distribution over a future state, given all evidence to date: $P(X_{t+k}|e_{1:t})$ for some $k > 0$.
* **Smoothing or hindsight:** computing the posterior distribution over a past state, given all evidence up to the present: $P(X_{t-k}|e_{1:t})$ for some $k < t$. It provides a better estimate of the state than what was available at the time, because it incorporates more evidence.
* **Most likely explanation:** Given a sequence of observations, we might wish to find the sequence of states that is most likely to have generated these observations. That is we wish to compute:
$argmax_{x_{1:t}} P(x_{1:t}|e_{1:t})$. This is the typical inference task in Speech Recognition using Hidden Markov Models.
### Sidenote: Speech Recognition
In phonology and linguistics, a phoneme is a unit of sound that can distinguish one word from another in a particular language. For example, the English words **book** and **took** differ in one phoneme (the b vs. t sound)
and contain the same two remaining phonemes: the **oo** sound and the **k** sound. There is a clear correspondence between the written alphabet symbols of a word and the corresponding phonemes, but in English there is a lot of confusing variation. For example, the written symbols **oo** correspond to a different phoneme in the word **door**. In languages like Spanish or Greek there is a stronger direct correspondence between the written symbols and phonemes, making it possible to "read" a Greek text without making phoneme errors even if you
don't know the underlying words, something much harder to do in English.
The task of speech recognition is to take as input an audio recording a human talking and convert that recording to written words. It is possible to convert written words to sequences of phonemes and vice versa using a phonetic dictionary. For example check: http://www.speech.cs.cmu.edu/cgi-bin/cmudict
There are different symbolic representations for phonemes. For example, the International Phonetic Alphabet is an alphabetic system of phonetic notation based primarily on the Latin script that tries to cover the sounds of all languages around the world. Interesting sidenote: all babies are born with the ability to recognize and also reproduce all phonemes, but as they age in a particular linguistic environment their ability gets restricted/pruned to the phonemes of the particular languages they are exposed to.
So once we have the phonetic dictionary our task becomes to convert an audio recording of a human voice to a sequence of phonemes that can then be converted to written words using a phonetic dictionary.
Without going into details we form different phonemes by appropriately shaping our mouths and tongue and using our vocal folds to produce pitched and unpitched phonemes/sounds (vowels and consonants). It is possible to compute features such as **Mel-Frequency Cepstral Coefficients (MFCC)** using Digital Signal Processing techniques that characterizes these configurations over short intervals of time (typically 20-40 milliseconds).
So now, the task of automatic speech recognition becomes given a time sequence of feature vectors (computed from the audio recording) find the most likely sequence of phonemes that produced that sequence of feature vectors.
Phonemes and especially vowels can have different durations so a particular word can be represented as a sequence of states corresponding to phonemes with repetitions. For example for the word **book** we might have the following sequence: $b,b,oo,oo,oo,oo,oo,oo,oo,oo,oo,oo,oo,k,k$ with informal state notation corresponding to the phonemes. Further complicating our task is the fact that depending on speakers and inflection there are many possible ways to render a particular phoneme. So we can also think of each phoneme as a distribution of feature vectors.
So let's look at some possible approaches to solve this problem in order of increasing complexity but
also improved accuracy:
1. We can train a classifier that, given a feature vector, predicts the corresponding phoneme. However, this approach does not take into account that different phonemes have different probabilities (for example, the phoneme corresponding to the written symbol $z$ is less likely than the phoneme corresponding to the vowel $a$ as in the word apple), that different phonemes have different typical durations (for example, vowels tend to be longer than consonants), and that certain transitions between phonemes (for example $z$ followed by $b$) are very unlikely if not impossible, whereas others are much more common (for example $r$ followed by $a$ as in the word apple).
2. We can model the probabilities of different phonemes and their transitions as a first-order Markov chain where the state is the phoneme, and then the observation output of each state can be modelled as a continuous probability distribution over the **MFCC** feature space. That way, duration and transition information is taken into account when performing automatic speech recognition.
Automatic Speech Recognition Systems based on Hidden Markov Models (HMMs) dominated the field for about 20 years until they were superseded by deep learning models in the last decade or so. They are still widely used especially in situations with restricted computational resources where deep learning systems are not practical.
## Hidden Markov Models
Properties:
* The observation at time $t$ is generated by some random process whose state $S_t$ is hidden from the observer.
* The hidden states form a **Markov Chain** i.e given the value of $S_{t−1}$, the current state $S_t$ is independent of all states prior to $t − 1$. The outputs also satisfy a Markov property which is that given state $S_t$, the observation $Y_t$ is independent of all previous states and observations.
* The hidden state variable $S_t$ is discrete
We can write the joint distribution of a sequence of states and observations by using the Markov assumptions to factorize:
* $P(S_{1:T},Y_{1:T}) = P(S_1)P(Y_1|S_1) \prod_{t=2}^{T}P(S_t|S_{t-1})P(Y_t|S_t)$
where the notation $X_{1:T}$ indicates the sequence $X_1,X_2,...,X_T$.
We can view the Hidden Markov Model graphically as a Bayesian network by unrolling over time - think of the HMM as a template for generating a Bayesian Network and the corresponding CPTs over time. In fact, it is possible
to perform the temporal inference tasks using exact or approximate inference of the corresponding Bayesian network but for **HMMs** there are significantly more efficient algorithms.
<img src="images/hmm2bayesnet.png" width="50%"/>
### Specifying an HMM
So all we need to do to specify an HMM are the following components:
* A probability distribution over the intial state $P(S_1)$
* The $K$ by $K$ state transition matrix $P(S_t|S_{t-1})$, where $K$ is the number of states
* The $K$ by $L$ emission matrix $P(Y_t|S_t)$ if $Y_t$ is discrete and has $L$ values, or the parameters $\theta_t$ of some form of continuous probability density function if $Y_t$ is continuous.
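As a small, concrete sketch of these three ingredients (not part of the original notes), here they are for the umbrella world used elsewhere in this chapter, together with the joint-probability factorization given above:
```
import numpy as np

# state 0 = Rain, state 1 = NoRain; observation 0 = Umbrella, 1 = NoUmbrella
initial  = np.array([0.5, 0.5])          # P(S_1)
transmat = np.array([[0.7, 0.3],         # P(S_t | S_{t-1})
                     [0.3, 0.7]])
emission = np.array([[0.9, 0.1],         # P(Y_t | S_t)
                     [0.2, 0.8]])

def joint_prob(states, obs):
    """P(S_{1:T}, Y_{1:T}) computed with the factorization above."""
    p = initial[states[0]] * emission[states[0], obs[0]]
    for t in range(1, len(states)):
        p *= transmat[states[t - 1], states[t]] * emission[states[t], obs[t]]
    return p

# probability of (Rain, Rain, NoRain) together with (Umbrella, Umbrella, NoUmbrella)
print(joint_prob([0, 0, 1], [0, 0, 1]))   # ~0.068
```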
### Learning the transition and sensor models
In addition to these tasks, we need methods for learning the transition and sensor models from observations. The basic idea is that inference provides an estimate of what transitions actually occurred and what states generated the observations. These estimates can then be used to update the models and the process can be repeated. This is an instance of the expectation-maximization (EM) algorithm. We will talk about learning probabilistic models in Chapter 20 Learning Probabilistic Models.
### Sketch of filtering and prediction (Forward)
We perform recursive estimation. First the current state distribution is projected forward from $t$ to $t + 1$. Then it is updated using the new evidence $e_{t+1}$. We will not cover the details but it can be done by recursive application of Bayes rule and the Markov property of evidence and the sum/product rules.
We can think of the filtered estimate $P(X_t|e_{1:t})$ as a “message” that is propagated forward along the sequence, modified by each transition, and updated by each new observation.
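A minimal NumPy sketch of this forward recursion for the umbrella model (the same numbers as in the worked filtering example at the end of this notebook; not part of the original notes):
```
import numpy as np

# state 0 = Rain, 1 = NoRain; observation 0 = Umbrella, 1 = NoUmbrella
transmat = np.array([[0.7, 0.3],
                     [0.3, 0.7]])    # P(X_{t+1} | X_t)
emission = np.array([[0.9, 0.1],
                     [0.2, 0.8]])    # P(E_t | X_t)

def forward_filter(prior, observations):
    """Return the filtered estimates P(X_t | e_{1:t}) for each time step."""
    belief, messages = prior, []
    for e in observations:
        predicted = transmat.T @ belief        # prediction step: project forward
        updated = emission[:, e] * predicted   # update step: weight by the evidence
        belief = updated / updated.sum()       # normalize
        messages.append(belief)
    return messages

# two days with the umbrella observed, starting from a uniform prior
for t, msg in enumerate(forward_filter(np.array([0.5, 0.5]), [0, 0]), start=1):
    print("P(Rain_{} | u_1:{}) = {:.3f}".format(t, t, msg[0]))
```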
### Sketch of smoothing (Backward)
There are two parts to computing the distribution over past states given evidence up to the present: the evidence up to $k$, and the evidence from $k + 1$ to $t$. The forward message can be computed by filtering from $1$ to $k$. Using conditional independence and the sum and product rules we can form a backward message that runs backwards from $t$. It is possible to combine both steps in one pass to smooth the entire sequence. This is, not surprisingly, called the **Forward-Backward** algorithm.
### Finding the most likely sequence
View each sequence of states as a path through a graph whose nodes are the possible states at each time step. The task is to find the most likely path through this graph, where the likelihood of any path is the product of the transition probabilities along the path and the probabilities of the given observations at each state. Because of the **Markov** property there is a recursive relationship between the most likely paths to each state $x_{t+1}$ and the most likely paths to each state $x_t$. By running forward along the sequence and computing the messages at each time step, we will have the probability of the most likely sequence reaching each of the final states. Then we simply select the most likely one. This is called the **Viterbi** algorithm.
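A compact sketch of this recursion, the Viterbi algorithm, for the same umbrella model (again an illustration, not part of the original notes):
```
import numpy as np

initial  = np.array([0.5, 0.5])
transmat = np.array([[0.7, 0.3],
                     [0.3, 0.7]])
emission = np.array([[0.9, 0.1],
                     [0.2, 0.8]])

def viterbi(observations):
    """Most likely hidden state sequence for a list of observation indices."""
    T, K = len(observations), len(initial)
    m = np.zeros((T, K))                 # m[t, k]: prob. of the best path ending in state k at time t
    back = np.zeros((T, K), dtype=int)   # back-pointers for path reconstruction
    m[0] = initial * emission[:, observations[0]]
    for t in range(1, T):
        for k in range(K):
            scores = m[t - 1] * transmat[:, k]      # extend every previous best path to state k
            back[t, k] = np.argmax(scores)
            m[t, k] = scores[back[t, k]] * emission[k, observations[t]]
    path = [int(np.argmax(m[-1]))]                  # most likely final state
    for t in range(T - 1, 0, -1):                   # follow the back-pointers
        path.append(int(back[t, path[-1]]))
    return list(reversed(path))

# 0 = Umbrella observed, 1 = no umbrella (state 0 = Rain, 1 = NoRain)
print(viterbi([0, 0, 1, 0, 0]))
```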
### Markov Chains and Hidden Markov Models Example
We start with random variables and a simple independent, identically distributed model for the weather. Then we look into how to form a Markov chain to transition between states, and finally we sample a Hidden Markov Model to show how the samples are generated based on the Markov chain of the hidden states. The results are visualized as strips of colored rectangles. Experimenting with the transition probabilities and the emission probabilities can lead to a better understanding of how Hidden Markov Models work in terms of generating data.
```
%matplotlib inline
import matplotlib.pyplot as plt
from scipy import stats
import numpy as np
from hmmlearn import hmm
class Random_Variable:
def __init__(self, name, values, probability_distribution):
self.name = name
self.values = values
self.probability_distribution = probability_distribution
if all(type(item) is np.int64 for item in values):
self.type = 'numeric'
self.rv = stats.rv_discrete(name = name, values = (values, probability_distribution))
elif all(type(item) is str for item in values):
self.type = 'symbolic'
self.rv = stats.rv_discrete(name = name, values = (np.arange(len(values)), probability_distribution))
self.symbolic_values = values
else:
self.type = 'undefined'
def sample(self,size):
if (self.type =='numeric'):
return self.rv.rvs(size=size)
elif (self.type == 'symbolic'):
numeric_samples = self.rv.rvs(size=size)
mapped_samples = [self.values[x] for x in numeric_samples]
return mapped_samples
def probs(self):
return self.probability_distribution
def vals(self):
print(self.type)
return self.values
```
### Generating random weather samples with a IID model with no time dependencies
Let's first create some random samples of a symbolic random variable corresponding to the weather, with two values, sunny (S) and cloudy (C), and generate random weather for 365 days. The assumption in this model is that the weather of each day is independent of the previous days and drawn from the same probability distribution.
```
values = ['S', 'C']
probabilities = [0.9, 0.1]
weather = Random_Variable('weather', values, probabilities)
samples = weather.sample(365)
print(",".join(samples))
```
Now let's visualize these samples, using yellow for sunny and grey for cloudy.
```
state2color = {}
state2color['S'] = 'yellow'
state2color['C'] = 'grey'
def plot_weather_samples(samples, state2color, title):
colors = [state2color[x] for x in samples]
x = np.arange(0, len(colors))
y = np.ones(len(colors))
plt.figure(figsize=(10,1))
plt.bar(x, y, color=colors, width=1)
plt.title(title)
plot_weather_samples(samples, state2color, 'iid')
```
### Markov Chain
Now, instead of independently sampling the weather random variable, let's form a Markov chain. The Markov chain will start at a particular state and then will either stay in the same state or transition to a different state based on a transition probability matrix. To accomplish that, we create a random variable for each row of the transition matrix, corresponding to the probabilities of the transitions emanating from the state associated with that row. Then we can use the Markov chain to generate sequences of samples and contrast these sequences with the iid weather model. By adjusting the transition probabilities you can, in a probabilistic way, control the different lengths of "stretches" of the same state.
```
def markov_chain(transmat, state, state_names, samples):
(rows, cols) = transmat.shape
rvs = []
values = list(np.arange(0,rows))
# create random variables for each row of transition matrix
for r in range(rows):
rv = Random_Variable("row" + str(r), values, transmat[r])
rvs.append(rv)
# start from initial state and then sample the appropriate
# random variable based on the state following the transitions
states = []
for n in range(samples):
state = rvs[state].sample(1)[0]
states.append(state_names[state])
return states
# transition matrices for the Markov Chain
transmat1 = np.array([[0.7, 0.3],
[0.2, 0.8]])
transmat2 = np.array([[0.9, 0.1],
[0.1, 0.9]])
transmat3 = np.array([[0.5, 0.5],
[0.5, 0.5]])
state2color = {}
state2color['S'] = 'yellow'
state2color['C'] = 'grey'
# plot the iid model too
samples = weather.sample(365)
plot_weather_samples(samples, state2color, 'iid')
samples1 = markov_chain(transmat1,0,['S','C'], 365)
plot_weather_samples(samples1, state2color, 'markov chain 1')
samples2 = markov_chain(transmat2,0,['S','C'],365)
plot_weather_samples(samples2, state2color, 'markov chain 2')
samples3 = markov_chain(transmat3,0,['S','C'], 365)
plot_weather_samples(samples3, state2color, 'markov_chain 3')
```
### Generating samples using a Hidden Markov Model
Let's now look at how a Hidden Markov Model works by having a Markov chain generate
a sequence of states, and for each state having a different emission probability distribution. When sunny we will output yellow or red with higher probability, and when cloudy blue or grey. First we will write the code directly and then we will use the hmmlearn package.
```
state2color = {}
state2color['S'] = 'yellow'
state2color['C'] = 'grey'
# generate random samples for a year
samples = weather.sample(365)
states = markov_chain(transmat1,0,['S','C'], 365)
plot_weather_samples(states, state2color, "markov chain 1")
# create two random variables one of the sunny state and one for the cloudy
sunny_colors = Random_Variable('sunny_colors', ['y', 'r', 'b', 'g'],
[0.6, 0.3, 0.1, 0.0])
cloudy_colors = Random_Variable('cloudy_colors', ['y', 'r', 'b', 'g'],
[0.0, 0.1, 0.4, 0.5])
def emit_obs(state, sunny_colors, cloudy_colors):
if (state == 'S'):
obs = sunny_colors.sample(1)[0]
else:
obs = cloudy_colors.sample(1)[0]
return obs
# iterate over the sequence of states and emit color based on the emission probabilities
obs = [emit_obs(s, sunny_colors, cloudy_colors) for s in states]
obs2color = {}
obs2color['y'] = 'yellow'
obs2color['r'] = 'red'
obs2color['b'] = 'blue'
obs2color['g'] = 'grey'
plot_weather_samples(obs, obs2color, "Observed sky color")
# let's zoom in a month
plot_weather_samples(states[0:30], state2color, 'states for a month')
plot_weather_samples(obs[0:30], obs2color, 'observations for a month')
```
### Multinomial HMM
Let's do the same generation process using the multinomial HMM model supported by the *hmmlearn* Python package.
```
transmat = np.array([[0.7, 0.3],
[0.2, 0.8]])
start_prob = np.array([1.0, 0.0])
# yellow and red have high probs for sunny
# blue and grey have high probs for cloudy
emission_probs = np.array([[0.6, 0.3, 0.1, 0.0],
[0.0, 0.1, 0.4, 0.5]])
model = hmm.MultinomialHMM(n_components=2)
model.startprob_ = start_prob
model.transmat_ = transmat
model.emissionprob_ = emission_probs
# sample the model - X is the observed values
# and Z is the "hidden" states
X, Z = model.sample(365)
# we have to re-define state2color and obj2color as the hmm-learn
# package just outputs numbers for the states
state2color = {}
state2color[0] = 'yellow'
state2color[1] = 'grey'
plot_weather_samples(Z, state2color, 'states')
samples = [item for sublist in X for item in sublist]
obj2color = {}
obj2color[0] = 'yellow'
obj2color[1] = 'red'
obj2color[2] = 'blue'
obj2color[3] = 'grey'
plot_weather_samples(samples, obj2color, 'observations')
```
### Estimating the parameters of an HMM
Let's sample the generative HMM and get a sequence of 10000 observations. Now we can learn, in an unsupervised way, the parameters of a two-component multinomial HMM just using these observations. Then we can compare the learned parameters with the original parameters of the model used to generate the observations. Notice that the order of the components is different between the original and estimated models. Notice also that hmmlearn does NOT directly support supervised training where you have both the labels and the observations. It is possible to initialize an HMM model with some of the parameters and learn the others. For example, you can initialize the transition matrix and learn the emission probabilities. That way you could implement supervised learning for a multinomial HMM. In many practical applications the hidden labels are not available, and that is the hard case that is actually implemented in hmmlearn.
The following two cells take a few minutes to compute on a typical laptop.
```
# generate the samples
X, Z = model.sample(10000)
# learn a new model
estimated_model = hmm.MultinomialHMM(n_components=2, n_iter=10000).fit(X)
```
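As mentioned above, we can emulate supervised training by fixing some parameters and learning only the others. A minimal sketch, assuming the same hmmlearn version and the `transmat`, `start_prob` and `X` defined above: restricting the `params` and `init_params` strings to `'e'` means only the emission probabilities are initialized and updated by EM.
```
# a sketch of "semi-supervised" training: keep the start and transition
# probabilities fixed and let EM update only the emission probabilities
semi_supervised = hmm.MultinomialHMM(n_components=2, n_iter=1000,
                                     params='e', init_params='e')
semi_supervised.startprob_ = start_prob
semi_supervised.transmat_ = transmat
semi_supervised.fit(X)
print(semi_supervised.emissionprob_)
```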
Let's compare the estimated model parameters with the original model.
```
print("Transition matrix")
print("Estimated model:")
print(estimated_model.transmat_)
print("Original model:")
print(model.transmat_)
print("Emission probabilities")
print("Estimated model")
print(estimated_model.emissionprob_)
print("Original model")
print(model.emissionprob_)
```
### Predicting a sequence of states given a sequence of observations
We can also use the trained HMM model to predict a sequence of hidden states given a sequence of observations. This is the task of maximum likelihood sequence estimation. For example in Speech Recognition it would correspond to estimating a sequence of phonemes (hidden states) from a sequence of observations (acoustic vectors).
This cell also takes a few minutes to compute. Note that whether the predicted or flipped predicted states correspond to the original depends on which state is selected as state 0 and state 1. So sometimes when you run the notebook the predicted states will be the right color, and sometimes the flipped states will be the right ones.
```
Z2 = estimated_model.predict(X)
state2color = {}
state2color[0] = 'yellow'
state2color[1] = 'grey'
plot_weather_samples(Z, state2color, 'Original states')
plot_weather_samples(Z2, state2color, 'Predicted states')
# note the reversal of colors for the states as the order of components is not the same.
# we can easily fix this by changing the state2color mapping
state2color = {}
state2color[1] = 'yellow'
state2color[0] = 'grey'
plot_weather_samples(Z2, state2color, 'Flipped Predicted states')
```
The estimated model can be sampled just like the original model
```
X, Z = estimated_model.sample(365)
state2color = {}
state2color[0] = 'yellow'
state2color[1] = 'grey'
plot_weather_samples(Z, state2color, 'states generated by estimated model ')
samples = [item for sublist in X for item in sublist]
obs2color = {}
obs2color[0] = 'yellow'
obs2color[1] = 'red'
obs2color[2] = 'blue'
obs2color[3] = 'grey'
plot_weather_samples(samples, obs2color, 'observations generated by estimated model')
```
### An example of filtering
<img src="images/rain_umbrella_hmm.png" width="75%"/>
* Day 0: no observations $P(R_0) = <0.5, 0.5>$
* Day 1: let's say umbrella appears, $U_{1} = true$.
* The prediction step from $t=0$ to $t=1$ is
$P(R_1) = \sum_{r_0} P(R_1 | r_0) P(r_0) = \langle 0.7, 0.3 \rangle \times 0.5 + \langle 0.3, 0.7 \rangle \times 0.5 = \langle 0.5, 0.5\rangle $
* The update step simply multiplies the probability of the evidence for $t=1$ and normalizes:
$P(R_1|u_1) = \alpha P(u_{1} | R_{1}) P(R_1) = \alpha \langle 0.9, 0.2 \rangle \times \langle 0.5, 0.5 \rangle = \alpha \langle 0.45, 0.1 \rangle \approx \langle 0.818, 0.182 \rangle $
* Day 2: let's say umbrella appears, $U_{2} = true$.
* The prediction step from $t=1$ to $t=2$ is $P(R_2 | u_1) = \sum_{r_1} P(R_2 | r_1) P(r_1 | u_1) = \langle 0.7, 0.3 \rangle \times 0.818 + \langle 0.3, 0.7 \rangle \times 0.182 \approx \langle 0.627, 0.373 \rangle $
* Updating with the evidence for $t=2$ gives: $P(R_2 | u_1, u_2) = \alpha P(u_2 | R_2) P(R_2 | u_1) = \alpha \langle 0.9, 0.2 \rangle \times \langle 0.627, 0.373 \rangle = \alpha \langle 0.565, 0.075 \rangle \approx \langle 0.883, 0.117 \rangle $
Intuitively, the probability of rain increases from day 1 to day 2 because the rain persists.
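The same filtering computation can be written as a small prediction/update recursion. A minimal sketch for the umbrella example above (the transition and sensor probabilities match the figure):
```
import numpy as np

# transition model P(R_t | R_{t-1}) and sensor model P(U_t | R_t)
T = np.array([[0.7, 0.3],               # row 0: R_{t-1} = rain, row 1: no rain
              [0.3, 0.7]])
sensor = {True:  np.array([0.9, 0.2]),  # P(umbrella = true | rain, no rain)
          False: np.array([0.1, 0.8])}  # P(umbrella = false | rain, no rain)

belief = np.array([0.5, 0.5])           # P(R_0)
for umbrella in [True, True]:           # umbrella observed on day 1 and day 2
    predicted = T.T.dot(belief)         # prediction step
    belief = sensor[umbrella] * predicted
    belief /= belief.sum()              # update step (alpha normalization)
    print(belief)
# prints approximately [0.818 0.182] and then [0.883 0.117]
```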
|
github_jupyter
|
# <span style="color:green"> Numerical Simulation Laboratory (NSL) </span>
## <span style="color:blue"> Numerical exercises 10</span>
### Exercise 10.1
By adapting your Genetic Algorithm code, developed during the Numerical Exercise 9, write a C++ code to solve the TSP with a **Simulated Annealing** (SA) algorithm. Apply your code to the optimization of a path among
- 30 cities randomly placed on a circumference
Show your results via:
- a picture of the length of the best path as a function of the iteration of your algorithm
- a picture of the best path
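The key difference with respect to the Genetic Algorithm is the acceptance rule: a mutated path is always accepted if it is shorter, and otherwise accepted with probability $e^{-\Delta L / T}$, while the temperature $T$ is slowly decreased. A minimal Python sketch of this Metropolis step (the function names are illustrative, not taken from the C++ code):
```
import numpy as np

def sa_accept(old_length, new_length, temperature):
    """Metropolis acceptance rule used by Simulated Annealing."""
    if new_length <= old_length:
        return True
    return np.random.rand() < np.exp(-(new_length - old_length) / temperature)

# usage sketch, assuming path_length and propose_swap exist elsewhere:
# new_path = propose_swap(path)
# if sa_accept(path_length(path), path_length(new_path), T):
#     path = new_path
```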
```
import numpy as np
import matplotlib.pyplot as plt
from os import system
from matplotlib.pyplot import figure
from time import time
start = time()
```
# CIRCUMFERENCE
```
form = 0
system('sh clean.sh')
system('./genetic.exe '+str(form))
results = np.loadtxt("results.txt", skiprows=1)
temps = results[:,0]
paths = results[:,1]
figure(figsize=(10,7), dpi=70)
plt.plot(temps, paths)
plt.axhline(2*np.pi, color='red', label='2π')
plt.xlabel("Temperature")
plt.ylabel("Path lengths")
plt.legend(loc='best')
plt.grid(True)
plt.show()
print('-- Best path reached:', paths[-1])
positions = np.loadtxt("best_conf.txt", skiprows=1)
x = positions[:,0]
y = positions[:,1]
figure(figsize=(10,10), dpi=70)
plt.plot(x, y, marker="o")
plt.xlabel('x')
plt.ylabel('y')
plt.show()
```
# SQUARE
```
form = 1
system('sh clean.sh')
system('./genetic.exe '+str(form))
results = np.loadtxt("results.txt", skiprows=1)
temps = results[:,0]
paths = results[:,1]
figure(figsize=(10,7), dpi=70)
plt.plot(temps, paths)
plt.xlabel("Temperature")
plt.ylabel("Path lengths")
plt.grid(True)
plt.show()
print('-- Best path reached:', paths[-1])
positions = np.loadtxt("best_conf.txt", skiprows=1)
x = positions[:,0]
y = positions[:,1]
figure(figsize=(10,10), dpi=70)
plt.plot(x, y, marker="o")
plt.xlabel('x')
plt.ylabel('y')
plt.show()
end = time()
print("-- Time for computation: ", int((end-start)*100.)/100., 'sec')
```
### Exercise 10.2
Parallelize with MPI libraries your Simulated Annealing code in order to solve the TSP by performing a *Random Search* with **parallel SA searches of the optimal path**:
each node should perform an independent SA search and only in the end you will compare the results of each node.
Apply your code to the *usual* TSP problems above.
_I run this only on the square, because the circumference is too easy and every node gets the same result._
_This part of the exercise has been run separately using MPI. I didn't dare attempt to use ```os.system``` with it._
```
best_conf = np.zeros((4, 31, 2))
results = np.zeros((4, 1000, 2))
for rank in range(0,4):
init = np.loadtxt("Parallel/best_conf"+str(rank)+".txt", skiprows=1)
init_2 = np.loadtxt("Parallel/results"+str(rank)+".txt", skiprows=1)
best_conf[rank, :, :] = init[:,:]
results[rank,:,:] = init_2[:,:]
for rank in range(0,4):
figure(figsize=(10,5), dpi=100)
plt.subplot(1,2,1)
plt.plot(best_conf[rank,:,0], best_conf[rank, :, 1], marker="o")
plt.title("Path rank: "+str(rank))
plt.xlabel("x")
plt.ylabel("y")
plt.subplot(1,2,2)
plt.plot(results[rank,:,0], results[rank,:,1])
plt.title("Path length to temperature, rank: "+str(rank))
plt.xlabel("Temperature")
plt.ylabel("Path length")
plt.show()
print("BEST: ", results[rank,-1,1], "------------ \n \n")
```
|
github_jupyter
|
```
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Plot style
sns.set()
%pylab inline
pylab.rcParams['figure.figsize'] = (4, 4)
# Avoid inaccurate floating values (for inverse matrices in dot product for instance)
# See https://stackoverflow.com/questions/24537791/numpy-matrix-inversion-rounding-errors
np.set_printoptions(suppress=True)
def plotVectors(vecs, cols, alpha=1):
"""
Plot set of vectors.
Parameters
----------
vecs : array-like
Coordinates of the vectors to plot. Each vectors is in an array. For
instance: [[1, 3], [2, 2]] can be used to plot 2 vectors.
cols : array-like
Colors of the vectors. For instance: ['red', 'blue'] will display the
first vector in red and the second in blue.
alpha : float
Opacity of vectors
Returns:
fig : instance of matplotlib.figure.Figure
The figure of the vectors
"""
plt.axvline(x=0, color='#A9A9A9', zorder=0)
plt.axhline(y=0, color='#A9A9A9', zorder=0)
for i in range(len(vecs)):
if (isinstance(alpha, list)):
alpha_i = alpha[i]
else:
alpha_i = alpha
x = np.concatenate([[0,0],vecs[i]])
plt.quiver([x[0]],
[x[1]],
[x[2]],
[x[3]],
angles='xy', scale_units='xy', scale=1, color=cols[i],
alpha=alpha_i)
```
$$
\newcommand\bs[1]{\boldsymbol{#1}}
\newcommand\norm[1]{\left\lVert#1\right\rVert}
$$
# Introduction
We will see some major concepts of linear algebra in this chapter. It is also quite heavy, so hang on! We will start by getting some ideas on eigenvectors and eigenvalues. We will develop the idea that a matrix can be seen as a linear transformation and that applying a matrix to its eigenvectors gives new vectors with the same direction. Then we will see how to express quadratic equations in matrix form. We will see that the eigendecomposition of the matrix corresponding to a quadratic equation can be used to find the minimum and maximum of this function. As a bonus, we will also see how to visualize linear transformations in Python!
# 2.7 Eigendecomposition
The eigendecomposition is one form of matrix decomposition. Decomposing a matrix means that we want to find a product of matrices that is equal to the initial matrix. In the case of the eigendecomposition, we decompose the initial matrix into the product of its eigenvectors and eigenvalues. Before anything else, let's see what eigenvectors and eigenvalues are.
# Matrices as linear transformations
As we have seen in [2.3](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-2.3-Identity-and-Inverse-Matrices/) with the example of the identity matrix, you can think of matrices as linear transformations. Some matrices will rotate your space, others will rescale it etc. So when we apply a matrix to a vector, we end up with a transformed version of the vector. When we say that we 'apply' the matrix to the vector it means that we calculate the dot product of the matrix with the vector. We will start with a basic example of this kind of transformation.
### Example 1.
```
A = np.array([[-1, 3], [2, -2]])
A
v = np.array([[2], [1]])
v
```
Let's plot this vector:
```
plotVectors([v.flatten()], cols=['#1190FF'])
plt.ylim(-1, 4)
plt.xlim(-1, 4)
```
Now, we will apply the matrix $\bs{A}$ to this vector and plot the old vector (light blue) and the new one (orange):
```
Av = A.dot(v)
print(Av)
plotVectors([v.flatten(), Av.flatten()], cols=['#1190FF', '#FF9A13'])
plt.ylim(-1, 4)
plt.xlim(-1, 4)
```
We can see that applying the matrix $\bs{A}$ has the effect of modifying the vector.
Now that you can think of matrices as linear transformation recipes, let's see the case of a very special type of vector: the eigenvector.
# Eigenvectors and eigenvalues
We have seen an example of a vector transformed by a matrix. Now imagine that the transformation of the initial vector gives us a new vector that has the exact same direction. The scale can be different but the direction is the same. Applying the matrix didn't change the direction of the vector. This special vector is called an eigenvector of the matrix. We will see that finding the eigenvectors of a matrix can be very useful.
<span class='pquote'>
Imagine that the transformation of the initial vector by the matrix gives a new vector with the exact same direction. This vector is called an eigenvector of $\bs{A}$.
</span>
This means that $\bs{v}$ is an eigenvector of $\bs{A}$ if $\bs{v}$ and $\bs{Av}$ are in the same direction, or, to rephrase it, if the vectors $\bs{Av}$ and $\bs{v}$ are parallel. The output vector is just a scaled version of the input vector. This scaling factor is $\lambda$, which is called the **eigenvalue** of $\bs{A}$.
$$
\bs{Av} = \lambda\bs{v}
$$
### Example 2.
Let $\bs{A}$ be the following matrix:
$$
\bs{A}=
\begin{bmatrix}
5 & 1\\\\
3 & 3
\end{bmatrix}
$$
We know that one eigenvector of A is:
$$
\bs{v}=
\begin{bmatrix}
1\\\\
1
\end{bmatrix}
$$
We can check that $\bs{Av} = \lambda\bs{v}$:
$$
\begin{bmatrix}
5 & 1\\\\
3 & 3
\end{bmatrix}
\begin{bmatrix}
1\\\\
1
\end{bmatrix}=\begin{bmatrix}
6\\\\
6
\end{bmatrix}
$$
We can see that:
$$
6\times \begin{bmatrix}
1\\\\
1
\end{bmatrix} = \begin{bmatrix}
6\\\\
6
\end{bmatrix}
$$
which means that $\bs{v}$ is indeed an eigenvector of $\bs{A}$. Also, the corresponding eigenvalue is $\lambda=6$.
We can represent $\bs{v}$ and $\bs{Av}$ to check if their directions are the same:
```
A = np.array([[5, 1], [3, 3]])
A
v = np.array([[1], [1]])
v
Av = A.dot(v)
orange = '#FF9A13'
blue = '#1190FF'
plotVectors([Av.flatten(), v.flatten()], cols=[blue, orange])
plt.ylim(-1, 7)
plt.xlim(-1, 7)
```
We can see that their directions are the same!
Another eigenvector of $\bs{A}$ is
$$
\bs{v}=
\begin{bmatrix}
1\\\\
-3
\end{bmatrix}
$$
because
$$
\begin{bmatrix}
5 & 1\\\\
3 & 3
\end{bmatrix}\begin{bmatrix}
1\\\\
-3
\end{bmatrix} = \begin{bmatrix}
2\\\\
-6
\end{bmatrix}
$$
and
$$
2 \times \begin{bmatrix}
1\\\\
-3
\end{bmatrix} =
\begin{bmatrix}
2\\\\
-6
\end{bmatrix}
$$
So the corresponding eigenvalue is $\lambda=2$.
```
v = np.array([[1], [-3]])
v
Av = A.dot(v)
plotVectors([Av.flatten(), v.flatten()], cols=[blue, orange])
plt.ylim(-7, 1)
plt.xlim(-1, 3)
```
This example shows that the eigenvectors $\bs{v}$ are vectors that change only in scale when we apply the matrix $\bs{A}$ to them. Here the scales were 6 for the first eigenvector and 2 for the second, but $\lambda$ can take any real or even complex value.
## Find eigenvalues and eigenvectors in Python
NumPy provides a function, `np.linalg.eig`, that returns the eigenvalues and eigenvectors (the first array corresponds to the eigenvalues and the second to the eigenvectors, concatenated in columns):
```python
(array([ 6., 2.]), array([[ 0.70710678, -0.31622777],
[ 0.70710678, 0.9486833 ]]))
```
Here is a demonstration with the preceding example.
```
A = np.array([[5, 1], [3, 3]])
A
np.linalg.eig(A)
```
We can see that the eigenvalues are the same as the ones we used before: 6 and 2 (first array).
The eigenvectors correspond to the columns of the second array. This means that the eigenvector corresponding to $\lambda=6$ is:
$$
\begin{bmatrix}
0.70710678\\\\
0.70710678
\end{bmatrix}
$$
The eigenvector corresponding to $\lambda=2$ is:
$$
\begin{bmatrix}
-0.31622777\\\\
0.9486833
\end{bmatrix}
$$
The eigenvectors look different because they do not necessarily have the same scaling as the ones we gave in the example. We can easily see that the first corresponds to a scaled version of our $\begin{bmatrix}
1\\\\
1
\end{bmatrix}$. But the same property still holds. We still have $\bs{Av} = \lambda\bs{v}$:
$$
\begin{bmatrix}
5 & 1\\\\
3 & 3
\end{bmatrix}
\begin{bmatrix}
0.70710678\\\\
0.70710678
\end{bmatrix}=
\begin{bmatrix}
4.24264069\\\\
4.24264069
\end{bmatrix}
$$
With $0.70710678 \times 6 = 4.24264069$. So there is an infinite number of eigenvectors corresponding to the eigenvalue $6$. They are equivalent because we are only interested in their directions.
For the second eigenvector we can check that it corresponds to a scaled version of $\begin{bmatrix}
1\\\\
-3
\end{bmatrix}$. We can draw these vectors and see if they are parallel.
```
v = np.array([[1], [-3]])
Av = A.dot(v)
v_np = [-0.31622777, 0.9486833]
plotVectors([Av.flatten(), v.flatten(), v_np], cols=[blue, orange, 'blue'])
plt.ylim(-7, 1)
plt.xlim(-1, 3)
```
We can see that the vector found with Numpy (in dark blue) is a scaled version of our preceding $\begin{bmatrix}
1\\\\
-3
\end{bmatrix}$.
## Rescaled vectors
As we saw with NumPy, if $\bs{v}$ is an eigenvector of $\bs{A}$, then any rescaled vector $s\bs{v}$ (with $s \neq 0$) is also an eigenvector of $\bs{A}$. The eigenvalue of the rescaled vector is the same.
Let's try to rescale
$$
\bs{v}=
\begin{bmatrix}
1\\\\
-3
\end{bmatrix}
$$
from our preceding example.
For instance,
$$
\bs{3v}=
\begin{bmatrix}
3\\\\
-9
\end{bmatrix}
$$
$$
\begin{bmatrix}
5 & 1\\\\
3 & 3
\end{bmatrix}
\begin{bmatrix}
3\\\\
-9
\end{bmatrix} =
\begin{bmatrix}
6\\\\
-18
\end{bmatrix} = 2 \times
\begin{bmatrix}
3\\\\
-9
\end{bmatrix}
$$
We indeed have $\bs{A}(3\bs{v}) = \lambda(3\bs{v})$ and the eigenvalue is still $\lambda=2$.
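We can quickly confirm this with NumPy:
```
A = np.array([[5, 1], [3, 3]])
v = np.array([[3], [-9]])   # 3 times the eigenvector [1, -3]
A.dot(v)                    # gives [[6], [-18]], which is 2 * v
```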
## Concatenating eigenvalues and eigenvectors
Now that we have an idea of what eigenvectors and eigenvalues are, we can see how they can be used to decompose a matrix. All eigenvectors of a matrix $\bs{A}$ can be concatenated in a matrix with each column corresponding to each eigenvector (like in the second array returned by `np.linalg.eig(A)`):
$$
\bs{V}=
\begin{bmatrix}
1 & 1\\\\
1 & -3
\end{bmatrix}
$$
The first column $
\begin{bmatrix}
1\\\\
1
\end{bmatrix}
$ corresponds to $\lambda=6$ and the second $
\begin{bmatrix}
1\\\\
-3
\end{bmatrix}
$ to $\lambda=2$.
The vector $\bs{\lambda}$ can be created from all eigenvalues:
$$
\bs{\lambda}=
\begin{bmatrix}
6\\\\
2
\end{bmatrix}
$$
Then the eigendecomposition is given by
$$
\bs{A}=\bs{V}\cdot diag(\bs{\lambda}) \cdot \bs{V}^{-1}
$$
<span class='pquote'>
We can decompose the matrix $\bs{A}$ with eigenvectors and eigenvalues. It is done with: $\bs{A}=\bs{V}\cdot diag(\bs{\lambda}) \cdot \bs{V}^{-1}$
</span>
$diag(\bs{\lambda})$ is a diagonal matrix (see [2.6](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-2.6-Special-Kinds-of-Matrices-and-Vectors/)) containing all the eigenvalues. Continuing with our example we have
$$
\bs{V}=\begin{bmatrix}
1 & 1\\\\
1 & -3
\end{bmatrix}
$$
The diagonal matrix is all zeros except for the diagonal, which contains our vector $\bs{\lambda}$.
$$
diag(\bs{\lambda})=
\begin{bmatrix}
6 & 0\\\\
0 & 2
\end{bmatrix}
$$
The inverse matrix of $\bs{V}$ can be calculated with numpy:
```
V = np.array([[1, 1], [1, -3]])
V
V_inv = np.linalg.inv(V)
V_inv
```
So let's plug
$$
\bs{V}^{-1}=\begin{bmatrix}
0.75 & 0.25\\\\
0.25 & -0.25
\end{bmatrix}
$$
into our equation:
$$
\begin{align*}
&\bs{V}\cdot diag(\bs{\lambda}) \cdot \bs{V}^{-1}\\\\
&=
\begin{bmatrix}
1 & 1\\\\
1 & -3
\end{bmatrix}
\begin{bmatrix}
6 & 0\\\\
0 & 2
\end{bmatrix}
\begin{bmatrix}
0.75 & 0.25\\\\
0.25 & -0.25
\end{bmatrix}
\end{align*}
$$
If we do the dot product of the first two matrices we have:
$$
\begin{bmatrix}
1 & 1\\\\
1 & -3
\end{bmatrix}
\begin{bmatrix}
6 & 0\\\\
0 & 2
\end{bmatrix} =
\begin{bmatrix}
6 & 2\\\\
6 & -6
\end{bmatrix}
$$
So with replacing into the equation:
$$
\begin{align*}
&\begin{bmatrix}
6 & 2\\\\
6 & -6
\end{bmatrix}
\begin{bmatrix}
0.75 & 0.25\\\\
0.25 & -0.25
\end{bmatrix}\\\\
&=
\begin{bmatrix}
6\times0.75 + (2\times0.25) & 6\times0.25 + (2\times-0.25)\\\\
6\times0.75 + (-6\times0.25) & 6\times0.25 + (-6\times-0.25)
\end{bmatrix}\\\\
&=
\begin{bmatrix}
5 & 1\\\\
3 & 3
\end{bmatrix}=
\bs{A}
\end{align*}
$$
Let's check our result with Python:
```
lambdas = np.diag([6,2])
lambdas
V.dot(lambdas).dot(V_inv)
```
That confirms our previous calculation.
## Real symmetric matrix
In the case of real symmetric matrices (more details about symmetric matrices in [2.6](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-2.6-Special-Kinds-of-Matrices-and-Vectors/)), the eigendecomposition can be expressed as
$$
\bs{A} = \bs{Q}\Lambda \bs{Q}^\text{T}
$$
where $\bs{Q}$ is the matrix with eigenvectors as columns and $\Lambda$ is $diag(\lambda)$.
### Example 3.
$$
\bs{A}=\begin{bmatrix}
6 & 2\\\\
2 & 3
\end{bmatrix}
$$
This matrix is symmetric because $\bs{A}=\bs{A}^\text{T}$. Its eigenvectors are:
$$
\bs{Q}=
\begin{bmatrix}
0.89442719 & -0.4472136\\\\
0.4472136 & 0.89442719
\end{bmatrix}
$$
and its eigenvalues put in a diagonal matrix gives:
$$
\bs{\Lambda}=
\begin{bmatrix}
7 & 0\\\\
0 & 2
\end{bmatrix}
$$
So let's begin to calculate $\bs{Q\Lambda}$:
$$
\begin{align*}
\bs{Q\Lambda}&=
\begin{bmatrix}
0.89442719 & -0.4472136\\\\
0.4472136 & 0.89442719
\end{bmatrix}
\begin{bmatrix}
7 & 0\\\\
0 & 2
\end{bmatrix}\\\\
&=
\begin{bmatrix}
0.89442719 \times 7 & -0.4472136\times 2\\\\
0.4472136 \times 7 & 0.89442719\times 2
\end{bmatrix}\\\\
&=
\begin{bmatrix}
6.26099033 & -0.8944272\\\\
3.1304952 & 1.78885438
\end{bmatrix}
\end{align*}
$$
with:
$$
\bs{Q}^\text{T}=
\begin{bmatrix}
0.89442719 & 0.4472136\\\\
-0.4472136 & 0.89442719
\end{bmatrix}
$$
So we have:
$$
\begin{align*}
\bs{Q\Lambda} \bs{Q}^\text{T}&=
\begin{bmatrix}
6.26099033 & -0.8944272\\\\
3.1304952 & 1.78885438
\end{bmatrix}
\begin{bmatrix}
0.89442719 & 0.4472136\\\\
-0.4472136 & 0.89442719
\end{bmatrix}\\\\
&=
\begin{bmatrix}
6 & 2\\\\
2 & 3
\end{bmatrix}
\end{align*}
$$
It works! For that reason, it can be useful to use symmetric matrices! Let's do the same thing easily with `linalg` from NumPy:
```
A = np.array([[6, 2], [2, 3]])
A
eigVals, eigVecs = np.linalg.eig(A)
eigVecs
eigVals = np.diag(eigVals)
eigVals
eigVecs.dot(eigVals).dot(eigVecs.T)
```
We can see that the result corresponds to our initial matrix.
# Quadratic form to matrix form
Eigendecomposition can be used to optimize quadratic functions. We will see that when $\bs{x}$ takes the values of an eigenvector, $f(\bs{x})$ takes the value of its corresponding eigenvalue.
<span class='pquote'>
When $\bs{x}$ takes the values of an eigenvector, $f(\bs{x})$ takes the value of its corresponding eigenvalue.
</span>
We will see in the following points how we can show that with different methods.
Let's have the following quadratic equation:
$$
f(\bs{x}) = ax_1^2 +(b+c)x_1x_2 + dx_2^2
$$
These quadratic forms can be generated by matrices:
$$
f(\bs{x})= \begin{bmatrix}
x_1 & x_2
\end{bmatrix}\begin{bmatrix}
a & b\\\\
c & d
\end{bmatrix}\begin{bmatrix}
x_1\\\\
x_2
\end{bmatrix} = \bs{x^\text{T}Ax}
$$
with:
$$
\bs{x} = \begin{bmatrix}
x_1\\\\
x_2
\end{bmatrix}
$$
and
$$
\bs{A}=\begin{bmatrix}
a & b\\\\
c & d
\end{bmatrix}
$$
We call them matrix forms. This form is useful to do various things on the quadratic equation like constrained optimization (see below).
<span class='pquote'>
Quadratic equations can be expressed under the matrix form
</span>
If you look at the relation between these forms, you can see that $a$ gives you the coefficient of $x_1^2$, $(b + c)$ the coefficient of $x_1x_2$ and $d$ the coefficient of $x_2^2$. This means that the same quadratic form can be obtained from an infinite number of matrices $\bs{A}$ by changing $b$ and $c$ while preserving their sum.
### Example 4.
$$
\bs{x} = \begin{bmatrix}
x_1\\\\
x_2
\end{bmatrix}
$$
and
$$
\bs{A}=\begin{bmatrix}
2 & 4\\\\
2 & 5
\end{bmatrix}
$$
gives the following quadratic form:
$$
2x_1^2 + (4+2)x_1x_2 + 5x_2^2\\\\=2x_1^2 + 6x_1x_2 + 5x_2^2
$$
but if:
$$
\bs{A}=\begin{bmatrix}
2 & -3\\\\
9 & 5
\end{bmatrix}
$$
we still have the same quadratic form:
$$
2x_1^2 + (-3+9)x_1x_2 + 5x_2^2\\\\=2x_1^2 + 6x_1x_2 + 5x_2^2
$$
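We can check numerically that the two matrices above generate the same quadratic form. A quick sketch for an arbitrary point $(x_1, x_2) = (2, 3)$:
```
x = np.array([[2], [3]])
A1 = np.array([[2, 4], [2, 5]])
A2 = np.array([[2, -3], [9, 5]])
print(x.T.dot(A1).dot(x))        # [[89]]
print(x.T.dot(A2).dot(x))        # [[89]]
print(2*2**2 + 6*2*3 + 5*3**2)   # 89
```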
### Example 5
For this example, we will go from the matrix form to the quadratic form using a symmetric matrix $\bs{A}$. Let's use the matrix of the example 3.
$$
\bs{x} = \begin{bmatrix}
x_1\\\\
x_2
\end{bmatrix}
$$
and
$$\bs{A}=\begin{bmatrix}
6 & 2\\\\
2 & 3
\end{bmatrix}
$$
$$
\begin{align*}
\bs{x^\text{T}Ax}&=
\begin{bmatrix}
x_1 & x_2
\end{bmatrix}
\begin{bmatrix}
6 & 2\\\\
2 & 3
\end{bmatrix}
\begin{bmatrix}
x_1\\\\
x_2
\end{bmatrix}\\\\
&=
\begin{bmatrix}
x_1 & x_2
\end{bmatrix}
\begin{bmatrix}
6 x_1 + 2 x_2\\\\
2 x_1 + 3 x_2
\end{bmatrix}\\\\
&=
x_1(6 x_1 + 2 x_2) + x_2(2 x_1 + 3 x_2)\\\\
&=
6 x_1^2 + 4 x_1x_2 + 3 x_2^2
\end{align*}
$$
Our quadratic equation is thus $6 x_1^2 + 4 x_1x_2 + 3 x_2^2$.
### Note
If $\bs{A}$ is a diagonal matrix (all 0 except the diagonal), the quadratic form of $\bs{x^\text{T}Ax}$ will have no cross term. Take the following matrix form:
$$
\bs{A}=\begin{bmatrix}
a & b\\\\
c & d
\end{bmatrix}
$$
If $\bs{A}$ is diagonal, then $b$ and $c$ are 0 and since $f(\bs{x}) = ax_1^2 +(b+c)x_1x_2 + dx_2^2$ there is no cross term. A quadratic form without cross term is called diagonal form since it comes from a diagonal matrix.
# Change of variable
A change of variable (or linear substitution) simply means that we replace a variable by another one. We will see that it can be used to remove the cross terms in our quadratic equation. Without the cross term, it will then be easier to characterize the function and eventually optimize it (i.e. finding its maximum or minimum).
## With the quadratic form
### Example 6.
Let's take again our previous quadratic form:
$$
\bs{x^\text{T}Ax} = 6 x_1^2 + 4 x_1x_2 + 3 x_2^2
$$
The change of variable will concern $x_1$ and $x_2$. We can replace $x_1$ with any combination of $y_1$ and $y_2$ and $x_2$ with any combination of $y_1$ and $y_2$. We will of course end up with a new equation. The nice thing is that we can find a specific substitution that leads to a simpler expression. Specifically, it can be used to get rid of the cross term (in our example: $4 x_1x_2$). We will see later why this is interesting.
Actually, the right substitution is given by the eigenvectors of the matrix used to generate the quadratic form. Let's recall that the matrix form of our equation is:
$$
\bs{x} = \begin{bmatrix}
x_1\\\\
x_2
\end{bmatrix}
$$
and
$$\bs{A}=\begin{bmatrix}
6 & 2\\\\
2 & 3
\end{bmatrix}
$$
and that the eigenvectors of $\bs{A}$ are:
$$
\begin{bmatrix}
0.89442719 & -0.4472136\\\\
0.4472136 & 0.89442719
\end{bmatrix}
$$
For the sake of simplification, note that these values correspond exactly to:
$$
\begin{bmatrix}
\frac{2}{\sqrt{5}} & -\frac{1}{\sqrt{5}}\\\\
\frac{1}{\sqrt{5}} & \frac{2}{\sqrt{5}}
\end{bmatrix} =
\frac{1}{\sqrt{5}}
\begin{bmatrix}
2 & -1\\\\
1 & 2
\end{bmatrix}
$$
So our first eigenvector is:
$$
\frac{1}{\sqrt{5}}
\begin{bmatrix}
2\\\\
1
\end{bmatrix}
$$
and our second eigenvector is:
$$
\frac{1}{\sqrt{5}}
\begin{bmatrix}
-1\\\\
2
\end{bmatrix}
$$
The change of variable will lead to:
$$
\begin{bmatrix}
x_1\\\\
x_2
\end{bmatrix} =
\frac{1}{\sqrt{5}}
\begin{bmatrix}
2 & -1\\\\
1 & 2
\end{bmatrix}
\begin{bmatrix}
y_1\\\\
y_2
\end{bmatrix} =
\frac{1}{\sqrt{5}}
\begin{bmatrix}
2y_1 - y_2\\\\
y_1 + 2y_2
\end{bmatrix}
$$
so we have
$$
\begin{cases}
x_1 = \frac{1}{\sqrt{5}}(2y_1 - y_2)\\\\
x_2 = \frac{1}{\sqrt{5}}(y_1 + 2y_2)
\end{cases}
$$
So far so good! Let's replace that in our example:
$$
\begin{align*}
\bs{x^\text{T}Ax}
&=
6 x_1^2 + 4 x_1x_2 + 3 x_2^2\\\\
&=
6 [\frac{1}{\sqrt{5}}(2y_1 - y_2)]^2 + 4 [\frac{1}{\sqrt{5}}(2y_1 - y_2)\frac{1}{\sqrt{5}}(y_1 + 2y_2)] + 3 [\frac{1}{\sqrt{5}}(y_1 + 2y_2)]^2\\\\
&=
\frac{1}{5}[6 (2y_1 - y_2)^2 + 4 (2y_1 - y_2)(y_1 + 2y_2) + 3 (y_1 + 2y_2)^2]\\\\
&=
\frac{1}{5}[6 (4y_1^2 - 4y_1y_2 + y_2^2) + 4 (2y_1^2 + 4y_1y_2 - y_1y_2 - 2y_2^2) + 3 (y_1^2 + 4y_1y_2 + 4y_2^2)]\\\\
&=
\frac{1}{5}(24y_1^2 - 24y_1y_2 + 6y_2^2 + 8y_1^2 + 16y_1y_2 - 4y_1y_2 - 8y_2^2 + 3y_1^2 + 12y_1y_2 + 12y_2^2)\\\\
&=
\frac{1}{5}(35y_1^2 + 10y_2^2)\\\\
&=
7y_1^2 + 2y_2^2
\end{align*}
$$
That's great! Our new equation doesn't have any cross terms!
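A quick numerical sanity check of this change of variable, using the eigenvector matrix given above and an arbitrary $\bs{y}$:
```
P = np.array([[2, -1], [1, 2]]) / np.sqrt(5)   # columns are the unit eigenvectors
y1, y2 = 0.2, 0.7                              # an arbitrary y
x1, x2 = P.dot([y1, y2])                       # change of variable x = Py
print(6*x1**2 + 4*x1*x2 + 3*x2**2)             # original form
print(7*y1**2 + 2*y2**2)                       # same value, no cross term
```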
## With the Principal Axes Theorem
Actually there is a simpler way to do the change of variable. We can stay in the matrix form. Recall that we start with the form:
<div>
$$
f(\bs{x})=\bs{x^\text{T}Ax}
$$
</div>
The linear substitution can be written in these terms. We want to replace the variables $\bs{x}$ by $\bs{y}$, related by:
<div>
$$
\bs{x}=P\bs{y}
$$
</div>
We want to find $P$ such that our new equation (after the change of variable) doesn't contain the cross terms. The first step is to substitute this into the first equation:
<div>
$$
\begin{align*}
\bs{x^\text{T}Ax}
&=
(\bs{Py})^\text{T}\bs{A}(\bs{Py})\\\\
&=
\bs{y}^\text{T}(\bs{P}^\text{T}\bs{AP})\bs{y}
\end{align*}
$$
</div>
Can you see how to transform the left-hand side ($\bs{x}$) into the right-hand side ($\bs{y}$)? The substitution is done by replacing $\bs{A}$ with $\bs{P^\text{T}AP}$. We also know that $\bs{A}$ is symmetric, and thus that there is a diagonal matrix $\bs{D}$ containing the eigenvalues of $\bs{A}$ and an orthogonal matrix $\bs{P}$ of its eigenvectors such that $\bs{D}=\bs{P}^\text{T}\bs{AP}$. We thus end up with:
<div>
$$
\bs{x^\text{T}Ax}=\bs{y^\text{T}\bs{D} y}
$$
</div>
<span class='pquote'>
We can use $\bs{D}$ to simplify our quadratic equation and remove the cross terms
</span>
All of this implies that we can use $\bs{D}$ to simplify our quadratic equation and remove the cross terms. If you remember from example 3, we know that the eigenvalues of $\bs{A}$ are:
<div>
$$
\bs{D}=
\begin{bmatrix}
7 & 0\\\\
0 & 2
\end{bmatrix}
$$
</div>
<div>
$$
\begin{align*}
\bs{x^\text{T}Ax}
&=
\bs{y^\text{T}\bs{D} y}\\\\
&=
\bs{y}^\text{T}
\begin{bmatrix}
7 & 0\\\\
0 & 2
\end{bmatrix}
\bs{y}\\\\
&=
\begin{bmatrix}
y_1 & y_2
\end{bmatrix}
\begin{bmatrix}
7 & 0\\\\
0 & 2
\end{bmatrix}
\begin{bmatrix}
y_1\\\\
y_2
\end{bmatrix}\\\\
&=
\begin{bmatrix}
7y_1 +0y_2 & 0y_1 + 2y_2
\end{bmatrix}
\begin{bmatrix}
y_1\\\\
y_2
\end{bmatrix}\\\\
&=
7y_1^2 + 2y_2^2
\end{align*}
$$
</div>
That's nice! If you look back to the change of variable that we have done in the quadratic form, you will see that we have found the same values!
This form (without cross-term) is called the **principal axes form**.
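We can also verify directly with NumPy that $\bs{P}^\text{T}\bs{AP}$ is diagonal, with the eigenvalues of $\bs{A}$ on the diagonal:
```
A = np.array([[6, 2], [2, 3]])
P = np.linalg.eig(A)[1]      # columns are the orthonormal eigenvectors
P.T.dot(A).dot(P).round(10)  # diagonal matrix with 7 and 2 on the diagonal
```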
### Summary
To summarise, the principal axes form can be found with
$$
\bs{x^\text{T}Ax} = \lambda_1y_1^2 + \lambda_2y_2^2
$$
where $\lambda_1$ is the eigenvalue corresponding to the first eigenvector and $\lambda_2$ the eigenvalue corresponding to the second eigenvector (second column of $\bs{P}$).
# Finding f(x) with eigendecomposition
We will see that there is a way to find $f(\bs{x})$ with eigenvectors and eigenvalues when $\bs{x}$ is a unit vector.
Let's start from:
$$
f(\bs{x}) =\bs{x^\text{T}Ax}
$$
We know that if $\bs{x}$ is an eigenvector of $\bs{A}$ and $\lambda$ the corresponding eigenvalue, then $
\bs{Ax}=\lambda \bs{x}
$. By replacing the term in the last equation we have:
$$
f(\bs{x}) =\bs{x^\text{T}\lambda x} = \bs{x^\text{T}x}\lambda
$$
Since $\bs{x}$ is a unit vector, $\norm{\bs{x}}_2=1$ and $\bs{x^\text{T}x}=1$ (cf. [2.5](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-2.5-Norms/) Norms). We end up with
$$
f(\bs{x}) = \lambda
$$
This is a useful property. If $\bs{x}$ is an eigenvector of $\bs{A}$, $f(\bs{x}) =\bs{x^\text{T}Ax}$ will take the value of the corresponding eigenvalue. We can see that this works only if the Euclidean norm of $\bs{x}$ is 1 (i.e. $\bs{x}$ is a unit vector).
### Example 7
This example will show that $f(\bs{x}) = \lambda$. Let's take the last example again; the eigenvectors of $\bs{A}$ were
$$
\bs{Q}=
\begin{bmatrix}
0.89442719 & -0.4472136\\\\
0.4472136 & 0.89442719
\end{bmatrix}
$$
and the eigenvalues
$$
\bs{\Lambda}=
\begin{bmatrix}
7 & 0\\\\
0 & 2
\end{bmatrix}
$$
So if:
$$
\bs{x}=\begin{bmatrix}
0.89442719 & 0.4472136
\end{bmatrix}
$$
$f(\bs{x})$ should be equal to 7. Let's check that's true.
$$
\begin{align*}
f(\bs{x}) &= 6 x_1^2 + 4 x_1x_2 + 3 x_2^2\\\\
&= 6\times 0.89442719^2 + 4\times 0.89442719\times 0.4472136 + 3 \times 0.4472136^2\\\\
&= 7
\end{align*}
$$
In the same way, if $\bs{x}=\begin{bmatrix}
-0.4472136 & 0.89442719
\end{bmatrix}$, $f(\bs{x})$ should be equal to 2.
$$
\begin{align*}
f(\bs{x}) &= 6 x_1^2 + 4 x_1x_2 + 3 x_2^2\\\\
&= 6\times (-0.4472136)^2 + 4\times (-0.4472136)\times 0.89442719 + 3 \times 0.89442719^2\\\\
&= 2
\end{align*}
$$
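The same check can be done directly with the matrix form $\bs{x^\text{T}Ax}$:
```
A = np.array([[6, 2], [2, 3]])
for x in (np.array([[0.89442719], [0.4472136]]),
          np.array([[-0.4472136], [0.89442719]])):
    print(x.T.dot(A).dot(x))   # approximately 7, then 2
```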
# Quadratic form optimization
Depending on the context, optimizing a function means finding its maximum or its minimum. It is, for instance, widely used to minimize the error of cost functions in machine learning.
Here we will see how eigendecomposition can be used to optimize quadratic functions and why this can be done easily without cross terms. The difficulty is that we want a constrained optimization, that is, to find the minimum or the maximum of the function for $\bs{x}$ being a unit vector.
### Example 8.
We want to optimize:
$$
f(\bs{x}) =\bs{x^\text{T}Ax} \textrm{ subject to }||\bs{x}||_2= 1
$$
In our last example we ended up with:
$$
f(\bs{x}) = 7y_1^2 + 2y_2^2
$$
And the constraint of $\bs{x}$ being a unit vector implies:
$$
||\bs{x}||_2 = 1 \Leftrightarrow x_1^2 + x_2^2 = 1
$$
We can also show that $\bs{y}$ has to be a unit vector if it is the case for $\bs{x}$. Recall first that $\bs{x}=\bs{Py}$:
$$
\begin{align*}
||\bs{x}||^2 &= \bs{x^\text{T}x}\\\\
&= (\bs{Py})^\text{T}(\bs{Py})\\\\
&= \bs{y^\text{T}P^\text{T}Py}\\\\
&= \bs{y^\text{T}y} \quad (\text{since } \bs{P^\text{T}P}=\bs{I} \text{ because } \bs{P} \text{ is orthogonal})\\\\
&= ||\bs{y}||^2
\end{align*}
$$
So $\norm{\bs{x}}^2 = \norm{\bs{y}}^2 = 1$ and thus $y_1^2 + y_2^2 = 1$
Since $y_1^2$ and $y_2^2$ cannot be negative because they are squared values, we can be sure that $2y_2^2\leq7y_2^2$. Hence:
$$
\begin{align*}
f(\bs{x}) &= 7y_1^2 + 2y_2^2\\\\
&\leq
7y_1^2 + 7y_2^2\\\\
&=
7(y_1^2+y_2^2)\\\\
&=
7
\end{align*}
$$
This means that the maximum value of $f(\bs{x})$ is 7.
In the same way, we can find the minimum of $f(\bs{x})$: since $7y_1^2\geq2y_1^2$, we have:
$$
\begin{align*}
f(\bs{x}) &= 7y_1^2 + 2y_2^2\\\\
&\geq
2y_1^2 + 2y_2^2\\\\
&=
2(y_1^2+y_2^2)\\\\
&=
2
\end{align*}
$$
And the minimum of $f(\bs{x})$ is 2.
### Summary
We can note that the minimum of $f(\bs{x})$ is the smallest eigenvalue of the corresponding matrix $\bs{A}$, and its maximum is the largest eigenvalue. Another useful fact is that these values are obtained when $\bs{x}$ takes the value of the corresponding eigenvector (check back the preceding paragraphs). In that way, $f(\bs{x})=7$ when $\bs{x}=\begin{bmatrix}0.89442719 & 0.4472136\end{bmatrix}$. This shows how useful eigenvalues and eigenvectors are in this kind of constrained optimization.
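We can confirm these bounds empirically by evaluating $f(\bs{x})$ on many random unit vectors; a small sketch:
```
A = np.array([[6, 2], [2, 3]])
angles = np.random.uniform(0, 2*np.pi, 10000)
xs = np.stack([np.cos(angles), np.sin(angles)])   # 10000 random unit vectors
values = (xs * A.dot(xs)).sum(axis=0)             # x^T A x for every column
print(values.min(), values.max())                 # close to 2 and 7
```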
## Graphical views
We saw that the quadratic functions $f(\bs{x}) = ax_1^2 +2bx_1x_2 + cx_2^2$ can be represented by the symmetric matrix $\bs{A}$:
$$
\bs{A}=\begin{bmatrix}
a & b\\\\
b & c
\end{bmatrix}
$$
Graphically, these functions can take one of three general shapes (click on the links to go to the Surface Plotter and move the shapes):
1.[Positive-definite form](https://academo.org/demos/3d-surface-plotter/?expression=x*x%2By*y&xRange=-50%2C+50&yRange=-50%2C+50&resolution=49) | 2.[Negative-definite form](https://academo.org/demos/3d-surface-plotter/?expression=-x*x-y*y&xRange=-50%2C+50&yRange=-50%2C+50&resolution=25) | 3.[Indefinite form](https://academo.org/demos/3d-surface-plotter/?expression=x*x-y*y&xRange=-50%2C+50&yRange=-50%2C+50&resolution=49)
:-------------------------:|:-------------------------:|:-------:
<img src="images/quadratic-functions-positive-definite-form.png" alt="Quadratic function with a positive definite form" title="Quadratic function with a positive definite form"> | <img src="images/quadratic-functions-negative-definite-form.png" alt="Quadratic function with a negative definite form" title="Quadratic function with a negative definite form"> | <img src="images/quadratic-functions-indefinite-form.png" alt="Quadratic function with a indefinite form" title="Quadratic function with a indefinite form">
With the constraint that $\bs{x}$ is a unit vector, the minimum of the function $f(\bs{x})$ corresponds to the smallest eigenvalue and is obtained with its corresponding eigenvector. The maximum corresponds to the largest eigenvalue and is obtained with its corresponding eigenvector.
# Conclusion
We have seen a lot of things in this chapter. We saw that linear algebra can be used to solve a variety of mathematical problems and more specifically that eigendecomposition is a powerful tool! However, it cannot be used for non square matrices. In the next chapter, we will see the Singular Value Decomposition (SVD) which is another way of decomposing matrices. The advantage of the SVD is that you can use it also with non-square matrices.
# BONUS: visualizing linear transformations
We can see the effect of eigenvectors and eigenvalues in linear transformations. We will first see how a linear transformation works. A linear transformation is a mapping between an input vector and an output vector. Different operations like projections or rotations are linear transformations. Every linear transformation can be thought of as applying a matrix to the input vector. We will see the meaning of this graphically. For that purpose, let's start by drawing the set of unit vectors (all vectors with a norm of 1).
```
t = np.linspace(0, 2*np.pi, 100)
x = np.cos(t)
y = np.sin(t)
plt.figure()
plt.plot(x, y)
plt.xlim(-1.5, 1.5)
plt.ylim(-1.5, 1.5)
plt.show()
```
Then, we will transform each of these points by applying a matrix $\bs{A}$. This is the goal of the function below, which takes a matrix as input and will draw:
- the origin set of unit vectors
- the transformed set of unit vectors
- the eigenvectors
- the eigenvectors scaled by their eigenvalues
```
def linearTransformation(transformMatrix):
orange = '#FF9A13'
blue = '#1190FF'
# Create original set of unit vectors
t = np.linspace(0, 2*np.pi, 100)
x = np.cos(t)
y = np.sin(t)
# Calculate eigenvectors and eigenvalues
eigVecs = np.linalg.eig(transformMatrix)[1]
eigVals = np.diag(np.linalg.eig(transformMatrix)[0])
# Create vectors of 0 to store new transformed values
newX = np.zeros(len(x))
newY = np.zeros(len(x))
for i in range(len(x)):
unitVector_i = np.array([x[i], y[i]])
# Apply the matrix to the vector
newXY = transformMatrix.dot(unitVector_i)
newX[i] = newXY[0]
newY[i] = newXY[1]
plotVectors([eigVecs[:,0], eigVecs[:,1]],
cols=[blue, blue])
plt.plot(x, y)
plotVectors([eigVals[0,0]*eigVecs[:,0], eigVals[1,1]*eigVecs[:,1]],
cols=[orange, orange])
plt.plot(newX, newY)
plt.xlim(-5, 5)
plt.ylim(-5, 5)
plt.show()
A = np.array([[1,-1], [-1, 4]])
linearTransformation(A)
```
We can see the unit circle in dark blue, the non-scaled eigenvectors in light blue, the transformed unit circle in green and the scaled eigenvectors in yellow.
It is worth noting that the eigenvectors are orthogonal here because the matrix is symmetric. Let's try with a non-symmetric matrix:
```
A = np.array([[1,1], [-1, 4]])
linearTransformation(A)
```
In this case, the eigenvectors are not orthogonal!
# References
## Videos of Gilbert Strang
- [Gilbert Strang, Lec21 MIT - Eigenvalues and eigenvectors](https://www.youtube.com/watch?v=lXNXrLcoerU)
- [Gilbert Strang, Lec 21 MIT, Spring 2005](https://www.youtube.com/watch?v=lXNXrLcoerU)
## Quadratic forms
- [David Lay, University of Colorado, Denver](http://math.ucdenver.edu/~esulliva/LinearAlgebra/SlideShows/07_02.pdf)
- [math.stackexchange QA](https://math.stackexchange.com/questions/2207111/eigendecomposition-optimization-of-quadratic-expressions)
## Eigenvectors
- [Victor Powell and Lewis Lehe - Interactive representation of eigenvectors](http://setosa.io/ev/eigenvectors-and-eigenvalues/)
## Linear transformations
- [Gilbert Strang - Linear transformation](http://ia802205.us.archive.org/18/items/MIT18.06S05_MP4/30.mp4)
- [Linear transformation - demo video](https://www.youtube.com/watch?v=wXCRcnbCsJA)
|
github_jupyter
|
<a href="https://colab.research.google.com/github/constantinpape/dl-teaching-resources/blob/main/exercises/classification/5_data_augmentation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Data Augmentation on CIFAR10
In this exercise we will use data augmentation to increase the available training data and thus improve the network training performance. We will use the same network architecture as in the previous exercise.
## Preparation
```
# load tensorboard extension
%load_ext tensorboard
# import torch and other libraries
import os
import numpy as np
import sklearn.metrics as metrics
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.optim import Adam
!pip install cifar2png
# check if we have gpu support
# colab offers free gpus, however they are not activated by default.
# to activate the gpu, go to 'Runtime->Change runtime type'.
# Then select 'GPU' in 'Hardware accelerator' and click 'Save'
have_gpu = torch.cuda.is_available()
# we need to define the torch device accordingly
if have_gpu:
print("GPU is available")
device = torch.device('cuda')
else:
print("GPU is not available, training will run on the CPU")
device = torch.device('cpu')
# run this in google colab to get the utils.py file
!wget https://raw.githubusercontent.com/constantinpape/training-deep-learning-models-for-vison/master/day1/utils.py
# we will reuse the training function, validation function and
# data preparation from the previous notebook
import utils
cifar_dir = './cifar10'
!cifar2png cifar10 cifar10
categories = os.listdir('./cifar10/train')
categories.sort()
images, labels = utils.load_cifar(os.path.join(cifar_dir, 'train'))
(train_images, train_labels,
val_images, val_labels) = utils.make_cifar_train_val_split(images, labels)
```
## Data Augmentation
The goal of data augmentation is to increase the amount of training data by transforming the input images in a way that they still resemble realistic images. Popular transformations used in data augmentation include rotations, image flips, color jitter or additive noise.
Here, we will start with two transformations:
- random flips along the vertical centerline
- random color jitters
```
# define random augmentations
import skimage.color as color
def random_flip(image, target, probability=.5):
""" Randomly mirror the image across the vertical axis.
"""
if np.random.rand() < probability:
image = np.array([np.fliplr(im) for im in image])
return image, target
def random_color_jitter(image, target, probability=.5):
""" Randomly jitter the saturation, hue and brightness of the image.
"""
    if np.random.rand() < probability:
# skimage expects WHC instead of CHW
image = image.transpose((1, 2, 0))
# transform image to hsv color space to apply jitter
image = color.rgb2hsv(image)
# compute jitter factors in range 0.66 - 1.5
jitter_factors = 1.5 * np.random.rand(3)
jitter_factors = np.clip(jitter_factors, 0.66, 1.5)
# apply the jitter factors, making sure we stay in correct value range
image *= jitter_factors
image = np.clip(image, 0, 1)
# transform back to rgb and CHW
image = color.hsv2rgb(image)
image = image.transpose((2, 0, 1))
return image, target
# create training dataset with augmentations
from functools import partial
train_trafos = [
utils.to_channel_first,
utils.normalize,
random_color_jitter,
random_flip,
utils.to_tensor
]
train_trafos = partial(utils.compose, transforms=train_trafos)
train_dataset = utils.DatasetWithTransform(train_images, train_labels,
transform=train_trafos)
# we don't use data augmentations for the validation set
val_dataset = utils.DatasetWithTransform(val_images, val_labels,
transform=utils.get_default_cifar_transform())
# sample augmentations
def show_image(ax, image):
# need to go back to numpy array and WHC axis order
image = image.numpy().transpose((1, 2, 0))
ax.imshow(image)
n_samples = 8
image_id = 0
fig, ax = plt.subplots(1, n_samples, figsize=(18, 4))
for sample in range(n_samples):
image, _ = train_dataset[0]
show_image(ax[sample], image)
# we reuse the model from the previous exercise
# if you want you can also use a different CNN architecture that
# you have designed in the tasks part of that exercise
model = utils.SimpleCNN(10)
model = model.to(device)
# instantiate loaders and optimizer and start tensorboard
train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=25)
optimizer = Adam(model.parameters(), lr=1.e-3)
%tensorboard --logdir runs
# we have moved all the boilerplate for the full training procedure to utils now
n_epochs = 10
utils.run_cifar_training(model, optimizer,
train_loader, val_loader,
device=device, name='da1',
n_epochs=n_epochs)
# evaluate the model on test data
test_dataset = utils.make_cifar_test_dataset(cifar_dir)
test_loader = DataLoader(test_dataset, batch_size=25)
predictions, labels = utils.validate(model, test_loader, nn.NLLLoss(),
device, step=0, tb_logger=None)
print("Test accuracy:")
accuracy = metrics.accuracy_score(labels, predictions)
print(accuracy)
fig, ax = plt.subplots(1, figsize=(8, 8))
utils.make_confusion_matrix(labels, predictions, categories, ax)
```
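Additive noise, mentioned at the beginning of this section, follows the same pattern as the two augmentations implemented above. A minimal sketch, assuming the same `(image, target)` transform signature; the noise standard deviation is an arbitrary choice. It could be appended to `train_trafos` before `utils.to_tensor`, just like the other augmentations.
```
def random_gaussian_noise(image, target, probability=.5, std=0.05):
    """ Randomly add Gaussian noise to the image (std is an arbitrary choice). """
    if np.random.rand() < probability:
        noise = np.random.normal(0, std, size=image.shape).astype(image.dtype)
        image = image + noise
    return image, target
```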
## Normalization layers
In addition to convolutional layers and pooling layers, another important part of neural networks are normalization layers.
These layers keep their input normalized using a learned normalization. The first type of normalization introduced has been [BatchNorm](https://arxiv.org/abs/1502.03167), which we will now add to the CNN architecture from the previous exercise.
```
import torch.nn.functional as F
class CNNBatchNorm(nn.Module):
def __init__(self, n_classes):
super().__init__()
self.n_classes = n_classes
# the convolutions
self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=5)
self.conv2 = nn.Conv2d(in_channels=12, out_channels=24, kernel_size=3)
# the pooling layer
self.pool = nn.MaxPool2d(2, 2)
# the normalization layers
self.bn1 = nn.BatchNorm2d(12)
self.bn2 = nn.BatchNorm2d(24)
# the fully connected part of the network
# after applying the convolutions and poolings, the tensor
# has the shape 24 x 6 x 6, see below
self.fc = nn.Sequential(
nn.Linear(24 * 6 * 6, 120),
nn.ReLU(),
nn.Linear(120, 60),
nn.ReLU(),
nn.Linear(60, self.n_classes)
)
self.activation = nn.LogSoftmax(dim=1)
def apply_convs(self, x):
# input image has shape 3 x 32 x 32
x = self.pool(F.relu(self.bn1(self.conv1(x))))
# shape after conv: 12 x 28 x 28
# shape after pooling: 12 x 14 X 14
x = self.pool(F.relu(self.bn2(self.conv2(x))))
# shape after conv: 24 x 12 x 12
# shape after pooling: 24 x 6 x 6
return x
def forward(self, x):
x = self.apply_convs(x)
x = x.view(-1, 24 * 6 * 6)
x = self.fc(x)
x = self.activation(x)
return x
# instantiate model and optimizer
model = CNNBatchNorm(10)
model = model.to(device)
optimizer = Adam(model.parameters(), lr=1.e-3)
n_epochs = 10
utils.run_cifar_training(model, optimizer,
train_loader, val_loader,
device=device, name='batch-norm',
n_epochs=n_epochs)
model = utils.load_checkpoint("best_checkpoint_batch-norm.tar", model, optimizer)[0]
predictions, labels = utils.validate(model, test_loader, nn.NLLLoss(),
device, step=0, tb_logger=None)
print("Test accuracy:")
accuracy = metrics.accuracy_score(labels, predictions)
print(accuracy)
fig, ax = plt.subplots(1, figsize=(8, 8))
utils.make_confusion_matrix(labels, predictions, categories, ax)
```
## Tasks and Questions
Tasks:
- Implement one or two additional augmentations and train the model again using these. You can use [the torchvision transformations](https://pytorch.org/docs/stable/torchvision/transforms.html) for inspiration.
Questions:
- Compare the model results in this exercise.
- Can you think of any transformations that make use of symmetries/invariances not present here but present in other kinds of images (e.g. biomedical images)?
Advanced:
- Check out the other [normalization layers available in pytorch](https://pytorch.org/docs/stable/nn.html#normalization-layers). Which layers could be a beneficial alternative to BatchNorm here? Try training with them and see if this improves performance further.
|
github_jupyter
|
# KFServing Sample
In this notebook, we provide two samples demonstrating the KFServing SDK and YAML versions.
### Setup
1. Your ~/.kube/config should point to a cluster with [KFServing installed](https://github.com/kubeflow/kfserving/blob/master/docs/DEVELOPER_GUIDE.md#deploy-kfserving).
2. Your cluster's Istio Ingress gateway must be network accessible, you can do:
`kubectl port-forward svc/istio-ingressgateway -n istio-system 8080:80`.
## 1. KFServing SDK sample
Below is a sample for KFServing SDK.
It shows how to use the KFServing SDK to create, get, rollout_canary, promote and delete an InferenceService.
### Prerequisites
```
!pip install kfserving kubernetes --user
from kubernetes import client
from kfserving import KFServingClient
from kfserving import constants
from kfserving import utils
from kfserving import V1alpha2EndpointSpec
from kfserving import V1alpha2PredictorSpec
from kfserving import V1alpha2TensorflowSpec
from kfserving import V1alpha2InferenceServiceSpec
from kfserving import V1alpha2InferenceService
from kubernetes.client import V1ResourceRequirements
```
Define the namespace where the InferenceService needs to be deployed. If not specified, the function below sets the namespace to the current one where the SDK is running in the cluster; otherwise it will deploy to the default namespace.
```
namespace = utils.get_default_target_namespace()
```
### Label namespace so you can run inference tasks in it
```
!kubectl label namespace $namespace serving.kubeflow.org/inferenceservice=enabled
```
### Define InferenceService
First define the default endpoint spec, and then define the InferenceService based on the endpoint spec.
```
api_version = constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION
default_endpoint_spec = V1alpha2EndpointSpec(
predictor=V1alpha2PredictorSpec(
tensorflow=V1alpha2TensorflowSpec(
storage_uri='gs://kfserving-samples/models/tensorflow/flowers',
resources=V1ResourceRequirements(
requests={'cpu':'100m','memory':'1Gi'},
limits={'cpu':'100m', 'memory':'1Gi'}
)
)
)
)
isvc = V1alpha2InferenceService(
api_version=api_version,
kind=constants.KFSERVING_KIND,
metadata=client.V1ObjectMeta(name='flower-sample', namespace=namespace),
spec=V1alpha2InferenceServiceSpec(default=default_endpoint_spec)
)
```
### Create InferenceService
Call KFServingClient to create InferenceService.
```
KFServing = KFServingClient()
KFServing.create(isvc)
```
### Check the InferenceService
```
KFServing.get('flower-sample', namespace=namespace, watch=True, timeout_seconds=120)
```
### Invoke Endpoint
If you want to invoke the endpoint yourself, you can copy and paste the code block below and execute it in your local environment. Remember that you should have a `kfserving-flowers-input.json` file in the same directory when you execute it.
```
%%bash
MODEL_NAME=flower-sample
INPUT_PATH=@./kfserving-flowers-input.json
INGRESS_GATEWAY=istio-ingressgateway
SERVICE_HOSTNAME=$(kubectl get inferenceservice ${MODEL_NAME} -n $namespace -o jsonpath='{.status.url}' | cut -d "/" -f 3)
curl -v -H "Host: ${SERVICE_HOSTNAME}" http://localhost:8080/v1/models/$MODEL_NAME:predict -d $INPUT_PATH
```
Expected Output
```
* Trying 34.83.190.188...
* TCP_NODELAY set
* Connected to 34.83.190.188 (34.83.190.188) port 80 (#0)
> POST /v1/models/flowers-sample:predict HTTP/1.1
> Host: flowers-sample.default.svc.cluster.local
> User-Agent: curl/7.60.0
> Accept: */*
> Content-Length: 16201
> Content-Type: application/x-www-form-urlencoded
> Expect: 100-continue
>
< HTTP/1.1 100 Continue
* We are completely uploaded and fine
< HTTP/1.1 200 OK
< content-length: 204
< content-type: application/json
< date: Fri, 10 May 2019 23:22:04 GMT
< server: envoy
< x-envoy-upstream-service-time: 19162
<
{
"predictions": [
{
"scores": [0.999115, 9.20988e-05, 0.000136786, 0.000337257, 0.000300533, 1.84814e-05],
"prediction": 0,
"key": " 1"
}
]
* Connection #0 to host 34.83.190.188 left intact
}%
```
### Add Canary to InferenceService
First define the canary endpoint spec, then roll out 10% of the traffic to the canary version and watch the rollout process.
```
canary_endpoint_spec = V1alpha2EndpointSpec(
predictor=V1alpha2PredictorSpec(
tensorflow=V1alpha2TensorflowSpec(
storage_uri='gs://kfserving-samples/models/tensorflow/flowers-2',
resources=V1ResourceRequirements(
requests={'cpu':'100m','memory':'1Gi'},
limits={'cpu':'100m', 'memory':'1Gi'}
)
)
)
)
KFServing.rollout_canary('flower-sample', canary=canary_endpoint_spec, percent=10,
namespace=namespace, watch=True, timeout_seconds=120)
```
### Rollout more traffic to canary of the InferenceService
Roll out 50% of the traffic to the canary version.
```
KFServing.rollout_canary('flower-sample', percent=50, namespace=namespace,
watch=True, timeout_seconds=120)
```
Send requests to the service 100 times.
```
%%bash
MODEL_NAME=flower-sample
INPUT_PATH=@./kfserving-flowers-input.json
INGRESS_GATEWAY=istio-ingressgateway
SERVICE_HOSTNAME=$(kubectl get inferenceservice ${MODEL_NAME} -n $namespace -o jsonpath='{.status.url}' | cut -d "/" -f 3)
for i in {0..100};
do
curl -v -H "Host: ${SERVICE_HOSTNAME}" http://localhost:8080/v1/models/$MODEL_NAME:predict -d $INPUT_PATH;
done
```
Check if the traffic is split.
```
%%bash
default_count=$(kubectl get replicaset -n $namespace -l serving.knative.dev/configuration=flower-sample-predictor-default -o jsonpath='{.items[0].status.observedGeneration}')
canary_count=$(kubectl get replicaset -n $namespace -l serving.knative.dev/configuration=flower-sample-predictor-canary -o jsonpath='{.items[0].status.observedGeneration}')
echo "\nThe count of traffic route to default: $default_count"
echo "The count of traffic route to canary: $canary_count"
```
### Promote Canary to Default
```
KFServing.promote('flower-sample', namespace=namespace, watch=True, timeout_seconds=120)
```
### Delete the InferenceService
```
KFServing.delete('flower-sample', namespace=namespace)
```
## 2. Sample for Kfserving YAML
Note: You should execute all the code blocks in your local environment.
### Create the InferenceService
Apply the CRD
```
!kubectl apply -n $namespace -f kfserving-flowers.yaml
```
Expected Output
```
$ inferenceservice.serving.kubeflow.org/flowers-sample configured
```
### Run a prediction
Use `istio-ingressgateway` as your `INGRESS_GATEWAY` if you are deploying KFServing as part of Kubeflow install, and not independently.
```
%%bash
MODEL_NAME=flowers-sample
INPUT_PATH=@./kfserving-flowers-input.json
INGRESS_GATEWAY=istio-ingressgateway
SERVICE_HOSTNAME=$(kubectl get inferenceservice ${MODEL_NAME} -n $namespace -o jsonpath='{.status.url}' | cut -d "/" -f 3)
curl -v -H "Host: ${SERVICE_HOSTNAME}" http://localhost:8080/v1/models/$MODEL_NAME:predict -d $INPUT_PATH
```
If you stop making requests to the application, you should eventually see that your application scales itself back down to zero. Watch the pod until you see that it is `Terminating`. This should take approximately 90 seconds.
```
!kubectl get pods --watch -n $namespace
```
Note: To exit the watch, use `ctrl + c`.
### Canary Rollout
To test a canary rollout, you can use the tensorflow-canary.yaml
Apply the CRD
```
!kubectl apply -n $namespace -f kfserving-flowers-canary.yaml
```
To verify that your traffic split percentage is applied correctly, you can use the following command:
```
!kubectl get inferenceservices -n $namespace
```
The output should look similar to the one below:
```
NAME READY URL DEFAULT TRAFFIC CANARY TRAFFIC AGE
flowers-sample True http://flowers-sample.default.example.com 90 10 48s
```
```
%%bash
MODEL_NAME=flowers-sample
INPUT_PATH=@./kfserving-flowers-input.json
INGRESS_GATEWAY=istio-ingressgateway
SERVICE_HOSTNAME=$(kubectl get inferenceservice ${MODEL_NAME} -n $namespace -o jsonpath='{.status.url}' | cut -d "/" -f 3)
for i in {0..100};
do
curl -v -H "Host: ${SERVICE_HOSTNAME}" http://localhost:8080/v1/models/$MODEL_NAME:predict -d $INPUT_PATH;
done
```
Verify if traffic split
```
%%bash
default_count=$(kubectl get replicaset -n $namespace -l serving.knative.dev/configuration=flowers-sample-predictor-default -o jsonpath='{.items[0].status.observedGeneration}')
canary_count=$(kubectl get replicaset -n $namespace -l serving.knative.dev/configuration=flowers-sample-predictor-canary -o jsonpath='{.items[0].status.observedGeneration}')
echo "\nThe count of traffic route to default: $default_count"
echo "The count of traffic route to canary: $canary_count"
```
### Clean Up Resources
```
!kubectl delete inferenceservices flowers-sample -n $namespace
```
|
github_jupyter
|
```
%autosave 0
```
# 4. Evaluation Metrics for Classification
In the previous session we trained a model for predicting churn. How do we know if it's good?
## 4.1 Evaluation metrics: session overview
* Dataset: https://www.kaggle.com/blastchar/telco-customer-churn
* https://raw.githubusercontent.com/alexeygrigorev/mlbookcamp-code/master/chapter-03-churn-prediction/WA_Fn-UseC_-Telco-Customer-Churn.csv
*Metric* - function that compares the predictions with the actual values and outputs a single number that tells how good the predictions are
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import DictVectorizer
from sklearn.linear_model import LogisticRegression
df = pd.read_csv('data-week-3.csv')
df.columns = df.columns.str.lower().str.replace(' ', '_')
categorical_columns = list(df.dtypes[df.dtypes == 'object'].index)
for c in categorical_columns:
df[c] = df[c].str.lower().str.replace(' ', '_')
df.totalcharges = pd.to_numeric(df.totalcharges, errors='coerce')
df.totalcharges = df.totalcharges.fillna(0)
df.churn = (df.churn == 'yes').astype(int)
df_full_train, df_test = train_test_split(df, test_size=0.2, random_state=1)
df_train, df_val = train_test_split(df_full_train, test_size=0.25, random_state=1)
df_train = df_train.reset_index(drop=True)
df_val = df_val.reset_index(drop=True)
df_test = df_test.reset_index(drop=True)
y_train = df_train.churn.values
y_val = df_val.churn.values
y_test = df_test.churn.values
del df_train['churn']
del df_val['churn']
del df_test['churn']
numerical = ['tenure', 'monthlycharges', 'totalcharges']
categorical = [
'gender',
'seniorcitizen',
'partner',
'dependents',
'phoneservice',
'multiplelines',
'internetservice',
'onlinesecurity',
'onlinebackup',
'deviceprotection',
'techsupport',
'streamingtv',
'streamingmovies',
'contract',
'paperlessbilling',
'paymentmethod',
]
dv = DictVectorizer(sparse=False)
train_dict = df_train[categorical + numerical].to_dict(orient='records')
X_train = dv.fit_transform(train_dict)
model = LogisticRegression()
model.fit(X_train, y_train)
val_dict = df_val[categorical + numerical].to_dict(orient='records')
X_val = dv.transform(val_dict)
y_pred = model.predict_proba(X_val)[:, 1]
churn_decision = (y_pred >= 0.5)
(y_val == churn_decision).mean()
```
## 4.2 Accuracy and dummy model
* Evaluate the model on different thresholds
* Check the accuracy of dummy baselines
```
len(y_val)
(y_val == churn_decision).mean()
1132/ 1409
from sklearn.metrics import accuracy_score
accuracy_score(y_val, y_pred >= 0.5)
thresholds = np.linspace(0, 1, 21)
scores = []
for t in thresholds:
score = accuracy_score(y_val, y_pred >= t)
print('%.2f %.3f' % (t, score))
scores.append(score)
plt.plot(thresholds, scores)
from collections import Counter
Counter(y_pred >= 1.0)
1 - y_val.mean()
```
## 4.3 Confusion table
* Different types of errors and correct decisions
* Arranging them in a table
```
actual_positive = (y_val == 1)
actual_negative = (y_val == 0)
t = 0.5
predict_positive = (y_pred >= t)
predict_negative = (y_pred < t)
tp = (predict_positive & actual_positive).sum()
tn = (predict_negative & actual_negative).sum()
fp = (predict_positive & actual_negative).sum()
fn = (predict_negative & actual_positive).sum()
confusion_matrix = np.array([
[tn, fp],
[fn, tp]
])
confusion_matrix
(confusion_matrix / confusion_matrix.sum()).round(2)
```
## 4.4 Precision and Recall
```
p = tp / (tp + fp)
p
r = tp / (tp + fn)
r
```
## 4.5 ROC Curves
### TPR and FPR
```
tpr = tp / (tp + fn)
tpr
fpr = fp / (fp + tn)
fpr
scores = []
thresholds = np.linspace(0, 1, 101)
for t in thresholds:
actual_positive = (y_val == 1)
actual_negative = (y_val == 0)
predict_positive = (y_pred >= t)
predict_negative = (y_pred < t)
tp = (predict_positive & actual_positive).sum()
tn = (predict_negative & actual_negative).sum()
fp = (predict_positive & actual_negative).sum()
fn = (predict_negative & actual_positive).sum()
scores.append((t, tp, fp, fn, tn))
columns = ['threshold', 'tp', 'fp', 'fn', 'tn']
df_scores = pd.DataFrame(scores, columns=columns)
df_scores['tpr'] = df_scores.tp / (df_scores.tp + df_scores.fn)
df_scores['fpr'] = df_scores.fp / (df_scores.fp + df_scores.tn)
plt.plot(df_scores.threshold, df_scores['tpr'], label='TPR')
plt.plot(df_scores.threshold, df_scores['fpr'], label='FPR')
plt.legend()
```
### Random model
```
np.random.seed(1)
y_rand = np.random.uniform(0, 1, size=len(y_val))
((y_rand >= 0.5) == y_val).mean()
def tpr_fpr_dataframe(y_val, y_pred):
scores = []
thresholds = np.linspace(0, 1, 101)
for t in thresholds:
actual_positive = (y_val == 1)
actual_negative = (y_val == 0)
predict_positive = (y_pred >= t)
predict_negative = (y_pred < t)
tp = (predict_positive & actual_positive).sum()
tn = (predict_negative & actual_negative).sum()
fp = (predict_positive & actual_negative).sum()
fn = (predict_negative & actual_positive).sum()
scores.append((t, tp, fp, fn, tn))
columns = ['threshold', 'tp', 'fp', 'fn', 'tn']
df_scores = pd.DataFrame(scores, columns=columns)
df_scores['tpr'] = df_scores.tp / (df_scores.tp + df_scores.fn)
df_scores['fpr'] = df_scores.fp / (df_scores.fp + df_scores.tn)
return df_scores
df_rand = tpr_fpr_dataframe(y_val, y_rand)
plt.plot(df_rand.threshold, df_rand['tpr'], label='TPR')
plt.plot(df_rand.threshold, df_rand['fpr'], label='FPR')
plt.legend()
```
### Ideal model
```
num_neg = (y_val == 0).sum()
num_pos = (y_val == 1).sum()
num_neg, num_pos
y_ideal = np.repeat([0, 1], [num_neg, num_pos])
y_ideal
y_ideal_pred = np.linspace(0, 1, len(y_val))
1 - y_val.mean()
accuracy_score(y_ideal, y_ideal_pred >= 0.726)
df_ideal = tpr_fpr_dataframe(y_ideal, y_ideal_pred)
df_ideal[::10]
plt.plot(df_ideal.threshold, df_ideal['tpr'], label='TPR')
plt.plot(df_ideal.threshold, df_ideal['fpr'], label='FPR')
plt.legend()
```
### Putting everything together
```
plt.plot(df_scores.threshold, df_scores['tpr'], label='TPR', color='black')
plt.plot(df_scores.threshold, df_scores['fpr'], label='FPR', color='blue')
plt.plot(df_ideal.threshold, df_ideal['tpr'], label='TPR ideal')
plt.plot(df_ideal.threshold, df_ideal['fpr'], label='FPR ideal')
# plt.plot(df_rand.threshold, df_rand['tpr'], label='TPR random', color='grey')
# plt.plot(df_rand.threshold, df_rand['fpr'], label='FPR random', color='grey')
plt.legend()
plt.figure(figsize=(5, 5))
plt.plot(df_scores.fpr, df_scores.tpr, label='Model')
plt.plot([0, 1], [0, 1], label='Random', linestyle='--')
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.legend()
from sklearn.metrics import roc_curve
fpr, tpr, thresholds = roc_curve(y_val, y_pred)
plt.figure(figsize=(5, 5))
plt.plot(fpr, tpr, label='Model')
plt.plot([0, 1], [0, 1], label='Random', linestyle='--')
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.legend()
```
## 4.6 ROC AUC
* Area under the ROC curve - useful metric
* Interpretation of AUC
```
from sklearn.metrics import auc
auc(fpr, tpr)
auc(df_scores.fpr, df_scores.tpr)
auc(df_ideal.fpr, df_ideal.tpr)
fpr, tpr, thresholds = roc_curve(y_val, y_pred)
auc(fpr, tpr)
from sklearn.metrics import roc_auc_score
roc_auc_score(y_val, y_pred)
neg = y_pred[y_val == 0]
pos = y_pred[y_val == 1]
import random
n = 100000
success = 0
for i in range(n):
pos_ind = random.randint(0, len(pos) - 1)
neg_ind = random.randint(0, len(neg) - 1)
if pos[pos_ind] > neg[neg_ind]:
success = success + 1
success / n
n = 50000
np.random.seed(1)
pos_ind = np.random.randint(0, len(pos), size=n)
neg_ind = np.random.randint(0, len(neg), size=n)
(pos[pos_ind] > neg[neg_ind]).mean()
```
## 4.7 Cross-Validation
* Evaluating the same model on different subsets of data
* Getting the average prediction and the spread within predictions
```
def train(df_train, y_train, C=1.0):
dicts = df_train[categorical + numerical].to_dict(orient='records')
dv = DictVectorizer(sparse=False)
X_train = dv.fit_transform(dicts)
model = LogisticRegression(C=C, max_iter=1000)
model.fit(X_train, y_train)
return dv, model
dv, model = train(df_train, y_train, C=0.001)
def predict(df, dv, model):
dicts = df[categorical + numerical].to_dict(orient='records')
X = dv.transform(dicts)
y_pred = model.predict_proba(X)[:, 1]
return y_pred
y_pred = predict(df_val, dv, model)
from sklearn.model_selection import KFold
!pip install tqdm
from tqdm.auto import tqdm
n_splits = 5
# C = regularization parameter for the model
# tqdm() is a function that prints progress bars
for C in tqdm([0.001, 0.01, 0.1, 0.5, 1, 5, 10]):
kfold = KFold(n_splits=n_splits, shuffle=True, random_state=1)
scores = []
for train_idx, val_idx in kfold.split(df_full_train):
df_train = df_full_train.iloc[train_idx]
df_val = df_full_train.iloc[val_idx]
y_train = df_train.churn.values
y_val = df_val.churn.values
dv, model = train(df_train, y_train, C=C)
y_pred = predict(df_val, dv, model)
auc = roc_auc_score(y_val, y_pred)
scores.append(auc)
print('C=%s %.3f +- %.3f' % (C, np.mean(scores), np.std(scores)))
scores
dv, model = train(df_full_train, df_full_train.churn.values, C=1.0)
y_pred = predict(df_test, dv, model)
auc = roc_auc_score(y_test, y_pred)
auc
```
## 4.8 Summary
* Metric - a single number that describes the performance of a model
* Accuracy - fraction of correct answers; sometimes misleading
* Precision and recall are less misleading when we have class imbalance
* ROC Curve - a way to evaluate the performance at all thresholds; okay to use with imbalance
* K-Fold CV - more reliable estimate for performance (mean + std)
## 4.9 Explore more
* Check the precision and recall of the dummy classifier that always predicts "FALSE"
* F1 score = 2 * P * R / (P + R) (see the sketch after this list)
* Evaluate precision and recall at different thresholds, plot P vs R - this way you'll get the precision/recall curve (similar to ROC curve)
* Area under the PR curve is also a useful metric
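A minimal sketch of the first two items above, reusing the validation split and predictions from this session; the always-"FALSE" dummy and the 0.5 threshold are illustrative choices, not part of the original material:
```
from sklearn.metrics import precision_score, recall_score, f1_score

# dummy classifier that always predicts "FALSE" (no churn):
# there are no positive predictions, so precision is undefined (reported as 0) and recall is 0
y_dummy = np.zeros_like(y_val)
print('dummy precision:', precision_score(y_val, y_dummy, zero_division=0))
print('dummy recall:   ', recall_score(y_val, y_dummy))

# F1 = 2 * P * R / (P + R) for the model at threshold 0.5
churn_decision = (y_pred >= 0.5)
p = precision_score(y_val, churn_decision)
r = recall_score(y_val, churn_decision)
print('F1 (manual): ', 2 * p * r / (p + r))
print('F1 (sklearn):', f1_score(y_val, churn_decision))
```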
Other projects:
* Calculate the metrics for datasets from the previous week
|
github_jupyter
|
# Training a Neural Network
Author: 杨岱川 (Yang Daichuan)
Date: December 2019
github:https://github.com/DrDavidS/basic_Machine_Learning
License: [MIT](https://github.com/DrDavidS/basic_Machine_Learning/blob/master/LICENSE)
References:
- "Deep Learning from Scratch", by Koki Saitoh;
- "Deep Learning", by Ian Goodfellow, Yoshua Bengio, and Aaron Courville.
- [Keras overview](https://tensorflow.google.cn/guide/keras/overview)
## Purpose of this section
In [3.01 Neural networks and forward propagation](https://github.com/DrDavidS/basic_Machine_Learning/blob/master/03深度学习基础/3.01%20神经网络与前向传播.ipynb) we studied how forward propagation works in a neural network built from multilayer perceptrons, and implemented a very simple network by hand.
So far, however, the weight matrix $W$ of that network was randomly initialized. We merely "fed" the input $X$ into the network and got it to "run"; its output had no practical meaning because the network was never trained.
The topic of tutorial 3.02 is **how a neural network learns**, that is, how it automatically obtains the optimal weight parameters from training data. The core idea is essentially the same as the notion of training described earlier for classical machine learning.
To make the network able to learn, we introduce the **loss function** as the guiding indicator, a concept you should already be familiar with.
The goal of learning is to find the weight parameters that make the loss function as small as possible, and to search for such small values we will use **gradient methods**.
> Don't these terms all sound familiar?
>
> "Gradient methods" appeared in the form of **gradient boosting** in [2.11 XGBoost: principles and applications](https://github.com/DrDavidS/basic_Machine_Learning/blob/master/02机器学习基础/2.11%20XGBoost原理与应用.ipynb), and the "loss function" runs through the whole of classical machine learning.
## Learning from data
Like other machine learning algorithms, a neural network learns from data: "learning from data" means that the weight parameters are determined automatically by the data.
Since this is machine learning, we certainly cannot set the parameters by hand; how could anyone manage that?
> Parameter counts of some large neural networks (more parameters does not necessarily mean better results):
>
> - ALBERT: 12 million, by Google;
> - BERT-large: 334 million, by Google;
> - BERT-xlarge: 1.27 billion, by Google;
> - Megatron: 8 billion, by Nvidia;
> - T5: 11 billion, by Google.
Next we look at how a neural network learns, i.e. how it uses data to determine its parameter values.
## Loss functions
You already know the concept of a loss function; we have seen many of them before, such as the 0-1 loss and the mean squared error. Here we introduce one more.
### Cross-entropy error
The **cross-entropy error** is a very commonly used loss function, defined as:
$$\large E=-\sum_k t_k\log y_k$$
Here $\log$ is the natural logarithm $\log_e$, $k$ runs over the $k$ classes, $y_k$ is the network output, and $t_k$ is the true, correct label. In $t_k$ only the index of the correct class is 1 and all other entries are 0 (one-hot encoding), so the formula also handles multi-class problems.
In effect, it only computes the natural log of the output at the correct class.
For example, in a three-class problem with classes A, B and C where the true class is C, $t_k=[0,\quad0,\quad1]$,
and the network output after softmax is $y_k=[0.1,\quad0.3,\quad0.6]$, so the cross-entropy error is $-\log0.6\approx0.51$.
Let's implement cross-entropy in code:
```
import numpy as np
def cross_entropy_error(y, t):
"""定义交叉熵损失函数"""
delta = 1e-7
return -np.sum(t * np.log(y + delta))
```
Here $y$ and $t$ are NumPy arrays. We add a tiny value delta inside `np.log` to avoid `np.log(0)`, which would return negative infinity and break all subsequent computation.
Now let's try a few simple calculations with this code:
```
# the third class is the correct answer
t = np.array([0, 0, 1])
t
# predicted probabilities for the three classes, case y1
y1 = np.array([0.1, 0.3, 0.6])
y1
# predicted probabilities for the three classes, case y2
y2 = np.array([0.3, 0.4, 0.3])
y2
# cross-entropy for y1
cross_entropy_error(y1, t)
# cross-entropy for y2
cross_entropy_error(y2, t)
```
As we can see, the first output, y1, matches the supervised (training) data better, so its cross-entropy error is smaller.
### Mini-batch learning
Machine learning learns from training data: we compute the loss function over the training data and look for the parameters that make it as small as possible. In principle, the loss must be computed over all training samples; with 100 samples, the sum of those 100 individual losses is the learning objective.
To compute the total loss over all training data, using the cross-entropy error as an example:
$$\large E=-\frac{1}{N}\sum_n \sum_k t_{nk}\log y_{nk}$$
It looks complicated, but it simply extends the single-sample loss to $n$ samples and then divides by $N$, giving the average loss per sample. This averaging yields a unified metric that does not depend on the number of training samples.
The problem is that many datasets are large. MNIST, for example, has 60,000 training samples, so summing the loss over all of them already takes a while; for an even larger dataset such as [ImageNet](http://www.image-net.org/about-stats), with about 14.19 million images (as of December 2019), computing the loss over the entire dataset is simply unrealistic.
We therefore select a portion of the data as an "approximation" of the whole. Neural network learning picks a batch of samples (a mini-batch) from the training data and then learns on each mini-batch, as sketched below.
For instance, on MNIST we might select 100 images at a time. This style of learning is called **mini-batch learning**; equivalently, the batch size of the training process is 100.
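Below is a minimal sketch of how such a mini-batch can be drawn; the array names `x_train` and `t_train` and the use of `np.random.choice` are assumptions for illustration, not code from this notebook:
```
import numpy as np

# assume x_train, t_train hold the full training set (e.g. 60,000 MNIST samples)
train_size = x_train.shape[0]
batch_size = 100

batch_mask = np.random.choice(train_size, batch_size)  # pick 100 random indices
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
# the average loss on (x_batch, t_batch) then approximates the loss on the full set
```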
### Why use a loss function?
Why do we train by minimizing a loss function? Our ultimate goal is to improve recognition accuracy, so why not use accuracy itself as the objective?
The reason lies in the role derivatives play in neural network learning, which will be explained in detail later. When searching for the optimal parameters (weights and biases), we look for parameters that make the loss as small as possible; to find them we compute the derivative of the loss with respect to the parameters (more precisely, the **gradient**) and use it to guide step-by-step parameter updates.
Suppose we focus on one particular weight of a network. The derivative of the loss with respect to that weight tells us how the loss would change if the weight were changed slightly. If the derivative is negative, moving the weight in the positive direction decreases the loss; conversely, if the derivative is positive, moving the weight in the negative direction decreases the loss.
> When the derivative is 0, the loss does not change no matter which direction the weight is moved in.
If we used recognition accuracy as the objective, the derivative would be 0 almost everywhere, so the parameters could not be updated.
> Suppose a network correctly recognizes 32 out of 100 training samples, an accuracy of 32%. With accuracy as the objective, slightly changing a weight usually keeps the accuracy at exactly 32%; tiny adjustments cannot improve it, and when the accuracy does change, it jumps discretely to 33% or 34% rather than varying continuously to something like 32.011%.
>
> With the **loss function** as the objective, on the other hand, the current loss might be 0.92543..., and a small parameter tweak changes it continuously to something like 0.93431....
So recognition accuracy barely reacts to small parameter changes, and when it does react, it changes discontinuously and abruptly.
Recall the **step function** and the **sigmoid function** we studied earlier:
```
import matplotlib
print(matplotlib.__version__)
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
%config InlineBackend.figure_format = 'svg'  # render figures as vector graphics (SVG)
def sigmoid(x):
    """Sigmoid function."""
    return 1.0/(1.0 + np.exp(-x))
def step_function(x):
    """Step function."""
    return np.array(x > 0, dtype=int)
# step function
plt.figure(figsize=(8,4))
plt.subplot(1, 2, 1)
x = np.arange(-6.0, 6.0, 0.1)
plt.plot(x, step_function(x))
plt.axhline(y=0.0,ls='dotted',color='k')
plt.axhline(y=1.0,ls='dotted',color='k')
plt.axhline(y=0.5,ls='dotted',color='k')
plt.yticks([0.0,0.5,1.0])
plt.ylim(-0.1,1.1)
plt.xlabel('x')
plt.ylabel('$step(x)$')
plt.title('Step Function')
# plt.savefig("pic001.png", dpi=600)  # save the figure
# sigmoid function
plt.subplot(1, 2, 2)
plt.plot(x, sigmoid(x))
plt.axhline(y=0.0,ls='dotted',color='k')
plt.axhline(y=1.0,ls='dotted',color='k')
plt.axhline(y=0.5,ls='dotted',color='k')
plt.yticks([0.0,0.5,1.0])
plt.ylim(-0.1,1.1)
plt.xlabel('x')
plt.ylabel('$sigmoid(x)$')
plt.title('Sigmoid Function')
# plt.savefig("pic001.png", dpi=600)  # save the figure
plt.tight_layout(3)  # spacing between subplots
plt.show()
```
If we used the **step function** as the activation function, neural network learning could not proceed. As the plot shows, the derivative of the step function is 0 almost everywhere, so even with the loss function as the objective, small parameter changes would be wiped out by the step function and the loss value would not change at all.
The **sigmoid function**, in contrast, has a continuously varying output and a continuously varying slope; in other words, its derivative is never 0. Thanks to this property, neural network learning can proceed correctly.
## Numerical differentiation
We use gradient information to decide which direction to move in. Let's now look at what a gradient is and what properties it has.
### Derivatives
You are no doubt familiar with derivatives. A derivative expresses the rate of change at an instant, defined as:
$$\large \frac{{\rm d}f(x)}{{\rm d}x} = \lim_{h\to 0}\frac{f(x+h)-f(x)}{h}$$
Now let's implement differentiation directly from this definition:
```
def numerical_diff(f, x):
"""不太好的导数实现"""
h = 1e-50
return (f(x + h) - f(x)) / h
```
The name `numerical_diff` comes from **numerical differentiation**.
In fact, assigning such a tiny value to $h$ actually introduces **rounding error**:
```
np.float32(1e-50)
```
When $10^{-50}$ is represented as a `float32`, it becomes $0.0$ and cannot be expressed correctly. That is the first problem: changing the small value $h$ to $10^{-4}$ gives correct results.
The second problem concerns the difference quotient of the function $f$. We compute the difference of $f$ between $x+h$ and $x$, but this carries an error: what we actually compute is the slope of the line through the points $x+h$ and $x$, whereas the true derivative is the slope of the tangent at $x$. The discrepancy arises because $h$ cannot truly approach 0.
To reduce this error, we compute the difference of $f$ between $(x+h)$ and $(x-h)$. Because this is centred on $x$ and uses points on both sides, it is called the **central difference**, while the difference between $(x+h)$ and $x$ is the **forward difference**.
Here is the improved version:
```
def numerical_diff(f, x):
"""改进后的导数实现"""
h = 1e-4
return (f(x + h) - f(x - h)) / (2 * h)
```
### A numerical differentiation example
Let's use the numerical differentiation function above to differentiate a simple function:
$$\large y=0.01x^2+0.1x$$
First we plot the function.
```
def function_1(x):
"""定义函数"""
return 0.01 * x**2 + 0.1*x
x = np.arange(0.0, 20.0, 0.1)
y = function_1(x)
plt.xlabel('x')
plt.ylabel('$f(x)$')
plt.plot(x, y)
plt.show()
```
Compute the derivative of the function at $x=5$ and draw the tangent line:
```
def tangent_line(f, x):
"""切线"""
d = numerical_diff(f, x)
print(d)
y = f(x) - d*x
return lambda t: d*t + y
x = np.arange(0.0, 20.0, 0.1)
y = function_1(x)
plt.xlabel("x")
plt.ylabel("f(x)")
tf = tangent_line(function_1, 5)
y2 = tf(x)
plt.plot(x, y)
plt.plot(x, y2)
plt.axvline(x=5,ls='dotted',color='k')
plt.axhline(y=0.75,ls='dotted',color='k')
plt.yticks([0, 0.75, 1, 2, 3, 4])
plt.show()
```
As is well known, the analytic derivative of $f(x)=0.01x^2+0.1x$ is $\cfrac{{\rm d}f(x)}{{\rm d}x}=0.02x+0.1$, so the "true derivative" at $x=5$ is 0.2. Compared with the result above it is, strictly speaking, not identical, but the error is very small.
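As a quick check of this claim, we can compare the numerical result with the analytic derivative $0.02x+0.1$, reusing the `numerical_diff` and `function_1` defined above:
```
x0 = 5.0
numerical = numerical_diff(function_1, x0)
analytic = 0.02 * x0 + 0.1
print(numerical, analytic, abs(numerical - analytic))  # the difference is tiny
```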
### Partial derivatives
Next let's look at a new function, one with two variables:
$$\large f(x_0, x_1)=x_0^2+x_1^2$$
In code, its graph can be drawn as follows:
```
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
import numpy as np
def function_2_old(x_0, x_1):
"""二元函数"""
return x_0**2 + x_1**2
fig = plt.figure()
ax = Axes3D(fig)
x_0 = np.arange(-2, 2.5, 0.2) # x0
x_1 = np.arange(-2, 2.5, 0.2) # x1
X_0, X_1 = np.meshgrid(x_0, x_1)  # build the 2-D grid
Y = function_2_old(X_0, X_1)
ax.set_xlabel('$x_0$')
ax.set_ylabel('$x_1$')
ax.set_zlabel('$f(x)$')
ax.plot_surface(X_0, X_1, Y, rstride=1, cstride=1, cmap='rainbow')
# ax.view_init(30, 60)  # adjust the viewing angle
plt.show()
```
A rather pretty plot.
To differentiate this two-variable function we need to specify whether we differentiate with respect to $x_0$ or $x_1$.
The derivatives of functions of several variables discussed here are **partial derivatives**, written $\cfrac{\partial f}{\partial x_0}$ and $\cfrac{\partial f}{\partial x_1}$.
When $x_0=3$ and $x_1=4$, compute the partial derivative with respect to $x_0$, $\cfrac{\partial f}{\partial x_0}$:
```
def function_tmp1(x0):
return x0 * x0 + 4.0**2.0
numerical_diff(function_tmp1, 3.0)
```
When $x_0=3$ and $x_1=4$, compute the partial derivative with respect to $x_1$, $\cfrac{\partial f}{\partial x_1}$:
```
def function_tmp2(x1):
return 3.0**2.0 + x1 * x1
numerical_diff(function_tmp2, 4.0)
```
Working these out by hand, both numerical values agree closely with the analytic derivatives.
So a partial derivative, like the derivative of a single-variable function, is the **slope** at a given point; the difference is that we pick one of the variables as the target and fix the other variables at particular values.
## Gradients
After all this groundwork, we finally reach the key part.
We just computed the partial derivatives with respect to $x_0$ and $x_1$ separately; now we want to compute them for $x_0$ and $x_1$ together.
For example, consider the partial derivatives $\left( \cfrac{\partial f}{\partial x_0},\cfrac{\partial f}{\partial x_1} \right)$ of $(x_0,x_1)$ at $x_0=3$, $x_1=4$.
> A vector such as $\left( \cfrac{\partial f}{\partial x_0},\cfrac{\partial f}{\partial x_1} \right)$, formed by collecting the partial derivatives with respect to all variables, is called the **gradient**.
We compute it with the following code:
```
def _numerical_gradient_no_batch(f, x):
"""
计算梯度
输入:
f:函数
x:数组,多元变量。
"""
h = 1e-4 # 0.0001
grad = np.zeros_like(x) # 生成一个和x形状一样的全为0的数组
for idx in range(x.size):
tmp_val = x[idx]
x[idx] = float(tmp_val) + h
fxh1 = f(x) # f(x+h)
x[idx] = tmp_val - h
fxh2 = f(x) # f(x-h)
grad[idx] = (fxh1 - fxh2) / (2*h)
        x[idx] = tmp_val  # restore the original value
return grad
def function_2(x):
"""
二元函数
重新定义一下,此时输入为一个np.array数组
"""
return x[0]**2 + x[1]**2
```
The code looks a little longer, but it is essentially the same as the numerical differentiation of a single variable.
Now let's actually compute some gradients with this function:
```
_numerical_gradient_no_batch(function_2, np.array([3.0, 4.0]))
_numerical_gradient_no_batch(function_2, np.array([0.0, 2.0]))
_numerical_gradient_no_batch(function_2, np.array([3.0, 0.0]))
```
In this way we can compute the gradient of $(x_0,x_1)$ at any point. Next we draw the gradient of $f(x_0,x_1)=x_0^2+x_1^2$ on a plot; note that what we actually plot are the **negative gradient** vectors.
Code reference: [deep-learning-from-scratch](https://github.com/oreilly-japan/deep-learning-from-scratch/blob/master/ch04/gradient_2d.py).
```
def numerical_gradient(f, X):
"""计算梯度矢量"""
if X.ndim == 1:
return _numerical_gradient_no_batch(f, X)
else:
grad = np.zeros_like(X)
for idx, x in enumerate(X):
grad[idx] = _numerical_gradient_no_batch(f, x)
return grad
x0 = np.arange(-2, 2.5, 0.25)
x1 = np.arange(-2, 2.5, 0.25)
X, Y = np.meshgrid(x0, x1)
X = X.flatten()
Y = Y.flatten()
grad = numerical_gradient(function_2, np.array([X, Y]).T).T
plt.figure()
plt.quiver(X, Y, -grad[0], -grad[1], angles="xy",color="#666666")
plt.xlim([-2, 2])
plt.ylim([-2, 2])
plt.xlabel('x0')
plt.ylabel('x1')
plt.grid()
plt.draw()
plt.show()
```
As shown in the figure, the gradient of $f(x_0,x_1)=x_0^2+x_1^2$ appears as directed arrows, and moreover:
- all arrows point towards the "lowest point" of $f(x_0,x_1)$;
- the farther from the lowest point, the larger the arrow.
> In fact, the gradient does not always point towards the lowest point.
>
> More strictly, **the gradient indicates the direction in which the function value decreases the most at each point**.
>
> In other words, some optimization runs may converge only to a local minimum.
### The gradient method
The main task of machine learning is to find the optimal parameters during training (learning). Here the "optimal parameters" are the parameters that minimize the loss function.
But loss functions are generally complicated (recall the loss derivation for `XGBoost`), the parameter space is huge, and we usually have no idea where the minimum lies. The gradient method uses gradients to search for the minimum of a function (or a value as small as possible).
> A reminder: the **gradient** indicates the direction in which the function value decreases the most at each point, so there is no guarantee that the direction it points in is the minimum of the function or the direction we should ultimately move in. In fact, for complex functions the gradient direction is essentially never the location of the minimum.
Nevertheless, moving along the gradient decreases the function value (for example, the loss) as much as possible, so when searching for the minimum we still take the gradient as the clue that decides the direction to move in.
This is where the **gradient method** comes in. Starting from the current position, the function value moves a small step along the gradient direction (compare with the figure above); at the new location we recompute the gradient and again move along it, and so on.
Repeatedly advancing along the gradient direction and gradually decreasing the function value in this way is the **gradient method**, a standard approach to optimization problems in machine learning.
> Strictly speaking, the gradient method that searches for minima is called **gradient descent** and the one that searches for maxima is called **gradient ascent**; be careful not to confuse these with **Boosting** methods.
Expressed mathematically, the gradient method is:
$$x_0=x_0 - \eta \frac{\partial f}{\partial x_0}$$
$$x_1=x_1 - \eta \frac{\partial f}{\partial x_1}$$
Here $\eta$, read "eta", is the update amount. Recall that most of the earlier scikit-learn examples used `eta` as the **learning rate** parameter, and the same is true in neural networks. The learning rate determines how much is learned in a single step, i.e. to what extent the parameters are updated, just as $\eta$ determines the length of each stride as we walk down a hill.
The formulas above perform only one update; we need to run them repeatedly to gradually decrease the function value.
The value of $\eta$ must be neither too large nor too small, otherwise we cannot reach a "good place". When training neural networks, one typically changes the learning rate while checking whether training is proceeding normally.
Code reference: [gradient_method.py](https://github.com/oreilly-japan/deep-learning-from-scratch/blob/master/ch04/gradient_method.py); implementing gradient descent in code:
```
def gradient_descent(f, init_x, lr=0.01, step_num=100):
"""
梯度下降法
f:要进行最优化的参数
init_x:初始值
lr:学习率,默认为0.01
step_sum:梯度下降法重复的次数
"""
x = init_x
x_history = [] # 保存每一步的信息
for i in range(step_num):
x_history.append( x.copy() )
grad = numerical_gradient(f, x) # 计算梯度矢量
x -= lr * grad
return x, np.array(x_history)
```
With this function we can find a local minimum of a function and, if all goes well, its global minimum.
Now let's find the minimum of $f(x_0,x_1)=x_0^2+x_1^2$:
```
init_x = np.array([-3.0, 4.0])  # initial position
result = gradient_descent(function_2, init_x=init_x, lr=0.1, step_num=100)  # run gradient descent
print(result[0])
```
The final result is $(-6.11110793\times10^{-10}, 8.14814391\times10^{-10})$, very close to the true answer $(0, 0)$ that we already know. So gradient descent has essentially produced the correct result.
Plotting the path of the gradient updates gives the following:
```
init_x = np.array([-3.0, 4.0])  # initial position
lr = 0.1
step_num = 20
x, x_history = gradient_descent(function_2, init_x, lr=lr, step_num=step_num)
step = 0.01
x_0 = np.arange(-5,5,step)
x_1 = np.arange(-5,5,step)
X, Y = np.meshgrid(x_0, x_1)  # build the grid
Z = function_2_old(X, Y)
plt.contour(X, Y, Z, levels=10, linewidths=0.5, linestyles='dashdot')  # draw contour lines
plt.plot(x_history[:,0], x_history[:,1], '.')  # plot the gradient-descent trajectory
plt.xlim(-4.5, 4.5)
plt.ylim(-4.5, 4.5)
plt.xlabel("$x_0$")
plt.ylabel("$x_1$")
plt.show()
```
As noted earlier, a **learning rate** that is too large or too small prevents us from getting good results.
We can verify this with a quick experiment:
```
# learning rate too large
init_x = np.array([-3.0, 4.0])  # initial position
lr = 10.0  # learning rate
x, x_history = gradient_descent(function_2, init_x=init_x, lr=lr, step_num=step_num)
print(x)
# learning rate too small
init_x = np.array([-3.0, 4.0])  # initial position
lr = 1e-10  # learning rate
x, x_history = gradient_descent(function_2, init_x=init_x, lr=lr, step_num=step_num)
print(x)
```
From this we can see:
- if the learning rate is too large, the result diverges to a huge value;
- if the learning rate is too small, the parameters barely move before the run ends.
We therefore need to set an appropriate learning rate. Remember that the learning rate is a **hyperparameter**, and it is usually set by hand.
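As a small illustration (not from the original notebook), we can sweep a few candidate learning rates with the `gradient_descent` and `function_2` defined above; the candidate values are arbitrary:
```
for lr in [10.0, 1.0, 0.1, 0.01, 1e-4, 1e-10]:
    x, _ = gradient_descent(function_2, init_x=np.array([-3.0, 4.0]), lr=lr, step_num=100)
    print(f"lr={lr:<8} -> final x = {x}, f(x) = {function_2(x):.6f}")
```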
### Gradients of a neural network
Training a neural network also requires gradients. The gradient here means the gradient of the **loss function** with respect to the weight parameters. For example, in [3.01 Neural networks and forward propagation](https://github.com/DrDavidS/basic_Machine_Learning/blob/master/深度学习基础/3.01%20神经网络与前向传播.ipynb) we built a three-layer network whose first-layer (layer1) weights $W$ have shape $2\times3$, and we write the loss function as $L$.
The gradient is then written $\cfrac{\partial L}{\partial W}$. In concrete mathematical notation (note that, for convenience of explanation, the subscripts differ from before), this is:
$$
\large
W=
\begin{pmatrix}
w_{11} & w_{12} & w_{13} \\
w_{21} & w_{22} & w_{23}\\
\end{pmatrix}
$$
$$
\large
\frac{\partial L}{\partial W}=
\begin{pmatrix}
\cfrac{\partial L}{\partial w_{11}} & \cfrac{\partial L}{\partial w_{12}} & \cfrac{\partial L}{\partial w_{13}} \\
\cfrac{\partial L}{\partial w_{21}} & \cfrac{\partial L}{\partial w_{22}} & \cfrac{\partial L}{\partial w_{23}}\\
\end{pmatrix}
$$
Each element of $\cfrac{\partial L}{\partial W}$ is the partial derivative of $L$ with respect to the corresponding element of $W$. For example, the element in row 1, column 1, $\cfrac{\partial L}{\partial w_{11}}$, expresses how much the loss $L$ changes when $w_{11}$ is changed slightly.
Let's implement the gradient computation, taking a simple neural network as an example:
```
import os
import sys
import numpy as np
def softmax(a):
"""定义 softmax 函数"""
exp_a = np.exp(a)
sum_exp_a = np.sum(exp_a)
y = exp_a / sum_exp_a
return y
def cross_entropy_error(y, t):
"""定义交叉熵损失函数"""
delta = 1e-7
return -np.sum(t * np.log(y + delta))
def numerical_gradient(f, X):
"""计算梯度矢量"""
if X.ndim == 1:
return _numerical_gradient_no_batch(f, X)
else:
grad = np.zeros_like(X)
for idx, x in enumerate(X):
grad[idx] = _numerical_gradient_no_batch(f, x)
return grad
class simpleNet:
def __init__(self):
"""初始化"""
# self.W = np.random.randn(2, 3) # 高斯分布初始化
self.W = np.array([[ 0.68851943, 2.06916921, -0.88125086],
[-1.30951576, 0.72350587, -1.88984482]])
self.q = 1
def predict(self, x):
"""预测"""
return np.dot(x, self.W)
def loss(self, x, t):
"""损失函数"""
z = self.predict(x)
y = softmax(z)
loss = cross_entropy_error(y, t)
return loss
```
We have built a simple neural network called `simpleNet`, in which `softmax` and `cross_entropy_error` are the same as before. The essential instance variable of simpleNet is the weight matrix of shape $2\times 3$.
The network has two methods: `predict`, the forward pass used for prediction, and `loss`, which computes the loss function; the parameter `x` takes the input data and `t` the correct labels.
Now let's run it and look at the result:
```
net = simpleNet()
print(net.W)  # weight parameters
x = np.array([0.6, 0.9])
p = net.predict(x)  # forward pass
print(p)
np.argmax(p)  # index of the maximum value (the predicted class)
# label of the correct class; with random initialization this can change between runs!
t = np.array([0, 1, 0])
# loss
loss1 = net.loss(x, t)
print(loss1)
```
Now let's compute the **gradient**, using `numerical_gradient(f, x)`:
Since the argument `f` of `numerical_gradient(f, x)` must be a function, for compatibility we first define a function `f(W)`:
```
def f(W):
return net.loss(x, t)
dW = numerical_gradient(f, net.W)
print(dW)
```
The result of `numerical_gradient(f, net.W)` is $dW$, a matrix of shape $2\times 3$.
Looking at this matrix, within $\cfrac{\partial L}{\partial W}$:
$\cfrac{\partial L}{\partial W_{11}}$ is roughly 0.039, which means that increasing $w_{11}$ by $h$ increases the loss by about $0.039h$.
$\cfrac{\partial L}{\partial W_{22}}$ is roughly -0.071, which means that increasing $w_{22}$ by $h$ decreases the loss by about $0.071h$.
So, with the aim of reducing the loss, $w_{22}$ should be updated in the positive direction and $w_{11}$ in the negative direction.
Once we have the gradient of the network at the input $x=[0.6, \quad 0.9]$, we only need to update the weight parameters according to the gradient method.
Let's try updating them by hand:
```
# learning rate lr
lr = 1e-4
print(lr)
class simpleNet_step2:
def __init__(self):
"""初始化,手动更新一次参数"""
self.W = np.array([[ 0.68851943 - 0.0001, 2.06916921 + 0.0001, -0.88125086 - 0.0001],
[-1.30951576 - 0.0001, 0.72350587 + 0.0001, -1.88984482 - 0.0001]])
self.q = 1
def predict(self, x):
"""预测"""
return np.dot(x, self.W)
def loss(self, x, t):
"""损失函数"""
z = self.predict(x)
y = softmax(z)
loss = cross_entropy_error(y, t)
return loss
net = simpleNet_step2()
net.W
x = np.array([0.6, 0.9])
p = net.predict(x)  # forward pass
print(p)
# the class with the maximum score is the correct answer
t = np.array([0, 1, 0])
# loss
loss2 = net.loss(x, t)
print(loss2)
if loss2 < loss1:
print("loss2 比 loss1 小了:", loss1 - loss2)
```
As we can see, after updating the weight parameters one step along the gradient (with the learning rate as the step size), the value of the loss function decreased.
## Summary of the learning algorithm
We have now covered the concepts of "loss function", "mini-batch", "gradient" and "gradient descent". Let's review the steps of neural network learning:
1. **Mini-batch**:
Randomly select a portion of the training data; this portion is called a mini-batch. Our goal is to reduce the value of the loss function on the mini-batch.
> In PyTorch this is provided by `torch.utils.data`; see [TORCH.UTILS.DATA](https://pytorch.org/docs/stable/data.html#multi-process-data-loading).
>
> In TensorFlow it is provided by `tf.data`; see [tf.data: Build TensorFlow input pipelines](https://tensorflow.google.cn/guide/data).
2. **Compute the gradient**:
To reduce the loss on the mini-batch, compute the gradient of each weight parameter. The gradient gives the direction in which the loss decreases the most.
3. **Update the parameters**:
Update the weight parameters $W$ by a small amount along the gradient direction.
4. **Repeat**:
Repeat steps 1, 2 and 3.
Neural network learning proceeds roughly according to these four steps, updating the parameters by gradient descent. Because the data used are **randomly** selected mini-batches, the method is called **stochastic gradient descent (SGD)**, which is where the name comes from.
In most deep learning frameworks, stochastic gradient descent is implemented by a function named **SGD**:
- TensorFlow: `tf.keras.optimizers.SGD`.
- PyTorch: `torch.optim.SGD`
Here, stochastic gradient descent was implemented via numerical differentiation, whose drawback is that it is very expensive to compute; later we will study **error backpropagation** to solve this problem.
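As a small, hedged sketch of the PyTorch interface (the toy linear model and the random data below are made up purely for illustration, not part of this tutorial's network):
```
import torch
from torch import nn, optim

model = nn.Linear(2, 3)                          # toy model: 2 inputs -> 3 classes
optimizer = optim.SGD(model.parameters(), lr=0.01)
criterion = nn.CrossEntropyLoss()

x = torch.randn(100, 2)                          # a random mini-batch of 100 samples
t = torch.randint(0, 3, (100,))                  # random integer class labels

for step in range(10):                           # a few SGD steps
    optimizer.zero_grad()
    loss = criterion(model(x), t)
    loss.backward()                              # gradients via backpropagation
    optimizer.step()                             # W <- W - lr * dL/dW
```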
|
github_jupyter
|
# AEJxLPS (Auroral electrojets SECS)
> Abstract: Access to the AEBS products, SECS type. This notebook uses code from the previous notebook to build a routine that is flexible to plot either the LC or SECS products - this demonstrates a prototype quicklook routine.
```
%load_ext watermark
%watermark -i -v -p viresclient,pandas,xarray,matplotlib
from viresclient import SwarmRequest
import datetime as dt
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
import matplotlib as mpl
request = SwarmRequest()
```
## AEBS product information
See previous notebook, "Demo AEBS products (LC)", for an introduction to these products.
### Function to request data from VirES and reshape it
```
def fetch_data(start_time=None, end_time=None, spacecraft=None, AEBS_type="L"):
"""DUPLICATED FROM PREVIOUS NOTEBOOK. TO BE REFACTORED"""
# Fetch data from VirES
auxiliaries = ['OrbitNumber', 'QDLat', 'QDOrbitDirection', 'OrbitDirection', 'MLT']
if AEBS_type == "L":
measurement_vars = ["J_NE"]
elif AEBS_type == "S":
measurement_vars = ["J_CF_NE", "J_DF_NE"]
# Fetch LPL/LPS
request.set_collection(f'SW_OPER_AEJ{spacecraft}LP{AEBS_type}_2F')
request.set_products(
measurements=measurement_vars,
auxiliaries=auxiliaries,
)
data = request.get_between(start_time, end_time, asynchronous=False, show_progress=False)
ds_lp = data.as_xarray()
# Fetch LPL/LPS Quality
request.set_collection(f'SW_OPER_AEJ{spacecraft}LP{AEBS_type}_2F:Quality')
request.set_products(
measurements=['RMS_misfit', 'Confidence'],
)
data = request.get_between(start_time, end_time, asynchronous=False, show_progress=False)
ds_lpq = data.as_xarray()
# Fetch PBL
request.set_collection(f'SW_OPER_AEJ{spacecraft}PB{AEBS_type}_2F')
request.set_products(
measurements=['PointType', 'Flags'],
auxiliaries=auxiliaries
)
data = request.get_between(start_time, end_time, asynchronous=False, show_progress=False)
ds_pb = data.as_xarray()
# Meaning of PointType
PointType_meanings = {
"WEJ_peak": 0, # minimum
"EEJ_peak": 1, # maximum
"WEJ_eq_bound_s": 2, # equatorward (pair start)
"EEJ_eq_bound_s": 3,
"WEJ_po_bound_s": 6, # poleward
"EEJ_po_bound_s": 7,
"WEJ_eq_bound_e": 10, # equatorward (pair end)
"EEJ_eq_bound_e": 11,
"WEJ_po_bound_e": 14, # poleward
"EEJ_po_bound_e": 15,
}
# Add new data variables (boolean Type) according to the dictionary above
ds_pb = ds_pb.assign(
{name: ds_pb["PointType"] == PointType_meanings[name]
for name in PointType_meanings.keys()}
)
# Merge datasets together
def drop_duplicate_times(_ds):
_, index = np.unique(_ds['Timestamp'], return_index=True)
return _ds.isel(Timestamp=index)
def merge_attrs(_ds1, _ds2):
attrs = {"Sources":[], "MagneticModels":[], "RangeFilters":[]}
for item in ["Sources", "MagneticModels", "RangeFilters"]:
attrs[item] = list(set(_ds1.attrs[item] + _ds2.attrs[item]))
return attrs
# Create new dataset from just the newly created PointType arrays
# This is created on a non-repeating Timestamp coordinate
ds = xr.Dataset(
{name: ds_pb[name].where(ds_pb[name], drop=True)
for name in PointType_meanings.keys()}
)
# Merge in the positional and auxiliary data
data_vars = list(set(ds_pb.data_vars).difference(set(PointType_meanings.keys())))
data_vars.remove("PointType")
ds = ds.merge(
(ds_pb[data_vars]
.pipe(drop_duplicate_times))
)
# Merge together with the LPL data
# Note that the Timestamp coordinates aren't equal
# Separately merge data with matching and missing time sample points in ds_lpl
idx_present = list(set(ds["Timestamp"].values).intersection(set(ds_lp["Timestamp"].values)))
idx_missing = list(set(ds["Timestamp"].values).difference(set(ds_lp["Timestamp"].values)))
# Override prioritises the first dataset (ds_lpl) where there are conflicts
ds2 = ds_lp.merge(ds.sel(Timestamp=idx_present), join="outer", compat="override")
ds2 = ds2.merge(ds.sel(Timestamp=idx_missing), join="outer")
# Update the metadata
ds2.attrs = merge_attrs(ds_lp, ds_pb)
# Switch the point type arrays to uint8 or bool for performance?
# But the .where operations later cast them back to float64 since gaps are filled with nan
for name in PointType_meanings.keys():
ds2[name] = ds2[name].astype("uint8").fillna(False)
# ds2[name] = ds2[name].fillna(False).astype(bool)
ds = ds2
# Append the PBL Flags information into the LPL:Quality dataset to use as a lookup table
ds_lpq = ds_lpq.assign(
Flags_PBL=
ds_pb["Flags"]
.pipe(drop_duplicate_times)
.reindex_like(ds_lpq, method="nearest"),
)
return ds, ds_lpq
```
### Plotting function
```
# Bit numbers which indicate non-nominal state
# Check SW-DS-DTU-GS-003_AEBS_PDD for details
BITS_PBL_FLAGS_EEJ_MINOR = (2, 3, 6)
BITS_PBL_FLAGS_WEJ_MINOR = (4, 5, 6)
BITS_PBL_FLAGS_EEJ_BAD = (0, 7, 8, 11)
BITS_PBL_FLAGS_WEJ_BAD = (1, 9, 10, 12)
def check_PBL_Flags(flags=0b0, EJ_type="WEJ"):
"""Return "good", "poor", or "bad" depending on status"""
def _check_bits(bitno_set):
return any(flags & (1 << bitno) for bitno in bitno_set)
if EJ_type == "WEJ":
if _check_bits(BITS_PBL_FLAGS_WEJ_BAD):
return "bad"
elif _check_bits(BITS_PBL_FLAGS_WEJ_MINOR):
return "poor"
else:
return "good"
elif EJ_type == "EEJ":
if _check_bits(BITS_PBL_FLAGS_EEJ_BAD):
return "bad"
elif _check_bits(BITS_PBL_FLAGS_EEJ_MINOR):
return "poor"
else:
return "good"
glyphs = {
"WEJ_peak": {"marker": 'v', "color":'tab:red'}, # minimum
"EEJ_peak": {"marker": '^', "color":'tab:purple'}, # maximum
"WEJ_eq_bound_s": {"marker": '>', "color":'black'}, # equatorward (pair start)
"EEJ_eq_bound_s": {"marker": '>', "color":'black'},
"WEJ_po_bound_s": {"marker": '>', "color":'black'}, # poleward
"EEJ_po_bound_s": {"marker": '>', "color":'black'},
"WEJ_eq_bound_e": {"marker": '<', "color":'black'}, # equatorward (pair end)
"EEJ_eq_bound_e": {"marker": '<', "color":'black'},
"WEJ_po_bound_e": {"marker": '<', "color":'black'}, # poleward
"EEJ_po_bound_e": {"marker": '<', "color":'black'},
}
def plot_stack(ds, ds_lpq, hemisphere="North", x_axis="Latitude", AEBS_type="L"):
# Identify which variable to plot from dataset
# If accessing the SECS (LPS) data, sum the DF & CF parts
if "J_CF_NE" in ds.data_vars:
ds["J_NE"] = ds["J_DF_NE"] + ds["J_CF_NE"]
plotvar = "J_NE"
orbdir = "OrbitDirection" if x_axis=="Latitude" else "QDOrbitDirection"
markersize = 1 if AEBS_type=="S" else 5
# Select hemisphere
if hemisphere == "North":
ds = ds.where(ds["Latitude"]>0, drop=True)
elif hemisphere == "South":
ds = ds.where(ds["Latitude"]<0, drop=True)
# Generate plot with split by columns: ascending/descending to/from pole
# by rows: successive orbits
fig, axes = plt.subplots(
nrows=len(ds.groupby("OrbitNumber")), ncols=2, sharex="col", sharey="all",
figsize=(10, 20)
)
max_ylim = np.max(np.abs(ds[plotvar].sel({"NE": "E"})))
# Loop through each orbit
for i, (_, ds_orbit) in enumerate(ds.groupby("OrbitNumber")):
if hemisphere == "North":
ds_orb_asc = ds_orbit.where(ds_orbit[orbdir] == 1, drop=True)
ds_orb_desc = ds_orbit.where(ds_orbit[orbdir] == -1, drop=True)
if hemisphere == "South":
ds_orb_asc = ds_orbit.where(ds_orbit[orbdir] == -1, drop=True)
ds_orb_desc = ds_orbit.where(ds_orbit[orbdir] == 1, drop=True)
# Loop through ascending and descending sections
for j, _ds in enumerate((ds_orb_asc, ds_orb_desc)):
if len(_ds.Timestamp) == 0:
continue
# Line plot of current strength
axes[i, j].plot(
_ds[x_axis], _ds[plotvar].sel({"NE": "E"}),
color="tab:blue", marker=".", markersize=markersize, linestyle=""
)
axes[i, j].plot(
_ds[x_axis], _ds[plotvar].sel({"NE": "N"}),
color="tab:grey", marker=".", markersize=markersize, linestyle=""
)
# Plot glyphs at the peaks and boundaries locations
for name in glyphs.keys():
__ds = _ds.where(_ds[name], drop=True)
try:
for lat in __ds[x_axis]:
axes[i, j].plot(
lat, 0,
marker=glyphs[name]["marker"], color=glyphs[name]["color"]
)
except Exception:
pass
# Identify Quality and Flags info
# Use either the start time of the section or the end, depending on asc or desc
index = 0 if j == 0 else -1
t = _ds["Timestamp"].isel(Timestamp=index).values
_ds_qualflags = ds_lpq.sel(Timestamp=t, method="nearest")
pbl_flags = int(_ds_qualflags["Flags_PBL"].values)
lpl_rms_misfit = float(_ds_qualflags["RMS_misfit"].values)
lpl_confidence = float(_ds_qualflags["Confidence"].values)
# Shade WEJ and EEJ regions, only if well-defined
# def _shade_EJ_region(_ds=None, EJ="WEJ", color="tab:red", alpha=0.3):
wej_status = check_PBL_Flags(pbl_flags, "WEJ")
eej_status = check_PBL_Flags(pbl_flags, "EEJ")
if wej_status in ["good", "poor"]:
alpha = 0.3 if wej_status == "good" else 0.1
try:
WEJ_left = _ds.where(
(_ds["WEJ_eq_bound_s"] == 1) | (_ds["WEJ_po_bound_s"] == 1), drop=True)
WEJ_right = _ds.where(
(_ds["WEJ_eq_bound_e"] == 1) | (_ds["WEJ_po_bound_e"] == 1), drop=True)
x1 = WEJ_left[x_axis][0]
x2 = WEJ_right[x_axis][0]
axes[i, j].fill_betweenx(
[-max_ylim, max_ylim], [x1, x1], [x2, x2], color="tab:red", alpha=alpha)
except Exception:
pass
if eej_status in ["good", "poor"]:
alpha = 0.3 if eej_status == "good" else 0.15
try:
EEJ_left = _ds.where(
(_ds["EEJ_eq_bound_s"] == 1) | (_ds["EEJ_po_bound_s"] == 1), drop=True)
EEJ_right = _ds.where(
(_ds["EEJ_eq_bound_e"] == 1) | (_ds["EEJ_po_bound_e"] == 1), drop=True)
x1 = EEJ_left[x_axis][0]
x2 = EEJ_right[x_axis][0]
axes[i, j].fill_betweenx(
[-max_ylim, max_ylim], [x1, x1], [x2, x2], color="tab:purple", alpha=alpha)
except Exception:
pass
# Write the LPL:Quality and PBL Flags info
ha = "right" if j == 0 else "left"
textx = 0.98 if j == 0 else 0.02
axes[i, j].text(
textx, 0.95,
f"RMS Misfit {np.round(lpl_rms_misfit, 2)}; Confidence {np.round(lpl_confidence, 2)}",
transform=axes[i, j].transAxes, verticalalignment="top", horizontalalignment=ha
)
axes[i, j].text(
textx, 0.05,
f"PBL Flags {pbl_flags:013b}",
transform=axes[i, j].transAxes, verticalalignment="bottom", horizontalalignment=ha
)
# Write the start/end time and MLT of the section, and the orbit number
def _format_utc(t):
return f"UTC {t.strftime('%H:%M')}"
def _format_mlt(mlt):
hour, fraction = divmod(mlt, 1)
t = dt.time(int(hour), minute=int(60*fraction))
return f"MLT {t.strftime('%H:%M')}"
try:
# Left part (section starting UTC, MLT, OrbitNumber)
time_s = pd.to_datetime(ds_orb_asc["Timestamp"].isel(Timestamp=0).data)
mlt_s = ds_orb_asc["MLT"].dropna(dim="Timestamp").isel(Timestamp=0).data
orbit_number = int(ds_orb_asc["OrbitNumber"].isel(Timestamp=0).data)
axes[i, 0].text(
0.01, 0.95, f"{_format_utc(time_s)}\n{_format_mlt(mlt_s)}",
transform=axes[i, 0].transAxes, verticalalignment="top"
)
axes[i, 0].text(
0.01, 0.05, f"Orbit {orbit_number}",
transform=axes[i, 0].transAxes, verticalalignment="bottom"
)
except Exception:
pass
try:
# Right part (section ending UTC, MLT)
time_e = pd.to_datetime(ds_orb_desc["Timestamp"].isel(Timestamp=-1).data)
mlt_e = ds_orb_desc["MLT"].dropna(dim="Timestamp").isel(Timestamp=-1).data
axes[i, 1].text(
0.99, 0.95, f"{_format_utc(time_e)}\n{_format_mlt(mlt_e)}",
transform=axes[i, 1].transAxes, verticalalignment="top", horizontalalignment="right"
)
except Exception:
pass
# Extra config of axes and figure text
axes[0, 0].set_ylim(-max_ylim, max_ylim)
if hemisphere == "North":
axes[0, 0].set_xlim(50, 90)
axes[0, 1].set_xlim(90, 50)
elif hemisphere == "South":
axes[0, 0].set_xlim(-50, -90)
axes[0, 1].set_xlim(-90, -50)
for ax in axes.flatten():
ax.grid()
axes[-1, 0].set_xlabel(x_axis)
axes[-1, 0].set_ylabel("Horizontal currents\n[ A.km$^{-1}$ ]")
time = pd.to_datetime(ds["Timestamp"].isel(Timestamp=0).data)
spacecraft = ds["Spacecraft"].dropna(dim="Timestamp").isel(Timestamp=0).data
AEBS_type_name = "LC" if AEBS_type == "L" else "SECS"
fig.text(
0.5, 0.9, f"{time.strftime('%Y-%m-%d')}\nSwarm {spacecraft}\n{hemisphere}\nAEBS: {AEBS_type_name}",
transform=fig.transFigure, horizontalalignment="center",
)
fig.subplots_adjust(wspace=0, hspace=0)
return fig, axes
```
### Fetching and plotting function
```
def quicklook(day="2015-01-01", hemisphere="North", spacecraft="A", AEBS_type="L", xaxis="Latitude"):
start_time = dt.datetime.fromisoformat(day)
end_time = start_time + dt.timedelta(days=1)
ds, ds_lpq = fetch_data(start_time, end_time, spacecraft, AEBS_type)
fig, axes = plot_stack(ds, ds_lpq, hemisphere, xaxis, AEBS_type)
return ds, fig, axes
```
Consecutive orbits are shown in consecutive rows, centered over the pole. The starting and ending times (UTC and MLT) of the orbital section are shown at the left and right. Westward (WEJ) and Eastward (EEJ) electrojet extents and peak intensities are indicated:
- Blue dots: Estimated current density in Eastward direction, J_NE (E)
- Grey dots: Estimated current density in Northward direction, J_NE (N)
- Red/Purple shaded region: WEJ/EEJ extent (boundaries marked by black triangles)
- Red/Purple triangles: Locations of peak WEJ/EEJ intensity
Select AEBS_type as S to get SECS results, L to get LC results
SECS = spherical elementary current systems method
LC = Line current method
Notes:
The code is currently quite fragile, so it is broken on some days. Sometimes the electrojet regions are not shaded correctly. Only the horizontal currents are currently shown.
```
quicklook(day="2016-01-01", hemisphere="North", spacecraft="A", AEBS_type="S", xaxis="Latitude");
quicklook(day="2016-01-01", hemisphere="North", spacecraft="A", AEBS_type="L", xaxis="Latitude");
```
|
github_jupyter
|
```
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms
from torchvision.utils import make_grid
import matplotlib
from matplotlib import pyplot as plt
import seaborn as sns
from IPython import display
import torchsummary as ts
import numpy as np
sns.set()
display.set_matplotlib_formats("svg")
plt.rcParams['font.sans-serif'] = "Liberation Sans"
device = torch.device("cuda")
torch.cuda.is_available()
trans = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
dataset = datasets.ImageFolder("dataset/faces/", transform=trans)
data_loader = DataLoader(dataset, batch_size=16, shuffle=True, num_workers=4,
drop_last=True)
images = make_grid(next(iter(data_loader))[0], normalize=True, padding=5, pad_value=1)
plt.imshow(images.permute(1, 2, 0))
plt.axis("off")
plt.grid(False)
def imshow(data):
images = make_grid(data.detach().cpu() , normalize=True, padding=5, pad_value=1)
plt.imshow(images.permute(1, 2, 0))
plt.axis("off")
plt.grid(False)
plt.pause(0.0001)
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0)
class Discriminator(nn.Module):
def __init__(self):
super().__init__()
self.main = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=6, kernel_size=4, stride=2,
padding=1),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=6, out_channels=12, kernel_size=4, stride=2,
padding=1),
nn.BatchNorm2d(12),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=12, out_channels=24, kernel_size=4, stride=2,
padding=1),
nn.BatchNorm2d(24),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(24, 1, 4, 1, 0, bias=False),
nn.Sigmoid()
)
def forward(self, x):
x = self.main(x)
x = x.reshape(-1)
return x
class Generator(nn.Module):
def __init__(self, init_size=100):
super().__init__()
self.expand_dim = nn.Linear(init_size, 1024)
self.init_size = init_size
self.main = nn.Sequential(
nn.ConvTranspose2d(64, 32, kernel_size=4,
stride=2, padding=1, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.ConvTranspose2d(32, 12, kernel_size=4,
stride=2, padding=1, bias=False),
nn.BatchNorm2d(12),
nn.ReLU(),
nn.ConvTranspose2d(12, 3, kernel_size=4,
stride=2, padding=1, bias=False),
nn.Tanh()
)
def forward(self, x):
x = self.expand_dim(x).reshape(-1, 64, 4, 4)
x = self.main(x)
return x
netD = Discriminator()
netD(torch.randn(16, 3, 32, 32))
netG = Generator()
netG(torch.randn(16, 100)).shape
BATCH_SIZE = 64
ININT_SIZE = 100
data_loader = DataLoader(dataset, batch_size=BATCH_SIZE,
shuffle=True, num_workers=4, drop_last=True)
Epoch = 1000
D_losses = []
G_losses = []
generator = Generator(ININT_SIZE).to(device)
discirminator = Discriminator().to(device)
generator.apply(weights_init)
discirminator.apply(weights_init)
criterion = nn.BCELoss()
OPTIMIZER_G = optim.Adam(generator.parameters(), lr=4e-4, betas=(0.5, 0.999))
OPTIMIZER_D = optim.Adam(discirminator.parameters(), lr=1e-4, betas=(0.5, 0.999))
pdr, pdf, pg = None, None, None
for epoch in range(1, 1 + Epoch):
dis_temp_loss = []
gen_temp_loss = []
for idx, (d, l) in enumerate(data_loader):
d = d.to(device)
l = l.float().to(device)
out = discirminator(d)
pdr = out.mean().item()
real_loss = criterion(out, torch.ones_like(l))
noise = torch.randn(BATCH_SIZE, ININT_SIZE).to(device)
images = generator(noise)
out = discirminator(images.detach().to(device))
pdf = out.mean().item()
fake_loss = criterion(out, torch.zeros_like(l))
OPTIMIZER_D.zero_grad()
real_loss.backward()
fake_loss.backward()
OPTIMIZER_D.step()
noise = torch.randn(BATCH_SIZE, ININT_SIZE).to(device)
images = generator(noise)
out = discirminator(images)
pg = out.mean().item()
loss = criterion(out, torch.ones_like(l))
OPTIMIZER_G.zero_grad()
loss.backward()
OPTIMIZER_G.step()
d_loss = fake_loss + real_loss
print("Epoch = {:<2} Step[{:3}/{:3}] Dis-Loss = {:.5f} Gen-Loss = {:.5f} acc = {} {} {}"\
.format(epoch, idx + 1, len(data_loader), d_loss.item(),
loss.item(), pdr, pdf, pg))
dis_temp_loss.append(d_loss.item())
gen_temp_loss.append(loss.item())
D_losses.append(np.mean(dis_temp_loss))
G_losses.append(np.mean(gen_temp_loss))
if epoch > 1:
fig, ax = plt.subplots()
ax.plot(np.arange(len(D_losses)) + 1,
D_losses, label="Discriminator", ls="-.")
ax.plot(np.arange(len(G_losses)) + 1,
G_losses, label="Generator", ls="--")
ax.set_xlabel("Epoch")
ax.set_ylabel("Loss")
ax.set_title("GAN Training process")
ax.legend(bbox_to_anchor=[1, 1.02])
plt.pause(0.0001)
imshow(images[:16])
imshow(d[:16])
if epoch % 10 == 0:
display.clear_output()
```
|
github_jupyter
|
```
%matplotlib inline
import matplotlib.pyplot as plt
import torch
from torch import nn as nn
from math import factorial
import random
import torch.nn.functional as F
import numpy as np
import seaborn as sn
import pandas as pd
import os
from os.path import join
import glob
from math import factorial
ttype = torch.cuda.DoubleTensor if torch.cuda.is_available() else torch.DoubleTensor
print(ttype)
from sith import DeepSITH
from tqdm.notebook import tqdm
import pickle
sn.set_context("poster")
sig_lets = ["A","B","C","D","E","F","G","H",]
signals = ttype([[0,1,1,1,0,1,1,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,1,1,1,0,1,0,1,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,1,1,1,0,1,0,1,0,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,1,1,1,0,1,0,1,0,1,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,1,0,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,1,1,1,0,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,1,1,1,0,1,1,1,0,1,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0]]
).view(7, 1, 1, -1)
key2id = {k:i for i, k in enumerate(sig_lets)}
print(key2id)
target = ttype([[0,0,0,0,0,1,1,1,0,1,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,0,0,0,0,1,1,1,0,1,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0],
[0,0,0,0,0,1,1,1,0,1,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0],
[0,0,0,0,0,1,1,1,0,1,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0],
[0,0,0,0,0,1,1,1,0,1,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0],
[0,0,0,0,0,1,1,1,0,1,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0],
[0,0,0,0,0,1,1,1,0,1,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0]]).view(7, -1)
print(target.shape)
signals.shape
def train_model(model,
signals,
target,
optimizer,
loss_func,
train_dur=2.0,
test_durs=[1.5, 2.0, 2.5],
epochs=1500,
loss_buffer_size=50,
testing_every=30):
loss_track = {"loss":[],
"epoch":[],
"acc":[],
"perf":[]}
losses = []
progress_bar = tqdm(range(int(epochs)), ncols=800)
for e in progress_bar:
for i in range(target.shape[1]):
# use target one by one
perm = target[:,i].type(torch.LongTensor)
#print(perm.shape)
# Zero the gradient between each batch
model.zero_grad()
# Present an entire batch to the model
# indexing using -1 at the time dimension,
# only use the latest value
out = model(signals)[:, -1,:]
#print(out.shape)
# Measure loss via CrossEntropyLoss
loss = loss_func(out,
perm)
# Adjust Weights
loss.backward()
optimizer.step()
losses.append(loss.detach().cpu().numpy())
if len(losses) > loss_buffer_size:
losses = losses[1:]
# Record loss, epoch number, batch number in epoch,
# last accuracy measure, etc
loss_track['loss'].append(np.mean(losses))
loss_track['epoch'].append(e)
# calculate model accuracy:
if ((e)%testing_every == 0) & (e != 0):
model.eval()
perf = test_model(model, signals, target)
model.train()
loss_track['perf'].append(perf)
if e > testing_every:
# Update progress_bar
s = "{}: Loss: {:.6f}, Acc:{:.4f}"
format_list = [e, loss_track['loss'][-1]] + [perf]
s = s.format(*format_list)
progress_bar.set_description(s)
if loss_track['perf'][-1] == 1.0:
break
return loss_track
def test_model(model, signals,target):
# Test the Model
out = model(signals)[:, -1, :]
print(out)
pred = torch.argmax(out, dim=-1)
print(pred)
groundTruth = target
    perf = 0  # placeholder: the accuracy computation is not implemented here, so perf stays 0
return perf
```
# Setup Classifier type model
```
class DeepSITH_Classifier(nn.Module):
def __init__(self, out_features, layer_params, dropout=.5):
super(DeepSITH_Classifier, self).__init__()
last_hidden = layer_params[-1]['hidden_size']
self.hs = DeepSITH(layer_params=layer_params, dropout=dropout)
self.to_out = nn.Linear(last_hidden, out_features)
def forward(self, inp):
x = self.hs(inp)
x = self.to_out(x)
return x
```
# TEST layers for correct taustars/parameters/cvalues
These dictionaries will not be used later.
```
sith_params2 = {"in_features":1,
"tau_min":.1, "tau_max":20.0, 'buff_max':40,
"k":50,
"ntau":5, 'g':0,
"ttype":ttype,
"hidden_size":10, "act_func":nn.ReLU()}
sith_params3 = {"in_features":sith_params2['hidden_size'],
"tau_min":.1, "tau_max":200.0, 'buff_max':240,
"k":50,
"ntau":5, 'g':0,
"ttype":ttype,
"hidden_size":20, "act_func":nn.ReLU()}
layer_params = [sith_params2, sith_params3]
model = DeepSITH_Classifier(out_features=2,
layer_params=layer_params, dropout=.0).double()
print(model)
for i, l in enumerate(model.hs.layers):
print("Layer {}".format(i), l.sith.tau_star)
tot_weights = 0
for p in model.parameters():
tot_weights += p.numel()
print("Total Weights:", tot_weights)
```
# Visualize the taustar buffers
They must all completely empty or there will be edge effects
```
plt.plot(model.hs.layers[0].sith.filters[:, 0, 0, :].detach().cpu().T);
```
# Training and testing
```
# You likely don't need this to be this long, but just in case.
epochs = 500
# Just for visualizing average loss through time.
loss_buffer_size = 100
loss_func = torch.nn.CrossEntropyLoss()
sith_params2 = {"in_features":1,
"tau_min":.1, "tau_max":20.0, 'buff_max':40,
"k":50,
"ntau":10, 'g':0,
"ttype":ttype,
"hidden_size":10, "act_func":nn.ReLU()}
sith_params3 = {"in_features":sith_params2['hidden_size'],
"tau_min":.1, "tau_max":200.0, 'buff_max':240,
"k":50,
"ntau":10, 'g':0,
"ttype":ttype,
"hidden_size":20, "act_func":nn.ReLU()}
layer_params = [sith_params2, sith_params3]
model = DeepSITH_Classifier(out_features=5,
layer_params=layer_params,
dropout=0.).double()
optimizer = torch.optim.Adam(model.parameters())
perf = train_model(model, signals, target,optimizer, loss_func,
epochs=epochs,
loss_buffer_size=loss_buffer_size)
#perfs.append(perf)
with open('filename.dill', 'wb') as handle:
pickle.dump(perf, handle, protocol=pickle.HIGHEST_PROTOCOL)
fig = plt.figure(figsize=(8,10))
ax = fig.add_subplot(2,1,1)
ax.plot(perf['loss'])
ax.set_ylabel("Loss")
#ax.set_xlabel("Presentation Number")
ax = fig.add_subplot(2,1,2)
dat = pd.DataFrame(perf['perf'])
ax.plot(np.arange(dat.shape[0])*30, dat)
ax.set_ylabel("Classification Acc")
ax.set_xlabel("Presentation Number")
plt.savefig(join("figs","DeepSith_training_H8"))
```
|
github_jupyter
|
```
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
pal = sns.color_palette()
df_train = pd.read_csv('train.csv')
df_train.head()
print('Total number of question pairs for training: {}'.format(len(df_train)))
print('Duplicate pairs: {}%'.format(round(df_train['is_duplicate'].mean()*100, 2)))
qids = pd.Series(df_train['qid1'].tolist() + df_train['qid2'].tolist())
print('Total number of questions in the training data: {}'.format(len(
np.unique(qids))))
print('Number of questions that appear multiple times: {}'.format(np.sum(qids.value_counts() > 1)))
plt.figure(figsize=(12, 5))
plt.hist(qids.value_counts(), bins=50)
plt.yscale('log', nonposy='clip')
plt.title('Log-Histogram of question appearance counts')
plt.xlabel('Number of occurences of question')
plt.ylabel('Number of questions')
print()
df_test = pd.read_csv('test.csv')
df_test.head()
print('Total number of question pairs for testing: {}'.format(len(df_test)))
train_qs = pd.Series(df_train['question1'].tolist() + df_train['question2'].tolist()).astype(str)
test_qs = pd.Series(df_test['question1'].tolist() + df_test['question2'].tolist()).astype(str)
dist_train = train_qs.apply(len)
dist_test = test_qs.apply(len)
plt.figure(figsize=(15, 10))
plt.hist(dist_train, bins=200, range=[0, 200], color=pal[2], normed=True, label='train')
plt.hist(dist_test, bins=200, range=[0, 200], color=pal[1], normed=True, alpha=0.5, label='test')
plt.title('Normalised histogram of character count in questions', fontsize=15)
plt.legend()
plt.xlabel('Number of characters', fontsize=15)
plt.ylabel('Probability', fontsize=15)
print('mean-train {:.2f} std-train {:.2f} mean-test {:.2f} std-test {:.2f} max-train {:.2f} max-test {:.2f}'.format(dist_train.mean(),
dist_train.std(), dist_test.mean(), dist_test.std(), dist_train.max(), dist_test.max()))
```
We can see that most questions have anywhere from 15 to 150 characters in them. It seems that the test distribution is a little different from the train one, but not too much so.
```
dist_train = train_qs.apply(lambda x: len(x.split(' ')))
dist_test = test_qs.apply(lambda x: len(x.split(' ')))
plt.figure(figsize=(15, 10))
plt.hist(dist_train, bins=50, range=[0, 50], color=pal[2], normed=True, label='train')
plt.hist(dist_test, bins=50, range=[0, 50], color=pal[1], normed=True, alpha=0.5, label='test')
plt.title('Normalised histogram of word count in questions', fontsize=15)
plt.legend()
plt.xlabel('Number of words', fontsize=15)
plt.ylabel('Probability', fontsize=15)
print('mean-train {:.2f} std-train {:.2f} mean-test {:.2f} std-test {:.2f} max-train {:.2f} max-test {:.2f}'.format(dist_train.mean(),
dist_train.std(), dist_test.mean(), dist_test.std(), dist_train.max(), dist_test.max()))
```
### WordCloud
```
from wordcloud import WordCloud
cloud = WordCloud(width=1440, height=1080).generate(" ".join(train_qs.astype(str)))
plt.figure(figsize=(20, 15))
plt.imshow(cloud)
plt.axis('off')
```
## Semantic Analysis
```
qmarks = np.mean(train_qs.apply(lambda x: '?' in x))
math = np.mean(train_qs.apply(lambda x: '[math]' in x))
fullstop = np.mean(train_qs.apply(lambda x: '.' in x))
capital_first = np.mean(train_qs.apply(lambda x: x[0].isupper()))
capitals = np.mean(train_qs.apply(lambda x: max([y.isupper() for y in x])))
numbers = np.mean(train_qs.apply(lambda x: max([y.isdigit() for y in x])))
print('Questions with question marks: {:.2f}%'.format(qmarks * 100))
print('Questions with [math] tags: {:.2f}%'.format(math * 100))
print('Questions with full stops: {:.2f}%'.format(fullstop * 100))
print('Questions with capitalised first letters: {:.2f}%'.format(capital_first * 100))
print('Questions with capital letters: {:.2f}%'.format(capitals * 100))
print('Questions with numbers: {:.2f}%'.format(numbers * 100))
```
# Initial Feature Analysis
Before we create a model, we should take a look at how powerful some features are. I will start off with the word share feature from the benchmark model.
```
from nltk.corpus import stopwords
stops = set(stopwords.words("english"))
def word_match_share(row):
q1words = {}
q2words = {}
for word in str(row['question1']).lower().split():
if word not in stops:
q1words[word] = 1
for word in str(row['question2']).lower().split():
if word not in stops:
q2words[word] = 1
if len(q1words) == 0 or len(q2words) == 0:
# The computer-generated chaff includes a few questions that are nothing but stopwords
return 0
shared_words_in_q1 = [w for w in q1words.keys() if w in q2words]
shared_words_in_q2 = [w for w in q2words.keys() if w in q1words]
R = (len(shared_words_in_q1) + len(shared_words_in_q2))/(len(q1words) + len(q2words))
return R
plt.figure(figsize=(15, 5))
train_word_match = df_train.apply(word_match_share, axis=1, raw=True)
plt.hist(train_word_match[df_train['is_duplicate'] == 0], bins=20, normed=True, label='Not Duplicate')
plt.hist(train_word_match[df_train['is_duplicate'] == 1], bins=20, normed=True, alpha=0.7, label='Duplicate')
plt.legend()
plt.title('Label distribution over word_match_share', fontsize=15)
plt.xlabel('word_match_share', fontsize=15)
from collections import Counter
# If a word appears only once, we ignore it completely (likely a typo)
# Epsilon defines a smoothing constant, which makes the effect of extremely rare words smaller
def get_weight(count, eps=10000, min_count=2):
if count < min_count:
return 0
else:
return 1 / (count + eps)
eps = 5000
words = (" ".join(train_qs)).lower().split()
counts = Counter(words)
weights = {word: get_weight(count) for word, count in counts.items()}
print('Most common words and weights: \n')
print(sorted(weights.items(), key=lambda x: x[1] if x[1] > 0 else 9999)[:10])
print('\nLeast common words and weights: ')
(sorted(weights.items(), key=lambda x: x[1], reverse=True)[:10])
def tfidf_word_match_share(row):
q1words = {}
q2words = {}
for word in str(row['question1']).lower().split():
if word not in stops:
q1words[word] = 1
for word in str(row['question2']).lower().split():
if word not in stops:
q2words[word] = 1
if len(q1words) == 0 or len(q2words) == 0:
# The computer-generated chaff includes a few questions that are nothing but stopwords
return 0
shared_weights = [weights.get(w, 0) for w in q1words.keys() if w in q2words] + [weights.get(w, 0) for w in q2words.keys() if w in q1words]
total_weights = [weights.get(w, 0) for w in q1words] + [weights.get(w, 0) for w in q2words]
R = np.sum(shared_weights) / np.sum(total_weights)
return R
plt.figure(figsize=(15, 5))
tfidf_train_word_match = df_train.apply(tfidf_word_match_share, axis=1, raw=True)
plt.hist(tfidf_train_word_match[df_train['is_duplicate'] == 0].fillna(0), bins=20, normed=True, label='Not Duplicate')
plt.hist(tfidf_train_word_match[df_train['is_duplicate'] == 1].fillna(0), bins=20, normed=True, alpha=0.7, label='Duplicate')
plt.legend()
plt.title('Label distribution over tfidf_word_match_share', fontsize=15)
plt.xlabel('word_match_share', fontsize=15)
from sklearn.metrics import roc_auc_score
print('Original AUC:', roc_auc_score(df_train['is_duplicate'], train_word_match))
print(' TFIDF AUC:', roc_auc_score(df_train['is_duplicate'], tfidf_train_word_match.fillna(0)))
```
## Rebalancing the Data
However, before I do this, I would like to rebalance the data that XGBoost receives, since we have 37% positive class in our training data, and only 17% in the test data. By re-balancing the data so our training set has 17% positives, we can ensure that XGBoost outputs probabilities that will better match the data, and should get a better score (since LogLoss looks at the probabilities themselves and not just the order of the predictions like AUC)
```
# First we create our training and testing data
x_train = pd.DataFrame()
x_test = pd.DataFrame()
x_train['word_match'] = train_word_match
x_train['tfidf_word_match'] = tfidf_train_word_match
x_test['word_match'] = df_test.apply(word_match_share, axis=1, raw=True)
x_test['tfidf_word_match'] = df_test.apply(tfidf_word_match_share, axis=1, raw=True)
y_train = df_train['is_duplicate'].values
pos_train = x_train[y_train == 1]
neg_train = x_train[y_train == 0]
# Now we oversample the negative class
# There is likely a much more elegant way to do this...
p = 0.165
scale = ((len(pos_train) / (len(pos_train) + len(neg_train))) / p) - 1
while scale > 1:
neg_train = pd.concat([neg_train, neg_train])
scale -=1
neg_train = pd.concat([neg_train, neg_train[:int(scale * len(neg_train))]])
print(len(pos_train) / (len(pos_train) + len(neg_train)))
x_train = pd.concat([pos_train, neg_train])
y_train = (np.zeros(len(pos_train)) + 1).tolist() + np.zeros(len(neg_train)).tolist()
del pos_train, neg_train
# Finally, we split some of the data off for validation
from sklearn.model_selection import train_test_split
x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.2, random_state=4242)
```
## XGBoost
```
import xgboost as xgb
# Set our parameters for xgboost
params = {}
params['objective'] = 'binary:logistic'
params['eval_metric'] = 'logloss'
params['eta'] = 0.02
params['max_depth'] = 4
d_train = xgb.DMatrix(x_train, label=y_train)
d_valid = xgb.DMatrix(x_valid, label=y_valid)
watchlist = [(d_train, 'train'), (d_valid, 'valid')]
bst = xgb.train(params, d_train, 400, watchlist, early_stopping_rounds=50, verbose_eval=10)
d_test = xgb.DMatrix(x_test)
p_test = bst.predict(d_test)
sub = pd.DataFrame()
sub['test_id'] = df_test['test_id']
sub['is_duplicate'] = p_test
sub.to_csv('simple_xgb.csv', index=False)
sub.head()
def logloss(ptest):
    # Rough sanity check only: without the true test labels this treats every
    # prediction as if its label were 1, so it is the mean of -log(p) rather
    # than the competition log loss.
    s = 0
    for res in ptest:
        s += np.log(res)
    return -s
print(logloss(p_test)/len(p_test))
```
|
github_jupyter
|
```
# default_exp models.OmniScaleCNN
```
# OmniScaleCNN
> This is an unofficial PyTorch implementation by Ignacio Oguiza - [email protected] based on:
* Rußwurm, M., & Körner, M. (2019). Self-attention for raw optical satellite time series classification. arXiv preprint arXiv:1910.10536.
* Official implementation: https://github.com/dl4sits/BreizhCrops/blob/master/breizhcrops/models/OmniScaleCNN.py
```
#export
from tsai.imports import *
from tsai.models.layers import *
from tsai.models.utils import *
#export
#This is an unofficial PyTorch implementation by Ignacio Oguiza - [email protected] based on:
# Rußwurm, M., & Körner, M. (2019). Self-attention for raw optical satellite time series classification. arXiv preprint arXiv:1910.10536.
# Official implementation: https://github.com/dl4sits/BreizhCrops/blob/master/breizhcrops/models/OmniScaleCNN.py
class SampaddingConv1D_BN(Module):
def __init__(self, in_channels, out_channels, kernel_size):
self.padding = nn.ConstantPad1d((int((kernel_size - 1) / 2), int(kernel_size / 2)), 0)
self.conv1d = torch.nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size)
self.bn = nn.BatchNorm1d(num_features=out_channels)
def forward(self, x):
x = self.padding(x)
x = self.conv1d(x)
x = self.bn(x)
return x
class build_layer_with_layer_parameter(Module):
"""
formerly build_layer_with_layer_parameter
"""
def __init__(self, layer_parameters):
"""
layer_parameters format
[in_channels, out_channels, kernel_size,
in_channels, out_channels, kernel_size,
..., nlayers
]
"""
self.conv_list = nn.ModuleList()
for i in layer_parameters:
# in_channels, out_channels, kernel_size
conv = SampaddingConv1D_BN(i[0], i[1], i[2])
self.conv_list.append(conv)
def forward(self, x):
conv_result_list = []
for conv in self.conv_list:
conv_result = conv(x)
conv_result_list.append(conv_result)
result = F.relu(torch.cat(tuple(conv_result_list), 1))
return result
class OmniScaleCNN(Module):
def __init__(self, c_in, c_out, seq_len, layers=[8 * 128, 5 * 128 * 256 + 2 * 256 * 128], few_shot=False):
receptive_field_shape = seq_len//4
layer_parameter_list = generate_layer_parameter_list(1,receptive_field_shape, layers, in_channel=c_in)
self.few_shot = few_shot
self.layer_parameter_list = layer_parameter_list
self.layer_list = []
for i in range(len(layer_parameter_list)):
layer = build_layer_with_layer_parameter(layer_parameter_list[i])
self.layer_list.append(layer)
self.net = nn.Sequential(*self.layer_list)
self.gap = GAP1d(1)
out_put_channel_number = 0
for final_layer_parameters in layer_parameter_list[-1]:
out_put_channel_number = out_put_channel_number + final_layer_parameters[1]
self.hidden = nn.Linear(out_put_channel_number, c_out)
def forward(self, x):
x = self.net(x)
x = self.gap(x)
if not self.few_shot: x = self.hidden(x)
return x
def get_Prime_number_in_a_range(start, end):
Prime_list = []
for val in range(start, end + 1):
prime_or_not = True
for n in range(2, val):
if (val % n) == 0:
prime_or_not = False
break
if prime_or_not:
Prime_list.append(val)
return Prime_list
def get_out_channel_number(paramenter_layer, in_channel, prime_list):
out_channel_expect = max(1, int(paramenter_layer / (in_channel * sum(prime_list))))
return out_channel_expect
def generate_layer_parameter_list(start, end, layers, in_channel=1):
prime_list = get_Prime_number_in_a_range(start, end)
layer_parameter_list = []
for paramenter_number_of_layer in layers:
out_channel = get_out_channel_number(paramenter_number_of_layer, in_channel, prime_list)
tuples_in_layer = []
for prime in prime_list:
tuples_in_layer.append((in_channel, out_channel, prime))
in_channel = len(prime_list) * out_channel
layer_parameter_list.append(tuples_in_layer)
tuples_in_layer_last = []
first_out_channel = len(prime_list) * get_out_channel_number(layers[0], 1, prime_list)
tuples_in_layer_last.append((in_channel, first_out_channel, 1))
tuples_in_layer_last.append((in_channel, first_out_channel, 2))
layer_parameter_list.append(tuples_in_layer_last)
return layer_parameter_list
bs = 16
c_in = 3
seq_len = 12
c_out = 2
xb = torch.rand(bs, c_in, seq_len)
m = create_model(OmniScaleCNN, c_in, c_out, seq_len)
test_eq(OmniScaleCNN(c_in, c_out, seq_len)(xb).shape, [bs, c_out])
m
#hide
from tsai.imports import *
from tsai.export import *
nb_name = get_nb_name()
# nb_name = "109_models.OmniScaleCNN.ipynb"
create_scripts(nb_name);
```
|
github_jupyter
|
# Natural Language Processing - Unsupervised Topic Modeling with Reddit Posts
###### This project dives into multiple techniques used for NLP and subtopics such as dimensionality reduction, topic modeling, and clustering.
1. [Google BigQuery](#Google-BigQuery)
1. [Exploratory Data Analysis (EDA) & Preprocessing](#Exploratory-Data-Analysis-&-Preprocessing)
1. [Singular Value Decomposition (SVD)](#Singular-Value-Decomposition-(SVD))
1. [Latent Semantic Analysis (LSA - applied SVD)](#Latent-Semantic-Analysis-(LSA))
1. [Similarity Scoring Metrics](#sim)
1. [KMeans Clustering](#km)
1. [Latent Dirichlet Allocation (LDA)](#lda)
1. [pyLDAvis - interactive d3 for LDA](#py)
- This was separated out in a new notebook to quickly view visual (load files and see visualization)
```
# Easter Egg to start your imports
#import this
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import logging
import pickle
import sys
import os
from google.cloud import bigquery
import warnings
def warn(*args, **kwargs):
pass
warnings.warn = warn
# Logging is the verbose for Gensim
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
#plt.style.available # Style options
plt.style.use('fivethirtyeight')
sns.set_context("talk")
%matplotlib inline
pd.options.display.max_rows = 99
pd.options.display.max_columns = 99
pd.options.display.max_colwidth = 99
#pd.describe_option('display') # Option settings
float_formatter = lambda x: "%.3f" % x if x>0 else "%.0f" % x
np.set_printoptions(formatter={'float_kind':float_formatter})
pd.set_option('display.float_format', float_formatter)
```
## Google BigQuery
```
%%time
path = "data/posts.pkl"
key = 'fakeKey38i7-4259.json'
if not os.path.isdir('data/'):
os.makedirs('data/')
# Set GOOGLE_APPLICATION_CREDENTIALS before querying
def bigQuery(QUERY, key=key):
"""
Instantiates a client using a key,
Requests a SQL query from the Big Query API,
Returns the queried table as a DataFrame
"""
client = bigquery.Client.from_service_account_json(key)
job_config = bigquery.QueryJobConfig()
job_config.use_legacy_sql = False
query_job = client.query(QUERY, job_config=job_config)
return query_job.result().to_dataframe()
# SQL query for Google BigQuery
QUERY = (
"""
SELECT created_utc, subreddit, author, domain, url, num_comments,
score, title, selftext, id, gilded, retrieved_on, over_18
FROM `fh-bigquery.reddit_posts.*`
WHERE _table_suffix IN ( '2016_06' )
AND LENGTH(selftext) > 550
AND LENGTH(title) > 15
AND LENGTH(title) < 345
AND score > 8
AND is_self = true
AND NOT subreddit IS NULL
AND NOT subreddit = 'de'
AND NOT subreddit = 'test'
AND NOT subreddit = 'tr'
AND NOT subreddit = 'A6XHE'
AND NOT subreddit = 'es'
AND NOT subreddit = 'removalbot'
AND NOT subreddit = 'tldr'
AND NOT selftext LIKE '[removed]'
AND NOT selftext LIKE '[deleted]'
;""")
#df = bigQuery(QUERY)
#df.to_pickle(path)
df = pd.read_pickle(path)
df.info(memory_usage='deep')
```
## Exploratory Data Analysis & Preprocessing
```
# Exploring data by length of .title or .selftext
df[[ True if 500 < len(x) < 800 else False for x in df.selftext ]].sample(1, replace=False)
%%time
run = False
path = '/home/User/data/gif'
# Run through various selftext lengths and save the plots of the distribution of the metric
# Gif visual after piecing all the frames together
while run==True:
for i in range(500,20000,769):
tempath = os.path.join(path, f"textlen{i}.png") # PEP498 requires python 3.6
print(tempath)
# Look at histogram of posts with len<i
cuts = [len(x) for x in df.selftext if len(x)<i]
# Save plot
plt.figure()
plt.hist(cuts, bins=30) #can change bins based on function of i
plt.savefig(tempath, dpi=120, format='png', bbox_inches='tight', pad_inches=0.1)
plt.close()
# Bin Settings
def binSize(lower, upper, buffer=.05):
bins = upper - lower
buffer = int(buffer*bins)
bins -= buffer
print('Lower Bound:', lower)
print('Upper Bound:', upper)
return bins, lower, upper
# Plotting
def plotHist(tmp, bins, title, xlabel, ylabel, l, u):
plt.figure(figsize=(10,6))
plt.hist(tmp, bins=bins)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.xlim(lower + l, upper + u)
print('\nLocal Max %s:' % xlabel, max(tmp))
print('Local Average %s:' % xlabel, int(np.mean(tmp)))
print('Local Median %s:' % xlabel, int(np.median(tmp)))
# Create the correct bin size
bins, lower, upper = binSize(lower=0, upper=175)
# Plot distribution of lower scores
tmp = df[[ True if lower <= x <= upper else False for x in df['score'] ]]['score']
plotHist(tmp=tmp, bins=bins, title='Lower Post Scores', xlabel='Scoring', ylabel='Frequency', l=5, u=5);
# Titles should be less than 300 characters
# Outliers are due to unicode translation
# Plot lengths of titles
tmp = [ len(x) for x in df.title ]
bins, lower, upper = binSize(lower=0, upper=300, buffer=-.09)
plotHist(tmp=tmp, bins=bins, title='Lengths of Titles', xlabel='Length', ylabel='Frequency', l=10, u=0);
# Slice lengths of texts and plot histogram
bins, lower, upper = binSize(lower=500, upper=5000, buffer=.011)
tmp = [len(x) for x in df.selftext if lower <= len(x) <= upper]
plotHist(tmp=tmp, bins=bins, title='Length of Self Posts Under 5k', xlabel='Length', ylabel='Frequency', l=10, u=0)
plt.ylim(0, 200);
# Anomalies could be attributed to bots or duplicate reposts
# Posts per Subreddit
tmp = df.groupby('subreddit')['id'].nunique().sort_values(ascending=False)
top = 100
s = sum(tmp)
print('Subreddits:', len(tmp))
print('Total Posts:', s)
print('Total Posts from Top %s:' % top, sum(tmp[:top]), ', %.3f of Total' % (sum(tmp[:top])/s))
print('Total Posts from Top 10:', sum(tmp[:10]), ', %.3f of Total' % (sum(tmp[:10])/s))
print('\nTop 10 Contributors:', tmp[:10])
plt.figure(figsize=(10,6))
plt.plot(tmp, 'go')
plt.xticks('')
plt.title('Top %s Subreddit Post Counts' % top)
plt.xlabel('Subreddits, Ranked')
plt.ylabel('Post Count')
plt.xlim(-2, top+1)
plt.ylim(0, 2650);
path1 = 'data/origin.pkl'
#path2 = 'data/grouped.pkl'
# Save important data
origin_df = df.loc[:,['created_utc', 'subreddit', 'author', 'title', 'selftext', 'id']] \
.copy().reset_index().rename(columns={"index": "position"})
print(origin_df.info())
origin_df.to_pickle(path1)
posts_df = origin_df.loc[:,['title', 'selftext']]
posts_df['text'] = posts_df.title + ' ' + df.selftext
#del origin_df
# To group the results later
def groupUserPosts(x):
''' Group users' id's by post '''
return pd.Series(dict(ids = ", ".join(x['id']),
text = ", ".join(x['text'])))
###df = posts_df.groupby('author').apply(groupUserPosts)
#df.to_pickle(path2)
df = posts_df.text.to_frame()
origin_df.sample(2).drop('author', axis=1)
%%time
def clean_text(df, text_field):
'''
Clean all the text data within a certain text column of the dataFrame.
'''
df[text_field] = df[text_field].str.replace(r"http\S+", " ")
df[text_field] = df[text_field].str.replace(r"&[a-z]{2,4};", "")
df[text_field] = df[text_field].str.replace("\\n", " ")
df[text_field] = df[text_field].str.replace(r"#f", "")
df[text_field] = df[text_field].str.replace(r"[\’\'\`\":]", "")
df[text_field] = df[text_field].str.replace(r"[^A-Za-z0-9]", " ")
df[text_field] = df[text_field].str.replace(r" +", " ")
df[text_field] = df[text_field].str.lower()
clean_text(df, 'text')
df.sample(3)
# For exploration of users
df[origin_df.author == '<Redacted>'][:3]
# User is a post summarizer and aggregator, added /r/tldr to the blocked list!
# Slice lengths of texts and plot histogram
bins, lower, upper = binSize(lower=500, upper=5000, buffer=.015)
tmp = [len(x) for x in df.text if lower <= len(x) <= upper]
plotHist(tmp=tmp, bins=bins, title='Cleaned - Length of Self Posts Under 5k',
xlabel='Lengths', ylabel='Frequency', l=0, u=0)
plt.ylim(0, 185);
# Download everything for nltk! ('all')
import nltk
nltk.download() # (Change config save path)
nltk.data.path.append('/home/User/data/')
from nltk.corpus import stopwords
# "stopeng" is our extended list of stopwords for use in the CountVectorizer
# I could spend days extending this list for fine tuning results
stopeng = stopwords.words('english')
stopeng.extend([x.replace("\'", "") for x in stopeng])
stopeng.extend(['nbsp', 'also', 'really', 'ive', 'even', 'jon', 'lot', 'could', 'many'])
stopeng = list(set(stopeng))
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# Count vectorization for LDA
cv = CountVectorizer(token_pattern='\\w{3,}', max_df=.30, min_df=.0001,
stop_words=stopeng, ngram_range=(1,1), lowercase=False,
dtype='uint8')
# Vectorizer object to generate term frequency-inverse document frequency matrix
tfidf = TfidfVectorizer(token_pattern='\\w{3,}', max_df=.30, min_df=.0001,
stop_words=stopeng, ngram_range=(1,1), lowercase=False,
sublinear_tf=True, smooth_idf=False, dtype='float32')
```
###### Tokenization is one of the most important steps in NLP; I will explain some of my parameter choices in the README. CountVectorizer was my preferred choice. I used these definitions to help me in the iterative process of building an unsupervised model.
###### The goal of using tf-idf instead of the raw frequencies of occurrence of a token in a given document is to scale down the impact of tokens that occur very frequently in a given corpus and that are hence empirically less informative than features that occur in a small fraction of the training corpus.
###### Smooth = False: The effect of adding “1” to the idf in the equation above is that terms with zero idf, i.e., terms that occur in all documents in a training set, will not be entirely ignored.
###### sublinear_tf = True: “l” (logarithmic), replaces tf with 1 + log(tf)
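###### As a small, self-contained illustration (toy numbers, not the Reddit corpus) of what those two switches change, the weight of a single term can be computed by hand; note that the real vectorizer also L2-normalizes each document, which this toy calculation skips.
```
import numpy as np

def tfidf_weight(tf, df, n_docs, sublinear=True, smooth=False):
    """Single-term tf-idf weight, roughly mirroring the vectorizer settings above."""
    tf_part = 1 + np.log(tf) if sublinear else tf          # sublinear_tf: tf -> 1 + ln(tf)
    num, den = (n_docs + 1, df + 1) if smooth else (n_docs, df)
    idf = np.log(num / den) + 1                            # smooth_idf=False: idf = ln(N/df) + 1
    return tf_part * idf

# A term that appears 50 times in one post but shows up in only 200 of ~147k posts
print(tfidf_weight(tf=50, df=200, n_docs=146996, sublinear=False))  # raw count dominates
print(tfidf_weight(tf=50, df=200, n_docs=146996, sublinear=True))   # damped by 1 + ln(tf)
```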
```
%%time
# Count & tf-idf vectorizer fits the tokenizer and transforms data into new matrix
cv_vecs = cv.fit_transform(df.text).transpose()
tf_vecs = tfidf.fit_transform(df.text).transpose()
pickle.dump(cv_vecs, open('data/cv_vecs.pkl', 'wb'))
# Checking the shape and size of the count vectorizer transformed matrix
# 47,317 terms
# 146996 documents
print("Sparse Shape:", cv_vecs.shape)
print('CV:', sys.getsizeof(cv_vecs))
print('Tf-Idf:', sys.getsizeof(tf_vecs))
# IFF using a subset can you store these in a Pandas DataFrame/
#tfidf_df = pd.DataFrame(tf_vecs.transpose().todense(), columns=[tfidf.get_feature_names()]).astype('float32')
#cv_df = pd.DataFrame(cv_vecs.transpose().todense(), columns=[cv.get_feature_names()]).astype('uint8')
#print(cv_df.info())
#print(tfidf_df.info())
#cv_description = cv_df.describe().T
#tfidf_description = tfidf_df.describe().T
#tfidf_df.sum().sort_values(ascending=False)
# Explore the document-term vectors
#cv_description.sort_values(by='max', ascending=False)
#tfidf_description.sort_values(by='mean', ascending=False)
```
## Singular Value Decomposition (SVD)
```
#from sklearn.utils.extmath import randomized_svd
# Randomized SVD for extracting the full decomposition
#U, Sigma, VT = randomized_svd(tf_vecs, n_components=8, random_state=42)
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import Normalizer
def Trunc_SVD(vectorized, n_components=300, iterations=1, normalize=False, random_state=42):
"""
Performs LSA/LSI on a sparse document term matrix, returns a fitted, transformed, (normalized) LSA object
"""
# Already own the vectorized data for LSA, just transpose it back to normal
vecs_lsa = vectorized.T
# Initialize SVD object as LSA
lsa = TruncatedSVD(n_components=n_components, n_iter=iterations, algorithm='randomized', random_state=random_state)
dtm_lsa = lsa.fit(vecs_lsa)
print("Explained Variance - LSA {}:".format(n_components), dtm_lsa.explained_variance_ratio_.sum())
if normalize:
dtm_lsa_t = lsa.fit_transform(vecs_lsa)
dtm_lsa_t = Normalizer(copy=False).fit_transform(dtm_lsa_t)
return dtm_lsa, dtm_lsa_t
return dtm_lsa
def plot_SVD(lsa, title, level=None):
"""
Plots the singular values of an LSA object
"""
plt.figure(num=1, figsize=(15,10))
plt.suptitle(title, fontsize=22, x=.55, y=.45, horizontalalignment='left')
plt.subplot(221)
plt.title('Explained Variance by each Singular Value')
plt.plot(lsa.explained_variance_[:level])
plt.subplot(222)
plt.title('Explained Variance Ratio by each Singular Value')
plt.plot(lsa.explained_variance_ratio_[:level])
plt.subplot(223)
plt.title("Singular Values ('Components')")
plt.plot(lsa.singular_values_[:level])
plt.show()
%%time
components = 350
cv_dtm_lsa = Trunc_SVD(cv_vecs, n_components=components, iterations=5, normalize=False)
plot_SVD(cv_dtm_lsa, title='Count Vectorizer', level=25)
tf_dtm_lsa = Trunc_SVD(tf_vecs, n_components=components, iterations=5, normalize=False)
plot_SVD(tf_dtm_lsa, title='Term Frequency - \nInverse Document Frequency', level=25)
# Numerically confirming the elbow in the above plot
print('SVD Value| CV | TFIDF')
print('Top 2: ',round(sum(list(cv_dtm_lsa.explained_variance_ratio_[:2])),3),round(sum(list(tf_dtm_lsa.explained_variance_ratio_[:2])),3))
print('Top 3: ',round(sum(list(cv_dtm_lsa.explained_variance_ratio_[:3])),3),round(sum(list(tf_dtm_lsa.explained_variance_ratio_[:3])),3))
print('Top 4: ',round(sum(list(cv_dtm_lsa.explained_variance_ratio_[:4])),3),round(sum(list(tf_dtm_lsa.explained_variance_ratio_[:4])),3))
print('Top 5: ',round(sum(list(cv_dtm_lsa.explained_variance_ratio_[:5])),3),round(sum(list(tf_dtm_lsa.explained_variance_ratio_[:5])),3))
print('Top 6: ',round(sum(list(cv_dtm_lsa.explained_variance_ratio_[:6])),3),round(sum(list(tf_dtm_lsa.explained_variance_ratio_[:6])),3))
print('Top 7: ',round(sum(list(cv_dtm_lsa.explained_variance_ratio_[:7])),3),round(sum(list(tf_dtm_lsa.explained_variance_ratio_[:7])),3))
print('Top 8: ',round(sum(list(cv_dtm_lsa.explained_variance_ratio_[:8])),3),round(sum(list(tf_dtm_lsa.explained_variance_ratio_[:8])),3))
print('Top 16:\t',round(sum(list(cv_dtm_lsa.explained_variance_ratio_[:16])),3),round(sum(list(tf_dtm_lsa.explained_variance_ratio_[:16])),3))
print('Top 32:\t',round(sum(list(cv_dtm_lsa.explained_variance_ratio_[:32])),3),round(sum(list(tf_dtm_lsa.explained_variance_ratio_[:32])),3))
print('Top 64:\t',round(sum(list(cv_dtm_lsa.explained_variance_ratio_[:64])),3),round(sum(list(tf_dtm_lsa.explained_variance_ratio_[:64])),3))
print('Top 128:',round(sum(list(cv_dtm_lsa.explained_variance_ratio_[:128])),3),round(sum(list(tf_dtm_lsa.explained_variance_ratio_[:128])),3))
print('Top 256:',round(sum(list(cv_dtm_lsa.explained_variance_ratio_[:256])),3),round(sum(list(tf_dtm_lsa.explained_variance_ratio_[:256])),3))
print('Top 350:',round(sum(list(cv_dtm_lsa.explained_variance_ratio_[:350])),3),round(sum(list(tf_dtm_lsa.explained_variance_ratio_[:350])),3))
# Close look at the elbow plots
def elbow(dtm_lsa):
evr = dtm_lsa.explained_variance_ratio_[:20]
print("Explained Variance Ratio (EVR):\n", evr)
print("Difference in EVR (start 3):\n", np.diff(evr[2:]))
plt.figure()
plt.plot(-np.diff(evr[2:]))
    plt.xticks(range(len(evr) - 3), range(3, len(evr)))
plt.suptitle('Difference in Explained Variance Ratio', fontsize=15);
plt.title('Start from 3, moves up to 20');
# Count Vectorizer
elbow(cv_dtm_lsa)
# Tf-Idf
elbow(tf_dtm_lsa)
```
###### The count vectorizer seems like it will be more foolproof, so I will use cv for my study. 8 might be a good cutoff value for the number of components to keep in dimensionality reduction; I will try to confirm this later with KMeans clustering. The intuition is that the slope after the 8th element is significantly different from the first elements. Keeping just 2 components would not be sufficient for clustering, because we want to retain as much information as we can while still cutting the dimensions down to some kind of human-readable latent concept space.
###### I am going to try out two quick methods before clustering and moving on to my main goal of topic modeling with LDA.
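###### One way to make that elbow choice reproducible rather than purely visual is to take the first component after which the marginal explained-variance gain drops below some fraction of the largest gain; the sketch below does that (the 5% threshold is an arbitrary illustrative choice, not a tuned value).
```
# Sketch: pick a cutoff where the marginal explained-variance gain flattens out
evr = cv_dtm_lsa.explained_variance_ratio_

def elbow_cutoff(evr, rel_threshold=0.05):
    gains = -np.diff(evr)                              # drop in EVR from one component to the next
    flat = np.where(gains < rel_threshold * gains[0])[0]
    return int(flat[0]) + 1 if len(flat) else len(evr)

print('Suggested number of components:', elbow_cutoff(evr))
```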
## Latent Semantic Analysis (LSA)
```
%%time
from gensim import corpora, matutils, models
# Convert sparse matrix of term-doc counts to a gensim corpus
cv_corpus = matutils.Sparse2Corpus(cv_vecs)
pickle.dump(cv_corpus, open('data/cv_corpus.pkl', 'wb'))
# Maps index to term
id2word = dict((v, k) for k, v in cv.vocabulary_.items())
# This is for Python 3, Need this for something at the end
id2word = corpora.Dictionary.from_corpus(cv_corpus, id2word=id2word)
pickle.dump(id2word, open('data/id2word.pkl', 'wb'))
# Fitting an LSI model
lsi = models.LsiModel(corpus=cv_corpus, id2word=id2word, num_topics=10)
%%time
# Retrieve vectors for the original cv corpus in the LS space ("transform" in sklearn)
lsi_corpus = lsi[cv_corpus]
# Dump the resulting document vectors into a list
doc_vecs = [doc for doc in lsi_corpus]
doc_vecs[0][:5]
# Sum of each document's topic weights (LSI coordinates, so they need not sum to 1)
for i in range(5):
    print(sum(v for _, v in doc_vecs[i]))
```
## <a id='sim'></a> Similarity Scoring
```
from gensim import similarities
# Create an index transformer that calculates similarity based on our space
index = similarities.MatrixSimilarity(doc_vecs, num_features=300)
# Return the sorted list of cosine similarities to the docu document
docu = 5 # Change docu as needed
sims = sorted(enumerate(index[doc_vecs[docu]]), key=lambda item: -item[1])
np.r_[sims[:10] , sims[-10:]]
# Viewing similarity of top documents
top = 1
for sim_doc_id, sim_score in sims[:top + 1]:
print("\nScore:", sim_score)
print("Document Text:\n", df.text[sim_doc_id])
```
###### The metrics look artificially high and do not match well for each document. The similarity method could be used to optimize keyword search if we were trying to expand the reach of a certain demographic using these rankings. The next step would be to improve on this method with word2vec or a better LSI model.
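###### For completeness, that keyword-search idea would look roughly like this with the objects already built above (`id2word`, `lsi`, `index`, `df`); the query string itself is just an assumed example.
```
# Sketch: rank documents against an ad-hoc query using the existing LSI space
query = "student loan debt advice"                  # assumed example query
query_bow = id2word.doc2bow(query.lower().split())  # bag-of-words in the CV vocabulary
query_lsi = lsi[query_bow]                          # fold the query into the topic space
query_sims = sorted(enumerate(index[query_lsi]), key=lambda item: -item[1])
for doc_id, score in query_sims[:5]:
    print(round(float(score), 3), df.text[doc_id][:80])
```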
# <a id='km'></a>KMeans Clustering
```
lsi_red = matutils.corpus2dense(lsi_corpus, num_terms=300).transpose()
print('Reduced LS space shape:', lsi_red.shape)
print('Reduced LS space size in bytes:', sys.getsizeof(lsi_red))
# Taking a subset for Kmeans due to memory dropout
lsi_red_sub = lsi_red.copy()
np.random.shuffle(lsi_red_sub)
lsi_red_sub = lsi_red_sub[:30000]
lsi_red_sub = Normalizer(copy=False).fit_transform(lsi_red_sub) # Normalized for the Euclidean metric
print('Reduced LS space subset shape:', lsi_red_sub.shape)
print('Reduced LS space subset size in bytes:', sys.getsizeof(lsi_red_sub))
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
# Calculating Silhouette coefficients and Sum of Squared Errors
def silhouette_co(start, stop, lsi_red_sub, random_state=42, n_jobs=-2, verbose=4):
"""
Input a normalized subset of a reduced dense latent semantic matrix
Returns list of scores for plotting
"""
SSEs = []
Sil_coefs = []
try_clusters = range(start, stop)
for k in try_clusters:
km = KMeans(n_clusters=k, random_state=random_state, n_jobs=n_jobs)
km.fit(lsi_red_sub)
labels = km.labels_
Sil_coefs.append(silhouette_score(lsi_red_sub, labels, metric='euclidean'))
SSEs.append(km.inertia_)
if k%verbose==0:
print(k)
return SSEs, Sil_coefs, try_clusters
%%time
SSEs, Sil_coefs, try_clusters = silhouette_co(start=2, stop=40, lsi_red_sub=lsi_red_sub)
def plot_sil(try_clusters, Sil_coefs, SSEs):
""" Function for visualizing/ finding the best clustering point """
# Plot Silhouette scores
fig, (ax1, ax2) = plt.subplots(1,2, figsize=(15,5), sharex=True, dpi=200)
ax1.plot(try_clusters, Sil_coefs)
    ax1.set_title('Silhouette of Clusters')
ax1.set_xlabel('Number of Clusters')
ax1.set_ylabel('Silhouette Coefficient')
# Plot errors
ax2.plot(try_clusters, SSEs)
    ax2.set_title("Cluster's Error")
ax2.set_xlabel('Number of Clusters')
ax2.set_ylabel('SSE');
plot_sil(try_clusters=try_clusters, Sil_coefs=Sil_coefs, SSEs=SSEs)
```
###### This suggests that there aren't meaningful clusters in the normalized LS300 space. To test whether 300 dimensions is too large, I will try clustering again with a reduced input.
```
# Use num_terms=10 to match the 10 LSI topics actually computed (avoids IndexError: index 10)
lsi_red5 = matutils.corpus2dense(lsi_corpus, num_terms=10).transpose()
print('Reduced LSI space shape:', lsi_red5.shape)
print('Reduced LS space subset size in bytes:', sys.getsizeof(lsi_red5))
# Taking a subset for Kmeans due to memory dropout
lsi_red_sub5 = lsi_red5.copy()
np.random.shuffle(lsi_red_sub5)
lsi_red_sub5 = lsi_red_sub5[:5000]
lsi_red_sub5 = Normalizer(copy=False).fit_transform(lsi_red_sub5) # Normalized for the Euclidean metric
print('Reduced LSI space subset shape:', lsi_red_sub5.shape)
print('Reduced LS space subset size in bytes:', sys.getsizeof(lsi_red_sub5))
%%time
SSEs, Sil_coefs, try_clusters = silhouette_co(start=2, stop=40, lsi_red_sub=lsi_red_sub5)
plot_sil(try_clusters=try_clusters, Sil_coefs=Sil_coefs, SSEs=SSEs)
```
###### Due to project deadlines, I was not able to complete this method but I wanted to preserve the effort and document the process for later use. I will move on to LDA.
```
# Cluster with the best results
#kmeans = KMeans(n_clusters=20, n_jobs=-2)
#lsi_clusters = kmeans.fit_predict(lsi_red)
# Take a look at the first few cluster assignments (requires running the KMeans fit above)
print(lsi_clusters[0:15])
df.text[0:2]
from sklearn.metrics import silhouette_samples, silhouette_score
# Validating cluster performance
# Select range around best result, plot the silhouette distributions for each cluster
# Use the normalized LSA subset built above as the clustering input
X = lsi_red_sub
for k in range(14,17):
    plt.figure(dpi=120, figsize=(8,6))
    ax1 = plt.gca()
    km = KMeans(n_clusters=k, random_state=1)
    km.fit(X)
    labels = km.labels_
    silhouette_avg = silhouette_score(X, labels)
    print("For n_clusters =", k,
          "The average silhouette_score is :", silhouette_avg)
    # Compute the silhouette scores for each sample
    sample_silhouette_values = silhouette_samples(X, labels)
y_lower = 100
for i in range(k):
# Aggregate the silhouette scores for samples belonging to cluster i
ith_cluster_silhouette_values = sample_silhouette_values[labels == i]
#Sort
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
        color = plt.cm.nipy_spectral(float(i) / k)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
```
## <a id='lda'></a> Latent Dirichlet Allocation (LDA)
```
%%time
run = False
passes = 85
if run==True:
lda = models.LdaMulticore(corpus=cv_corpus, num_topics=15, id2word=id2word, passes=passes,
workers=13, random_state=42, eval_every=None, chunksize=6000)
# Save model after your last run, or continue to update LDA
#pickle.dump(lda, open('data/lda_gensim.pkl', 'wb'))
# Gensim save
#lda.save('data/gensim_lda.model')
lda = models.LdaModel.load('data/gensim_lda.model')
%%time
# Transform the docs from the word space to the topic space (like "transform" in sklearn)
lda_corpus = lda[cv_corpus]
# Store the documents' topic vectors in a list so we can take a peak
lda_docs = [doc for doc in lda_corpus]
# Review Dirichlet distribution for documents
lda_docs[25000]
# Manually review the document to see if it makes sense!
# Look back at the topics that it matches with to confirm the result!
df.iloc[25000]
#bow = df.iloc[1,0].split()
# Print topic probability distribution for a document
#print(lda[bow]) #Values unpack error
# Given a chunk of sparse document vectors, estimate gamma:
# (parameters controlling the topic weights) for each document in the chunk.
#lda.inference(bow) #Not enough values
# Makeup of each topic! Interpretable!
# The d3 visualization below is far better for looking at the interpretations.
lda.print_topics(num_words=10, num_topics=1)
```
## <a id='py'></a> pyLDAvis
```
# For quickstart, we can just jump straight to results
import pickle
from gensim import models
def loadingPickles():
id2word = pickle.load(open('data/id2word.pkl','rb'))
cv_vecs = pickle.load(open('data/cv_vecs.pkl','rb'))
cv_corpus = pickle.load(open('data/cv_corpus.pkl','rb'))
lda = models.LdaModel.load('data/gensim_lda.model')
return id2word, cv_vecs, cv_corpus, lda
import pyLDAvis.gensim
import gensim
# Enables visualization in jupyter notebook
pyLDAvis.enable_notebook()
# Prepare the visualization
# Change multidimensional scaling function via mds parameter
# Options are tsne, mmds, pcoa
# cv_corpus or cv_vecs work equally
id2word, _, cv_corpus, lda = loadingPickles()
viz = pyLDAvis.gensim.prepare(topic_model=lda, corpus=cv_corpus, dictionary=id2word, mds='mmds')
# Save the html for sharing!
pyLDAvis.save_html(viz,'data/viz.html')
# Interact! Saliency is the most important metric that changes the story of each topic.
pyLDAvis.display(viz)
```
# There you have it. There is a ton of great information right here that I will conclude upon in the README and the slides on my github.
###### In it I will discuss what I could do with this information. I did not end up using groupUserPosts but I could create user profiles based on the aggregate of their document topic distributions. I believe this is a great start to understanding NLP and how it can be used. I would consider working on this again but with more technologies needed for big data.
|
github_jupyter
|
```
from keras.layers import Input, Dense
from keras.models import Model
from keras.layers import Convolution2D, MaxPooling2D, Reshape, BatchNormalization
from keras.layers import Activation, Dropout, Flatten, Dense
def default_categorical():
img_in = Input(shape=(120, 160, 3), name='img_in') # First layer, input layer, Shape comes from camera.py resolution, RGB
x = img_in
    x = Convolution2D(24, (5,5), strides=(2,2), activation='relu', name='conv1')(x)       # 24 features, 5 pixel x 5 pixel kernel (convolution, feature) window, 2wx2h stride, relu activation
    x = Convolution2D(32, (5,5), strides=(2,2), activation='relu', name='conv2')(x)       # 32 features, 5px5p kernel window, 2wx2h stride, relu activation
    x = Convolution2D(64, (5,5), strides=(2,2), activation='relu', name='conv3')(x)       # 64 features, 5px5p kernel window, 2wx2h stride, relu
    x = Convolution2D(64, (3,3), strides=(2,2), activation='relu', name='conv4')(x)       # 64 features, 3px3p kernel window, 2wx2h stride, relu
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu', name='conv5')(x)       # 64 features, 3px3p kernel window, 1wx1h stride, relu
    # Possibly add MaxPooling (would make it less sensitive to position in the image). The camera angle is fixed, so it may not be needed
x = Flatten(name='flattened')(x) # Flatten to 1D (Fully connected)
x = Dense(100, activation='relu', name = 'dense1')(x) # Classify the data into 100 features, make all negatives 0
x = Dropout(.1)(x) # Randomly drop out (turn off) 10% of the neurons (Prevent overfitting)
x = Dense(50, activation='relu', name = 'dense2')(x) # Classify the data into 50 features, make all negatives 0
x = Dropout(.1)(x) # Randomly drop out 10% of the neurons (Prevent overfitting)
#categorical output of the angle
angle_out = Dense(15, activation='softmax', name='angle_out')(x) # Connect every input with every output and output 15 hidden units. Use Softmax to give percentage. 15 categories and find best one based off percentage 0.0-1.0
#continous output of throttle
throttle_out = Dense(1, activation='relu', name='throttle_out')(x) # Reduce to 1 number, Positive number only
model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])
return model
model = default_categorical()
model.load_weights('weights.h5')
img_in = Input(shape=(120, 160, 3), name='img_in')
x = img_in
x = Convolution2D(24, (5,5), strides=(2,2), activation='relu', name='conv1')(x)
x = Convolution2D(32, (5,5), strides=(2,2), activation='relu', name='conv2')(x)
x = Convolution2D(64, (5,5), strides=(2,2), activation='relu', name='conv3')(x)
x = Convolution2D(64, (3,3), strides=(2,2), activation='relu', name='conv4')(x)
conv_5 = Convolution2D(64, (3,3), strides=(1,1), activation='relu', name='conv5')(x)
convolution_part = Model(inputs=[img_in], outputs=[conv_5])
for layer_num in ('1', '2', '3', '4', '5'):
convolution_part.get_layer('conv' + layer_num).set_weights(model.get_layer('conv' + layer_num).get_weights())
from keras import backend as K
inp = convolution_part.input # input placeholder
outputs = [layer.output for layer in convolution_part.layers[1:]] # all layer outputs
functor = K.function([inp], outputs)
import tensorflow as tf
import numpy as np
import pdb
kernel_3x3 = tf.constant(np.array([
[[[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]]]
]), tf.float32)
kernel_5x5 = tf.constant(np.array([
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]]
]), tf.float32)
layers_kernels = {5: kernel_3x3, 4: kernel_3x3, 3: kernel_5x5, 2: kernel_5x5, 1: kernel_5x5}
layers_strides = {5: [1, 1, 1, 1], 4: [1, 2, 2, 1], 3: [1, 2, 2, 1], 2: [1, 2, 2, 1], 1: [1, 2, 2, 1]}
def compute_visualisation_mask(img):
# pdb.set_trace()
activations = functor([np.array([img])])
activations = [np.reshape(img, (1, img.shape[0], img.shape[1], img.shape[2]))] + activations
upscaled_activation = np.ones((3, 6))
for layer in [5, 4, 3, 2, 1]:
averaged_activation = np.mean(activations[layer], axis=3).squeeze(axis=0) * upscaled_activation
output_shape = (activations[layer - 1].shape[1], activations[layer - 1].shape[2])
x = tf.constant(
np.reshape(averaged_activation, (1,averaged_activation.shape[0],averaged_activation.shape[1],1)),
tf.float32
)
conv = tf.nn.conv2d_transpose(
x, layers_kernels[layer],
output_shape=(1,output_shape[0],output_shape[1], 1),
strides=layers_strides[layer],
padding='VALID'
)
with tf.Session() as session:
result = session.run(conv)
upscaled_activation = np.reshape(result, output_shape)
final_visualisation_mask = upscaled_activation
return (final_visualisation_mask - np.min(final_visualisation_mask))/(np.max(final_visualisation_mask) - np.min(final_visualisation_mask))
import cv2
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib import animation
from IPython.display import display, HTML
def plot_movie_mp4(image_array):
dpi = 72.0
xpixels, ypixels = image_array[0].shape[0], image_array[0].shape[1]
fig = plt.figure(figsize=(ypixels/dpi, xpixels/dpi), dpi=dpi)
im = plt.figimage(image_array[0])
def animate(i):
im.set_array(image_array[i])
return (im,)
anim = animation.FuncAnimation(fig, animate, frames=len(image_array))
display(HTML(anim.to_html5_video()))
from glob import iglob
imgs = []
alpha = 0.004
beta = 1.0 - alpha
counter = 0
for path in sorted(iglob('imgs/*.jpg')):
img = cv2.imread(path)
salient_mask = compute_visualisation_mask(img)
salient_mask_stacked = np.dstack((salient_mask,salient_mask))
salient_mask_stacked = np.dstack((salient_mask_stacked,salient_mask))
blend = cv2.addWeighted(img.astype('float32'), alpha, salient_mask_stacked, beta, 0.0)
imgs.append(blend)
counter += 1
if counter >= 400:
break
plot_movie_mp4(imgs)
```
|
github_jupyter
|
# Notebook to visualize location data
```
import csv
# count the number of Starbucks in DC
with open('starbucks.csv') as file:
csvinput = csv.reader(file)
acc = 0
for record in csvinput:
if 'DC' in record[3]:
acc += 1
print( acc )
def parse_locations(csv_iterator,state=''):
""" strip out long/lat and convert to a list of floating point 2-tuples --
optionally, filter by a specified state """
return [ ( float(row[0]), float(row[1])) for row in csv_iterator
if state in row[3]]
def get_locations(filename, state=''):
""" read a list of longitude/latitude pairs from a csv file,
optionally, filter by a specified state """
with open(filename, 'r') as input_file:
csvinput = csv.reader(input_file)
location_data = parse_locations(csvinput,state)
return location_data
# get the data from all starbucks locations
starbucks_locations = get_locations('starbucks.csv')
# get the data from burger locations
burger_locations = get_locations('burgerking.csv') + \
get_locations('mcdonalds.csv') + \
get_locations('wendys.csv')
# look at the first few (10) data points of each
for n in range(10):
print( starbucks_locations[n] )
print()
for n in range(10):
print( burger_locations[n] )
# a common, powerful plotting library
import matplotlib.pyplot as plt
# set figure size
plt.figure(figsize=(12, 9))
# get the axes of the plot and set them to be equal-aspect and limited (specify bounds) by data
ax = plt.axes()
ax.set_aspect('equal', 'datalim')
# plot the data
plt.scatter(*zip(*starbucks_locations), s=1)
plt.legend(["Starbucks"])
# jupyter automatically plots this inline. On the console, you need to invoke plt.show()
# FYI: In that case, execution halts until you close the window it opens.
# set figure size
plt.figure(figsize=(12, 9))
# get the axes of the plot and set them to be equal-aspect and limited (specify bounds) by data
ax = plt.axes()
ax.set_aspect('equal', 'datalim')
# plot the data
plt.scatter(*zip(*burger_locations), color='green', s=1)
plt.legend(["Burgers"])
lat, lon = zip(*get_locations('burgerking.csv'))
min_lat = min(lat)
max_lat = max(lat)
min_lon = min(lon)
max_lon = max(lon)
lat, lon = zip(*get_locations('mcdonalds.csv'))
min_lat = min(min_lat,min(lat))
max_lat = max(max_lat,max(lat))
min_lon = min(min_lon,min(lon))
max_lon = max(max_lon,max(lon))
lat, lon = zip(*get_locations('wendys.csv'))
min_lat = min(min_lat,min(lat))
max_lat = max(max_lat,max(lat))
min_lon = min(min_lon,min(lon))
max_lon = max(max_lon,max(lon))
lat, lon = zip(*get_locations('pizzahut.csv'))
min_lat = min(min_lat,min(lat))
max_lat = max(max_lat,max(lat))
min_lon = min(min_lon,min(lon))
max_lon = max(max_lon,max(lon))
# set figure size
fig = plt.figure(figsize=(12, 9))
#fig = plt.figure()
plt.subplot(2,2,1)
plt.scatter(*zip(*get_locations('burgerking.csv')), color='black', s=1, alpha=0.2)
plt.xlim(min_lat-5,max_lat+5)
plt.ylim(min_lon-5,max_lon+5)
plt.gca().set_aspect('equal')
plt.subplot(2,2,2)
plt.scatter(*zip(*get_locations('mcdonalds.csv')), color='black', s=1, alpha=0.2)
plt.xlim(min_lat-5,max_lat+5)
plt.ylim(min_lon-5,max_lon+5)
plt.gca().set_aspect('equal')
plt.subplot(2,2,3)
plt.scatter(*zip(*get_locations('wendys.csv')), color='black', s=1, alpha=0.2)
plt.xlim(min_lat-5,max_lat+5)
plt.ylim(min_lon-5,max_lon+5)
plt.gca().set_aspect('equal')
plt.subplot(2,2,4)
plt.scatter(*zip(*get_locations('pizzahut.csv')), color='black', s=1, alpha=0.2)
plt.xlim(min_lat-5,max_lat+5)
plt.ylim(min_lon-5,max_lon+5)
plt.gca().set_aspect('equal')
#plt.scatter(*zip(*get_locations('dollar-tree.csv')), color='black', s=1, alpha=0.2)
# get the starbucks in DC
starbucks_dc_locations = get_locations('starbucks.csv', state='DC')
burger_dc_locations = get_locations('burgerking.csv', state='DC') + \
get_locations('mcdonalds.csv', state='DC') + \
get_locations('wendys.csv', state='DC')
# show the first 10 locations of each:
for n in range(10):
print( starbucks_dc_locations[n] )
print()
for n in range(min(10,len(burger_dc_locations))):
print( burger_dc_locations[n] )
# set figure size
plt.figure(figsize=(12, 9))
# get the axes of the plot and set them to be equal-aspect and limited by data
ax = plt.axes()
ax.set_aspect('equal', 'datalim')
# plot the data
plt.scatter(*zip(*starbucks_dc_locations))
plt.scatter(*zip(*burger_dc_locations), color='green')
# We also want to plot the DC boundaries, so we have a better idea where these things are
# the data is contained in DC.txt
# let's inspect it. Observe the format
with open('DC.txt') as file:
for line in file:
print(line,end='') # lines already end with a newline so don't print another
with open('DC.txt') as file:
# get the lower left and upper right coords for the bounding box
ll_long, ll_lat = map(float, next(file).split())
ur_long, ur_lat = map(float, next(file).split())
# get the number of regions
num_records = int(next(file))
# there better just be one
assert num_records == 1
# then a blank line
next(file)
# Title of "county"
county_name = next(file).rstrip() # removes newline at end
# "State" county resides in
state_name = next(file).rstrip()
# this is supposed to be DC
assert state_name == "DC"
# number of points to expect
num_pairs = int(next(file))
dc_boundary = [ tuple(map(float,next(file).split())) for n in range(num_pairs)]
dc_boundary
# add the beginning to the end so that it closes up
dc_boundary.append(dc_boundary[0])
# draw it!
ax = plt.axes()
ax.set_aspect('equal', 'datalim')
plt.plot(*zip(*dc_boundary))
# draw both the starbucks location and DC boundary together
plt.figure(figsize=(12, 9))
ax = plt.axes()
ax.set_aspect('equal', 'datalim')
plt.scatter(*zip(*starbucks_dc_locations))
plt.scatter(*zip(*burger_dc_locations), color='green')
plt.plot(*zip(*dc_boundary))
# draw both the starbucks location and DC boundary together
plt.figure(figsize=(12, 9))
ax = plt.axes()
ax.set_aspect('equal', 'datalim')
plt.scatter(*zip(*get_locations('burgerking.csv', state='DC')), color='red')
plt.scatter(*zip(*get_locations('mcdonalds.csv', state='DC')), color='green')
plt.scatter(*zip(*get_locations('wendys.csv', state='DC')), color='blue')
plt.scatter(*zip(*get_locations('pizzahut.csv', state='DC')), color='yellow')
plt.scatter(*zip(*get_locations('dollar-tree.csv', state='DC')), color='black')
plt.plot(*zip(*dc_boundary))
```
### But where's AU?
```
# draw both the starbucks location and DC boundary together
plt.figure(figsize=(12, 9))
ax = plt.axes()
ax.set_aspect('equal', 'datalim')
plt.scatter(*zip(*starbucks_dc_locations))
plt.scatter(*zip(*burger_dc_locations), color='green')
plt.plot(*zip(*dc_boundary))
# add a red dot right over Anderson
plt.scatter([-77.0897511],[38.9363019],color='red')
from ipyleaflet import Map, basemaps, basemap_to_tiles, Marker, CircleMarker
m = Map(layers=(basemap_to_tiles(basemaps.OpenStreetMap.HOT), ),
center=(38.898082, -77.036696),
zoom=11)
# marker for AU
marker = Marker(location=(38.937831, -77.088852), radius=2, color='green')
m.add_layer(marker)
for (long,lat) in starbucks_dc_locations:
marker = CircleMarker(location=(lat,long), radius=1, color='steelblue')
m.add_layer(marker);
for (long,lat) in burger_dc_locations:
marker = CircleMarker(location=(lat,long), radius=1, color='green')
m.add_layer(marker);
m
```
|
github_jupyter
|
# Softmax Regression with TensorFlow
TensorFlow has become an extremely popular distributed machine-learning framework in recent years. I had long wanted to learn it but kept getting sidetracked. This semester I happened to take the course "Artificial Neural Networks", so I finally had to get to grips with it. At first I followed the book's tutorial and set up the environment through Anaconda; TensorFlow installed fine that way, but it was a real pain to use. In the end I uninstalled Anaconda and installed it directly with `pip install tensorflow`, only to find that my system Python was version 3.6.3, which did not seem to be supported, so I had to install another Python version to get everything working. Enough complaining; let's formally start the TensorFlow journey with a softmax-based handwritten digit recognition example.
Softmax is a very common way of post-processing model outputs: it normalizes them so that they can be read as probabilities, which is extremely useful. For details, see this Zhihu question: [What are the characteristics and purpose of the Softmax function?](https://www.zhihu.com/question/23765351)
## The Dataset
This example uses the `mnist` dataset, which is very well known in machine learning. Let's first get familiar with it; TensorFlow can download and load it automatically. After fetching the data we check the size of the training set, and since this softmax regression trains on the handwritten images in mnist, we also plot a few of them with matplotlib to get an intuitive feel for the data.
```
from tensorflow.examples.tutorials.mnist import input_data
import os
import matplotlib.pyplot as plt
import numpy as np
os.environ["TF_CPP_MIN_LOG_LEVEL"]='3'  # suppress TensorFlow warning output
# Load the mnist dataset; one_hot=True encodes the class labels as one-hot vectors, which is convenient for softmax
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print("Training set size: {0}".format(mnist.train.images.shape))
fig = plt.figure("Data preview")
for k in range(3):
result = []
temp = []
    img = mnist.train.images[k]  # get the k-th image: a 28*28 picture flattened into a 784-dimensional vector
for i in range(img.shape[0]):
temp.append(img[i])
if (i + 1) % 28 == 0:
result.append(temp)
temp = []
    img = np.matrix(result, dtype=float)  # reassemble the image in matrix form
ax = fig.add_subplot(130 + k + 1)
ax.imshow(img)
plt.show()
```
From the output above we can see that the training set has shape 55000x784. Each row vector is the one-dimensional flattening of a 28x28 image; although pixel positions carry a great deal of information in image recognition, we will not worry about that here and simply work with the flattened vectors. The first three images of the dataset are plotted above with matplotlib; next we use softmax regression to build a basic handwritten digit recognizer.
## Softmax Regression
Here is a brief overview of softmax regression. It is essentially a linear model whose output layer is passed through the softmax function, and it is trained with classic stochastic gradient descent. Its decision function has the form:
$$f(x)=wx+b$$
**Note: softmax regression handles multi-class problems, so all variables in the formula above are matrices or vectors.**
The model output f(x) is not yet the final output; it still has to be passed through the softmax function, which has the following form:
$$softmax(x)=\frac{exp(x_{i})}{\sum_{j}^{n}exp(x_{j})}$$
After this transformation the model output can be read as the probability of the input belonging to each class label. Softmax has many other advantages as well, the most important being the convenience it brings to the loss function; we will not list them all here.
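As a quick numeric check (the logits are arbitrary toy values), a few lines of NumPy show how softmax turns raw scores into a probability vector that sums to 1:
```
import numpy as np

def softmax(z):
    z = z - np.max(z)        # subtract the max for numerical stability
    e = np.exp(z)
    return e / e.sum()

logits = np.array([2.0, 1.0, 0.1])   # arbitrary example scores
print(softmax(logits))                # ~ [0.659, 0.242, 0.099], which sums to 1
```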
The loss function of softmax regression is the cross-entropy:
$$H_{y^{'}}(y)=-\sum y_{i}^{'}ln(y_{i})$$
Finally, let us derive the stochastic gradient descent update for this loss under the softmax function. TensorFlow does this for us, but as the people writing the algorithm it is still worth understanding the details. First we need the gradient of the loss with respect to `w`:
$$\frac{\partial H_{y^{'}}(y)}{\partial w}=\frac{\partial \left(-\sum y_{i}^{'}ln(y_{i})\right)}{\partial w}=\frac{\partial \left(-y\,ln(softmax(xw + b))\right)}{\partial w}$$
This derivative is somewhat involved, so we apply the chain rule:
$$\frac{\partial Loss}{\partial w}=\frac{\partial Loss}{\partial\, softmax(xw + b)}\frac{\partial\, softmax(xw + b)}{\partial (xw + b)}\frac{\partial (xw + b)}{\partial w}$$
Each factor in this chain is straightforward: the first and last are easy to obtain, and the key is the middle one. Here we simply state the derivative of the softmax function:
$$\frac{\partial\, softmax(xw + b)}{\partial (xw + b)}=softmax(xw + b)(1 - softmax(xw + b))$$
Since the first and last factors evaluate to:
$$\frac{\partial Loss}{\partial\, softmax(xw + b)}=\frac{\partial \left(-y\,ln(softmax(xw + b))\right)}{\partial\, softmax(xw + b)}=\frac{-y}{softmax(xw + b)}$$
$$\frac{\partial (xw + b)}{\partial w}=x$$
we therefore obtain:
$$\frac{\partial \left(-y\,ln(softmax(xw + b))\right)}{\partial w}=y(softmax(xw + b) - 1)x$$
We can then solve for `w` iteratively with the stochastic gradient descent update:
$$w=w-\alpha \frac{\partial \left(-y\,ln(softmax(xw + b))\right)}{\partial w}$$
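To make the derivation concrete, here is a small NumPy sketch of a single SGD step for softmax regression on a toy batch; the shapes, random data, and batch size are illustrative assumptions, not the TensorFlow code used below. For one-hot labels the chain rule above collapses to the familiar matrix form
$$\frac{\partial Loss}{\partial w}=x^{T}(softmax(xw+b)-y)$$
```
import numpy as np

rng = np.random.default_rng(0)
batch, n_features, n_classes = 100, 784, 10               # toy shapes matching MNIST
x = rng.random((batch, n_features)).astype(np.float32)    # fake images, just for shape
y = np.eye(n_classes)[rng.integers(0, n_classes, batch)]  # one-hot labels

w = np.zeros((n_features, n_classes))
b = np.zeros((1, n_classes))
alpha = 0.5                                               # same learning rate as the TF code below

def softmax(z):
    e = np.exp(z - z.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

# One SGD step: gradient of the mean cross-entropy w.r.t. w is x^T (softmax(xw+b) - y) / batch
p = softmax(x @ w + b)
w -= alpha * (x.T @ (p - y)) / batch
b -= alpha * (p - y).mean(axis=0, keepdims=True)
```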
## Implementing Softmax Regression in TensorFlow
First we define the model's parameters. The input x and the true labels y_ are not fixed in advance; instead each occupies a node of the computation graph as a `placeholder`. The code is as follows:
```
import tensorflow as tf
session = tf.InteractiveSession()  # create an interactive session
x = tf.placeholder(tf.float32, [None, 784])
w = tf.Variable(tf.zeros([784, 10]))  # initialize the weights w to zero
b = tf.Variable(tf.zeros([1, 10]))  # initialize the bias to zero
y = tf.nn.softmax(tf.matmul(x, w) + b)
y_ = tf.placeholder(tf.float32, [None, 10])
```
Define the loss function `cross_entry`, specify the optimization objective, and initialize the global variables:
```
cross_entry = -tf.reduce_mean(tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_set = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entry)
tf.global_variables_initializer().run()
```
The preparation is essentially done, so next we train the model. In this example we iterate 1000 times, each time drawing a random batch of 100 training samples:
```
for i in range(1000):
trainSet, trainLabel = mnist.train.next_batch(100)
train_set.run(feed_dict={x: trainSet, y_: trainLabel})
```
With training complete, we should now check how well the model performs. We use the mnist test data to evaluate its classification accuracy:
```
accuracy = tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)), dtype=tf.float32)
accuracy = tf.reduce_mean(accuracy)
print("模型的分类正确率为{0:.3f}".format(session.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})))
```
As the output above shows, the classification accuracy of this routine is quite high, close to 92%. The softmax regression used in this introduction is essentially a neural network with no hidden layer: just an input layer and an output layer connected by a simple linear map. Real handwritten-image recognition would rarely use such a linear model; convolutional neural networks (CNNs) are far more common.
## Postscript
This article is a small first application from my TensorFlow beginnings. Let me share my views on neural networks, machine learning, and TensorFlow. The hottest buzzwords of the past few years are probably artificial intelligence and deep learning, and I was no exception: I joined the huge machine-learning wave along with everyone else. Working my way up from the simplest stochastic gradient descent and linear regression to the harder sequential minimal optimization algorithm and support vector machines, I have found this branch of computer science genuinely interesting: mathematical models that look rigorous, dry, and yet elegant can somehow exhibit a hint of "intelligence", and that still amazes me at times.
Since 2006, with the rapid growth of computing power, the once-neglected neural network has become popular again. Personally I am not so sure about its future. First, neural networks have historically gone through dramatic ups and downs: the perceptron and backpropagation were each red-hot in their day and then fizzled out, so who knows whether this wave of AI enthusiasm will last, or whether it will hit some ceiling and fade again? After all, the "weak AI" studied by today's industry is still very far from true AI, and it is hard to say whether that gap is a matter of technology or of philosophy. Second, current neural network models are poorly interpretable: compared with models such as SVMs, people can hardly explain why neural networks classify so well. Third, in exploring AI we keep deliberately imitating our own brains, building many brain-like mechanisms into network designs, but is that really the right path? Humans reached the sky not with flapping wings but with the wings of an airplane.
TensorFlow is a very popular machine-learning framework released by Google, and at the moment it looks like the absolute dominant one. My feeling about these frameworks is that you should neither shut yourself off from them and reinvent everything, nor depend on them too heavily. I used to reject all frameworks and implemented many classic machine-learning algorithms myself, but with neural networks the cost of hand-coding everything became too high, so I had to take the plunge. My view is this: make sure you truly understand an algorithm and work through its mathematical derivation before reading the code or using a framework. Never treat a model as a black-box tool; that may look efficient in the short term, but in the long run it does far more harm than good. Building small demos by hand along the way also makes programming more fun and is well worth doing.
|
github_jupyter
|
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

# How to Publish a Pipeline and Invoke the REST endpoint
In this notebook, we will see how we can publish a pipeline and then invoke the REST endpoint.
## Prerequisites and Azure Machine Learning Basics
If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the [configuration Notebook](https://aka.ms/pl-config) first if you haven't. This sets you up with a working config file that has information on your workspace, subscription id, etc.
### Initialization Steps
```
import azureml.core
from azureml.core import Workspace, Datastore, Experiment, Dataset
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget
# Check core SDK version number
print("SDK version:", azureml.core.VERSION)
from azureml.data.data_reference import DataReference
from azureml.pipeline.core import Pipeline, PipelineData
from azureml.pipeline.steps import PythonScriptStep
from azureml.pipeline.core.graph import PipelineParameter
print("Pipeline SDK-specific imports completed")
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
# Default datastore (Azure blob storage)
# def_blob_store = ws.get_default_datastore()
def_blob_store = Datastore(ws, "workspaceblobstore")
print("Blobstore's name: {}".format(def_blob_store.name))
```
### Compute Targets
#### Retrieve an already attached Azure Machine Learning Compute
```
from azureml.core.compute_target import ComputeTargetException
aml_compute_target = "cpu-cluster"
try:
aml_compute = AmlCompute(ws, aml_compute_target)
print("found existing compute target.")
except ComputeTargetException:
print("creating new compute target")
provisioning_config = AmlCompute.provisioning_configuration(vm_size = "STANDARD_D2_V2",
min_nodes = 1,
max_nodes = 4)
aml_compute = ComputeTarget.create(ws, aml_compute_target, provisioning_config)
aml_compute.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)
# For a more detailed view of current Azure Machine Learning Compute status, use get_status()
# example: un-comment the following line.
# print(aml_compute.get_status().serialize())
```
## Building Pipeline Steps with Inputs and Outputs
A step in the pipeline can take [dataset](https://docs.microsoft.com/python/api/azureml-core/azureml.data.filedataset?view=azure-ml-py) as input. This dataset can be a data source that lives in one of the accessible data locations, or intermediate data produced by a previous step in the pipeline.
```
# Uploading data to the datastore
data_path = def_blob_store.upload_files(["./20news.pkl"], target_path="20newsgroups", overwrite=True)
# Reference the data uploaded to blob storage using file dataset
# Assign the datasource to blob_input_data variable
blob_input_data = Dataset.File.from_files(data_path).as_named_input("test_data")
print("Dataset created")
# Define intermediate data using PipelineData
processed_data1 = PipelineData("processed_data1",datastore=def_blob_store)
print("PipelineData object created")
```
#### Define a Step that consumes a dataset and produces intermediate data.
In this step, we define a step that consumes a dataset and produces intermediate data.
**Open `train.py` in the local machine and examine the arguments, inputs, and outputs for the script. That will give you a good sense of why the script argument names used below are important.**
The best practice is to use separate folders for scripts and its dependent files for each step and specify that folder as the `source_directory` for the step. This helps reduce the size of the snapshot created for the step (only the specific folder is snapshotted). Since changes in any files in the `source_directory` would trigger a re-upload of the snapshot, this helps keep the reuse of the step when there are no changes in the `source_directory` of the step.
```
# trainStep consumes the datasource (Datareference) in the previous step
# and produces processed_data1
source_directory = "publish_run_train"
trainStep = PythonScriptStep(
script_name="train.py",
arguments=["--input_data", blob_input_data, "--output_train", processed_data1],
inputs=[blob_input_data],
outputs=[processed_data1],
compute_target=aml_compute,
source_directory=source_directory
)
print("trainStep created")
```
#### Define a Step that consumes intermediate data and produces intermediate data
In this step, we define a step that consumes an intermediate data and produces intermediate data.
**Open `extract.py` in the local machine and examine the arguments, inputs, and outputs for the script. That will give you a good sense of why the script argument names used below are important.**
```
# extractStep to use the intermediate data produced by step4
# This step also produces an output processed_data2
processed_data2 = PipelineData("processed_data2", datastore=def_blob_store)
source_directory = "publish_run_extract"
extractStep = PythonScriptStep(
script_name="extract.py",
arguments=["--input_extract", processed_data1, "--output_extract", processed_data2],
inputs=[processed_data1],
outputs=[processed_data2],
compute_target=aml_compute,
source_directory=source_directory)
print("extractStep created")
```
#### Define a Step that consumes multiple intermediate data and produces intermediate data
In this step, we define a step that consumes multiple intermediate data and produces intermediate data.
### PipelineParameter
This step also has a [PipelineParameter](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.graph.pipelineparameter?view=azure-ml-py) argument that help with calling the REST endpoint of the published pipeline.
```
# We will use this later in publishing pipeline
pipeline_param = PipelineParameter(name="pipeline_arg", default_value=10)
print("pipeline parameter created")
```
**Open `compare.py` in the local machine and examine the arguments, inputs, and outputs for the script. That will give you a good sense of why the script argument names used below are important.**
```
# Now define step6 that takes two inputs (both intermediate data), and produce an output
processed_data3 = PipelineData("processed_data3", datastore=def_blob_store)
source_directory = "publish_run_compare"
compareStep = PythonScriptStep(
script_name="compare.py",
arguments=["--compare_data1", processed_data1, "--compare_data2", processed_data2, "--output_compare", processed_data3, "--pipeline_param", pipeline_param],
inputs=[processed_data1, processed_data2],
outputs=[processed_data3],
compute_target=aml_compute,
source_directory=source_directory)
print("compareStep created")
```
#### Build the pipeline
```
pipeline1 = Pipeline(workspace=ws, steps=[compareStep])
print ("Pipeline is built")
```
## Run published pipeline
### Publish the pipeline
```
published_pipeline1 = pipeline1.publish(name="My_New_Pipeline", description="My Published Pipeline Description", continue_on_step_failure=True)
published_pipeline1
```
Note: the continue_on_step_failure parameter specifies whether the execution of steps in the Pipeline will continue if one step fails. The default value is False, meaning when one step fails, the Pipeline execution will stop, canceling any running steps.
### Publish the pipeline from a submitted PipelineRun
It is also possible to publish a pipeline from a submitted PipelineRun
```
# submit a pipeline run
pipeline_run1 = Experiment(ws, 'Pipeline_experiment').submit(pipeline1)
# publish a pipeline from the submitted pipeline run
published_pipeline2 = pipeline_run1.publish_pipeline(name="My_New_Pipeline2", description="My Published Pipeline Description", version="0.1", continue_on_step_failure=True)
published_pipeline2
```
### Get published pipeline
You can get the published pipeline using **pipeline id**.
To get all the published pipelines for a given workspace(ws):
```python
all_pub_pipelines = PublishedPipeline.get_all(ws)
```
```
from azureml.pipeline.core import PublishedPipeline
pipeline_id = published_pipeline1.id # use your published pipeline id
published_pipeline = PublishedPipeline.get(ws, pipeline_id)
published_pipeline
```
### Run published pipeline using its REST endpoint
[This notebook](https://aka.ms/pl-restep-auth) shows how to authenticate to AML workspace.
```
from azureml.core.authentication import InteractiveLoginAuthentication
import requests
auth = InteractiveLoginAuthentication()
aad_token = auth.get_authentication_header()
rest_endpoint1 = published_pipeline.endpoint
print("You can perform HTTP POST on URL {} to trigger this pipeline".format(rest_endpoint1))
# specify the param when running the pipeline
response = requests.post(rest_endpoint1,
headers=aad_token,
json={"ExperimentName": "My_Pipeline1",
"RunSource": "SDK",
"ParameterAssignments": {"pipeline_arg": 45}})
try:
response.raise_for_status()
except Exception:
raise Exception('Received bad response from the endpoint: {}\n'
'Response Code: {}\n'
'Headers: {}\n'
'Content: {}'.format(rest_endpoint1, response.status_code, response.headers, response.content))
run_id = response.json().get('Id')
print('Submitted pipeline run: ', run_id)
```
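If you want to monitor the run you just started through the REST endpoint from the SDK, a minimal sketch is shown below. It assumes `ws` and `Experiment` from the earlier cells are still available and that this SDK version exposes `PipelineRun` with the constructor used here; adapt as needed.
```
from azureml.pipeline.core.run import PipelineRun

# Fetch the run submitted via the REST endpoint and block until it finishes
rest_run = PipelineRun(experiment=Experiment(ws, "My_Pipeline1"), run_id=run_id)
rest_run.wait_for_completion(show_output=True)
print("Final status:", rest_run.get_status())
```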
# Next: Data Transfer
The next [notebook](https://aka.ms/pl-data-trans) will showcase data transfer steps between different types of data stores.
|
github_jupyter
|
```
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import statistics
import math
from sklearn.linear_model import LinearRegression
from scipy.optimize import curve_fit
er_cas_100_data = pd.read_csv('proc_er_cas_100.csv')
del er_cas_100_data['Unnamed: 0']
er_500_50_0012 = pd.read_csv('proc_er_500_50_0012.csv')
del er_500_50_0012['Unnamed: 0']
er_1000_50_0006 = pd.read_csv('proc_er_1000_50_0006.csv')
del er_1000_50_0006['Unnamed: 0']
er_1500_50_0004 = pd.read_csv('proc_er_1500_50_0004.csv')
del er_1500_50_0004['Unnamed: 0']
er_cas_100_data
er_500_50_0012
er_1000_50_0006
er_1500_50_0004
er_cas_100_dict = {}
for i in range(100):
target = list(range(i*30, (i+1)*30))
temp_er_cas_100 = er_cas_100_data[i*30 + 0 : (i+1)*30]
alive = 0
for index in target:
if (temp_er_cas_100['alive_nodes'][index] != 0) and (temp_er_cas_100['fin_larg_comp_a'][index] != 0):
alive += 1
p_k = 0.8 * 499 * temp_er_cas_100['t'][index]
if i == 0:
er_cas_100_dict['attack_size'] = [statistics.mean(temp_er_cas_100['attack_size'].values.tolist())]
er_cas_100_dict['t'] = [statistics.mean(temp_er_cas_100['t'].values.tolist())]
er_cas_100_dict['init_intra_edge_a'] = [statistics.mean(temp_er_cas_100['init_intra_edge_a'].values.tolist())]
er_cas_100_dict['alive ratio'] = [alive / 30]
er_cas_100_dict['p<k>'] = [p_k]
else:
er_cas_100_dict['attack_size'].append(statistics.mean(temp_er_cas_100['attack_size'].values.tolist()))
er_cas_100_dict['t'].append(statistics.mean(temp_er_cas_100['t'].values.tolist()))
er_cas_100_dict['init_intra_edge_a'].append(statistics.mean(temp_er_cas_100['init_intra_edge_a'].values.tolist()))
er_cas_100_dict['alive ratio'].append(alive / 30)
er_cas_100_dict['p<k>'].append(p_k)
plt.plot(er_cas_100_dict['p<k>'], er_cas_100_dict['alive ratio'])
plt.title('The ratio that shows whether largest component is alive or not')
plt.show()
er_500_50_0012_dict = {}
for i in range(100):
target = list(range(i*50, (i+1)*50))
temp_er_500_50_0012 = er_500_50_0012[i*50 + 0 : (i+1)*50]
alive = 0
for index in target:
if (temp_er_500_50_0012['alive_nodes'][index] != 0) and (temp_er_500_50_0012['fin_larg_comp_a'][index] != 0):
alive += 1
p_k = 0.8 * 499 * temp_er_500_50_0012['t'][index]
if i == 0:
er_500_50_0012_dict['attack_size'] = [statistics.mean(temp_er_500_50_0012['attack_size'].values.tolist())]
er_500_50_0012_dict['t'] = [statistics.mean(temp_er_500_50_0012['t'].values.tolist())]
er_500_50_0012_dict['init_intra_edge_a'] = [statistics.mean(temp_er_500_50_0012['init_intra_edge_a'].values.tolist())]
er_500_50_0012_dict['alive ratio'] = [alive / 50]
er_500_50_0012_dict['p<k>'] = [p_k]
er_500_50_0012_dict['alive_nodes'] = [statistics.mean(temp_er_500_50_0012['alive_nodes'].values.tolist())]
else:
er_500_50_0012_dict['attack_size'].append(statistics.mean(temp_er_500_50_0012['attack_size'].values.tolist()))
er_500_50_0012_dict['t'].append(statistics.mean(temp_er_500_50_0012['t'].values.tolist()))
er_500_50_0012_dict['init_intra_edge_a'].append(statistics.mean(temp_er_500_50_0012['init_intra_edge_a'].values.tolist()))
er_500_50_0012_dict['alive ratio'].append(alive / 50)
er_500_50_0012_dict['p<k>'].append(p_k)
er_500_50_0012_dict['alive_nodes'].append(statistics.mean(temp_er_500_50_0012['alive_nodes'].values.tolist()))
plt.plot(er_500_50_0012_dict['p<k>'], er_500_50_0012_dict['alive ratio'])
plt.axvline(x=2.4554, color='r', linestyle='--')
plt.title('N=500, K=100')
plt.xlabel("p<k>")
plt.ylabel("proportion of survived largest component")
plt.savefig("er_n500_k100")
plt.show()
X = np.array(er_500_50_0012_dict['p<k>'])
Y = np.array(er_500_50_0012_dict['alive ratio'])  # fit the sigmoid to the alive-ratio curve
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
p0 = [max(Y), np.median(X), 1, min(Y)] # this is a mandatory initial guess
popt, pcov = curve_fit(sigmoid, X, Y,p0, method='dogbox')
plt.scatter(X, Y, marker='.')
plt.plot(X, Y, linewidth=2)
plt.plot(X, sigmoid(X, *popt), color='red', linewidth=2)
plt.show()
plt.plot(X, sigmoid(X, *popt))  # fitted logistic curve
plt.axvline(x=2.4554, color='r', linestyle='--')
plt.title('N=500, K=100')
plt.xlabel("p<k>")
plt.ylabel("percentage of survived largest component")
plt.savefig("er_n500_k100")
plt.show()
er_1000_50_0006_dict = {}
for i in range(100):
target = list(range(i*50, (i+1)*50))
temp_er_1000_50_0006 = er_1000_50_0006[i*50 + 0 : (i+1)*50]
alive = 0
for index in target:
if (temp_er_1000_50_0006['alive_nodes'][index] != 0) and (temp_er_1000_50_0006['fin_larg_comp_a'][index] != 0):
alive += 1
p_k = 0.8 * 999 * temp_er_1000_50_0006['t'][index]
if i == 0:
er_1000_50_0006_dict['attack_size'] = [statistics.mean(temp_er_1000_50_0006['attack_size'].values.tolist())]
er_1000_50_0006_dict['t'] = [statistics.mean(temp_er_1000_50_0006['t'].values.tolist())]
er_1000_50_0006_dict['init_intra_edge_a'] = [statistics.mean(temp_er_1000_50_0006['init_intra_edge_a'].values.tolist())]
er_1000_50_0006_dict['alive ratio'] = [alive / 50]
er_1000_50_0006_dict['p<k>'] = [p_k]
else:
er_1000_50_0006_dict['attack_size'].append(statistics.mean(temp_er_1000_50_0006['attack_size'].values.tolist()))
er_1000_50_0006_dict['t'].append(statistics.mean(temp_er_1000_50_0006['t'].values.tolist()))
er_1000_50_0006_dict['init_intra_edge_a'].append(statistics.mean(temp_er_1000_50_0006['init_intra_edge_a'].values.tolist()))
er_1000_50_0006_dict['alive ratio'].append(alive / 50)
er_1000_50_0006_dict['p<k>'].append(p_k)
plt.plot(er_1000_50_0006_dict['p<k>'], er_1000_50_0006_dict['alive ratio'])
plt.axvline(x=2.4554, color='r', linestyle='--')
plt.title('N=1000, K=200')
plt.xlabel("p<k>")
plt.ylabel("proportion of survived largest component")
plt.savefig("er_n1000_k200")
plt.show()
er_1500_50_0004_dict = {}
for i in range(100):
target = list(range(i*50, (i+1)*50))
temp_er_1500_50_0004 = er_1500_50_0004[i*50 + 0 : (i+1)*50]
alive = 0
for index in target:
if (temp_er_1500_50_0004['alive_nodes'][index] != 0) and (temp_er_1500_50_0004['fin_larg_comp_a'][index] != 0):
alive += 1
p_k = 0.8 * 1499 * temp_er_1500_50_0004['t'][index]
if i == 0:
er_1500_50_0004_dict['attack_size'] = [statistics.mean(temp_er_1500_50_0004['attack_size'].values.tolist())]
er_1500_50_0004_dict['t'] = [statistics.mean(temp_er_1500_50_0004['t'].values.tolist())]
er_1500_50_0004_dict['init_intra_edge_a'] = [statistics.mean(temp_er_1500_50_0004['init_intra_edge_a'].values.tolist())]
er_1500_50_0004_dict['alive ratio'] = [alive / 50]
er_1500_50_0004_dict['p<k>'] = [p_k]
else:
er_1500_50_0004_dict['attack_size'].append(statistics.mean(temp_er_1500_50_0004['attack_size'].values.tolist()))
er_1500_50_0004_dict['t'].append(statistics.mean(temp_er_1500_50_0004['t'].values.tolist()))
er_1500_50_0004_dict['init_intra_edge_a'].append(statistics.mean(temp_er_1500_50_0004['init_intra_edge_a'].values.tolist()))
er_1500_50_0004_dict['alive ratio'].append(alive / 50)
er_1500_50_0004_dict['p<k>'].append(p_k)
plt.plot(er_1500_50_0004_dict['p<k>'], er_1500_50_0004_dict['alive ratio'])
plt.axvline(x=2.4554, color='r', linestyle='--')
plt.title('N=1500, K=300')
plt.xlabel("p<k>")
plt.ylabel("proportion of survived largest component")
plt.savefig("er_n1500_k300")
plt.show()
plt.plot(er_500_50_0012_dict['p<k>'], er_500_50_0012_dict['alive ratio'])
plt.plot(er_1000_50_0006_dict['p<k>'], er_1000_50_0006_dict['alive ratio'])
plt.plot(er_1500_50_0004_dict['p<k>'], er_1500_50_0004_dict['alive ratio'])
plt.axvline(x=2.4554, color='r', linestyle='--')
plt.title('Total Graph (Expanded)')
plt.xlabel("p<k>")
plt.ylabel("proportion of survived largest component")
plt.legend(['N=500', 'N=1000', 'N=1500'])
plt.savefig("er_total_expanded")
plt.show()
plt.plot(er_500_50_0012_dict['p<k>'], er_500_50_0012_dict['alive ratio'])
plt.plot(er_1000_50_0006_dict['p<k>'], er_1000_50_0006_dict['alive ratio'])
plt.plot(er_1500_50_0004_dict['p<k>'], er_1500_50_0004_dict['alive ratio'])
plt.axvline(x=2.4554, color='r', linestyle='--')
plt.title('Total Graph')
plt.xlabel("p<k>")
plt.ylabel("proportion of survived largest component")
plt.legend(['N=500', 'N=1000', 'N=1500'])
plt.xlim([2.36, 2.5])
plt.savefig("er_total")
plt.show()
```
|
github_jupyter
|
```
from pymongo import MongoClient
import pandas as pd
import datetime
client = MongoClient()
characters = client.ck2.characters
```
This notebook tries to build a world tree by drawing an edge between every character in the save file and their father and mother. Running this code will generate a network with over 270,000 nodes out of a total of almost 400,000. This was taking far too long for Gephi to graph, so I did not continue.
The next 3 notebooks contain the first code I wrote for extracting data from the save file. I would manually copy and paste out the dynasty data from both files, the character data, and the title data, and save them in separate files.
## Get Parent/Child Edges
```
pipeline = [
{
"$unwind" : "$parents"
},
{
"$lookup" :
{
"from" : "dynasties",
"localField" : "dnt",
"foreignField" : "_id",
"as" : "dynasty"
}
},
{
"$unwind" : "$dynasty"
},
{
"$match" : {"parents" : {"$nin" : [None]}, "$or" : [{"cul" : "irish"}, {"dynasty.culture" : "irish"}]}
},
{
"$project" : {"_id" : 1, "parents" : 1}
}
]
relation_df = pd.DataFrame(list(characters.aggregate(pipeline)))
```
## Get all Characters
```
pipeline = [
{
"$lookup" :
{
"from" : "dynasties",
"localField" : "dnt",
"foreignField" : "_id",
"as" : "dynasty"
}
},
{
"$unwind" : "$dynasty"
},
{
"$project" : {"_id" : 1, "name" : {"$concat" : ["$bn", " ", "$dynasty.name"]},
"culture" : {"$ifNull" : ["$cul", "$dynasty.culture"]},
"religion" : {"$ifNull" : ["$rel", "$dynasty.religion"]} }
}
]
```
## Build Network
```
import networkx as nx
import matplotlib.pyplot as plt
chars = list(characters.aggregate(pipeline))
for char in chars:
for key in list(char.keys()):
val = char[key]
if isinstance(val, type(None)):
del char[key]
G = nx.Graph()
for char in chars: #characters.aggregate(pipeline):
if "culture" in char and "religion" in char and "name" in char:
G.add_node(char["_id"], name = char['name'], culture = char['culture'], religion = char['religion'])
relation_df = relation_df.dropna(axis=0, how='any')
for i in range(len(relation_df)):
G.add_edge(relation_df.loc[i, "_id"], relation_df.loc[i, "parents"])
G.remove_nodes_from(list(nx.isolates(G))) # drop unconnected nodes (list() so we are not mutating while iterating)
#nx.draw(G)
#plt.show()
nx.write_graphml(max(nx.connected_component_subgraphs(G), key=len), "ck2-World-Tree-2.graphml")
```
|
github_jupyter
|
```
# General
from os import path
from random import randrange
from sklearn.model_selection import train_test_split, GridSearchCV #cross validation
from sklearn.metrics import confusion_matrix, plot_confusion_matrix, make_scorer
from sklearn.metrics import accuracy_score, roc_auc_score, balanced_accuracy_score
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import xgboost as xgb
from sklearn.ensemble import RandomForestClassifier
import pickle
import joblib
```
## TRAIN SET
```
trainDataFull = pd.read_csv("trainData.csv")
trainDataFull.head(3)
trainDataFull.info()
trainDataFull.describe()
trainData = trainDataFull.loc[:,'v1':'v99']
trainData.head(3)
trainLabels = trainDataFull.loc[:,'target']
trainLabels.unique()
# encode string class values as integers
label_encoder = LabelEncoder()
label_encoder = label_encoder.fit(trainLabels)
label_encoded_y = label_encoder.transform(trainLabels)
label_encoded_y
X_train, X_test, y_train, y_test = train_test_split(trainData.values,
label_encoded_y,
test_size = 0.3,
random_state = 33,
shuffle = True,
stratify = label_encoded_y)
```
## MODEL-2 (Random Forest Classifier)
```
RFC_model = RandomForestClassifier(n_estimators=800,
verbose=2,
random_state=0,
criterion='gini')
RFC_model
RFC_model.fit(X_train, y_train)
# make predictions for test data
y_pred = RFC_model.predict(X_test)
y_pred
predictions = [round(value) for value in y_pred]
# evaluate predictions
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
#fig = plt.figure(figsize=(10,10))
plot_confusion_matrix(RFC_model,
X_test,
y_test,
values_format='d')
```
## Save Valid Score
```
y_score = RFC_model.predict_proba(X_test)
y_score[0]
valid_score = pd.DataFrame(y_score, columns=['c1','c2','c3','c4','c5','c6','c7','c8','c9'])
valid_score
valid_score.to_csv('./results/valid-submission-RFC.csv', index = False)
```
## Save & Load Model
## joblib
```
# Save the model as a pickle in a file
joblib.dump(RFC_model, './model/model_RFC.pkl')
# Load the model from the file
RFC_model_from_joblib = joblib.load('./model/model_RFC.pkl')
# Use the loaded model to make predictions
RFC_model_predictions = RFC_model_from_joblib.predict(X_test)
# evaluate predictions
accuracy = accuracy_score(y_test, RFC_model_predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
```
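As an aside, the `pickle` module imported at the top is not used anywhere above; a minimal sketch of the equivalent save/load round trip with it is shown below (the file name is just an illustrative choice).
```
# Save the model with the standard-library pickle module instead of joblib
with open('./model/model_RFC_pickle.pkl', 'wb') as f:
    pickle.dump(RFC_model, f)

# Load it back and confirm the predictions still score the same
with open('./model/model_RFC_pickle.pkl', 'rb') as f:
    RFC_model_from_pickle = pickle.load(f)

pickle_predictions = RFC_model_from_pickle.predict(X_test)
print("Accuracy: %.2f%%" % (accuracy_score(y_test, pickle_predictions) * 100.0))
```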
## GridSearchCV
```
clf = GridSearchCV(RFC_model,
{'max_depth': [4, 6],
'n_estimators': [100, 200]},
verbose=1,
cv=2)
# early_stopping_rounds, eval_metric and eval_set are XGBoost-specific fit
# parameters; RandomForestClassifier.fit does not accept them.
clf.fit(X_train, y_train)
print(clf.best_score_)
print(clf.best_params_)
# Save the model as a pickle in a file
joblib.dump(clf.best_estimator_, './model/clf.pkl')
# Load the model from the file
clf_from_joblib = joblib.load('./model/clf.pkl')
# Use the loaded model to make predictions
clf_predictions = clf_from_joblib.predict(X_test)
# evaluate predictions
accuracy = accuracy_score(y_test, clf_predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
```
# TEST
```
testData = pd.read_csv("testData.csv")
testData
# Use the loaded model to make predictions
test_predictions = RFC_model.predict(testData.values)
test_predictions
# Use the loaded model to make predictions probability
test_predictions = RFC_model.predict_proba(testData.values)
test_predictions
result = pd.DataFrame(test_predictions, columns=['c1','c2','c3','c4','c5','c6','c7','c8','c9'])
result
result.to_csv('./results/test-submission-RFC.csv', index = False)
```
## REFERENCES
1- https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
2- https://github.com/dmlc/xgboost/blob/master/demo/guide-python/sklearn_examples.py
3- https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html
4- https://www.datacamp.com/community/tutorials/xgboost-in-python
5- https://scikit-learn.org/stable/modules/ensemble.html#voting-classifier
6- https://www.datacamp.com/community/tutorials/random-forests-classifier-python?utm_source=adwords_ppc&utm_campaignid=1455363063&utm_adgroupid=65083631748&utm_device=c&utm_keyword=&utm_matchtype=b&utm_network=g&utm_adpostion=&utm_creative=332602034364&utm_targetid=aud-392016246653:dsa-429603003980&utm_loc_interest_ms=&utm_loc_physical_ms=1012782&gclid=EAIaIQobChMI49HTjNO06wIVB-ztCh23nwMLEAAYASAAEgKKEvD_BwE
|
github_jupyter
|
<a href="https://colab.research.google.com/github/ashraj98/rbf-sin-approx/blob/main/Lab2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Lab 2
### Ashwin Rajgopal
Start off by importing numpy for matrix math, random for random ordering of samples and pyplot for plotting results.
```
import matplotlib.pyplot as plt
import numpy as np
import random
```
#### Creating the samples
The X values can be generated with `np.random.rand`, which produces an array of random numbers between 0 and 1, exactly the interval required. The same can be done for the noise, except the result needs to be divided by 5 and shifted down by 0.1 to fit the interval [-0.1, 0.1]. The expected values can then be generated by applying the function to the inputs and adding the noise.
For plotting the original function that will be approximated by the RBF network, `linspace` can be used to generate equally spaced inputs to make a smooth plot of the function.
```
X = np.random.rand(1, 75).flatten()
noise = np.random.rand(1, 75).flatten() / 5 - 0.1
D = 0.5 + 0.4 * np.sin(2 * np.pi * X) + noise
func_X = np.linspace(0, 1, 100)
func_Y = 0.5 + 0.4 * np.sin(2 * np.pi * func_X)
```
#### K-means algorithm
This function finds the centers and variances given uncategorized inputs and a number of clusters. It also takes in a flag to determine whether to output an averaged variance for all clusters or use a specialized variance for each cluster.
The algorithm begins by choosing random points from the inputs as the centers of the clusters, so that every cluster will have at least one point assigned to it. Then the algorithm repeatedly assigns points to each cluster using Euclidean distance and averages the points assigned to each cluster to find the new centers. The new centers are compared with the old centers, and if they are the same, the algorithm stops.
Then using the last assignment of the points, the variance for each cluster is calculated. If a cluster does not have more than one point assigned to it, it is skipped.
If `use_same_width=True`, then a single normalized width is used for all clusters: the maximum pairwise distance between centers is found with an outer subtraction of the centers array with itself, and that distance is divided by `sqrt(2 * # of clusters)`.
If `use_same_width=False`, then for all clusters that had only one point assigned to it, the average of all the other variances is used as the variance for these clusters.
```
def kmeans(clusters=2, X=X, use_same_width=False):
centers = np.random.choice(X, clusters, replace=False)
diff = 1
while diff != 0:
assigned = [[] for i in range(clusters)]
for x in X:
assigned_center = np.argmin(np.abs(centers - x))
assigned[assigned_center].append(x.item())
new_centers = np.array([np.average(points) for points in assigned])
diff = np.sum(np.abs(new_centers - centers))
centers = new_centers
variances = []
no_var = []
for i in range(clusters):
if len(assigned[i]) < 2:
no_var.append(i)
else:
variances.append(np.var(assigned[i]))
if use_same_width:
d_max = np.max(np.abs(np.subtract.outer(centers, centers)))
avg_var = d_max / np.sqrt(2 * clusters)
variances = [avg_var for i in range(clusters)]
else:
if len(no_var) > 0:
avg_var = np.average(variances)
for i in no_var:
variances.insert(i, avg_var)
return (centers, np.array(variances))
```
The function below defines the gaussian function. Given the centers and variances for all clusters, it calculates the output for all gaussians at once for a single input.
```
def gaussian(centers, variances, x):
return np.exp((-1 / (2 * variances)) * ((centers - x) ** 2))
```
#### Training the RBF Network
For each gaussian, a random weight is generated in the interval [-1, 1]. The same happens for a bias term as well.
Then, for the number of epochs specified, the algorithm calculates the gaussian outputs for each input, and then takes the weighted sum and adds the bias to get the output of the network. Then the LMS algorithm is applied.
Afterwards, the `linspace`d inputs are used to generate the outputs, which allows for plotting the approximating function. Then both the approximated function (red) and the approximating function (blue) are plotted, along with the noisy training data.
```
def train(centers, variances, lr, epochs=100):
num_centers = len(centers)
W = np.random.rand(1, num_centers) * 2 - 1
b = np.random.rand(1, 1) * 2 - 1
order = list(range(len(X)))
for i in range(epochs):
random.shuffle(order)
for j in order:
x = X[j]
d = D[j]
G = gaussian(centers, variances, x)
y = W.dot(G) + b
e = d - y
W += lr * e * G.reshape(1, num_centers)
b += lr * e
est_Y = []
for x in func_X:
G = gaussian(centers, variances, x)
y = W.dot(G) + b
est_Y.append(y.item())
est_Y = np.array(est_Y)
fig = plt.figure()
ax = plt.axes()
ax.scatter(X, D, label='Sampled')
ax.plot(func_X, est_Y, '-b', label='Approximate')
ax.plot(func_X, func_Y, '-r', label='Original')
plt.title(f'Bases = {num_centers}, Learning Rate = {lr}')
plt.xlabel('x')
plt.ylabel('y')
plt.legend(loc="upper right")
```
The learning rates and number of bases that needed to be tested are defined, and then K-means is run for each combination of base and learning rate. The output of the K-means is used as the input for the RBF training algorithm, and the results are plotted.
```
bases = [2, 4, 7, 11, 16]
learning_rates = [.01, .02]
for base in bases:
for lr in learning_rates:
centers, variances = kmeans(base, X)
train(centers=centers, variances=variances, lr=lr)
```
The best function approximations seem to come from using 2 bases. As soon as the number of bases is increased to 4, overfitting starts to occur, with 16 bases showing extreme overfitting.
Increasing the learning rate seems to decrease the training error but in some cases increases the overfitting of the data.
Run the same combinations of number of bases and learning rate again, but this time using the same Gaussian width for all bases.
```
for base in bases:
for lr in learning_rates:
centers, variances = kmeans(base, X, use_same_width=True)
train(centers=centers, variances=variances, lr=lr, epochs=100)
```
Using the same width for each base seems to drastically decrease overfitting. Even with 16 bases, the approximating function is very smooth. However, after 100 epochs, the training error is still very high, and the original function is not well approximated.
After running the training with significantly more epochs (10,000 to 100,000), the function becomes well approximated for large number of bases. But for smaller number of bases like 2, the approximating function is still not close to the approximated function, whereas when using different Gaussian widths, 2 bases was the best approximator of the original function.
So, using the same widths, the training takes significantly longer and requires many bases to be used to approximate the original function well.
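For reference, that longer run can be reproduced with the functions already defined above; the sketch below uses 16 bases and a learning rate of 0.02 as one illustrative choice, with the epoch count taken from the lower end of the range mentioned.
```
# Re-run the shared-width experiment with many more epochs (this takes a while)
centers, variances = kmeans(16, X, use_same_width=True)
train(centers=centers, variances=variances, lr=0.02, epochs=10000)
```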
|
github_jupyter
|
To finish, check out: http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1992AJ....104.2213L&data_type=PDF_HIGH&whole_paper=YES&type=PRINTER&filetype=.pdf
```
# Third-party
from astropy.io import ascii, fits
import astropy.coordinates as coord
import astropy.units as u
from astropy.constants import c
import matplotlib as mpl
import matplotlib.pyplot as pl
import numpy as np
from scipy.interpolate import interp1d
pl.style.use('apw-notebook')
%matplotlib inline
# pl.style.use('classic')
# %matplotlib notebook
data_files = ["../data/apVisit-r5-6994-56770-261.fits", "../data/apVisit-r5-6994-56794-177.fits"]
model_file = "../data/apStar-r5-2M00004994+1621552.fits"
min_wvln = 15329
max_wvln = 15359
def load_file(filename, chip):
hdulist1 = fits.open(filename)
wvln = hdulist1[4].data[chip]
ix = (wvln >= min_wvln) & (wvln <= max_wvln)
wvln = wvln[ix]
flux = hdulist1[1].data[chip,ix]
flux_err = hdulist1[2].data[chip,ix]
return {'wvln': wvln, 'flux': flux, 'flux_err': flux_err}
def load_model_file(filename):
hdulist1 = fits.open(filename)
flux = hdulist1[1].data[0]
flux_err = hdulist1[2].data[0]
wvln = 10**(hdulist1[0].header['CRVAL1'] + np.arange(flux.size) * hdulist1[0].header['CDELT1'])
# ix = (wvln >= min_wvln) & (wvln <= max_wvln)
ix = (wvln < 15750) & (wvln > 15150) # HACK: magic numbers
return {'wvln': wvln[ix], 'flux': flux[ix], 'flux_err': flux_err[ix]}
d = load_file(data_files[0], chip=2)
d['wvln'].shape
chip = 2
fig,ax = pl.subplots(1,1,figsize=(12,6))
for fn in data_files:
d = load_file(fn, chip=chip)
ax.plot(d['wvln'], d['flux'], drawstyle='steps', marker=None)
ref_spec = load_model_file(model_file)
ax.plot(ref_spec['wvln'], 3.2*ref_spec['flux'], drawstyle='steps', marker=None, lw=2.) # HACK: scale up
# _d = 175
# ax.set_xlim(15150.+_d, 15175.+_d)
# ax.set_ylim(10000, 20000)
all_spectra = [load_file(f, chip=2) for f in data_files]
ref_spec['interp'] = interp1d(ref_spec['wvln'], ref_spec['flux'], kind='cubic', bounds_error=False)
def get_design_matrix(data, ref_spec, v1, v2):
"""
Note: Positive velocity is a redshift.
"""
X = np.ones((3, data['wvln'].shape[0]))
X[1] = ref_spec['interp'](data['wvln'] * (1 + v1/c)) # this is only good to first order in (v/c)
X[2] = ref_spec['interp'](data['wvln'] * (1 + v2/c))
return X
def get_optimal_chisq(data, ref_spec, v1, v2):
X = get_design_matrix(data, ref_spec, v1, v2)
return np.linalg.solve( X.dot(X.T), X.dot(data['flux']) )
spec_i = 1
v1 = 35 * u.km/u.s
v2 = -5 * u.km/u.s
X = get_design_matrix(all_spectra[spec_i], ref_spec, v1, v2)
opt_pars = get_optimal_chisq(all_spectra[spec_i], ref_spec, v1, v2)
opt_pars
def make_synthetic_spectrum(X, pars):
return X.T.dot(pars)
def compute_chisq(data, X, opt_pars):
synth_spec = make_synthetic_spectrum(X, opt_pars)
return -np.sum((synth_spec - data['flux'])**2)
# opt_pars = np.array([1.1E+4, 0.5, 0.5])
synth_spec = make_synthetic_spectrum(X, opt_pars)
pl.plot(all_spectra[spec_i]['wvln'], all_spectra[spec_i]['flux'], marker=None, drawstyle='steps')
pl.plot(all_spectra[spec_i]['wvln'], synth_spec, marker=None, drawstyle='steps')
_v1_grid = np.linspace(25, 45, 32)
_v2_grid = np.linspace(-15, 5, 32)
shp = (_v1_grid.size, _v2_grid.size)
v_grid = np.vstack(map(np.ravel, np.meshgrid(_v1_grid, _v2_grid))).T
v_grid.shape
chisq = np.zeros(v_grid.shape[0])
for i in range(v_grid.shape[0]):
v1,v2 = v_grid[i]
# rebuild the design matrix at this grid point so the fit uses the trial velocities
X = get_design_matrix(all_spectra[spec_i], ref_spec, v1*u.km/u.s, v2*u.km/u.s)
opt_pars = get_optimal_chisq(all_spectra[spec_i], ref_spec,
v1*u.km/u.s, v2*u.km/u.s)
chisq[i] = compute_chisq(all_spectra[spec_i], X, opt_pars)
fig,ax = pl.subplots(1,1,figsize=(9,8))
cb = ax.pcolormesh(v_grid[:,0].reshape(shp), v_grid[:,1].reshape(shp),
chisq.reshape(shp), cmap='magma')
fig.colorbar(cb)
fig,ax = pl.subplots(1,1,figsize=(9,8))
cb = ax.pcolormesh(v_grid[:,0].reshape(shp), v_grid[:,1].reshape(shp),
np.exp(chisq-chisq.max()).reshape(shp), cmap='magma')
fig.colorbar(cb)
```
---
try using levmar to optimize
```
from scipy.optimize import leastsq
def errfunc(pars, data_spec, ref_spec):
v1,v2,a,b,c = pars
X = get_design_matrix(data_spec, ref_spec, v1*u.km/u.s, v2*u.km/u.s)
synth_spec = make_synthetic_spectrum(X, [a,b,c])
return (synth_spec - data_spec['flux'])
levmar_opt_pars,ier = leastsq(errfunc, x0=[35,-5]+opt_pars.tolist(), args=(all_spectra[0], ref_spec))
levmar_opt_pars
data_spec = all_spectra[0]
X = get_design_matrix(data_spec, ref_spec, levmar_opt_pars[0]*u.km/u.s, levmar_opt_pars[1]*u.km/u.s)
synth_spec = make_synthetic_spectrum(X, levmar_opt_pars[2:])
pl.plot(data_spec['wvln'], data_spec['flux'], marker=None, drawstyle='steps')
pl.plot(data_spec['wvln'], synth_spec, marker=None, drawstyle='steps')
```
|
github_jupyter
|
# TensorFlow Tutorial
Welcome to this week's programming assignment. Until now, you've always used numpy to build neural networks. Now we will step you through a deep learning framework that will allow you to build neural networks more easily. Machine learning frameworks like TensorFlow, PaddlePaddle, Torch, Caffe, Keras, and many others can speed up your machine learning development significantly. All of these frameworks also have a lot of documentation, which you should feel free to read. In this assignment, you will learn to do the following in TensorFlow:
- Initialize variables
- Start your own session
- Train algorithms
- Implement a Neural Network
Programing frameworks can not only shorten your coding time, but sometimes also perform optimizations that speed up your code.
## 1 - Exploring the Tensorflow Library
To start, you will import the library:
```
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
from tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict
%matplotlib inline
np.random.seed(1)
```
Now that you have imported the library, we will walk you through its different applications. You will start with an example, where we compute for you the loss of one training example.
$$loss = \mathcal{L}(\hat{y}, y) = (\hat y^{(i)} - y^{(i)})^2 \tag{1}$$
```
y_hat = tf.constant(36, name='y_hat') # Define y_hat constant. Set to 36.
y = tf.constant(39, name='y') # Define y. Set to 39
loss = tf.Variable((y - y_hat)**2, name='loss') # Create a variable for the loss
init = tf.global_variables_initializer() # When init is run later (session.run(init)),
# the loss variable will be initialized and ready to be computed
with tf.Session() as session: # Create a session and print the output
session.run(init) # Initializes the variables
print(session.run(loss)) # Prints the loss
```
Writing and running programs in TensorFlow has the following steps:
1. Create Tensors (variables) that are not yet executed/evaluated.
2. Write operations between those Tensors.
3. Initialize your Tensors.
4. Create a Session.
5. Run the Session. This will run the operations you'd written above.
Therefore, when we created a variable for the loss, we simply defined the loss as a function of other quantities, but did not evaluate its value. To evaluate it, we had to run `init=tf.global_variables_initializer()`. That initialized the loss variable, and in the last line we were finally able to evaluate the value of `loss` and print its value.
Now let us look at an easy example. Run the cell below:
```
a = tf.constant(2)
b = tf.constant(10)
c = tf.multiply(a,b)
print(c)
```
As expected, you will not see 20! You got a tensor saying that the result is a tensor that does not have the shape attribute, and is of type "int32". All you did was put in the 'computation graph', but you have not run this computation yet. In order to actually multiply the two numbers, you will have to create a session and run it.
```
sess = tf.Session()
print(sess.run(c))
```
Great! To summarize, **remember to initialize your variables, create a session and run the operations inside the session**.
Next, you'll also have to know about placeholders. A placeholder is an object whose value you can specify only later.
To specify values for a placeholder, you can pass in values by using a "feed dictionary" (`feed_dict` variable). Below, we created a placeholder for x. This allows us to pass in a number later when we run the session.
```
# Change the value of x in the feed_dict
x = tf.placeholder(tf.int64, name = 'x')
print(sess.run(2 * x, feed_dict = {x: 3}))
sess.close()
```
When you first defined `x` you did not have to specify a value for it. A placeholder is simply a variable that you will assign data to only later, when running the session. We say that you **feed data** to these placeholders when running the session.
Here's what's happening: When you specify the operations needed for a computation, you are telling TensorFlow how to construct a computation graph. The computation graph can have some placeholders whose values you will specify only later. Finally, when you run the session, you are telling TensorFlow to execute the computation graph.
### 1.1 - Linear function
Let's start this programming exercise by computing the following equation: $Y = WX + b$, where $W$ and $X$ are random matrices and b is a random vector.
**Exercise**: Compute $WX + b$ where $W, X$, and $b$ are drawn from a random normal distribution. W is of shape (4, 3), X is (3,1) and b is (4,1). As an example, here is how you would define a constant X that has shape (3,1):
```python
X = tf.constant(np.random.randn(3,1), name = "X")
```
You might find the following functions helpful:
- tf.matmul(..., ...) to do a matrix multiplication
- tf.add(..., ...) to do an addition
- np.random.randn(...) to initialize randomly
```
# GRADED FUNCTION: linear_function
def linear_function():
"""
Implements a linear function:
Initializes W to be a random tensor of shape (4,3)
Initializes X to be a random tensor of shape (3,1)
Initializes b to be a random tensor of shape (4,1)
Returns:
result -- runs the session for Y = WX + b
"""
np.random.seed(1)
### START CODE HERE ### (4 lines of code)
X = tf.constant(np.random.randn(3,1),name='X')
W = tf.constant(np.random.randn(4,3),name='W')
b = tf.constant(np.random.randn(4,1),name='b')
Y = tf.add(tf.matmul(W,X),b)
### END CODE HERE ###
# Create the session using tf.Session() and run it with sess.run(...) on the variable you want to calculate
### START CODE HERE ###
sess = tf.Session()
result = sess.run(Y)
### END CODE HERE ###
# close the session
sess.close()
return result
print( "result = " + str(linear_function()))
```
*** Expected Output ***:
<table>
<tr>
<td>
**result**
</td>
<td>
[[-2.15657382]
[ 2.95891446]
[-1.08926781]
[-0.84538042]]
</td>
</tr>
</table>
### 1.2 - Computing the sigmoid
Great! You just implemented a linear function. Tensorflow offers a variety of commonly used neural network functions like `tf.sigmoid` and `tf.softmax`. For this exercise, let's compute the sigmoid function of an input.
You will do this exercise using a placeholder variable `x`. When running the session, you should use the feed dictionary to pass in the input `z`. In this exercise, you will have to (i) create a placeholder `x`, (ii) define the operations needed to compute the sigmoid using `tf.sigmoid`, and then (iii) run the session.
** Exercise **: Implement the sigmoid function below. You should use the following:
- `tf.placeholder(tf.float32, name = "...")`
- `tf.sigmoid(...)`
- `sess.run(..., feed_dict = {x: z})`
Note that there are two typical ways to create and use sessions in tensorflow:
**Method 1:**
```python
sess = tf.Session()
# Run the variables initialization (if needed), run the operations
result = sess.run(..., feed_dict = {...})
sess.close() # Close the session
```
**Method 2:**
```python
with tf.Session() as sess:
# run the variables initialization (if needed), run the operations
result = sess.run(..., feed_dict = {...})
# This takes care of closing the session for you :)
```
```
# GRADED FUNCTION: sigmoid
def sigmoid(z):
"""
Computes the sigmoid of z
Arguments:
z -- input value, scalar or vector
Returns:
results -- the sigmoid of z
"""
### START CODE HERE ### ( approx. 4 lines of code)
# Create a placeholder for x. Name it 'x'.
x = tf.placeholder(tf.float32,name='x')
# compute sigmoid(x)
sigmoid = tf.sigmoid(x)
# Create a session, and run it. Please use the method 2 explained above.
# You should use a feed_dict to pass z's value to x.
with tf.Session() as sess:
# Run session and call the output "result"
result = sess.run(sigmoid,feed_dict={x:z})
### END CODE HERE ###
return result
print ("sigmoid(0) = " + str(sigmoid(0)))
print ("sigmoid(12) = " + str(sigmoid(12)))
```
*** Expected Output ***:
<table>
<tr>
<td>
**sigmoid(0)**
</td>
<td>
0.5
</td>
</tr>
<tr>
<td>
**sigmoid(12)**
</td>
<td>
0.999994
</td>
</tr>
</table>
<font color='blue'>
**To summarize, you now know how to**:
1. Create placeholders
2. Specify the computation graph corresponding to operations you want to compute
3. Create the session
4. Run the session, using a feed dictionary if necessary to specify placeholder variables' values.
### 1.3 - Computing the Cost
You can also use a built-in function to compute the cost of your neural network. So instead of needing to write code to compute this as a function of $a^{[2](i)}$ and $y^{(i)}$ for i=1...m:
$$ J = - \frac{1}{m} \sum_{i = 1}^m \large ( \small y^{(i)} \log a^{ [2] (i)} + (1-y^{(i)})\log (1-a^{ [2] (i)} )\large )\small\tag{2}$$
you can do it in one line of code in tensorflow!
**Exercise**: Implement the cross entropy loss. The function you will use is:
- `tf.nn.sigmoid_cross_entropy_with_logits(logits = ..., labels = ...)`
Your code should input `z`, compute the sigmoid (to get `a`) and then compute the cross entropy cost $J$. All this can be done using one call to `tf.nn.sigmoid_cross_entropy_with_logits`, which computes
$$- \frac{1}{m} \sum_{i = 1}^m \large ( \small y^{(i)} \log \sigma(z^{[2](i)}) + (1-y^{(i)})\log (1-\sigma(z^{[2](i)})\large )\small\tag{2}$$
```
# GRADED FUNCTION: cost
def cost(logits, labels):
"""
Computes the cost using the sigmoid cross entropy
Arguments:
logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)
labels -- vector of labels y (1 or 0)
Note: What we've been calling "z" and "y" in this class are respectively called "logits" and "labels"
in the TensorFlow documentation. So logits will feed into z, and labels into y.
Returns:
cost -- runs the session of the cost (formula (2))
"""
### START CODE HERE ###
# Create the placeholders for "logits" (z) and "labels" (y) (approx. 2 lines)
z = tf.placeholder(tf.float32,name='z')
y = tf.placeholder(tf.float32,name='y')
# Use the loss function (approx. 1 line)
cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=z,labels=y)
# Create a session (approx. 1 line). See method 1 above.
sess = tf.Session()
# Run the session (approx. 1 line).
cost = sess.run(cost,feed_dict={z:logits,y:labels})
# Close the session (approx. 1 line). See method 1 above.
sess.close()
### END CODE HERE ###
return cost
logits = sigmoid(np.array([0.2,0.4,0.7,0.9]))
cost = cost(logits, np.array([0,0,1,1]))
print ("cost = " + str(cost))
```
** Expected Output** :
<table>
<tr>
<td>
**cost**
</td>
<td>
[ 1.00538719 1.03664088 0.41385433 0.39956614]
</td>
</tr>
</table>
### 1.4 - Using One Hot encodings
Many times in deep learning you will have a y vector with numbers ranging from 0 to C-1, where C is the number of classes. If C is for example 4, then you might have the following y vector which you will need to convert as follows:
<img src="images/onehot.png" style="width:600px;height:150px;">
This is called a "one hot" encoding, because in the converted representation exactly one element of each column is "hot" (meaning set to 1). To do this conversion in numpy, you might have to write a few lines of code. In tensorflow, you can use one line of code:
- tf.one_hot(labels, depth, axis)
**Exercise:** Implement the function below to take one vector of labels and the total number of classes $C$, and return the one hot encoding. Use `tf.one_hot()` to do this.
```
# GRADED FUNCTION: one_hot_matrix
def one_hot_matrix(labels, C):
"""
Creates a matrix where the i-th row corresponds to the ith class number and the jth column
corresponds to the jth training example. So if example j had a label i. Then entry (i,j)
will be 1.
Arguments:
labels -- vector containing the labels
C -- number of classes, the depth of the one hot dimension
Returns:
one_hot -- one hot matrix
"""
### START CODE HERE ###
# Create a tf.constant equal to C (depth), name it 'C'. (approx. 1 line)
C = tf.constant(C,name='C')
# Use tf.one_hot, be careful with the axis (approx. 1 line)
one_hot_matrix = tf.one_hot(labels,C,axis=0)
# Create the session (approx. 1 line)
sess = tf.Session()
# Run the session (approx. 1 line)
one_hot = sess.run(one_hot_matrix)
# Close the session (approx. 1 line). See method 1 above.
sess.close()
### END CODE HERE ###
return one_hot
labels = np.array([1,2,3,0,2,1])
one_hot = one_hot_matrix(labels, C = 4)
print ("one_hot = " + str(one_hot))
```
**Expected Output**:
<table>
<tr>
<td>
**one_hot**
</td>
<td>
[[ 0. 0. 0. 1. 0. 0.]
[ 1. 0. 0. 0. 0. 1.]
[ 0. 1. 0. 0. 1. 0.]
[ 0. 0. 1. 0. 0. 0.]]
</td>
</tr>
</table>
### 1.5 - Initialize with zeros and ones
Now you will learn how to initialize a vector of zeros and ones. The function you will be calling is `tf.ones()`. To initialize with zeros you could use tf.zeros() instead. These functions take in a shape and return an array of dimension shape full of zeros and ones respectively.
**Exercise:** Implement the function below to take in a shape and to return an array (of the shape's dimension of ones).
- tf.ones(shape)
```
# GRADED FUNCTION: ones
def ones(shape):
"""
Creates an array of ones of dimension shape
Arguments:
shape -- shape of the array you want to create
Returns:
ones -- array containing only ones
"""
### START CODE HERE ###
# Create "ones" tensor using tf.ones(...). (approx. 1 line)
ones = tf.ones(shape)
# Create the session (approx. 1 line)
sess = tf.Session()
# Run the session to compute 'ones' (approx. 1 line)
ones = sess.run(ones)
# Close the session (approx. 1 line). See method 1 above.
sess.close()
### END CODE HERE ###
return ones
print ("ones = " + str(ones([3])))
```
**Expected Output:**
<table>
<tr>
<td>
**ones**
</td>
<td>
[ 1. 1. 1.]
</td>
</tr>
</table>
# 2 - Building your first neural network in tensorflow
In this part of the assignment you will build a neural network using tensorflow. Remember that there are two parts to implement a tensorflow model:
- Create the computation graph
- Run the graph
Let's delve into the problem you'd like to solve!
### 2.0 - Problem statement: SIGNS Dataset
One afternoon, with some friends we decided to teach our computers to decipher sign language. We spent a few hours taking pictures in front of a white wall and came up with the following dataset. It's now your job to build an algorithm that would facilitate communications from a speech-impaired person to someone who doesn't understand sign language.
- **Training set**: 1080 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (180 pictures per number).
- **Test set**: 120 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (20 pictures per number).
Note that this is a subset of the SIGNS dataset. The complete dataset contains many more signs.
Here are examples for each number, and an explanation of how we represent the labels. These are the original pictures, before we lowered the image resolution to 64 by 64 pixels.
<img src="images/hands.png" style="width:800px;height:350px;"><caption><center> <u><font color='purple'> **Figure 1**</u><font color='purple'>: SIGNS dataset <br> <font color='black'> </center>
Run the following code to load the dataset.
```
# Loading the dataset
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
```
Change the index below and run the cell to visualize some examples in the dataset.
```
# Example of a picture
index = 0
plt.imshow(X_train_orig[index])
print ("y = " + str(np.squeeze(Y_train_orig[:, index])))
```
As usual you flatten the image dataset, then normalize it by dividing by 255. On top of that, you will convert each label to a one-hot vector as shown in Figure 1. Run the cell below to do so.
```
# Flatten the training and test images
X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T
X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T
# Normalize image vectors
X_train = X_train_flatten/255.
X_test = X_test_flatten/255.
# Convert training and test labels to one hot matrices
Y_train = convert_to_one_hot(Y_train_orig, 6)
Y_test = convert_to_one_hot(Y_test_orig, 6)
print ("number of training examples = " + str(X_train.shape[1]))
print ("number of test examples = " + str(X_test.shape[1]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
```
**Note** that 12288 comes from $64 \times 64 \times 3$. Each image is square, 64 by 64 pixels, and 3 is for the RGB colors. Please make sure all these shapes make sense to you before continuing.
**Your goal** is to build an algorithm capable of recognizing a sign with high accuracy. To do so, you are going to build a tensorflow model that is almost the same as one you have previously built in numpy for cat recognition (but now using a softmax output). It is a great occasion to compare your numpy implementation to the tensorflow one.
**The model** is *LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX*. The SIGMOID output layer has been converted to a SOFTMAX. A SOFTMAX layer generalizes SIGMOID to when there are more than two classes.
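As a quick (ungraded) sanity check of that last claim, a softmax over the two logits $[z, 0]$ reduces exactly to the sigmoid of $z$:
```python
# Softmax over [z, 0] equals sigmoid(z), so a two-class SOFTMAX is a SIGMOID
z = 1.7
two_class = np.exp([z, 0.0]) / np.sum(np.exp([z, 0.0]))
print(two_class[0])             # probability of the first class
print(1 / (1 + np.exp(-z)))     # sigmoid(z) -- the same value
```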
### 2.1 - Create placeholders
Your first task is to create placeholders for `X` and `Y`. This will allow you to later pass your training data in when you run your session.
**Exercise:** Implement the function below to create the placeholders in tensorflow.
```
# GRADED FUNCTION: create_placeholders
def create_placeholders(n_x, n_y):
"""
Creates the placeholders for the tensorflow session.
Arguments:
n_x -- scalar, size of an image vector (num_px * num_px = 64 * 64 * 3 = 12288)
n_y -- scalar, number of classes (from 0 to 5, so -> 6)
Returns:
X -- placeholder for the data input, of shape [n_x, None] and dtype "float"
Y -- placeholder for the input labels, of shape [n_y, None] and dtype "float"
Tips:
- You will use None because it lets us be flexible on the number of examples used for the placeholders.
In fact, the number of examples during test/train is different.
"""
### START CODE HERE ### (approx. 2 lines)
X = tf.placeholder(shape=[n_x,None],dtype='float')
Y = tf.placeholder(shape=[n_y,None],dtype='float')
### END CODE HERE ###
return X, Y
X, Y = create_placeholders(12288, 6)
print ("X = " + str(X))
print ("Y = " + str(Y))
```
**Expected Output**:
<table>
<tr>
<td>
**X**
</td>
<td>
Tensor("Placeholder_1:0", shape=(12288, ?), dtype=float32) (not necessarily Placeholder_1)
</td>
</tr>
<tr>
<td>
**Y**
</td>
<td>
Tensor("Placeholder_2:0", shape=(10, ?), dtype=float32) (not necessarily Placeholder_2)
</td>
</tr>
</table>
### 2.2 - Initializing the parameters
Your second task is to initialize the parameters in tensorflow.
**Exercise:** Implement the function below to initialize the parameters in tensorflow. You are going use Xavier Initialization for weights and Zero Initialization for biases. The shapes are given below. As an example, to help you, for W1 and b1 you could use:
```python
W1 = tf.get_variable("W1", [25,12288], initializer = tf.contrib.layers.xavier_initializer(seed = 1))
b1 = tf.get_variable("b1", [25,1], initializer = tf.zeros_initializer())
```
Please use `seed = 1` to make sure your results match ours.
```
# GRADED FUNCTION: initialize_parameters
def initialize_parameters():
"""
Initializes parameters to build a neural network with tensorflow. The shapes are:
W1 : [25, 12288]
b1 : [25, 1]
W2 : [12, 25]
b2 : [12, 1]
W3 : [6, 12]
b3 : [6, 1]
Returns:
parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3
"""
tf.set_random_seed(1) # so that your "random" numbers match ours
### START CODE HERE ### (approx. 6 lines of code)
W1 = tf.get_variable('W1',[25,12288],initializer=tf.contrib.layers.xavier_initializer(seed=1))
b1 = tf.get_variable('b1',[25,1],initializer=tf.zeros_initializer())
W2 = tf.get_variable('W2',[12,25],initializer=tf.contrib.layers.xavier_initializer(seed=1))
b2 = tf.get_variable('b2',[12,1],initializer=tf.zeros_initializer())
W3 = tf.get_variable('W3',[6,12],initializer=tf.contrib.layers.xavier_initializer(seed=1))
b3 = tf.get_variable('b3',[6,1],initializer=tf.zeros_initializer())
### END CODE HERE ###
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2,
"W3": W3,
"b3": b3}
return parameters
tf.reset_default_graph()
with tf.Session() as sess:
parameters = initialize_parameters()
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
```
**Expected Output**:
<table>
<tr>
<td>
**W1**
</td>
<td>
< tf.Variable 'W1:0' shape=(25, 12288) dtype=float32_ref >
</td>
</tr>
<tr>
<td>
**b1**
</td>
<td>
< tf.Variable 'b1:0' shape=(25, 1) dtype=float32_ref >
</td>
</tr>
<tr>
<td>
**W2**
</td>
<td>
< tf.Variable 'W2:0' shape=(12, 25) dtype=float32_ref >
</td>
</tr>
<tr>
<td>
**b2**
</td>
<td>
< tf.Variable 'b2:0' shape=(12, 1) dtype=float32_ref >
</td>
</tr>
</table>
As expected, the parameters haven't been evaluated yet.
### 2.3 - Forward propagation in tensorflow
You will now implement the forward propagation module in tensorflow. The function will take in a dictionary of parameters and it will complete the forward pass. The functions you will be using are:
- `tf.add(...,...)` to do an addition
- `tf.matmul(...,...)` to do a matrix multiplication
- `tf.nn.relu(...)` to apply the ReLU activation
**Question:** Implement the forward pass of the neural network. We commented for you the numpy equivalents so that you can compare the tensorflow implementation to numpy. It is important to note that the forward propagation stops at `z3`. The reason is that in tensorflow the last linear layer output is given as input to the function computing the loss. Therefore, you don't need `a3`!
```
# GRADED FUNCTION: forward_propagation
def forward_propagation(X, parameters):
"""
Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX
Arguments:
X -- input dataset placeholder, of shape (input size, number of examples)
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"
the shapes are given in initialize_parameters
Returns:
Z3 -- the output of the last LINEAR unit
"""
# Retrieve the parameters from the dictionary "parameters"
W1 = parameters['W1']
b1 = parameters['b1']
W2 = parameters['W2']
b2 = parameters['b2']
W3 = parameters['W3']
b3 = parameters['b3']
### START CODE HERE ### (approx. 5 lines) # Numpy Equivalents:
Z1 = tf.add(tf.matmul(W1,X),b1) # Z1 = np.dot(W1, X) + b1
A1 = tf.nn.relu(Z1) # A1 = relu(Z1)
Z2 = tf.add(tf.matmul(W2,A1),b2) # Z2 = np.dot(W2, a1) + b2
A2 = tf.nn.relu(Z2) # A2 = relu(Z2)
Z3 = tf.add(tf.matmul(W3,A2),b3)                                               # Z3 = np.dot(W3, A2) + b3
### END CODE HERE ###
return Z3
tf.reset_default_graph()
with tf.Session() as sess:
X, Y = create_placeholders(12288, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
print("Z3 = " + str(Z3))
```
**Expected Output**:
<table>
<tr>
<td>
**Z3**
</td>
<td>
Tensor("Add_2:0", shape=(6, ?), dtype=float32)
</td>
</tr>
</table>
You may have noticed that the forward propagation doesn't output any cache. You will understand why below, when we get to backpropagation.
### 2.4 Compute cost
As seen before, it is very easy to compute the cost using:
```python
tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = ..., labels = ...))
```
**Question**: Implement the cost function below.
- It is important to know that the "`logits`" and "`labels`" inputs of `tf.nn.softmax_cross_entropy_with_logits` are expected to be of shape (number of examples, num_classes). We have thus transposed Z3 and Y for you.
- Besides, `tf.reduce_mean` takes the average of the per-example losses, which provides the $\frac{1}{m}\sum$ over the examples.
```
# GRADED FUNCTION: compute_cost
def compute_cost(Z3, Y):
"""
Computes the cost
Arguments:
Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples)
Y -- "true" labels vector placeholder, same shape as Z3
Returns:
cost - Tensor of the cost function
"""
# to fit the tensorflow requirement for tf.nn.softmax_cross_entropy_with_logits(...,...)
logits = tf.transpose(Z3)
labels = tf.transpose(Y)
### START CODE HERE ### (1 line of code)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = labels))
### END CODE HERE ###
return cost
tf.reset_default_graph()
with tf.Session() as sess:
X, Y = create_placeholders(12288, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
cost = compute_cost(Z3, Y)
print("cost = " + str(cost))
```
**Expected Output**:
<table>
<tr>
<td>
**cost**
</td>
<td>
Tensor("Mean:0", shape=(), dtype=float32)
</td>
</tr>
</table>
### 2.5 - Backward propagation & parameter updates
This is where you become grateful to programming frameworks. All the backpropagation and the parameter updates are taken care of in 1 line of code. It is very easy to incorporate this line in the model.
After you compute the cost function, you will create an "`optimizer`" object. You have to call this object along with the cost when running the tf.session. When called, it will perform an optimization on the given cost with the chosen method and learning rate.
For instance, for gradient descent the optimizer would be:
```python
optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost)
```
To make the optimization you would do:
```python
_ , c = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
```
This computes the backpropagation by passing through the tensorflow graph in the reverse order. From cost to inputs.
**Note** When coding, we often use `_` as a "throwaway" variable to store values that we won't need to use later. Here, `_` takes on the evaluated value of `optimizer`, which we don't need (and `c` takes the value of the `cost` variable).
### 2.6 - Building the model
Now, you will bring it all together!
**Exercise:** Implement the model. You will be calling the functions you had previously implemented.
```
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001,
num_epochs = 1500, minibatch_size = 32, print_cost = True):
"""
Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.
Arguments:
X_train -- training set, of shape (input size = 12288, number of training examples = 1080)
Y_train -- training labels, of shape (output size = 6, number of training examples = 1080)
X_test -- test set, of shape (input size = 12288, number of test examples = 120)
Y_test -- test set, of shape (output size = 6, number of test examples = 120)
learning_rate -- learning rate of the optimization
num_epochs -- number of epochs of the optimization loop
minibatch_size -- size of a minibatch
print_cost -- True to print the cost every 100 epochs
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables
tf.set_random_seed(1) # to keep consistent results
seed = 3 # to keep consistent results
(n_x, m) = X_train.shape # (n_x: input size, m : number of examples in the train set)
n_y = Y_train.shape[0] # n_y : output size
costs = [] # To keep track of the cost
# Create Placeholders of shape (n_x, n_y)
### START CODE HERE ### (1 line)
X, Y = create_placeholders(n_x, n_y)
### END CODE HERE ###
# Initialize parameters
### START CODE HERE ### (1 line)
parameters = initialize_parameters()
### END CODE HERE ###
# Forward propagation: Build the forward propagation in the tensorflow graph
### START CODE HERE ### (1 line)
Z3 = forward_propagation(X, parameters)
### END CODE HERE ###
# Cost function: Add cost function to tensorflow graph
### START CODE HERE ### (1 line)
cost = compute_cost(Z3, Y)
### END CODE HERE ###
# Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.
### START CODE HERE ### (1 line)
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
### END CODE HERE ###
# Initialize all the variables
init = tf.global_variables_initializer()
# Start the session to compute the tensorflow graph
with tf.Session() as sess:
# Run the initialization
sess.run(init)
# Do the training loop
for epoch in range(num_epochs):
epoch_cost = 0. # Defines a cost related to an epoch
num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
seed = seed + 1
minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
# IMPORTANT: The line that runs the graph on a minibatch.
# Run the session to execute the "optimizer" and the "cost", the feed_dict should contain a minibatch for (X,Y).
### START CODE HERE ### (1 line)
_ , minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
### END CODE HERE ###
epoch_cost += minibatch_cost / num_minibatches
# Print the cost every 100 epochs
if print_cost == True and epoch % 100 == 0:
print ("Cost after epoch %i: %f" % (epoch, epoch_cost))
if print_cost == True and epoch % 5 == 0:
costs.append(epoch_cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
# lets save the parameters in a variable
parameters = sess.run(parameters)
print ("Parameters have been trained!")
# Calculate the correct predictions
correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))
# Calculate accuracy on the test set
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print ("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
print ("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))
return parameters
```
Run the following cell to train your model! On our machine it takes about 5 minutes. Your "Cost after epoch 100" should be 1.016458. If it's not, don't waste time; interrupt the training by clicking on the square (⬛) in the upper bar of the notebook, and try to correct your code. If it is the correct cost, take a break and come back in 5 minutes!
```
parameters = model(X_train, Y_train, X_test, Y_test)
```
**Expected Output**:
<table>
<tr>
<td>
**Train Accuracy**
</td>
<td>
0.999074
</td>
</tr>
<tr>
<td>
**Test Accuracy**
</td>
<td>
0.716667
</td>
</tr>
</table>
Amazing, your algorithm can recognize a sign representing a figure between 0 and 5 with 71.7% accuracy.
**Insights**:
- Your model seems big enough to fit the training set well. However, given the difference between train and test accuracy, you could try to add L2 or dropout regularization to reduce overfitting (a minimal sketch of the L2 option follows this list).
- Think about the session as a block of code to train the model. Each time you run the session on a minibatch, it trains the parameters. In total you have run the session a large number of times (1500 epochs) until you obtained well trained parameters.
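As a reference, here is a minimal sketch of what the L2 option could look like in this TensorFlow 1.x setup. It assumes the `parameters` dictionary and `Z3` returned by `initialize_parameters()` and `forward_propagation()` above; `lambd` is a hypothetical regularization strength, not part of the original assignment.
```
# Minimal L2 sketch (assumption: the weights are stored as W1, W2, W3 in "parameters")
lambd = 0.01   # hypothetical regularization strength
l2_penalty = lambd * (tf.nn.l2_loss(parameters["W1"])
                      + tf.nn.l2_loss(parameters["W2"])
                      + tf.nn.l2_loss(parameters["W3"]))
# add the penalty to the cross-entropy cost before passing it to the optimizer
cost = compute_cost(Z3, Y) + l2_penalty
```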
### 2.7 - Test with your own image (optional / ungraded exercise)
Congratulations on finishing this assignment. You can now take a picture of your hand and see the output of your model. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Write your image's name in the following code
4. Run the code and check if the algorithm is right!
```
import scipy
from PIL import Image
from scipy import ndimage
## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "thumbs_up.jpg"
## END CODE HERE ##
# We preprocess your image to fit your algorithm.
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(64,64)).reshape((1, 64*64*3)).T
my_image_prediction = predict(my_image, parameters)
plt.imshow(image)
print("Your algorithm predicts: y = " + str(np.squeeze(my_image_prediction)))
```
You indeed deserved a "thumbs-up", although as you can see the algorithm seems to classify it incorrectly. The reason is that the training set doesn't contain any "thumbs-up" images, so the model doesn't know how to deal with them! We call that a "mismatched data distribution", and it is one of the various problems you will learn about in the next course on "Structuring Machine Learning Projects".
<font color='blue'>
**What you should remember**:
- Tensorflow is a programming framework used in deep learning
- The two main object classes in tensorflow are Tensors and Operators.
- When you code in tensorflow you have to take the following steps (a minimal end-to-end sketch of these steps follows this list):
- Create a graph containing Tensors (Variables, Placeholders ...) and Operations (tf.matmul, tf.add, ...)
- Create a session
- Initialize the session
- Run the session to execute the graph
- You can execute the graph multiple times as you've seen in model()
- The backpropagation and optimization is automatically done when running the session on the "optimizer" object.
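As a quick illustration of those steps, here is a minimal, self-contained sketch (assuming TensorFlow 1.x, the version used in this notebook): build a graph, create a session, initialize the variables, then run the graph.
```
import tensorflow as tf

# 1. Create a graph: a placeholder, a variable and an operation c = a * b + 1
a = tf.placeholder(tf.float32, name="a")
b = tf.Variable(3.0, name="b")
c = tf.add(tf.multiply(a, b), 1.0)

init = tf.global_variables_initializer()
with tf.Session() as sess:                  # 2. Create a session
    sess.run(init)                          # 3. Initialize the variables
    print(sess.run(c, feed_dict={a: 2.0}))  # 4. Run the graph (prints 7.0)
```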
|
github_jupyter
|
# Project Description
Another CV2 tutorial, this one from https://pythonprogramming.net/loading-images-python-opencv-tutorial/
```
#http://tsaith.github.io/record-video-with-python-3-opencv-3-on-osx.html
import numpy as np
import cv2
cap = cv2.VideoCapture(0) # Capture video from camera
# Get the width and height of frame
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) + 0.5)
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) + 0.5)
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'mp4v') # Be sure to use the lower case
out = cv2.VideoWriter('output.mp4', fourcc, 20.0, (width, height))
while(cap.isOpened()):
ret, frame = cap.read()
if ret == True:
frame = cv2.flip(frame,0)
# write the flipped frame
out.write(frame)
cv2.imshow('frame',frame)
if (cv2.waitKey(1) & 0xFF) == ord('q'): # Hit `q` to exit
break
else:
break
# Release everything if job is finished
out.release()
cap.release()
cv2.destroyAllWindows()
```
# Writing stuff on an image
```
import numpy as np
import cv2
img = cv2.imread('watch.jpg',cv2.IMREAD_COLOR)
cv2.line(img,(0,0),(200,300),(255,255,255),50)
cv2.rectangle(img,(500,250),(1000,500),(0,0,255),15)
cv2.circle(img,(447,63), 63, (0,255,0), -1)
pts = np.array([[100,50],[200,300],[700,200],[500,100]], np.int32)
pts = pts.reshape((-1,1,2))
cv2.polylines(img, [pts], True, (0,255,255), 3)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,'OpenCV Tuts!',(0,130), font, 1, (200,255,155), 2, cv2.LINE_AA)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,'OpenCV Tuts!',(10,500), font, 6, (200,255,155), 13, cv2.LINE_AA)
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
while(1):
# Take each frame
_, frame = cap.read()
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
lower_red = np.array([30,150,50])
upper_red = np.array([255,255,180])
mask = cv2.inRange(hsv, lower_red, upper_red)
res = cv2.bitwise_and(frame,frame, mask= mask)
laplacian = cv2.Laplacian(frame,cv2.CV_64F)
sobelx = cv2.Sobel(frame,cv2.CV_64F,1,0,ksize=5)
sobely = cv2.Sobel(frame,cv2.CV_64F,0,1,ksize=5)
cv2.imshow('Original',frame)
cv2.imshow('Mask',mask)
cv2.imshow('laplacian',laplacian)
cv2.imshow('sobelx',sobelx)
cv2.imshow('sobely',sobely)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
cap.release()
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
while(1):
_, frame = cap.read()
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
lower_red = np.array([30,150,50])
upper_red = np.array([255,255,180])
mask = cv2.inRange(hsv, lower_red, upper_red)
res = cv2.bitwise_and(frame,frame, mask= mask)
cv2.imshow('Original',frame)
edges = cv2.Canny(frame,100,200)
cv2.imshow('Edges',edges)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
cap.release()
import cv2
import numpy as np
img_rgb = cv2.imread('opencv-template-matching-python-tutorial.jpg')
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread('opencv-template-for-matching.jpg',0)
w, h = template.shape[::-1]
res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)
threshold = 0.8
loc = np.where( res >= threshold)
for pt in zip(*loc[::-1]):
cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,255,255), 2)
cv2.imshow('Detected',img_rgb)
import cv2
# Opens the Video file
cap= cv2.VideoCapture('IMG_2128.MOV')
i=0
while(cap.isOpened()):
ret, frame = cap.read()
if ret == False:
break
cv2.imwrite('kang'+str(i)+'.jpg',frame)
i+=1
cap.release()
cv2.destroyAllWindows()
import cv2
# Opens the Video file
cap= cv2.VideoCapture('IMG_2128.MOV')
i=1
while(cap.isOpened()):
ret, frame = cap.read()
if ret == False:
break
if i%10 == 0:
cv2.imwrite('kang'+str(i)+'.jpg',frame)
i+=1
cap.release()
cv2.destroyAllWindows()
```
|
github_jupyter
|
```
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
# import data from the github page of the book
data = pd.read_csv('https://raw.githubusercontent.com/Develop-Packt/Exploring-Absenteeism-at-Work/master/data/Absenteeism_at_work.csv', sep=";")
# print dimensionality of the data, columns, types and missing values
print(f"Data dimension: {data.shape}")
for col in data.columns:
print(f"Column: {col:35} | type: {str(data[col].dtype):7} | missing values: {data[col].isna().sum():3d}")
# compute statistics on numerical features
data.describe().T
# define encoding dictionaries
month_encoding = {1: "January", 2: "February", 3: "March", 4: "April",
5: "May", 6: "June", 7: "July", 8: "August",
9: "September", 10: "October", 11: "November", 12: "December", 0: "Unknown"}
dow_encoding = {2: "Monday", 3: "Tuesday", 4: "Wednesday", 5: "Thursday", 6: "Friday"}
season_encoding = {1: "Spring", 2: "Summer", 3: "Fall", 4: "Winter"}
education_encoding = {1: "high_school", 2: "graduate", 3: "postgraduate", 4: "master_phd"}
yes_no_encoding = {0: "No", 1: "Yes"}
# backtransform numerical variables to categorical
preprocessed_data = data.copy()
preprocessed_data["Month of absence"] = preprocessed_data["Month of absence"]\
.apply(lambda x: month_encoding[x])
preprocessed_data["Day of the week"] = preprocessed_data["Day of the week"]\
.apply(lambda x: dow_encoding[x])
preprocessed_data["Seasons"] = preprocessed_data["Seasons"]\
.apply(lambda x: season_encoding[x])
preprocessed_data["Education"] = preprocessed_data["Education"]\
.apply(lambda x: education_encoding[x])
preprocessed_data["Disciplinary failure"] = preprocessed_data["Disciplinary failure"]\
.apply(lambda x: yes_no_encoding[x])
preprocessed_data["Social drinker"] = preprocessed_data["Social drinker"]\
.apply(lambda x: yes_no_encoding[x])
preprocessed_data["Social smoker"] = preprocessed_data["Social smoker"]\
.apply(lambda x: yes_no_encoding[x])
# transform columns
preprocessed_data.head().T
```
**Exercise 01: Identifying Disease Reasons for Absence**
```
# define function, which checks if the provided integer value
# is contained in the ICD or not
def in_icd(val):
return "Yes" if val >= 1 and val <= 21 else "No"
# add Disease column
preprocessed_data["Disease"] = preprocessed_data["Reason for absence"]\
.apply(in_icd)
# plot value counts
plt.figure(figsize=(10, 8))
sns.countplot(data=preprocessed_data, x='Disease')
plt.savefig('figs/disease_plot.png', format='png', dpi=300)
```
|
github_jupyter
|
## 8.2 Creating Hyperlinks
A hyperlink links content: it can point from one piece of text to other content in the same document, or to other files, web addresses, and so on. Hyperlinks can be divided into three kinds: links within a document, links to web pages, and links to local files. LaTeX provides the `hyperref` package for generating hyperlinks. To use it, simply declare the package in the preamble, i.e. `\usepackage{hyperref}`.
### 8.2.1 Types of Hyperlinks
#### Links within a document
In a long document, looking up content can be tedious, so hyperlinks are often placed in the table of contents for quick and efficient navigation through the text. The `hyperref` package can be used to create such in-document hyperlinks.
[**Example 8-4**] Use `\usepackage{hyperref}` to create a simple example in which the table of contents links to the document content.
```tex
\documentclass{book}
\usepackage{blindtext}
\usepackage{hyperref} % hyperref package for hyperlinks
\begin{document}
\frontmatter
\tableofcontents
\clearpage
\addcontentsline{toc}{chapter}{Foreword}
{\huge {\bf Foreword}}
This is foreword.
\clearpage
\mainmatter
\chapter{First Chapter}
This is chapter 1.
\clearpage
\section{First section} \label{second}
This is section 1.1.
\end{document}
```
The compiled document is shown in Figure 8.2.1.
<p align="center">
<table>
<tr>
<td><img align="middle" src="graphics/example8_2_1_1.png" width="300"></td>
<td><img align="middle" src="graphics/example8_2_1_2.png" width="300"></td>
<td><img align="middle" src="graphics/example8_2_1_3.png" width="300"></td>
<td><img align="middle" src="graphics/example8_2_1_4.png" width="300"></td>
</tr>
</table>
</p>
<center><b>Figure 8.2.1</b> The compiled document</center>
Care must be taken when loading `hyperref`: as a general rule, it must be the last package to be imported.
#### URL links
As is well known, inserting text such as web addresses into a document also requires hyperlinks, and the `hyperref` package can likewise create web links. Sometimes we want to give a hyperlink a name and hide the address; in that case we can insert it with the `href` command. At other times the URL we insert is too long, and since LaTeX does not break it automatically the layout can become messy; this can be solved with the `url` package by declaring a single option, namely `\usepackage[hyphens]{url}`.
> See [Line breaks in URLs](https://latex.org/forum/viewtopic.php?f=44&t=4022).
[**Example 8-5**] Use the `hyperref` and `url` packages in LaTeX to insert a web link and enable automatic line breaking.
```tex
\documentclass[12pt]{article}
\usepackage[hyphens]{url}
\usepackage{hyperref}
\begin{document}
This is the website of open-source latex-cookbook repository: \href{https://github.com/xinychen/latex-cookbook}{LaTeX-cookbook} or go to the next url: \url{https://github.com/xinychen/latex-cookbook}.
\end{document}
```
The compiled document is shown in Figure 8.2.2.
<p align="center">
<table>
<tr>
<td><img align="middle" src="graphics/example8_2_2.png" width="300"></td>
</tr>
</table>
</p>
<center><b>Figure 8.2.2</b> The compiled document</center>
#### Local file links
Sometimes text needs to be linked to a local file; the `href` command can also be used to open local files.
[**Example 8-6**] Use the `href` command in LaTeX to open a local file.
```tex
\documentclass[12pt]{article}
\usepackage[hyphens]{url}
\usepackage{hyperref}
\begin{document}
This is the text of open-source latex-cookbook repository: \href{run:./LaTeX-cookbook.dox}{LaTeX-cookbook}.
\end{document}
```
The compiled document is shown in Figure 8.2.3.
<p align="center">
<table>
<tr>
<td><img align="middle" src="graphics/example8_2_3.png" width="300"></td>
</tr>
</table>
</p>
<center><b>Figure 8.2.3</b> The compiled document</center>
### 8.2.2 Hyperlink formatting
Of course, to make hyperlinks stand out, specific colors can also be set through the `hyperref` package. The command for this is `\hypersetup`, which is normally placed in the preamble, for example `colorlinks = true, linkcolor=blue, urlcolor = blue, filecolor=magenta`. By default links are printed in a single-color, monospaced style; the `\urlstyle{same}` command changes this and displays links in the same style as the rest of the text.
> See [Website address](https://latex.org/forum/viewtopic.php?f=44&t=5115).
[**Example 8-7**] Use the `hyperref` package in LaTeX to insert hyperlinks and set the hyperlink color to blue.
```tex
\documentclass{book}
\usepackage{blindtext}
\usepackage{hyperref} % hyperref package for hyperlinks
\hypersetup{colorlinks = true, % links will be colored; the default color is red
linkcolor=blue, % internal links shown in blue
urlcolor = cyan, % URL links shown in cyan
filecolor=magenta} % local file links shown in magenta
\urlstyle{same}
\begin{document}
\frontmatter
\tableofcontents
\clearpage
\addcontentsline{toc}{chapter}{Foreword}
{\huge {\bf Foreword}}
This is foreword.
\clearpage
\mainmatter
\chapter{First Chapter}
This is chapter 1.
\clearpage
\section{First section} \label{second}
This is section 1.1.
This is the website of open-source latex-cookbook repository: \href{https://github.com/xinychen/latex-cookbook}{LaTeX-cookbook} or go to the next url: \url{https://github.com/xinychen/latex-cookbook}.
This is the text of open-source latex-cookbook repository: \href{run:./LaTeX-cookbook.dox}{LaTeX-cookbook}
\end{document}
```
The compiled document is shown in Figure 8.2.4.
<p align="center">
<table>
<tr>
<td><img align="middle" src="graphics/example8_2_4_1.png" width="300"></td>
<td><img align="middle" src="graphics/example8_2_4_2.png" width="300"></td>
<td><img align="middle" src="graphics/example8_2_4_3.png" width="300"></td>
<td><img align="middle" src="graphics/example8_2_4_4.png" width="300"></td>
</tr>
</table>
</p>
<center><b>Figure 8.2.4</b> The compiled document</center>
[Back] [**8.1 Indexing figures, tables, and equations**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-8/section1.ipynb)
[Next] [**8.3 Using BibTeX**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-8/section3.ipynb)
### License
<div class="alert alert-block alert-danger">
<b>This work is released under the MIT license.</b>
</div>
|
github_jupyter
|
```
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn.preprocessing import normalize
import seaborn as sns
# list of models
# A few models are commented out because they produced very large values that interfere with the visualization
models = [
# 'RandomForestRegressor',
# 'AdaBoostRegressor',
# 'BaggingRegressor',
# 'DecisionTreeRegressor',
'DummyRegressor',
'ExtraTreeRegressor',
#'ExtraTreesRegressor',
#'GaussianProcessRegressor',
#'GradientBoostingRegressor',
#'HuberRegressor',
'KNeighborsRegressor',
#'MLPRegressor',
#'PassiveAggressiveRegressor',
#'RANSACRegressor',
#'SGDRegressor',
#'TheilSenRegressor'
]
buildingtypes = ['Office', 'PrimClass', 'UnivClass', 'UnivDorm', 'UnivLab']
# Generate different line styles
# 15 different line style combinations will be generated
lineStyles = ['-', '--', '-.', ':']
lineColors = ['b', 'g', 'r', 'c', 'm', 'y']
styles = []
for j in range(3):
for i in range(5):
styles.append(lineColors[i] + lineStyles[(i + j) % 4])
def visualize(arg):
for buildingtype in buildingtypes:
# Draw lines on single plot
plt.style.use('seaborn-whitegrid')
plt.figure(figsize=(15,3))
for i in range(len(models)):
dataframes = []
data = pd.read_csv('../results/' + models[i] + '_metrics_' + buildingtype + '.csv')
data = data.drop(columns=['Unnamed: 0'])
data['buidingtype'] = buildingtype
dataframes.append(data)
result = pd.concat(dataframes)
rows = result[result['buidingtype']==buildingtype]['MAPE']
# Single line creator
value, = plt.plot(rows, styles[i], label=models[i])
# Draw plot
plt.title(buildingtype, loc='left')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
plt.ylabel(arg)
plt.xlabel('Buildings')
plt.show()
visualize('MAPE')
```
# Box plot array visualization
Based on this: https://stackoverflow.com/questions/41384040/subplot-for-seaborn-boxplot
```
f, axes = plt.subplots(5, 3, figsize=(11,11), sharex='col')
plt.style.use('seaborn-whitegrid')
for buildingtype in buildingtypes:
# Draw lines on single plot
MAPE = {}
NMBE = {}
CVRSME = {}
for i in range(len(models)):
dataframes = []
data = pd.read_csv('../results/' + models[i] + '_metrics_' + buildingtype + '.csv')
data = data.drop(columns=['Unnamed: 0'])
data['buidingtype'] = buildingtype
dataframes.append(data)
result = pd.concat(dataframes)
MAPE[models[i]] = result[result['buidingtype']==buildingtype]['MAPE']
NMBE[models[i]] = result[result['buidingtype']==buildingtype]['NMBE']
CVRSME[models[i]] = result[result['buidingtype']==buildingtype]['CVRSME']
MAPE_df = pd.DataFrame(MAPE)
MAPE_df = MAPE_df[MAPE_df<100].melt()
ax1 = sns.boxplot(data=MAPE_df, x='value', y='variable', ax=axes[buildingtypes.index(buildingtype),0])
ax1.set(ylabel=buildingtype, xlabel="MAPE")
NMBE_df = pd.DataFrame(NMBE)
NMBE_df = NMBE_df.melt() #[NMBE_df<100]
ax2 = sns.boxplot(data=NMBE_df, x='value', y='variable', ax=axes[buildingtypes.index(buildingtype),1])
ax2.set(ylabel="", xlabel="NMBE", yticks=[])
CVRSME_df = pd.DataFrame(CVRSME)
CVRSME_df = CVRSME_df.melt() #[NMBE_df<100]
ax3 = sns.boxplot(data=CVRSME_df, x='value', y='variable', ax=axes[buildingtypes.index(buildingtype),2])
ax3.set(ylabel="", xlabel="CVRSME", yticks=[])
# sns.boxplot(y="b", x= "a", data=rows, orient='v' ) #, ax=axes[0]
# print(rows)
# Single line creator
# value, = plt.plot(rows, styles[i], label=models[i])
# sns.boxplot(y="b", x= "a", data=df, orient='v' , ax=axes[0])
# sns.boxplot(y="c", x= "a", data=df, orient='v' , ax=axes[1])
```
|
github_jupyter
|
```
suppressMessages(library("mc2d"))
library("scales")
library("ggplot2")
library("gridExtra")
```
# Risk Study for REPLACE ME
See the [ISO 27005 Risk Cookbook](http://www.businessofsecurity.com/docs/FAIR%20-%20ISO_IEC_27005%20Cookbook.pdf)
for a more detailed explanation of this template.
# Asset
Define the asset or assets at risk
# Threat Community
Explain the threat community. This should include where they operate, how effective they are, and any additional details that help understand them.
## Threat Capability
Define the ability for the threat agent to overcome the controls. The guideline for values here are as follows:
|Rating |Value |
|----------------------|------|
|Very High (Top 2%) |98-100|
|High (Top 16%) |84-97 |
|Moderate |17-84 |
|Low (Bottom 16%) |3-16 |
|Very Low (Bottom 2%) |0-2 |
```
tcap.min <- 0
tcap.likely <- 50
tcap.max <- 100
tcap.confidence <- 10
```
# Controls
Define the controls that resist the threat community. Provide any necessary links and descriptions.
## Control Strength
Define the ability of the controls in play to overcome the threat agents.
|Rating |Value |
|----------------------|------|
|Very High (Top 2%) |98-100|
|High (Top 16%) |84-97 |
|Moderate |17-84 |
|Low (Bottom 16%) |3-16 |
|Very Low (Bottom 2%) |0-2 |
```
cs.min <- 0
cs.likely <- 50
cs.max <- 100
cs.confidence <- 10
```
# Threat Event Frequency
Threat Event Frequency. Number assumes an annual value. Example values
are as follows:
|Rating |Value |
|---------|------|
|Very High|> 100 |
|High |10-100|
|Moderate |1-10 |
|Low |> .1 |
|Very Low |< .1 |
```
tef <- .25
```
# Loss Magnitude
Define the types of loss that could occur during a loss event for this study.
|Primary |ISO/IEC 27005 Direct Operational Impacts |
|:--------------|:-------------------------------------------------------------------------|
|Productivity |The financial replacement value of lost (part of) asset |
|Response |The cost of acquisition, configuration, and installation of the new asset |
|Replacement |The cost of suspended operations due to the incident |
| |Impact results in an information security breach |
|Secondary |ISO/IEC 27005 Indirect Operational Impacts |
|:-----------------------|:------------------------------------------------------------------------|
|Competitive Advantage |Opportunity cost |
|Fines/Judgments |Legal or regulatory actions levied against an organization including bail|
|Reputation |Potential misuse of information obtained through a security breach |
| |Violation of statutory or regulatory obligations |
| |Violation of ethical codes of conduct |
## Probable Loss
Set the probable amount for a single loss event. This is a combination in dollars of both the primary and secondary loss factors.
```
loss.probable <- 100000
```
## Worst Case Loss
Set the worst case amount for a single loss event. This is a combination in dollars of both the primary and secondary loss factors.
```
loss.worstCase <- 1000000
```
# Qualified risk based on loss tolerance
```
loss.veryHigh <- 10000000
loss.high <- 1000000
loss.moderate <- 100000
loss.low <- 50000
loss.veryLow <- 10000
```
# Generate distribution of samples
```
sampleSize <- 100000
cs <- rpert(sampleSize, cs.min, cs.likely, cs.max, cs.confidence)
tcap <- rpert(sampleSize, tcap.min, tcap.likely, tcap.max, tcap.confidence)
csPlot <- ggplot(data.frame(cs), aes(x = cs))
csPlot <- csPlot + geom_histogram(aes(y = ..density..), color="black",fill="white", binwidth=1)
csPlot <- csPlot + geom_density(fill="steelblue",alpha=2/3)
csPlot <- csPlot + theme_bw()
csPlot <- csPlot + labs(title="Control Strength", x="Sample Value", y="Density")
csPlot <- csPlot + scale_x_continuous(breaks=seq(0,100, by=10))
tcapPlot <- ggplot(data.frame(tcap), aes(x = tcap))
tcapPlot <- tcapPlot + geom_histogram(aes(y = ..density..), color="black",fill="white", binwidth=1)
tcapPlot <- tcapPlot + geom_density(fill="steelblue",alpha=2/3)
tcapPlot <- tcapPlot + theme_bw()
tcapPlot <- tcapPlot + labs(title="Threat Capability", x="Sample Value", y="Density")
tcapPlot <- tcapPlot + scale_x_continuous(breaks=seq(0,100, by=10))
grid.arrange(csPlot, tcapPlot, heights=4:5, ncol=2)
```
# Vulnerability Function
```
CalculateVulnerability <- function() {
if (sampleSize < 100) {
stop("Sample size needs to be at least 100 to get statistically significant results")
}
vulnerability <- 0
for (i in 1:sampleSize) {
if (tcap[i] > cs[i]) {
vulnerability <- vulnerability + 1
}
}
return(vulnerability / sampleSize)
}
```
# Loss Event Frequency Function
```
CalculateLossEventFrequency <- function() {
return(CalculateVulnerability() * tef)
}
```
# Risk Function
```
CalculateRisk <- function(loss) {
if (loss >= loss.veryHigh) {
return("Very High")
} else if (loss < loss.veryHigh && loss >= loss.high) {
return("High")
} else if (loss < loss.high && loss >= loss.moderate) {
return("Moderate")
} else if (loss < loss.moderate && loss >= loss.veryLow) {
return("Low")
} else {
return("Very Low")
}
}
```
# Annualized Loss Function
```
CalculateAnnualizedLoss <- function(lef, lm) {
return(lm * lef)
}
```
# Calculate
```
lossEventFrequency <- CalculateLossEventFrequency()
worstCaseLoss <- CalculateAnnualizedLoss(lossEventFrequency, loss.worstCase)
probableLoss <- CalculateAnnualizedLoss(lossEventFrequency, loss.probable)
worstCaseRisk <- CalculateRisk(worstCaseLoss)
probableRisk <- CalculateRisk(probableLoss)
```
# Final Results
```
cat("Probable Risk:", probableRisk, dollar_format()(probableLoss), "\n")
cat("Worst Case Risk:", worstCaseRisk, dollar_format()(worstCaseLoss), "\n")
```
# Risk Treatments
Document any risk treatments that may come out of this study.
|
github_jupyter
|
```
from planaritychecker import PlanarityChecker
from numpy.random import random, randint
import networkx as nx
from planarity.planarity_networkx import planarity
%matplotlib inline
```
# Check $K_5$ and $K_{3,3}$ without one edge
```
almost_K5 = PlanarityChecker(5)
graph_almost_K5 = nx.Graph()
graph_almost_K5.add_nodes_from(range(5))
for i in range(5):
for j in range(i + 1, 5):
if (i != 0 or j != 1):
almost_K5.add_edge(i, j)
graph_almost_K5.add_edge(i, j)
nx.draw(graph_almost_K5)
print("almost K5. number of edges: %d, is planar: %d" % (almost_K5.edges_count, almost_K5.is_planar()))
almost_K33 = PlanarityChecker(6)
graph_almost_K33 = nx.Graph()
graph_almost_K33.add_nodes_from(range(6))
for i in range(3):
for j in range(3, 6):
if i != 1 or j != 4:
almost_K33.add_edge(i, j)
graph_almost_K33.add_edge(i, j)
nx.draw(graph_almost_K33)
print("Almost K3,3. number of edges: %d, is planar: %d" % (almost_K33.edges_count, almost_K33.is_planar()))
```
# Check $K_5$ and $K_{3,3}$
```
K5 = almost_K5
K5.add_edge(0, 1)
graph_K5 = graph_almost_K5
graph_K5.add_edge(0, 1)
nx.draw(graph_K5)
print("K5. number of edges: %d, is planar: %d" % (K5.edges_count, K5.is_planar()))
K33 = almost_K33
K33.add_edge(1, 4)
graph_K33 = graph_almost_K33
graph_K33.add_edge(1, 4)
nx.draw(graph_K33)
print("K33. number of edges: %d, is planar: %d" % (K33.edges_count, K33.is_planar()))
```
# Stress test
# Generate a lot of graphs with probability of every edge=$p$ and check planarity with PlanarityChecker and planarity library (https://pypi.org/project/planarity/)
```
def generate_graphs(n, p):
"""Generate Graph and nx.Graph with n vertexes, where p is a probability of edge existance"""
G = PlanarityChecker(n)
nx_G = nx.Graph()
nx_G.add_nodes_from(range(n))
for i in range(n):
for j in range(i + 1, n):
if random() < p:
G.add_edge(i, j)
nx_G.add_edge(i, j)
return (G, nx_G)
n_planar, n_notplanar = 0, 0
for i in range(1000):
G, nxG = generate_graphs(100, 0.02)
if G.is_planar() != planarity.is_planar(nxG):
print("Custom: %d, Library: %d" % (G.is_planar(), planarity.is_planar(nxG)))
nx.draw(nxG)
break
else:
if (G.is_planar()):
n_planar += 1
else:
n_notplanar += 1
print(n_planar, n_notplanar)
```
# It works correctly. Check execution time
```
n = 20000
m = 40000
G = PlanarityChecker(n)
edges = set()
for i in range(m):
a = randint(0, n)
b = randint(0, n)
while (a, b) in edges or a == b:
a = randint(0, n)
b = randint(0, n)
edges.add((a, b))
for e in edges:
G.add_edge(e[0], e[1])
import sys
sys.setrecursionlimit(20000)
%%time
G.is_planar()
nx_G = nx.Graph()
nx_G.add_edges_from(edges)
%%time
planarity.is_planar(nx_G)
```
# Not bad for Python. (The planarity library has its implementation in C.)
|
github_jupyter
|
```
from __future__ import print_function
import os
from netCDF4 import Dataset
import requests
from lxml import etree
import matplotlib.pyplot as plt
from owslib.wps import WebProcessingService, ComplexDataInput
verify_ssl = True if 'DISABLE_VERIFY_SSL' not in os.environ else False
def parseStatus(execute):
o = requests.get(execute.statusLocation, verify=verify_ssl)
t = etree.fromstring(o.content)
ref = t.getchildren()[-1].getchildren()[-1].getchildren()[-1].get('{http://www.w3.org/1999/xlink}href')
return ref
# catalogue WPS url
wpsURL = 'https://pavics.ouranos.ca/twitcher/ows/proxy/catalog/pywps'
# Connection
wpsCatalogue = WebProcessingService(url=wpsURL, verify=verify_ssl)
for process in wpsCatalogue.processes:
print ('%s \t : %s \n' %(process.identifier, process.abstract))
wpsURL = 'https://pavics.ouranos.ca/twitcher/ows/proxy/flyingpigeon/wps'
wpsFP = WebProcessingService(wpsURL, verify=verify_ssl)
print(wpsFP.identification.title)
for process in wpsFP.processes:
print ('%s \t : %s \n' %(process.identifier, process.abstract))
proc_name = 'pavicsearch'
constraintString = 'variable:tasmax'
maxfiles = '1000000'
myinputs = [('constraints', constraintString),('type','File'), ('limit',maxfiles)]
execution = wpsCatalogue.execute(identifier=proc_name, inputs=myinputs)
print(execution.status)
print(execution.processOutputs[-1].reference)
proc_name = 'pavicsearch'
process = wpsCatalogue.describeprocess(proc_name) # get process info
for i in process.dataInputs:
print('inputs :', i.identifier, ' : ', i.abstract)
for i in process.processOutputs:
print('outputs :', i.identifier, ' : ', i.abstract)
proc_name = 'subset_bbox'
process = wpsFP.describeprocess(identifier=proc_name)
print(process.title,' : ',process.abstract,'\n')
for i in process.dataInputs:
print('inputs :', i.identifier, ' : ', i.abstract)
for i in process.processOutputs:
print('outputs :', i.identifier, ' : ', i.abstract)
# NBVAL_IGNORE_OUTPUT
# ignore output of this cell because different PAVICS host will have different quantity of netCDF files
ref = parseStatus(execution)
r = requests.get(ref, verify=verify_ssl)
list_nc = r.json()
print('Number of files found :',len(list_nc), '\n')
print("\n".join(list_nc[1:15]),'\n...')
nrcan_nc = [i for i in list_nc if 'nrcan' in i and ('1991' in i or '1992' in i or '1993' in i)]
# sort the filtered list
nrcan_nc.sort()
print('Number of files :', "%s\n" % len(nrcan_nc), "\n".join(nrcan_nc))
nc_test = Dataset(nrcan_nc[0])
print(nc_test)
myinputs = []
# To keep things reasonably quick : subset jan-april
for i in nrcan_nc:
myinputs.append(('resource', i))
myinputs.append(('lon0', '-80.0'))
myinputs.append(('lon1', '-70.0'))
myinputs.append(('lat0', '44.0'))
myinputs.append(('lat1', '50'))
print(myinputs)
execution = wpsFP.execute(identifier=proc_name, inputs=myinputs)
print(execution.status)
print(execution.processOutputs[-1].reference)
print(execution.statusLocation)
```
|
github_jupyter
|
# Research
## Imports
```
import pandas as pd
import pandas_datareader as dr
from pandas_datareader import data as web
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
import datetime
import mplfinance as mpl
import plotly.graph_objects as go
import plotly
import yfinance as yf
```
## Data Import
```
df = pd.read_csv('data/data2.csv', index_col='Symbol')
```
## Sorting Data
```
df
isInfoTech = df['Sector']== 'Information Technology'
print(isInfoTech.head())
df_InfoTech = df[isInfoTech]
df_InfoTech
```
## IBM INTEL NVIDIA
```
#looking at IBM,INTEL,NVIDIA,
start = datetime.datetime(2017,1,1)
end = datetime.datetime(2021,6,22)
ibm = yf.download("IBM",start, end)
intel = yf.download("INTC",start, end)
nvidia = yf.download("NVDA",start, end)
trch = yf.download("TRCH",start, end)
ibm.to_csv('IBM_STOCK.csv')
#ibm stock
intel.to_csv('INTC_STOCK.csv')
nvidia.to_csv('NVDA_STOCK.csv')
trch.to_csv('TRCH_STOCK.csv')
ibm.head()
trch.tail()
intel.head()
nvidia.head()
ibm['Open'].plot(label='IBM',figsize=(15,7))
intel['Open'].plot(label='Intel')
nvidia['Open'].plot(label='Nvidia')
plt.legend()
plt.ylabel('Stock Price')
plt.title('Stock Prices of IBM,Intel and Nvidia')
```
## Volumes
```
ibm['Volume'].plot(label='IBM',figsize=(15,7))
intel['Volume'].plot(label='Intel')
nvidia['Volume'].plot(label='Nvidia')
plt.ylabel('Volume Traded')
plt.title('Volumes of IBM, Intel and Nvidia')
plt.legend()
```
## Total Traded / ~Market Cap
```
ibm['Total Traded'] = ibm['Open'] * ibm['Volume']
intel['Total Traded'] = intel['Open'] * intel['Volume']
nvidia['Total Traded'] = nvidia['Open'] * nvidia['Volume']
ibm['Total Traded'].plot(label=('IBM'),figsize=(15,7))
intel['Total Traded'].plot(label=('Intel'))
nvidia['Total Traded'].plot(label=('Nvidia'))
plt.ylabel('Total Traded')
plt.legend()
plt.title('Total Traded for IBM, Intel, and Nvidia')
```
## 50 and 200 Day Rolling EMA
```
intel['Open'].plot(figsize=(15,7))
intel['MA50']=intel['Open'].rolling(50).mean()
intel['MA50'].plot(label='MA50')
intel['MA200']=intel['Open'].rolling(200).mean()
intel['MA200'].plot(label='MA200')
plt.legend()
plt.title('Intel Open, 50EMA, 200EMA')
ibm['Open'].plot(figsize=(15,7))
ibm['MA50']=ibm['Open'].rolling(50).mean()
ibm['MA50'].plot(label='MA50')
ibm['MA200']=ibm['Open'].rolling(200).mean()
ibm['MA200'].plot(label='MA200')
plt.legend()
plt.title('IBM Open, 50EMA, 200EMA')
nvidia['Open'].plot(figsize=(12,7))
nvidia['MA50']=nvidia['Open'].rolling(50).mean()
nvidia['MA50'].plot(label='MA50')
nvidia['MA200']=nvidia['Open'].rolling(200).mean()
nvidia['MA200'].plot(label='MA200')
plt.legend()
plt.title('Nvidia Open, 50EMA, 200EMA')
trch['Open'].plot(figsize=(10,7))
trch['MA50']=trch['Open'].rolling(50).mean()
trch['MA50'].plot(label='MA50')
trch['MA200']=trch['Open'].rolling(200).mean()
trch['MA200'].plot(label='MA200')
plt.legend()
plt.title('Torchlight Open, 50EMA, 200EMA')
```
## Time Series Analysis AutoCorrelation
```
def autocorr_daily(intel):
returns = intel.pct_change()
autocorrelation = returns['Adj Close'].autocorr()
return autocorrelation
autocorr_daily(intel)
autocorr_daily(ibm)
autocorr_daily(nvidia)
autocorr_daily(trch)
```
## Scatter Matrix Based off Open Price
```
from pandas.plotting import scatter_matrix
tech_comp = pd.concat([ibm['Open'],intel['Open'],nvidia['Open']],axis =1)
tech_comp.columns = ['IBM Open','Intel Open','Nvidia Open']
scatter_matrix(tech_comp,figsize=(8,8),hist_kwds={'bins':50})
```
## CandleStick Analysis
```
candleIntel = intel.iloc[100:160]
mpl.plot(candleIntel,type='candle',volume=True)
candleIBM = ibm.iloc[100:160]
mpl.plot(candleIBM,type='candle',volume=True)
candleNvidia = nvidia.iloc[100:160]
mpl.plot(candleNvidia,type='candle',volume=True)
```
## Monte Carlo Stock Price Predictor
```
monte_end = datetime.datetime.now()
monte_start = monte_end - datetime.timedelta(days=300)
prices = yf.download("NVDA",monte_start,monte_end)['Close']
returns = prices.pct_change()
meanReturns = returns.mean()
last_price = prices[-1]
num_sims = 100
num_days = 300
sim_df = pd.DataFrame()
for x in range(num_sims):
count = 0
daily_volatility = returns.std()
price_series = []
price = last_price * (1 + np.random.normal(0,daily_volatility))
price_series.append(price)
for y in range(num_days):
if count == 299:
break
price = price_series[count] * (1 + np.random.normal(0,daily_volatility))
price_series.append(price)
count += 1
sim_df[x] = pd.Series(price_series)
fig = plt.figure()
fig.suptitle('Monte Carlo Sim NVDA')
plt.plot(sim_df)
plt.axhline(y = last_price, color = 'lime',linestyle = '-')
plt.xlabel('Days')
plt.ylabel('Price')
plt.show()
import plotly_express as px
fig2 = px.line(sim_df)
fig2.show()
pd.set_option('display.max_columns',100)
sim_df
sim_df.drop(index=1)
price_series
```
|
github_jupyter
|
[this doc on github](https://github.com/dotnet/interactive/tree/master/samples/notebooks/fsharp/Docs)
# Object formatters
## Default formatting behaviors
When you return a value or display a value in a .NET notebook, the default formatting behavior is to try to provide some useful information about the object. If it's an array or other type implementing `IEnumerable`, that might look like this:
```
display ["hello"; "world"]
Enumerable.Range(1, 5)
```
As you can see, the same basic structure is used whether you pass the object to the `display` method or return it as the cell's value.
Similarly to the behavior for `IEnumerable` objects, you'll also see table output for dictionaries, but for each value in the dictionary, the key is provided rather than the index within the collection.
```
// Cannot simply use 'dict' here, see https://github.com/dotnet/interactive/issues/12
let d = dict [("zero", 0); ("one", 1); ("two", 2)]
System.Collections.Generic.Dictionary<string, int>(d)
```
The default formatting behavior for other types of objects is to produce a table showing their properties and the values of those properties.
```
type Person = { FirstName: string; LastName: string; Age: int }
// Evaluate a new person
{ FirstName = "Mitch"; LastName = "Buchannon"; Age = 42 }
```
When you have a collection of such objects, you can see the values listed for each item in the collection:
```
let people =
[
{ FirstName = "Mitch"; LastName = "Buchannon"; Age = 42 }
{ FirstName = "Hobie "; LastName = "Buchannon"; Age = 23 }
{ FirstName = "Summer"; LastName = "Quinn"; Age = 25 }
{ FirstName = "C.J."; LastName = "Parker"; Age = 23 }
]
people
```
Now let's try something a bit more complex. Let's look at a graph of objects.
We'll redefine the `Person` class to allow a reference to a collection of other `Person` instances.
```
type Person =
{ FirstName: string
LastName: string
Age: int
Friends: ResizeArray<Person> }
let mitch = { FirstName = "Mitch"; LastName = "Buchannon"; Age = 42; Friends = ResizeArray() }
let hobie = { FirstName = "Hobie "; LastName = "Buchannon"; Age = 23; Friends = ResizeArray() }
let summer = { FirstName = "Summer"; LastName = "Quinn"; Age = 25; Friends = ResizeArray() }
mitch.Friends.AddRange([ hobie; summer ])
hobie.Friends.AddRange([ mitch; summer ])
summer.Friends.AddRange([ mitch; hobie ])
let people = [ mitch; hobie; summer ]
display people
```
That's a bit hard to read, right?
The default formatting behaviors are thorough, but that doesn't always mean they're as useful as they might be. In order to give you more control in these kinds of cases, the object formatters can be customized from within the .NET notebook.
## Custom formatters
Let's clean up the output above by customizing the formatter for the `Person.Friends` property, which is creating a lot of noise.
The way to do this is to use the `Formatter` API. This API lets you customize the formatting for a specific type. Since `Person.Friends` is of type `ResizeArray<Person>`, we can register a custom formatter for that type to change the output. Let's just list their first names:
```
Formatter<ResizeArray<Person>>.Register(
fun people writer ->
for person in people do
writer.Write(person.FirstName)
, mimeType = "text/plain")
people
```
You might have noticed that `people` is of type `ResizeArray<Person>`, but the table output still includes columns for `LastName`, `Age`, and `Friends`. What's going on here?
Notice that the custom formatter we just registered was registered for the mime type `"text/plain"`. The top-level formatter that's used when we call `display` requests output of mime type `"text/html"` and the nested objects are formatted using `"text/plain"`. It's the nested objects, not the top-level HTML table, that's using the custom formatter here.
With that in mind, we can make it even more concise by registering a formatter for `Person`:
```
Formatter<Person>.Register(
fun person writer ->
writer.Write(person.FirstName)
, mimeType = "text/plain");
people
```
Of course, you might not want table output. To replace the default HTML table view, you can register a formatter for the `"text/html"` mime type. Let's do that, and write some HTML using PocketView.
|
github_jupyter
|
# Arrays
There are several kinds of sequences in Python. A [list](lists) is one. However, the sequence type that we will use most in the class is the array.
The `numpy` package, abbreviated `np` in programs, provides Python programmers
with convenient and powerful functions for creating and manipulating arrays.
```
# Load the numpy package, and call it "np".
import numpy as np
```
## Creating arrays
The `array` function from the Numpy package creates an array from single values, or sequences of values.
For example, remember `my_list`?
```
my_list = [1, 2, 3]
```
This is a `list`:
```
type(my_list)
```
The `array` function from Numpy can make an array from this list:
```
my_array = np.array(my_list)
my_array
```
As you can see from the display above, this is an array. We confirm it with `type`:
```
type(my_array)
```
We can also create the list and then the array in one call, like this:
```
my_array = np.array([1, 2, 3])
my_array
```
Here `[1, 2, 3]` is an *expression* that returns a list. `np.array` then operates on the returned list, to create an array.
Arrays often contain numbers, but, like lists, they can also contain strings
or other types of values. However, a single array can only contain a single
kind of data. (It usually doesn't make sense to group together unlike data
anyway.)
For example,
```
english_parts_of_speech = np.array(["noun", "pronoun", "verb", "adverb", "adjective", "conjunction", "preposition", "interjection"])
english_parts_of_speech
```
We have not seen this yet, but Python allows us to spread expressions between
round and square brackets across many lines. It knows that the expression has
not finished yet because it is waiting for the closing bracket. For example, this cell works in exactly the same way as the cell above, and may be easier to read:
```
# An expression between brackets spread across many lines.
english_parts_of_speech = np.array(
["noun",
"pronoun",
"verb",
"adverb",
"adjective",
"conjunction",
"preposition",
"interjection"]
)
english_parts_of_speech
```
Below, we collect four different temperatures into a list called `temps`.
These are the [estimated average daily high
temperatures](http://berkeleyearth.lbl.gov/regions/global-land) over all land
on Earth (in degrees Celsius) for the decades surrounding 1850, 1900, 1950,
and 2000, respectively, expressed as deviations from the average absolute high
temperature between 1951 and 1980, which was 14.48 degrees.
If you are interested, you can get more data from [this file of daily high
temperatures](http://berkeleyearth.lbl.gov/auto/Regional/TMAX/Text/global-land-TMAX-Trend.txt).
```
baseline_high = 14.48
highs = np.array([baseline_high - 0.880,
baseline_high - 0.093,
baseline_high + 0.105,
baseline_high + 0.684])
highs
```
## Calculations with arrays
Arrays can be used in arithmetic expressions to compute over their contents.
When an array is combined with a single number, that number is combined with
each element of the array. Therefore, we can convert all of these temperatures
to Fahrenheit by writing the familiar conversion formula.
```
(9/5) * highs + 32
```
<img src="https://matthew-brett.github.io/cfd2019/images/array_arithmetic.png">
As we saw for strings, arrays have *methods*, which are functions that
operate on the array values. The `mean` of a collection of numbers is its
average value: the sum divided by the length. Each pair of parentheses in the
examples below is part of a call expression; it's calling a function with no
arguments to perform a computation on the array called `highs`.
```
# The number of elements in the array
highs.size
highs.sum()
highs.mean()
```
## Functions on Arrays
Numpy provides various useful functions for operating on arrays.
For example, the `diff` function computes the difference between each adjacent
pair of elements in an array. The first element of the `diff` is the second
element minus the first.
```
np.diff(highs)
```
The [full Numpy reference](http://docs.scipy.org/doc/numpy/reference/) lists
these functions exhaustively, but only a small subset are used commonly for
data processing applications. These are grouped into different packages within
`np`. Learning this vocabulary is an important part of learning the Python
language, so refer back to this list often as you work through examples and
problems.
However, you **don't need to memorize these**. Use this as a reference.
Each of these functions takes an array as an argument and returns a single
value (a short demonstration follows the table).
| **Function** | Description |
|--------------------|----------------------------------------------------------------------|
| `np.prod` | Multiply all elements together |
| `np.sum` | Add all elements together |
| `np.all` | Test whether all elements are true values (non-zero numbers are true)|
| `np.any` | Test whether any elements are true values (non-zero numbers are true)|
| `np.count_nonzero` | Count the number of non-zero elements |
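As a quick, illustrative demonstration (reusing the `highs` array defined earlier on this page, assuming those cells have been run):
```
print(np.prod(highs))           # product of the four values
print(np.sum(highs))            # same result as highs.sum()
print(np.count_nonzero(highs))  # 4, because none of the values are zero
```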
Each of these functions takes an array as an argument and returns an array of values (a short demonstration follows the table).
| **Function** | Description |
|--------------------|----------------------------------------------------------------------|
| `np.diff` | Difference between adjacent elements |
| `np.round` | Round each number to the nearest integer (whole number) |
| `np.cumprod` | A cumulative product: for each element, multiply all elements so far |
| `np.cumsum` | A cumulative sum: for each element, add all elements so far |
| `np.exp` | Exponentiate each element |
| `np.log` | Take the natural logarithm of each element |
| `np.sqrt` | Take the square root of each element |
| `np.sort` | Sort the elements |
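And a short demonstration of a few of the element-wise functions, again on the `highs` array from earlier (assuming it is still defined):
```
print(np.round(highs))   # each temperature rounded to the nearest whole degree
print(np.cumsum(highs))  # running total of the values
print(np.sort(highs))    # the values in increasing order
```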
Each of these functions takes an array of strings and returns an array.
| **Function** | **Description** |
|---------------------|--------------------------------------------------------------|
| `np.char.lower` | Lowercase each element |
| `np.char.upper` | Uppercase each element |
| `np.char.strip` | Remove spaces at the beginning or end of each element |
| `np.char.isalpha` | Whether each element is only letters (no numbers or symbols) |
| `np.char.isnumeric` | Whether each element is only numeric (no letters) |
Each of these functions takes both an array of strings and a *search string*; each returns an array (a short demonstration follows the table).
| **Function** | **Description** |
|----------------------|----------------------------------------------------------------------------------|
| `np.char.count` | Count the number of times a search string appears among the elements of an array |
| `np.char.find` | The position within each element that a search string is found first |
| `np.char.rfind` | The position within each element that a search string is found last |
| `np.char.startswith` | Whether each element starts with the search string
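A short demonstration of the search-string functions, using the `english_parts_of_speech` array created earlier on this page (again assuming that cell has been run):
```
print(np.char.count(english_parts_of_speech, 'noun'))       # 1 for 'noun' and 'pronoun', 0 elsewhere
print(np.char.find(english_parts_of_speech, 'noun'))        # 0 for 'noun', 3 for 'pronoun', -1 where absent
print(np.char.startswith(english_parts_of_speech, 'pro'))   # True only for 'pronoun' and 'preposition'
```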
|
github_jupyter
|
# <center>Introduction on Using Python to access GeoNet's GNSS data
In this notebook we will learn how to get data from one GNSS (Global Navigation Satellite System) station. By the end of this tutorial you will have made a graph like the one below. <img src="plot.png">
## Table of contents
### 1. Introduction
### 2. Building the base FITS query
### 3. Get GNSS data
### 4. Plot data
### 5. Save data
## 1. Introduction
In this tutorial we will be learning how to use Python to access GNSS (commonly referred to as GPS) data from the continuous GNSS sites in the GeoNet and PositioNZ networks.
GeoNet has a API (Application Programming Interface) to access its GNSS data. You do not need to know anything about APIs to use this tutorial. If you would like more info see https://fits.geonet.org.nz/api-docs/.
To use this tutorial you will need to install the package pandas (https://pandas.pydata.org/).
This tutorial assumes that you have a basic knowledge of Python.
###### About GeoNet GNSS data
GeoNet uses GNSS technology to work out the precise positions of over 190 stations in and around NZ every day. These positions are used to generate a displacement timeseries for each station, so we can observe how much and how quickly each station moves. <br>
This data comes split into 3 components:
<ul>
<li> The displacement in the east-west direction where east is positive displacement. This data has a typeID of "e"
<li> The displacement in the north-south direction where north is a positive displacement. This data has a typeID of "n"
<li> The displacement in the up-down direction where up is a positive displacement. This data has a typeID of "u"</ul>
For more on data types go to http://fits.geonet.org.nz/type (for best formatting use firefox)
## 2. Building the base FITS query
###### Import packages
```
import requests
import pandas as pd
import datetime
import matplotlib.pyplot as plt
pd.plotting.register_matplotlib_converters()
```
###### Set URL and endpoint
```
base_url = "http://fits.geonet.org.nz/"
endpoint = "observation"
```
The base URL should be set as above to access the FITS database webservice containing the GeoNet GNSS data. The endpoint is set to observation to get the data itself in csv format. There are other endpoints which will return different information such as plot and site. To learn more go to https://fits.geonet.org.nz/api-docs/
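For instance (an illustrative sketch, not a step of this tutorial), a different endpoint is reached simply by appending its name to the same base URL; the `site` endpoint mentioned above returns metadata about observation sites.
```
# illustrative only: building the URL for the "site" endpoint instead of "observation"
site_url = base_url + "site"
print(site_url)   # http://fits.geonet.org.nz/site
```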
###### Combine URL and endpoint
```
url = base_url + endpoint
```
Combine the base URL and the endpoint to give the information to request the data.
## 3. Get GNSS data
In this section we will learn how to get all the GNSS observation data from a site and put it into a pandas dataframe, so we can plot and save the data
###### Set query parameters
```
parameters ={"typeID": "e", "siteID": "HANM"}
```
Set the parameters to get the east component(`'typeID':'e'`) of the GNSS station in the Hanmer Basin (`'siteID': 'HANM'`). To find the 4 letter site ID of a station you can use https://www.geonet.org.nz/data/network/sensor/search to find stations in an area of interest
##### Get GNSS data
```
response_e = requests.get(url, params=parameters)
```
We use `requests.get` to get the data using the URL we made earlier and the parameters we set in the last stage
```
parameters["typeID"] = "n"
response_n = requests.get(url, params=parameters)
parameters["typeID"] = "u"
response_u = requests.get(url, params=parameters)
```
Here we've changed the typeID in the parameters dictionary to get the other components for the GNSS station
###### Check that your requests worked
```
print ("The Response status code of the east channel is", response_e.status_code)
print ("The Response status code of the north channel is",response_n.status_code)
print ("The Response status code of the up channel is",response_u.status_code)
```
The response status code says whether we were successful in getting the requested data and, if we were unsuccessful, why:
<ul>
<li>200 -- everything went okay, and the result has been returned (if any)
<li>301 -- the server is redirecting you to a different endpoint. This can happen when a company switches domain names, or an endpoint name is changed.
<li>400 -- the server thinks you made a bad request. This can happen when you don't send along the right data, among other things.
<li>404 -- the resource you tried to access wasn't found on the server.
</ul>
Now that we know our request for data was successful we want to transform it into a format that we can deal with in Python. Right now the data is one long string
###### Split the string of data
```
data_e = response_e.content.decode("utf-8").split("\n")
```
The above code decodes the response and then splits the east displacement data on the new line symbol, as each line is one point of data. If you are using Python 2, remove the code `.decode("utf-8")`
###### Split the points of data
```
for i in range(0, len(data_e)):
data_e[i]= data_e[i].split(",")
```
The above code uses a for loop to split each point of data on the "," symbol as each value is separated by a ",", producing a list of lists
###### Reformat data values
```
for i in range(1, (len(data_e)-1)):
data_e[i][0] = datetime.datetime.strptime(data_e[i][0], '%Y-%m-%dT%H:%M:%S.%fZ') #make 1st value into a datetime object
data_e[i][1] = float(data_e[i][1]) #makes 2nd value into a decimal number
data_e[i][2] = float(data_e[i][2]) #makes 3rd value into a decimal number
```
The above code uses a `for` loop to go over each point of data and reformat it, so that the first value in each point is seen as a time, and the second and third values are seen as numbers.<br>
Note that we choose to skip the first and last data points in our loop, as the first data point has the names of the data values and the last point is empty due to how we split the data.
###### Convert nested list into dataframe object
```
df_e = pd.DataFrame(data_e[1:-1],index = range(1, (len(data_e)-1)), columns=data_e[0])
```
`data_e[1:-1]` makes the list of data be the data in the data frame, `index = range(1, (len(data_e)-1))` makes rows named 1, 2, ... n where n is the number of data points, and `columns=data_e[0]` gives the columns the names that where in the first line of the response string
###### Print the first few lines of the data frame
```
df_e.head()
```
Here we can see on the 4th of June 2014 how much the site HANM had moved east (with formal error) in mm from its reference position, this being the midpoint of the position timeseries.
###### Make everything we have just done into a function
```
def GNSS_dataframe(data):
"""
This function turns the string of GNSS data received by requests.get
into a data frame with GNSS data correctly formatted.
"""
data = data.split("\n") # splits data on the new line symbol
for i in range(0, len(data)):
data[i]= data[i].split(",")# splits data ponits on the , symbol
for i in range(1, (len(data)-1)):
data[i][0] = datetime.datetime.strptime(data[i][0], '%Y-%m-%dT%H:%M:%S.%fZ') #make 1st value into a datetime object
data[i][1] = float(data[i][1]) #makes 2nd value into a decimal number
data[i][2] = float(data[i][2]) #makes 3rd value into a decimal number
df = pd.DataFrame(data[1:-1],index = range(1, (len(data)-1)), columns=data[0]) #make the list into a data frame
return df
df_e.head()
```
This makes code cells 8 to 11 into a function to be called later in the notebook.
###### Run the above function on the North and Up data
```
df_n = GNSS_dataframe(response_n.content.decode("utf-8"))
df_u = GNSS_dataframe(response_u.content.decode("utf-8"))
```
Make sure to run this function on the content string of the requested data. If you are using Python 2, remove the code `.decode("utf-8")`
##### Why make the data into a data frame?
A data frame is a way of formatting data into a table with column and row names, much like a csv file, and it makes long lists of data a lot easier to use.
Data in a data frame can be called by column or row name, making it easy to get the point(s) of data you want.
Data, much like in a table, can be "linked", so that you can do things like plot a data point on a 2D plot.
Sadly, data frames are not a built-in data format in Python, so we must use the pandas (https://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe) package to be able to make a data frame.
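As a small illustration of calling data by name (not one of the original tutorial steps), using the `df_e` data frame built above:
```
print(df_e[' e (mm)'].head())      # one column selected by its name (note the leading space)
print(df_e.loc[1:3, 'date-time'])  # rows labelled 1 to 3 of the date-time column
```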
## 4. Plot data
###### Plot the east data
```
e_plot = df_e.plot(x='date-time', y= ' e (mm)', marker='o', title = 'Relative east displacement for HANM')
#plt.savefig("e_plot")
```
The above code plots time on the x axis and the displacement in millimetres on the y axis. `marker = ‘o’` makes each point of data a small circle. If you want to save the plot as a png file in the folder you are running this code from you can uncomment ` plt.savefig("e_plot")`
###### Plot the north data
```
n_plot = df_n.plot(x='date-time', y= ' n (mm)', marker='o', title = 'Relative north displacement for HANM')
#plt.savefig("n_plot")
```
###### Plot the up data
```
u_plot = df_u.plot(x='date-time', y= ' u (mm)', marker='o', title='Relative up displacement for HANM')
#plt.savefig("u_plot")
```
## 5. Save data
##### Make a copy of the east data frame
```
df = df_e.copy()
```
This makes a copy of the data frame holding the east displacement data. Because `.copy()` returns a new object, editing `df` does not affect `df_e`.
###### Remove the error column from this copy of the data
```
df = df.drop(" error (mm)",axis=1)
```
The above code removes the column called error (mm) and all its data from `df`. ` axis=1` says that we are looking for a column. If we put ` axis=0` we would be looking for a row.
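For comparison, an illustrative (hypothetical) example of the row case:
```
# axis=0 looks for a row label instead; this would drop the row labelled 1
df.drop(1, axis=0).head()
```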
###### Add the up and north data to this data frame (but not the respective errors)
```
df["u (mm)"] = df_u[' u (mm)']
df["n (mm)"] = df_n[' n (mm)']
```
###### Print the first few lines of the data frame
```
df.head()
```
Here we can see the layout of the data frame with the columns date, east displacement, up displacement and north displacement
###### Save as CSV file
```
df.to_csv("HANM.csv")
```
This saves the data frame as a csv file with the same formatting as the data frame. It will be saved in the same place this notebook is run from and be named HANM.csv
## Useful links
<ul>
<li>This notebook uses Python https://www.python.org/
<li>This notebook also uses pandas https://pandas.pydata.org/
<li>There is a notebook on this data set in R at https://github.com/GeoNet/data-tutorials/tree/master/GNSS_Data/R/Introduction_to_GNSS_data_using_FITS_in_R.ipynb
<li>More tutorials on GNSS data can be found at https://github.com/GeoNet/data-tutorials/tree/master/GNSS_Data/R
<li>To learn more about station codes go to https://www.geonet.org.nz/data/supplementary/channels
<li>For more on data types in FITS go to http://fits.geonet.org.nz/type (for best formatting use firefox)
<li>For more on FITS go to https://fits.geonet.org.nz/api-docs/
</ul>
|
github_jupyter
|
```
import keras
import tensorflow as tf
print(keras.__version__)
print(tf.__version__)
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,confusion_matrix
NGRAMS = 2
SAMPLE = 1000000
EPOCHS = 15
# Florida voter
df = pd.read_csv('/opt/data/fl_voterreg/fl_reg_name_race.csv.gz')
df.dropna(subset=['name_first', 'name_last'], inplace=True)
sdf = df[df.race.isin(['multi_racial', 'native_indian', 'other', 'unknown']) == False].sample(SAMPLE, random_state=21)
del df
# Additional features
sdf['name_first'] = sdf.name_first.str.title()
sdf['name_last'] = sdf.name_last.str.title()
sdf
rdf = sdf.groupby('race').agg({'name_first': 'count'})
rdf.to_csv('./fl_voter_reg/lstm/fl_name_race.csv', columns=[])
rdf
sdf.groupby('race').agg({'name_last': 'nunique'})
```
## Preprocessing the input data
```
# concat last name and first name
sdf['name_last_name_first'] = sdf['name_last'] + ' ' + sdf['name_first']
# build n-gram list
vect = CountVectorizer(analyzer='char', max_df=0.3, min_df=3, ngram_range=(NGRAMS, NGRAMS), lowercase=False)
a = vect.fit_transform(sdf.name_last_name_first)
vocab = vect.vocabulary_
# sort n-gram by freq (highest -> lowest)
words = []
for b in vocab:
c = vocab[b]
#print(b, c, a[:, c].sum())
words.append((a[:, c].sum(), b))
#break
words = sorted(words, reverse=True)
words_list = ['UNK']
words_list.extend([w[1] for w in words])
num_words = len(words_list)
print("num_words = %d" % num_words)
def find_ngrams(text, n):
a = zip(*[text[i:] for i in range(n)])
wi = []
for i in a:
w = ''.join(i)
try:
idx = words_list.index(w)
except ValueError: # unseen n-gram -> index 0 ('UNK')
idx = 0
wi.append(idx)
return wi
# build X from index of n-gram sequence
X = np.array(sdf.name_last_name_first.apply(lambda c: find_ngrams(c, NGRAMS)))
# check max/avg feature
X_len = []
for x in X:
X_len.append(len(x))
max_feature_len = max(X_len)
avg_feature_len = int(np.mean(X_len))
print("Max feature len = %d, Avg. feature len = %d" % (max_feature_len, avg_feature_len))
y = np.array(sdf.race.astype('category').cat.codes)
# Split train and test dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=21, stratify=y)
```
## Train a LSTM model
ref: http://machinelearningmastery.com/sequence-classification-lstm-recurrent-neural-networks-python-keras/
```
'''The dataset is actually too small for LSTM to be of any advantage
compared to simpler, much faster methods such as TF-IDF + LogReg.
Notes:
- RNNs are tricky. Choice of batch size is important,
choice of loss and optimizer is critical, etc.
Some configurations won't converge.
- LSTM loss decrease patterns during training can be quite different
from what you see with CNNs/MLPs/etc.
'''
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding, Dropout, Activation
from keras.layers import LSTM
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.models import load_model
max_features = num_words # 20000
feature_len = 25 # avg_feature_len # cut texts after this number of words (among top max_features most common words)
batch_size = 32
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=feature_len)
X_test = sequence.pad_sequences(X_test, maxlen=feature_len)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
num_classes = np.max(y_train) + 1
print(num_classes, 'classes')
print('Convert class vector to binary class matrix '
'(for use with categorical_crossentropy)')
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
print('y_train shape:', y_train.shape)
print('y_test shape:', y_test.shape)
print('Build model...')
model = Sequential()
model.add(Embedding(num_words, 32, input_length=feature_len))
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(num_classes, activation='softmax'))
# try using different optimizers and different optimizer configs
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print(model.summary())
print('Train...')
model.fit(X_train, y_train, batch_size=batch_size, epochs=EPOCHS,
validation_split=0.1, verbose=1)
score, acc = model.evaluate(X_test, y_test,
batch_size=batch_size, verbose=1)
print('Test score:', score)
print('Test accuracy:', acc)
```
## Confusion Matrix
```
p = model.predict(X_test, verbose=2) # to predict probability
y_pred = np.argmax(p, axis=-1)
target_names = list(sdf.race.astype('category').cat.categories)
print(classification_report(np.argmax(y_test, axis=1), y_pred, target_names=target_names))
print(confusion_matrix(np.argmax(y_test, axis=1), y_pred))
```
## Save model
```
model.save('./fl_voter_reg/lstm/fl_all_name_lstm.h5')
words_df = pd.DataFrame(words_list, columns=['vocab'])
words_df.to_csv('./fl_voter_reg/lstm/fl_all_name_vocab.csv', index=False, encoding='utf-8')
```
|
github_jupyter
|
# Deploy and perform inference on Model Package from AWS Marketplace
This notebook provides instructions on how to deploy and perform inference on object detection model packages from AWS Marketplace.
It is compatible only with the object detection model packages it is linked to.
#### Pre-requisites:
1. **Note**: This notebook contains elements which render correctly in Jupyter interface. Open this notebook from an Amazon SageMaker Notebook Instance or Amazon SageMaker Studio.
1. Ensure that IAM role used has **AmazonSageMakerFullAccess**
1. To deploy this ML model successfully, ensure that:
1. Either your IAM role has these three permissions and you have authority to make AWS Marketplace subscriptions in the AWS account used:
1. **aws-marketplace:ViewSubscriptions**
1. **aws-marketplace:Unsubscribe**
1. **aws-marketplace:Subscribe**
2. or your AWS account has a subscription to this object detection model. If so, skip step: [Subscribe to the model package](#1.-Subscribe-to-the-model-package)
#### Contents:
1. [Subscribe to the model package](#1.-Subscribe-to-the-model-package)
2. [Create an endpoint and perform real-time inference](#2.-Create-an-endpoint-and-perform-real-time-inference)
1. [Create an endpoint](#A.-Create-an-endpoint)
2. [Create input payload](#B.-Create-input-payload)
3. [Perform real-time inference](#C.-Perform-real-time-inference)
4. [Visualize output](#D.-Visualize-output)
5. [Delete the endpoint](#E.-Delete-the-endpoint)
3. [Perform batch inference](#3.-Perform-batch-inference)
4. [Clean-up](#4.-Clean-up)
1. [Delete the model](#A.-Delete-the-model)
2. [Unsubscribe to the listing (optional)](#B.-Unsubscribe-to-the-listing-(optional))
#### Usage instructions
You can run this notebook one cell at a time (By using Shift+Enter for running a cell).
**Note** - This notebook requires you to follow instructions and specify values for parameters, as instructed.
### 1. Subscribe to the model package
To subscribe to the model package:
1. Open the model package listing page you opened this notebook for.
1. On the AWS Marketplace listing, click on the **Continue to subscribe** button.
1. On the **Subscribe to this software** page, review and click on **"Accept Offer"** if you and your organization agrees with EULA, pricing, and support terms.
1. Once you click on **Continue to configuration button** and then choose a **region**, you will see a **Product Arn** displayed. This is the model package ARN that you need to specify while creating a deployable model using Boto3. Copy the ARN corresponding to your region and specify the same in the following cell.
```
model_package_arn='<Customer to specify Model package ARN corresponding to their AWS region>'
import json
from sagemaker import ModelPackage
import sagemaker as sage
from sagemaker import get_execution_role
import matplotlib.patches as patches
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image
from PIL import ImageColor
role = get_execution_role()
sagemaker_session = sage.Session()
boto3 = sagemaker_session.boto_session
bucket = sagemaker_session.default_bucket()
region = sagemaker_session.boto_region_name
s3 = boto3.client("s3")
runtime= boto3.client('runtime.sagemaker')
```
In the next step, you will deploy the model for real-time inference. For information on how real-time inference with Amazon SageMaker works, see the [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-hosting.html).
### 2. Create an endpoint and perform real-time inference
```
model_name='object-detection-model'
#The object detection model packages this notebook is compatible with support application/x-image as the
#content-type.
content_type='application/x-image'
```
Review and update the compatible instance type for the model package in the following cell.
```
real_time_inference_instance_type='ml.g4dn.xlarge'
batch_transform_inference_instance_type='ml.p2.xlarge'
```
#### A. Create an endpoint
```
#create a deployable model from the model package.
model = ModelPackage(role=role,
model_package_arn=model_package_arn,
sagemaker_session=sagemaker_session)
#Deploy the model
predictor = model.deploy(1, real_time_inference_instance_type, endpoint_name=model_name)
```
Once the endpoint has been created, you will be able to perform real-time inference.
#### B. Prepare input file for performing real-time inference
In this step, we will download the class_id_to_label mapping from an S3 bucket. The mapping file has been downloaded from [TensorFlow](https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt). [Apache 2.0 License](https://www.apache.org/licenses/LICENSE-2.0).
```
s3_bucket = f"jumpstart-cache-prod-{region}"
key_prefix = "inference-notebook-assets"
def download_from_s3(key_filenames):
for key_filename in key_filenames:
s3.download_file(s3_bucket, f"{key_prefix}/{key_filename}", key_filename)
img_jpg = "Naxos_Taverna.jpg"
#Download image
download_from_s3(key_filenames=[img_jpg])
#Mapping from model predictions to class labels
class_id_to_label = {"1": "person", "2": "bicycle", "3": "car", "4": "motorcycle", "5": "airplane", "6": "bus", "7": "train", "8": "truck", "9": "boat", "10": "traffic light", "11": "fire hydrant", "13": "stop sign", "14": "parking meter", "15": "bench", "16": "bird", "17": "cat", "18": "dog", "19": "horse", "20": "sheep", "21": "cow", "22": "elephant", "23": "bear", "24": "zebra", "25": "giraffe", "27": "backpack", "28": "umbrella", "31": "handbag", "32": "tie", "33": "suitcase", "34": "frisbee", "35": "skis", "36": "snowboard", "37": "sports ball", "38": "kite", "39": "baseball bat", "40": "baseball glove", "41": "skateboard", "42": "surfboard", "43": "tennis racket", "44": "bottle", "46": "wine glass", "47": "cup", "48": "fork", "49": "knife", "50": "spoon", "51": "bowl", "52": "banana", "53": "apple", "54": "sandwich", "55": "orange", "56": "broccoli", "57": "carrot", "58": "hot dog", "59": "pizza", "60": "donut", "61": "cake", "62": "chair", "63": "couch", "64": "potted plant", "65": "bed", "67": "dining table", "70": "toilet", "72": "tv", "73": "laptop", "74": "mouse", "75": "remote", "76": "keyboard", "77": "cell phone", "78": "microwave", "79": "oven", "80": "toaster", "81": "sink", "82": "refrigerator", "84": "book", "85": "clock", "86": "vase", "87": "scissors", "88": "teddy bear", "89": "hair drier", "90": "toothbrush"}
```
#### C. Query endpoint that you have created with the opened images
```
#perform_inference performs inference on the endpoint created above and returns the predictions.
def perform_inference(payload):
response = runtime.invoke_endpoint(EndpointName=model_name, ContentType=content_type, Body=payload)
model_predictions = json.loads(response['Body'].read())
return model_predictions
with open(img_jpg, 'rb') as file: input_img = file.read()
model_predictions = perform_inference(input_img)
result = {key: np.array(value)[np.newaxis, ...] if isinstance(value, list) else np.array([value]) for key, value in model_predictions['predictions'][0].items()}
```
#### D. Display model predictions as bounding boxes on the input image
```
colors = list(ImageColor.colormap.values())
image_pil = Image.open(img_jpg)
image_np = np.array(image_pil)
plt.figure(figsize=(20,20))
ax = plt.axes()
ax.imshow(image_np)
classes = [class_id_to_label[str(int(index))] for index in result["detection_classes"][0]]
bboxes, confidences = result["detection_boxes"][0], result["detection_scores"][0]
for idx in range(20):
if confidences[idx] < 0.3:
break
ymin, xmin, ymax, xmax = bboxes[idx]
im_width, im_height = image_pil.size
left, right, top, bottom = xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height
x, y = left, bottom
color = colors[hash(classes[idx]) % len(colors)]
rect = patches.Rectangle((left, bottom), right-left, top-bottom, linewidth=3, edgecolor=color, facecolor='none')
ax.add_patch(rect)
ax.text(left, top, "{} {:.0f}%".format(classes[idx], confidences[idx]*100), bbox=dict(facecolor='white', alpha=0.5))
```
#### E. Delete the endpoint
Now that you have successfully performed a real-time inference, you do not need the endpoint any more. You can terminate the endpoint to avoid being charged.
```
model.sagemaker_session.delete_endpoint(model_name)
model.sagemaker_session.delete_endpoint_config(model_name)
```
### 3. Perform batch inference
In this section, you will perform batch inference using multiple input payloads together. If you are not familiar with batch transform, and want to learn more, see [How to run a batch transform job](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-batch.html)
```
#upload the batch-transform job input files to S3
transform_input_key_prefix = 'object-detection-model-transform-input'
transform_input = sagemaker_session.upload_data(img_jpg, key_prefix=transform_input_key_prefix)
print("Transform input uploaded to " + transform_input)
#Run the batch-transform job
transformer = model.transformer(1, batch_transform_inference_instance_type)
transformer.transform(transform_input, content_type=content_type)
transformer.wait()
# output is available on following path
transformer.output_path
```
### 4. Clean-up
#### A. Delete the model
```
model.delete_model()
```
#### B. Unsubscribe to the listing (optional)
If you would like to unsubscribe to the model package, follow these steps. Before you cancel the subscription, ensure that you do not have any [deployable model](https://console.aws.amazon.com/sagemaker/home#/models) created from the model package or using the algorithm. Note - You can find this information by looking at the container name associated with the model.
**Steps to unsubscribe to product from AWS Marketplace**:
1. Navigate to __Machine Learning__ tab on [__Your Software subscriptions page__](https://aws.amazon.com/marketplace/ai/library?productType=ml&ref_=mlmp_gitdemo_indust)
2. Locate the listing that you want to cancel the subscription for, and then choose __Cancel Subscription__ to cancel the subscription.
|
github_jupyter
|
```
import numpy as np
import matplotlib.pyplot as plt
```
# Part 1: Rayleigh Iteration
We have seen that we can iterate a vector $v$ by the matrix $A$, obtaining the sequence of vectors $A^nv$ through successive multiplications, and that this lets us find an eigenvector.
## Question 1
Implement a function `itera(A,v,tol,debug)` that iterates the vector $v$, normalizing at each iteration, and returns $(v_\lambda, \lambda, n)$, respectively:
- an estimate of the eigenvector
- an estimate of the corresponding eigenvalue
- the number of iterations performed until the precision `tol` is reached.
If `debug == True`, also return the list of (unit) vectors produced along the way.
```
def itera(A,v, tol=1e-12, maxiter=1000, debug=False):
v = np.array(v)
n,m = np.shape(A)
assert n==m, 'A must be square'
def eigenvector_normalizer(A,v,n=29):
answ = []
answ.append(v)
for i in range(0,n):
v_next = A @ answ[-1]
answ.append(v_next/np.linalg.norm(v_next))
return answ,n
def eigenvalue_picker(v,u):
v_max = vs[-1]
print(v_max)
return v_max/u
vs,it = eigenvector_normalizer(A,v)
l = np.linalg.norm(A@vs[-1]) # eigenvalue estimate: ||A v|| for the (unit) dominant eigenvector
if debug == True:
return vs[-1],l,it,vs
else:
return vs[-1],l,it
# Known eigenvectors
A = [[1,2],[2,1]]
alvo = np.array([1,1])/np.sqrt(2)
v, l, n = itera(A,[1,2])
assert(abs(l-3) < 1e-15)
assert(all(abs(v-alvo) < 1e-12))
assert(n < 30)
# Random matrix and vector: checking that the result (approximately) satisfies the definition
np.random.seed(4444)
A = np.random.rand(4,4)
v, l, n = itera(A, np.random.rand(4))
err = np.dot(A,v) - l*v
assert(np.linalg.norm(err) < 1e-12)
assert(n < 30)
```
## Question 2: Convergence
We have the number of iterations, but we have not yet seen how the algorithm "converges" to the eigenvector.
So, use the intermediate vectors and plot the evolution of the error between the produced $v$'s and the eigenvector $v_\lambda$.
```
ax = None
v,l,n, vs_intermediarios = itera(A, np.random.rand(4), debug=True)
# error between each intermediate (unit) vector and the final eigenvector estimate v
erros = [np.linalg.norm(u - v) for u in vs_intermediarios]
plt.semilogy(erros)
plt.title(r'Convergence of the iterated vectors to $v_\lambda$')
plt.xlabel('iteration')
ax = plt.gca()
plt.show()
assert ax.title.get_text() != ""
assert len(ax.lines) == 1
ys = ax.lines[0].get_ydata()
assert min(ys) < 1e-12
assert np.all(ys[:-1] > ys[1:])
```
What does the last assert mean?
It checks whether $ys[i]$ is smaller than $ys[i-1]$, i.e., whether the sequence of errors is decreasing (converging).
## Question 3: Comparing convergence
For each of the vectors `d1` and `d2` below, consider the corresponding matrix $A = \operatorname{diag}(d_i)$.
```
d1 = [1,10,20,30,31,32]
d2 = [1,10,20,29,30,32]
```
Which is the eigenvector with the largest eigenvalue of $A_1$ and of $A_2$?
YOUR ANSWER HERE
Now compare the convergence speed of the eigenvector computed by `itera` for each of these matrices,
plotting the error between the vectors generated for $A_1$ and $A_2$ on the same axes.
```
ax = []
_,_,_,l_1 = itera(np.diag(d1), np.ones_like(d1), debug=True)
_,_,_,l_2 = itera(np.diag(d2), np.ones_like(d2), debug=True)
# YOUR CODE HERE
raise NotImplementedError()
ax = plt.gca()
plt.show()
assert ax.title.get_text() != ""
assert len(ax.lines) == 2
assert len(ax.legend().texts) == 2
```
For which matrix is the convergence faster? How would you explain this?
YOUR ANSWER HERE
## Question 4: Convergence?
Let $\theta \in [0,2\pi]$ and $\alpha \in \mathbb{R}$,
and consider the matrix
$$A(\theta, \alpha) = \begin{bmatrix}
\cos(\theta) & \sin(\theta) & 0\\
-\sin(\theta) &\cos(\theta) & 0\\
0 & 0 & \alpha\\
\end{bmatrix}.$$
What is the geometric interpretation of this matrix?
YOUR ANSWER HERE
What are the eigenvectors of $A$ (as functions of $\theta$ and $\alpha$)?
YOUR ANSWER HERE
Implement the function below that builds the matrix $A$:
```
def make_matrix(theta,alpha):
# YOUR CODE HERE
raise NotImplementedError()
assert np.allclose(make_matrix(0,1),np.eye(3))
assert np.allclose(make_matrix(np.pi,0.5),[[-1,0,0],[0,-1,0],[0,0,0.5]])
```
Fixing $\theta = \dfrac{\pi}{4}$,
plot the number of iterations needed to compute the dominant eigenvector,
as a function of $\alpha \in [0.5,1.5]$.
```
alphas = np.linspace(0.5,1.5,100)
ax = []
# YOUR CODE HERE
raise NotImplementedError()
ax = plt.gca()
plt.show()
assert ax.title.get_text() != ""
assert len(ax.lines) == 1
assert ax.get_xlabel() != ""
ys = ax.lines[0].get_ydata()
assert 100 > ys.min() > 60
assert ys[55] < 600
assert ys[50] > 900
```
Now plot the estimate of the eigenvalue, again as a function of $\alpha$.
```
ax = []
# YOUR CODE HERE
raise NotImplementedError()
ax = plt.gca()
plt.show()
assert ax.title.get_text() != ""
assert len(ax.lines) == 1
assert ax.get_xlabel() != ""
ys = ax.lines[0].get_ydata()
assert np.all(0.7 <= ys) and np.all(ys <= 1.5)
```
How can the variation in the number of iterations be explained? What does it have to do with the returned eigenvalue?
YOUR ANSWER HERE
# Part 2: Generalizing
## Question 5: Another iteration, new limits
Instead of iterating $A^n v$, it is possible to iterate $A^{-n} v$.
Thus, instead of "amplifying" the vectors corresponding to the eigenvalues of large modulus,
these get "shrunk", and what remains is the vector of the "smallest" (again, in modulus) eigenvalue.
Show that $\dfrac{A^{-n}v_0}{\lVert A^{-n}v_0 \rVert} \rightarrow v_{min}$, where $v_{min}$ is the eigenvector associated with the "smallest" eigenvalue of $A$.
YOUR ANSWER HERE
Now, generalize a little further:
Let $\alpha \in \mathbb{C}$ be any complex number.
Show that
$$\frac{(A - \alpha I)^{-n}v_0}{\lVert (A - \alpha I)^{-n}v_0 \rVert} \rightarrow v_{\alpha},$$
where $v_{\alpha}$ is the eigenvector of $A$ whose eigenvalue is closest to $\alpha$.
This method is known as "shifted inverse iteration".
YOUR ANSWER HERE
## Question 6: Shifted inverse iteration
Implement the shifted inverse iteration, with arguments similar to those of the `itera` function.
```
def inverse_iteration(A, v, alpha=0, tol=1e-12, maxiter=1000, debug=False):
v = np.array(v)
n,m = np.shape(A)
assert n==m, 'A must be square'
# YOUR CODE HERE
raise NotImplementedError()
A = [[1,2],[2,1]]
ans = np.array([-1,1])/np.sqrt(2)
v, l, n = inverse_iteration(A,[1,2])
assert np.allclose(np.linalg.norm(v),1)
assert np.allclose(v,ans) or np.allclose(v, -ans)
assert 20 < n < 40
A = [[1,2],[2,1]]
ans = np.array([1,1])/np.sqrt(2)
v, l, n = inverse_iteration(A,[1,2], alpha=2, maxiter=50)
assert np.allclose(np.linalg.norm(v),1)
assert np.allclose(v,ans) or np.allclose(v, -ans)
assert 20 < n < 40
A = [[1,2],[2,1]]
ans = np.array([1,1])/np.sqrt(2)
v, l, n = inverse_iteration(A,[1,2], alpha=2.5, maxiter=50)
assert np.allclose(np.linalg.norm(v),1)
assert np.allclose(v,ans) or np.allclose(v, -ans)
assert 10 < n < 20
```
## Question 7: Comparing convergence
Plot the speed of convergence of the eigenvectors for the inverse iteration applied to the matrix $A$ above,
for $\alpha \in \{-2,0,2\}$.
```
np.random.seed(1234)
ax = []
v0 = np.random.rand(2)
# YOUR CODE HERE
raise NotImplementedError()
plt.ylabel('Distance to eigenvector')
ax = plt.gca()
plt.show()
assert ax.title.get_text() != ""
assert len(ax.lines) == 3
assert len(ax.legend().texts) == 3
assert ax.get_xlabel() != ""
ys = [l.get_ydata() for l in ax.lines]
assert np.isclose(max(max(y) for y in ys),2)
assert min(min(y) for y in ys) <= 1e-16
```
Which value of $\alpha$ led to the fastest convergence?
How would you explain that?
YOUR ANSWER HERE
What else do you observe in this plot?
YOUR ANSWER HERE
## Question 8: Zooming in on the convergence
Now repeat the same plot for $\alpha \in \{2, 2.5, 2.9, 2.99\}$.
```
np.random.seed(1234)
ax = []
v0 = np.random.rand(2)
# YOUR CODE HERE
raise NotImplementedError()
plt.ylabel('Distance to eigenvector')
ax = plt.gca()
plt.show()
assert ax.title.get_text() != ""
assert len(ax.lines) == 4
assert len(ax.legend().texts) == 4
assert ax.get_xlabel() != ""
ys = [l.get_ydata() for l in ax.lines]
assert min(min(y) for y in ys) <= 1e-16
```
What does this plot suggest about the speed of convergence of the inverse iteration?
Could this already be "seen" in the previous plot?
YOUR ANSWER HERE
|
github_jupyter
|
<a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title"><b>The Knapsack Problem</b></span> by <a xmlns:cc="http://creativecommons.org/ns#" href="http://mate.unipv.it/gualandi" property="cc:attributionName" rel="cc:attributionURL">Stefano Gualandi</a> is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</a>.<br />Based on a work at <a xmlns:dct="http://purl.org/dc/terms/" href="https://github.com/mathcoding/opt4ds" rel="dct:source">https://github.com/mathcoding/opt4ds</a>.
**NOTE:** Run the following script whenever running this script on a Google Colab.
```
import shutil
import sys
import os.path
if not shutil.which("pyomo"):
!pip install -q pyomo
assert(shutil.which("pyomo"))
if not (shutil.which("glpk") or os.path.isfile("glpk")):
if "google.colab" in sys.modules:
!apt-get install -y -qq glpk-utils
else:
try:
!conda install -c conda-forge glpk
except:
pass
```
# $n$-Queens Problem
The $n$-Queens puzzle is the problem of placing $n$ chess queens on an $n \times n$ chessboard so that no two queens threaten each other; thus, a solution requires that no two queens share the same row, column, or diagonal (source: [wikipedia](https://en.wikipedia.org/wiki/Eight_queens_puzzle)).
A solution exists for all natural numbers n with the exception of $n = 2$ and $n = 3$.
**Example:** For $n=8$, we have the following solution:
```
1 . . . . . Q . .
2 . . . Q . . . .
3 . . . . . . Q .
4 Q . . . . . . .
5 . . . . . . . Q
6 . Q . . . . . .
7 . . . . Q . . .
8 . . Q . . . . .
a b c d e f g h
```
## Integer Linear Programming Model
The $n$-Queens problem can be formalized with the following **ILP** model.
**Data:** Size of the board $n\times n$. Let $I := \{1,\dots,n\}$ be a set of indices.
**Decision Variables:** The variable $x_{ij} \in \{0,1\}$ is equal to 1 if we place a queen in position $(i,j)$ on the chessboard.
**Objective function:** Since the problem is a feasibility problem, we can set the objective function equal to any constant value.
**Constraints:** We need the following linear constraints, which encode the puzzle rules:
1. Exactly one queen appears in each row:
$$
\sum_{j \in I} x_{ij} = 1, \forall i \in I
$$
2. Exactly one queen appears in each column:
$$
\sum_{i \in I} x_{ij} = 1, \forall j \in I
$$
3. At most one queen appears on each main diagonal:
$$
\sum_{(i,j) \in D_k} x_{ij} \leq 1, D_k \mbox{ main diagonals}
$$
4. At most one queen appears on each off-diagonal:
$$
\sum_{(i,j) \in O_k} x_{ij} \leq 1, O_k \mbox{ off diagonals}
$$
### Main Diagonals $D_k$
Since we need to specify the pairs of indices that define the main diagonals as a function of $n$, we first define the following nested loop:
```
n = 5
for j in range(-n+2,n-1):
for i in range(1, n+1):
if 0 < j+i <= n:
print(i, j+i, end='\t')
else:
print(' ', end='\t')
print()
```
### Off Diagonals $O_k$
Similarly, we can define the off diagonals as follows:
```
for i in reversed(range(-n+3, n)):
for j in range(1, n):
if 0 < n - j+i <= n:
print(j, n-j+i, end='\t')
else:
print(' ', end='\t')
print()
```
### Full Model defined in Pyomo
If we put all the definitions together, we can solve the $n$-Queens problem with the script below.
Please, note the following Pyomo syntax used to define variable $x_{ij}$ over the [RangeSet](https://pyomo.readthedocs.io/en/stable/library_reference/aml/index.html#pyomo.environ.RangeSet) $I$ and $J$:
```
model.I = RangeSet(1, n)
model.J = RangeSet(1, n)
model.x = Var(model.I, model.J, within=Binary)
```
Notice also the syntax used to define the row and column constraints, which uses `lambda` function to define constraint rules:
```
model.row = Constraint(model.I,
rule = lambda mod, i: sum(mod.x[i,j] for j in mod.J) == 1)
```
Finally, to define the main and of diagonals, we use the [ConstraintList](https://pyomo.readthedocs.io/en/stable/working_models.html) class:
```
model.mainD = ConstraintList()
#...
model.mainD.add( expr <= 1 )
```
The complete Pyomo script is as follows.
```
# Import the libraries
from pyomo.environ import ConcreteModel, Var, Objective, Constraint, SolverFactory
from pyomo.environ import maximize, Binary, RangeSet, ConstraintList
n = 8
# Create concrete model
model = ConcreteModel()
model.I = RangeSet(1, n)
model.J = RangeSet(1, n)
# Variables
model.x = Var(model.I, model.J, within=Binary)
# Objective Function: Maximize Profit
model.obj = Objective(expr = n, sense = maximize)
# 1. Row constraints
def VincoloRighe(mod, i):
return sum(mod.x[i,j] for j in mod.J) == 1
model.row = Constraint(model.I,
rule = VincoloRighe)
# 2. Column constraints
model.column = Constraint(model.J,
rule = lambda mod, j: sum(mod.x[i,j] for i in mod.I) == 1)
# 3. Main Diagonal constraints
model.mainD = ConstraintList()
# Build the list of possible pairs
for j in range(-n+2,n-1):
expr = 0
for i in model.I:
if 0 < j+i <= n:
expr += model.x[i, j+i]
model.mainD.add( expr <= 1 )
# 4. Off Diagonal constraints
model.offD = ConstraintList()
# Build the list of possible pairs
for i in range(-n+3,n+1):
expr = 0
for j in model.J:
if 0 < n-j+i <= n:
expr += model.x[j, n-j+i]
model.offD.add( expr <= 1 )
```
To solve the model, we use a solver factory, specifying the GLPK solver, and we inspect the solver **status** (infeasible, unbounded, or optimal).
```
# Solve the model
sol = SolverFactory('glpk').solve(model)
# Basic info about the solution process
for info in sol['Solver']:
print(info)
```
We inspect the optimal decision variables (only the positive variables).
```
# Report solution value
print("Optimal solution value: z =", model.obj())
print("Decision variables:")
for i in model.I:
for j in model.J:
if model.x[i,j]() > 0:
print("x({},{}) = {}".format(i, j, model.x[i,j]()))
```
And finally, we print a solution on a simplified chessboard $n\times n$.
```
print('\nChessboard Solution:')
for i in model.I:
for j in model.J:
if model.x[i,j]() > 0:
print('Q', end=' ')
else:
print('.', end=' ')
print()
```
## Plotting a solution with a Chessboard
```
# CREDIT: Solution original appeared on Stackoverflow at:
# https://stackoverflow.com/questions/60608055/insert-queen-on-a-chessboard-with-pyplot
def PlotSolution(n, x, size=6):
import matplotlib.pyplot as plt
import numpy as np
chessboard = np.zeros((n, n))
chessboard[1::2,0::2] = 1
chessboard[0::2,1::2] = 1
plt.figure(figsize=(size, size))
plt.imshow(chessboard, cmap='binary')
for i, j in x:
if x[i,j]() > 0:
plt.text(i-1, j-1, '♕', color='darkorange',
fontsize=56*size/n, fontweight='bold', ha='center', va='center')
plt.xticks([])
plt.yticks([])
plt.show()
PlotSolution(n, model.x)
```
|
github_jupyter
|
# Content:
1. [Definitions](#1.-Definitions)
2. [The root finding problem](#2.-The-root-finding-problem)
3. [Fixed point iteration](#3.-Fixed-point-iteration)
>3.1 [The cobweb diagram](#3.1-The-cobweb-diagram)
>3.2 [Fixed point iteration theorem](#3.2-Fixed-point-iteration-theorem)
>3.3 [The code](#3.3-The-code)
4. [Bisection method](#4.-Bisection-method)
# 1. Definitions

[Weierstrass function](https://en.wikipedia.org/wiki/Weierstrass_function) is a peculiar function. It is continuous on the real number line but not differentiable anywhere.
```
import numpy as np
import matplotlib.pyplot as plt
def weierstrass(a,b,M,x):
val = 0.0
for n in range(0,M):
val = val + a**n * np.cos(b**n*np.pi*x)
return val
x = np.linspace(-2,2,1000) # 1000 points between -2 and +2
a=0.5
b=3.0
N=x.size
y=np.zeros(N)
M=1
for i in range(N):
y[i]=weierstrass(a,b,M,x[i])
plt.plot(x, y, 'b-', label='M=1')
plt.title('Weierstrass function, M=1')
plt.legend()
plt.show()
M=3
for i in range(N):
y[i]=weierstrass(a,b,M,x[i])
plt.plot(x, y, 'b-', label='M=3')
plt.title('Weierstrass function, M=3')
plt.legend()
plt.show()
M=10
for i in range(N):
y[i]=weierstrass(a,b,M,x[i])
plt.plot(x, y, 'b-', label='M=10')
plt.title('Weierstrass function, M=10')
plt.legend()
plt.show()
```
---
Homework-16: Find examples for polynomial, rational, trigonometric, exponential and logarithmic functions that are in $C^\infty[{\bf R}],~{\rm where}~{\bf R}$ is the real number line.
---
## 2. The root-finding problem

## 3. Fixed point iteration

```
import numpy as np
def f(x):
val=x-np.sqrt(10.0/x)
return val
def g(x):
val=np.sqrt(10.0/x)
return val
x=1 # initial guess, x0
dx=x
i=0
while dx > 1e-3:
dx=np.abs(x-g(x))
print('Iteration: ',i,' x:',x,' g(x):',g(x),' f(x): ', f(x))
x=g(x)
i=i+1
print('Exact root is x:',np.power(10.0,1.0/3.0))
```
Here is another elegant way to print the output
```
x=1.0
for i in range(0,15):
gx=g(x)
fx=f(x)
fstring=(f'''Iteration={i:5d} x={x:10.4f} g(x)={gx:10.4f} f(x)={np.abs(fx):10.4f}''') # using f-string
print(fstring)
x=g(x)
out=(f'''Exact root is x={np.power(10.0,1.0/3.0):10.4f}''')
print(out)
#other way for formatted print
#mynumber=3.14
#print('{:10.8f}'.format(mynumber))
```
### 3.1 The cobweb diagram



```
def g_fn(x):
val=np.sqrt(10.0/x)
return val
N=15
x=np.zeros(N,float)
g=np.zeros(N,float)
x0=1.0 # initial guess
Ni=10
for i in range(0,Ni):
x[i]=x0
g[i]=g_fn(x0)
x0=g[i]
#print(x,g)
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure() # comment if square plot is not needed
ax = fig.add_subplot(111) # comment if square plot is not needed
plt.xlim(0, 4)
plt.ylim(0, 4)
x_grids = np.linspace(0,4,100)
N=x_grids.size
g_grids=np.zeros(N)
for i in range(N):
g_grids[i]=g_fn(x_grids[i])
plt.plot(x_grids,x_grids,'k-',label='x')
plt.plot(x_grids,g_grids,'b-',label='g(x)')
xval=[x[0],x[0]]
gval=[x[0],g[0]]
plt.plot(xval,gval)
plt.grid()
for i in range(0,6):
# horizontal line, same y-value
xval=[x[i],g[i]]
gval=[g[i],g[i]]
plt.plot(xval,gval)
# vertical line, same x-value
xval=[g[i],x[i+1]]
gval=[g[i],g[i+1]]
plt.plot(xval,gval)
ax.set_aspect('equal', adjustable='box') # comment if square plot is not needed
plt.title('Cobweb diagram for $x=\sqrt{10/x}$')
plt.legend()
plt.show()
```
### Let's try another problem: $x - 1/x^2 = 0;~g(x)=1/x^2;~x_0 = 0.1$
```
import numpy as np
def g_fn(x):
val=1.0/x**2
return val
def f_fn(x):
val=x-1.0/x**2
return val
x=0.1
for i in range(0,4):
gx=g_fn(x)
fx=f_fn(x)
fstring=(f'''Iteration={i:5d} x={x:10.4f} g(x)={gx:10.4f} f(x)={np.abs(fx):10.4f}''') # using f-string
print(fstring)
x=g_fn(x)
```
Diverges!
### 3.2 Fixed point iteration theorem






### 3.3 The code
```
import numpy as np
# fn is the g(x) in x = g(x) that we want to solve
# x is the initial guess, x0
# xthresh is the convergence threshold
# maxeval - maximum number of evaluations of fn
# iprint controls printing, iprint = 1 for extra output
def fixedpoint(fn, x, xthresh, maxeval, iprint):
if iprint == 1:
print('#iter x g(x) dx')
ieval=0
g=fn(x)
ieval=ieval+1
dx=np.abs(x-g)
iiter=0
while dx > xthresh:
g=fn(x)
ieval=ieval+1
dx=np.abs(x-g)
if iprint == 1:
print('{:5d}{:15.6e}{:15.6e}{:15.6e}'.format(iiter,x, g, dx))
if ieval >= maxeval:
print('Exiting fixed-point iteration, maximum function evaluations reached')
break
x=g
iiter=iiter+1
print('Exiting fixed-point iteration, convergence reached')
return x
def fn_g(x):
val=np.sqrt(10.0/x)
return val
x0 = 1.0
xthresh = 1E-5
maxeval = 100
iprint=1
x = fixedpoint(fn_g, x0, xthresh, maxeval,iprint)
print('The solution is: ',x)
```
### Let's try another problem: $\exp(-x) + x/5 - 1 = 0$
Let's look at the graphical solution by plotting the function $f(x)$ and see where it takes the value zero.
```
import numpy as np
import matplotlib.pyplot as plt
def f(x):
val=np.exp(-x)+x/5.0-1
return val
xmin=-5.0
xmax=10.0
plt.xlim(xmin, xmax)
plt.ylim(-3, 10)
x = np.linspace(xmin,xmax,100)
N=x.size
y=np.zeros(N)
for i in range(N):
y[i]=f(x[i])
plt.plot(x,x*0,'k-')
plt.plot(x,y,'b-')
plt.grid()
plt.show()
```
There are two roots for this equation. One at 0.0 and another near 5.0.
There are two ways of rearranging the equation to apply the fixed-point iteration $x_{n+1}=g(x_n)$.
* Option-1: $g_1(x)=5\left[ 1- \exp(-x) \right]$
* Option-2: $g_2(x)=-\log\left[ 1 - x/5 \right]$
```
def g1(x):
val=5 * ( 1 - np.exp(-x) )
return val
x0 = 2 # somewhere in between both the solutions
maxeval = 20
xthresh = 0.0001
iprint=1
x = fixedpoint(g1, x0, xthresh, maxeval,iprint)
print('The solution is: ',x)
def g2(x):
val=-np.log(1-x/5.0)
return val
x0 = 2.0 # somewhere in between both the solutions
maxeval = 10
xthresh = 0.0001
iprint=1
x = fixedpoint(g2, x0, xthresh, maxeval,iprint)
print('The solution is: ',x)
```
---
Homework-17: For the above example, using the fixed-point convergence relation, explain why using $g_1(x)$ results in the solution $x^*=4.965$ while $g_2(x)$ results in $x^*=0.0$. In both cases, use $x_0=2.0$ as the initial guess.
---
## 4. Bisection method



```
import numpy as np
def bisection(fn, a0, b0, xthresh, maxeval, iprint):
if iprint == 1:
print('#iter a b x dx f(x) f(b)')
ieval=0
iiter=1
a=a0
b=b0
dx = abs(a-b)
while dx > xthresh or iiter < 10:
x = (a+b)/2.0
dx = abs(a-b)
fx = fn(x)
fb = fn(b)
if abs(fb) < xthresh: # handle an exception: the upper limit is (nearly) a root
print('The upper limit seems to be a root. Stopping program.')
x=b
break
if iprint == 1:
print('{:5d}{:15.6e}{:15.6e}{:15.6e}{:15.6e}{:15.6e}{:15.6e}'.format(iiter, a, b, x, dx,fx,fb))
if fx*fb > 0:
b = x
else:
a = x
ieval=ieval+2
if ieval >= maxeval:
print('Exiting bisection, maximum function evaluations reached')
break
iiter=iiter+1
print('Exiting bisection, convergence reached')
return x
def fn_f(x):
val=np.exp(-x)+x/5.0-1
return val
a = -25.0
b = 30.0
maxeval = 100
xthresh = 0.0001
iprint=1
x = bisection(fn_f, a, b, xthresh, maxeval,iprint)
print('The solution is: ',x)
def fn_f(x):
val=(x-1)**2
return val
a = -10
b = 1
maxeval = 100
xthresh = 0.0001
iprint=1
x = bisection(fn_f, a, b, xthresh, maxeval,iprint)
print('The solution is: ',x)
```
|
github_jupyter
|
# Lecture 23: Differential Equations and $e^{At}$
## The differential equation $\frac{du}{dt} = Au$
Consider the first-order system of differential equations $\left\{\begin{matrix} \frac{du_1}{dt} & = & -u_1 & + 2u_2\\ \frac{du_2}{dt} & = & u_1 & -2u_2 \end{matrix}\right.$, with initial condition $u(0) = \begin{bmatrix}u_1 \\ u_2 \end{bmatrix} = \begin{bmatrix} 1 \\ 0 \end{bmatrix}$. We want to find the general solution $u(t)$.
First, the system gives the coefficient matrix $A = \begin{bmatrix} -1 & 2 \\ 1 & -2 \end{bmatrix}$, whose eigenvalues are $\left\{\begin{matrix} \lambda_1 & = & 0\\ \lambda_2 & = & -3 \end{matrix}\right.$ with eigenvectors $\left\{\begin{matrix} x_1 & = & \begin{bmatrix} 2 \\ 1 \end{bmatrix} \\ x_2 & = & \begin{bmatrix} 1 \\ -1 \end{bmatrix} \end{matrix}\right.$. The solution can then be written as $u(t) = c_1 e^{\lambda_1 t} x_1 + c_2 e^{\lambda_2 t}x_2$. To check, take $u = e^{\lambda_1 t}x_1$; then $\frac{du}{dt} = \lambda_1 e^{\lambda_1 t}x_1 = A e^{\lambda_1 t}x_1 = Au$.
Next, the initial condition $u(0)$ yields $c_1 = \frac{1}{3}, c_2=\frac{1}{3}$. The final solution is $u(t) = \frac{1}{3}\begin{bmatrix}2 \\ 1\end{bmatrix} + \frac{1}{3}e^{-3t}\begin{bmatrix}1 \\ -1\end{bmatrix}$, and the system has a steady state $u(\infty) = \frac{1}{3}\begin{bmatrix}2 \\ 1\end{bmatrix}$, because $e^{-3t} \rightarrow 0$ as $t \rightarrow \infty$.
This brings up the relationship between the long-term behavior of the solution and the eigenvalues:
* Stability: the solution eventually goes to $0$, i.e. $u(t) \rightarrow 0$. This requires $e^{\lambda t} \rightarrow 0$, so the system is stable when the real parts of all eigenvalues are less than $0$ (the imaginary parts only contribute oscillation).
* Steady state: the solution eventually converges to some value; this requires one eigenvalue equal to $0$ while all remaining eigenvalues are less than $0$.
* Blowup: if any eigenvalue has real part greater than zero, the solution does not converge.
A quick trick to check whether both eigenvalues of a $2 \times 2$ matrix have negative real part: for $A = \begin{bmatrix} a & b \\ c & d \end{bmatrix}$, the eigenvalues satisfy $\lambda_1 + \lambda_2 = a + d, \lambda_1 * \lambda_2 = \det A = a * d - b * c$. Both eigenvalues are negative exactly when $a + d < 0$ and $\det A > 0$.
Summary: the original system has two coupled unknown functions, i.e. $u_1, u_2$ are coupled; the job of the eigenvalues and eigenvectors is to decouple them, that is, to diagonalize. Going back to $\frac{du}{dt} = Au$, if we express $u$ as a linear combination of the eigenvectors, $u=Sv$, then $\frac{du}{dt} = Au \Rightarrow S\frac{dv}{dt} = ASv \Rightarrow \frac{dv}{dt} = S^{-1}ASv = \Lambda v$. The new system is decoupled, $\left\{\begin{matrix} \frac{dv_1}{dt} & = & \lambda_1v_1 \\ \frac{dv_2}{dt} & = & \lambda_2v_2 \\ & \vdots & \\ \frac{dv_n}{dt} & = & \lambda_nv_n \end{matrix}\right.$, each component has solution $v(t) = e^{\lambda t}v(0)$, and the solution of the original system has the form $u(t)=Se^{\Lambda t}S^{-1}u(0)$.
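A quick numerical check of this last formula (this sketch is not part of the original notes; it just re-uses the example matrix and initial condition above):
```
# Verify u(t) = S exp(Lambda t) S^{-1} u(0) numerically for the 2x2 example above.
import numpy as np

A = np.array([[-1.0, 2.0],
              [1.0, -2.0]])
u0 = np.array([1.0, 0.0])

lam, S = np.linalg.eig(A)   # eigenvalues and eigenvectors (columns of S)

def u(t):
    # e^{At} u0 computed through the eigendecomposition
    return S @ np.diag(np.exp(lam * t)) @ np.linalg.inv(S) @ u0

print(u(0.0))    # reproduces the initial condition [1, 0]
print(u(10.0))   # approaches the steady state [2/3, 1/3]
```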
## The matrix exponential $e^{At}$
An exponential matrix is a matrix sitting in an exponent, such as $e^{At}$. The previous section used the identity $e^{At} = Se^{\Lambda t}S^{-1}$; here is the proof.
Expanding $e^{At}$ with the Taylor series $e^x = \sum ^{\infty}_{0} \frac{x^n}{n!}$ gives $$e^{At} = I + At + \frac{(At)^2}{2} + \dots + \frac{(At)^n}{n!}$$
$$e^{At} = I + At + \frac{A^2}{2}t^2 + \dots + \frac{A^n}{n!}t^n$$
$$e^{At} = SS^{-1} + S\Lambda S^{-1}t + S\frac{\Lambda^2}{2}S^{-1}t^2 + \dots + S\frac{\Lambda^n}{n!}S^{-1}t^n$$
$$e^{At} = S(I + \Lambda t + \frac{\Lambda^2}{2}t^2 + \dots + \frac{\Lambda^n}{n!}t^n)S^{-1}$$
$$e^{At} = Se^{\Lambda t}S^{-1}$$
Extensions:
* Geometric series: $\frac{1}{1-x} = \sum^{\infty}_{0} x^n$
* A second series: $(I - At)^{-1} = I + At + (At)^2 + \dots + (At)^n + \dots$
## Higher-order differential equations
For a second-order equation ${y}''+b{y}'+ky=0$, we can build the system $\left\{\begin{matrix}{y}'' & = & -b{y}' & -ky\\ {y}' & = & {y}' & \end{matrix}\right.$, which in matrix form is $\begin{bmatrix}{y}'' \\ {y}' \end{bmatrix} = \begin{bmatrix} -b & -k \\ 1 & 0 \end{bmatrix}\begin{bmatrix}{y}' \\ {y} \end{bmatrix}$.
Extending to fifth order, ${y}'''''+b{y}''''+c{y}'''+d{y}''+e{y}'+fy=0$, the matrix form is $\begin{bmatrix} {y}''''' \\ {y}'''' \\ {y}''' \\ {y}'' \\ {y}' \end{bmatrix} = \begin{bmatrix} -b & -c & -d & -e & -f \\ 1 & 0 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 & 0 \\ 0 & 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 & 0\end{bmatrix}\begin{bmatrix} {y}'''' \\ {y}''' \\ {y}'' \\ {y}' \\ y \end{bmatrix}$.
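As a small illustration (not part of the original notes; the coefficients $b=3$, $k=2$ are chosen arbitrarily), the eigenvalues of this kind of companion matrix are exactly the roots of the characteristic polynomial $\lambda^2 + b\lambda + k$:
```
# Companion matrix of y'' + 3y' + 2y = 0; its eigenvalues are the roots -1 and -2.
import numpy as np

b, k = 3.0, 2.0
companion = np.array([[-b, -k],
                      [1.0, 0.0]])

print(np.linalg.eigvals(companion))   # eigenvalues of the companion matrix
print(np.roots([1.0, b, k]))          # roots of lambda^2 + b*lambda + k
```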
|
github_jupyter
|
```
!git clone https://github.com/huggingface/transformers.git
%cd transformers
!pwd
!git reset --hard 52f44dd
!cp ./examples/token-classification/run_ner.py ../
%cd ..
#!wget https://raw.githubusercontent.com/huggingface/transformers/master/examples/token-classification/run_ner.py
!wget https://raw.githubusercontent.com/huggingface/transformers/master/examples/token-classification/utils_ner.py
!wget https://raw.githubusercontent.com/huggingface/transformers/master/examples/token-classification/tasks.py
!git clone https://github.com/huggingface/transformers
%cd transformers
!pip install .
!pip install -r ./examples/requirements.txt
%cd ..
!pip install pyarrow --upgrade
import transformers
!mkdir data
# the train, test and dev tsv files are loaded into the data folder in the same directory.
# if the text files are already prepared, run_ner.py can be run directly.
blankLineIndicator = "BlankLineIndicator"
blank = ""
firstColumnIndex = 0
secondColumnIndex = 1
!cp -r ./NCBI-disease/ ./data/
!mv ./data/NCBI-disease/devel.tsv ./data/NCBI-disease/dev.tsv
!mv ./dev.tsv ./NCBI-disease/
!pwd
#!unzip NERdata.zip -d data
#!ls data/
train_dev_tsv = []
with open('./data/NCBI-disease/train_dev.tsv', 'r') as f:
train_dev_pd = f.readlines()
for row in train_dev_pd:
row = row.split('\n')[0].split('\t')
#Token column name
if row:
pass
if row[firstColumnIndex] == '':
train_dev_tsv.append(blank)
else: #Token column name
#print(row)
train_dev_tsv.append(row[firstColumnIndex] + " " + row[secondColumnIndex])
train_dev_tsv[0]
test_tsv = []
with open('./data/NCBI-disease/test.tsv', 'r') as f:
test_pd = f.readlines()
for row in test_pd:
row = row.split('\n')[0].split('\t')
#Token column name
if row:
pass
if row[firstColumnIndex] == '':
test_tsv.append(blank)
else: #Token column name
#print(row)
test_tsv.append(row[firstColumnIndex] + " " + row[secondColumnIndex])
dev_tsv = []
with open('./data/NCBI-disease/dev.tsv', 'r') as f:
test_pd = f.readlines()
for row in test_pd:
row = row.split('\n')[0].split('\t')
#Token column name
if row:
pass
if row[firstColumnIndex] == '':
dev_tsv.append(blank)
else: #Token column name
#print(row)
dev_tsv.append(row[firstColumnIndex] + " " + row[secondColumnIndex])
print(len(train_dev_pd))
print(len(train_dev_tsv))
train_dev_tsv[12288].split()[0]
train_dev_pd[0], train_dev_tsv[0]
l = []
for item in train_dev_tsv:
try:
item = item.split()[1]
if item != 'B' and item != 'I' and item != 'O':
print(item)
l.append(item)
except:
pass
l = set(l)
print(l)
with open('labels.txt', 'w') as f:
for item in list(l):
f.write(item + '\n')
#!cut -f2 BC2GM/train.tsv | sort | uniq
dev_tsv[0]
def create_txt(file_name, lines):
file = open(file_name, 'w')
for line in lines:
file.write(line + "\n")
file.close()
#create_txt("./data/train.txt",train_tsv)
create_txt("data/train.txt", train_dev_tsv)
create_txt("data/test.txt", test_tsv)
create_txt("data/dev.txt", dev_tsv)
# convert to txt files
# !cat data/NCBI-disease/train.tsv | tr "\t" " " | head -10
#labels.txt -> text file containing the unique named-entity classes
OUTPUT_DIR = "electra-ner"
!cd data
!ls ./data
!python3 run_ner.py --data_dir ./data/ \
--labels ./labels.txt \
--model_name_or_path enelpi/med-electra-small-discriminator \
--output_dir $OUTPUT_DIR \
--max_seq_length 128 \
--num_train_epochs 3 \
--per_device_train_batch_size 16 \
--overwrite_output_dir \
--save_steps 10000 \
--seed 41 \
--do_train \
--do_eval \
--do_predict
import torch
print(torch.__version__)
print(torch.cuda.is_available())
!lshw -c video
!nvcc --version
!modinfo nvidia
# 11/06/2020 20:45:35 - INFO - __main__ - eval_accuracy_score = 0.9825284728742295
# 11/06/2020 20:45:35 - INFO - __main__ - eval_precision = 0.8032166508987701
# 11/06/2020 20:45:35 - INFO - __main__ - eval_recall = 0.884375
# 11/06/2020 20:45:35 - INFO - __main__ - eval_f1 = 0.8418443232523549
!nvidia-smi
# Seq length 256 distilbert-base-uncased
torch.version.cuda
!export CUDA_VISIBLE_DEVICES=0
```
|
github_jupyter
|
# Introducing Scikit-Learn
There are several Python libraries which provide solid implementations of a range of machine learning algorithms.
One of the best known is [Scikit-Learn](http://scikit-learn.org), a package that provides efficient versions of a large number of common algorithms.
Scikit-Learn is characterized by a clean, uniform, and streamlined API, as well as by very useful and complete [online documentation](https://scikit-learn.org/stable/documentation.html).
A benefit of this uniformity is that once you understand the basic use and syntax of Scikit-Learn for one type of model, switching to a new model or algorithm is very straightforward.
This section provides an overview of the Scikit-Learn API.
We will start by covering *data representation* in Scikit-Learn, followed by covering the *Estimator* API, and finally go through a couple examples.
## Data Representation in Scikit-Learn
Machine learning is about creating models from data: for that reason, we'll start by discussing how data can be represented in order to be understood by the computer.
The best way to think about data within Scikit-Learn is in terms of tables of data.
### Data as table
A basic table is a two-dimensional grid of data, in which the rows represent individual elements of the dataset, and the columns represent quantities related to each of these elements.
For example, consider the [Iris dataset](https://en.wikipedia.org/wiki/Iris_flower_data_set), famously analyzed by Ronald Fisher in 1936.
We can download this dataset in the form of a Pandas ``DataFrame`` using the [seaborn](http://seaborn.pydata.org/) library:
```
import pandas as pd
import numpy as np
from IPython.display import Pretty as disp
hint = 'https://raw.githubusercontent.com/soltaniehha/Business-Analytics/master/docs/hints/' # path to hints on GitHub
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
sns.set(rc={'figure.figsize':(10,8)}) # Figure size
iris = sns.load_dataset('iris')
iris.head()
```
<img src="https://github.com/soltaniehha/Business-Analytics/blob/master/figs/11-01-Petal-sepal.jpg?raw=true" width="300" align="center"/>
```
iris.species.unique()
```
Here each row of the data refers to a single observed flower, and the number of rows is the total number of flowers in the dataset.
In general, we will refer to the rows of the matrix as *samples*, and the number of rows as ``n_samples``.
Likewise, each column of the data refers to a particular quantitative piece of information that describes each sample.
In general, we will refer to the columns of the matrix as *features*, and the number of columns as ``n_features``.
#### Features matrix
This table layout makes clear that the information can be thought of as a two-dimensional numerical array or matrix, which we will call the *features matrix*.
By convention, this features matrix is often stored in a variable named ``X``.
The features matrix is assumed to be two-dimensional, with shape ``[n_samples, n_features]``, and is most often contained in a NumPy array or a Pandas ``DataFrame``.
The samples (i.e., rows) always refer to the individual objects described by the dataset.
For example, the sample might be a flower, a person, a document, an image, a sound file, a video, an astronomical object, or anything else you can describe with a set of quantitative measurements.
The features (i.e., columns) always refer to the distinct observations that describe each sample in a quantitative manner.
Features are generally real-valued, but may be Boolean or discrete-valued in some cases.
#### Target array
In addition to the feature matrix ``X``, we also generally work with a *label* or *target* array, which by convention we will usually call ``y``.
The target array is usually one dimensional, with length ``n_samples``, and is generally contained in a NumPy array or Pandas ``Series``.
The target array may have continuous numerical values, or discrete classes/labels.
Often one point of confusion is how the target array differs from the other features columns. The distinguishing feature of the target array is that it is usually the quantity we want to *predict from the data*: in statistical terms, it is the dependent variable.
For example, in the preceding data we may wish to construct a model that can predict the species of flower based on the other measurements; in this case, the ``species`` column would be considered the target array.
With this target array in mind, we can use Seaborn to conveniently visualize the data:
```
sns.pairplot(iris, hue='species', height=2.5);
```
For use in Scikit-Learn, we will extract the features matrix and target array from the ``DataFrame``, which we can do using some of the Pandas ``DataFrame`` operations we've learned:
```
X_iris = iris.drop('species', axis=1)
X_iris.shape
y_iris = iris['species']
y_iris.shape
```
To summarize, the expected layout of features and target values is visualized in the following diagram:
<img src="https://github.com/soltaniehha/Business-Analytics/blob/master/figs/11-01-samples-features.png?raw=true" width="700" align="center"/>
With this data properly formatted, we can move on to consider the *estimator* API of Scikit-Learn:
## Scikit-Learn's Estimator API
The Scikit-Learn API is designed with the following guiding principles in mind, as outlined in the [Scikit-Learn API paper(2013)](http://arxiv.org/abs/1309.0238):
- *Consistency*: All objects share a common interface drawn from a limited set of methods, with consistent documentation.
- *Inspection*: All specified parameter values are exposed as public attributes.
- *Limited object hierarchy*: Only algorithms are represented by Python classes; datasets are represented
in standard formats (NumPy arrays, Pandas ``DataFrame``s) and parameter
names use standard Python strings.
- *Composition*: Many machine learning tasks can be expressed as sequences of more fundamental algorithms,
and Scikit-Learn makes use of this wherever possible.
- *Sensible defaults*: When models require user-specified parameters, the library defines an appropriate default value.
In practice, these principles make Scikit-Learn very easy to use, once the basic principles are understood.
Every machine learning algorithm in Scikit-Learn is implemented via the Estimator API, which provides a consistent interface for a wide range of machine learning applications.
### Basics of the API
Most commonly, the steps in using the Scikit-Learn estimator API are as follows
(we will step through a couple of detailed examples in the sections that follow).
1. Choose a class of model by importing the appropriate estimator class from Scikit-Learn.
2. Choose model hyperparameters by instantiating this class with desired values.
3. Arrange data into a features matrix and target vector following the discussion above.
4. Fit the model to your data by calling the ``fit()`` method of the model instance.
5. Apply the Model to new data:
- For supervised learning, often we predict labels for unknown data using the ``predict()`` method.
- For unsupervised learning, we often transform or infer properties of the data using the ``transform()`` or ``predict()`` method.
We will now step through simple examples of applying supervised learning methods.
### Supervised learning example: Simple linear regression
As an example of this process, let's consider a simple linear regression—that is, the common case of fitting a line to $(x, y)$ data.
We will use the following columns from `iris` for our regression example: `petal_width` & `petal_length`
```
X = iris[['petal_width']]
y = iris[['petal_length']]
plt.scatter(X, y);
```
With this data in place, we can use the recipe outlined earlier. Let's walk through the process:
#### 1. Choose a class of model
In Scikit-Learn, every class of model is represented by a Python class.
So, for example, if we would like to compute a simple linear regression model, we can import the linear regression class:
```
from sklearn.linear_model import LinearRegression
```
Note that other more general linear regression models exist as well; you can read more about them in the [``sklearn.linear_model`` module documentation](http://Scikit-Learn.org/stable/modules/linear_model.html).
#### 2. Choose model hyperparameters
An important point is that *a class of model is not the same as an instance of a model*.
Once we have decided on our model class, there are still some options open to us.
Depending on the model class we are working with, we might need to answer one or more questions like the following:
- Would we like to fit for the offset (i.e., *y*-intercept)?
- Would we like the model to be normalized?
- Would we like to preprocess our features to add model flexibility?
- What degree of regularization would we like to use in our model?
- How many model components would we like to use?
These are examples of the important choices that must be made *once the model class is selected*.
These choices are often represented as *hyperparameters*, or parameters that must be set before the model is fit to data.
In Scikit-Learn, hyperparameters are chosen by passing values at model instantiation.
For our linear regression example, we can instantiate the ``LinearRegression`` class and specify that we would like to fit the intercept using the ``fit_intercept`` hyperparameter:
```
model = LinearRegression(fit_intercept=True)
model
```
Keep in mind that when the model is instantiated, the only action is the storing of these hyperparameter values.
In particular, we have not yet applied the model to any data: the Scikit-Learn API makes very clear the distinction between *choice of model* and *application of model to data*.
#### 3. Arrange data into a features matrix and target vector
Previously we detailed the Scikit-Learn data representation, which requires a two-dimensional features matrix and a one-dimensional target array.
Here our target variable ``y`` is already in the correct form (a length-``n_samples`` array), and our features matrix is also in the right shape: since we only have one feature, it is a matrix of size ``[n_samples, n_features]`` with ``n_features = 1``.
Let's check the shapes:
```
print(X.shape)
print(y.shape)
```
#### 4. Fit the model to your data
Now it is time to apply our model to data.
This can be done with the ``fit()`` method of the model:
```
model.fit(X, y)
```
This ``fit()`` command causes a number of model-dependent internal computations to take place, and the results of these computations are stored in model-specific attributes that the user can explore.
In Scikit-Learn, by convention all model parameters that were learned during the ``fit()`` process have trailing underscores; for example in this linear model, we have the following:
```
model.coef_
model.intercept_
```
These two parameters represent the slope and intercept of the simple linear fit to the data.
Comparing to the scatter plot above, a slope of about 2.2 and an intercept of about 1 match the linear trend in the petal data quite well.
One question that frequently comes up regards the uncertainty in such internal model parameters.
In general, Scikit-Learn does not provide tools to draw conclusions from internal model parameters themselves: interpreting model parameters is much more a *statistical modeling* question than a *machine learning* question.
Machine learning rather focuses on what the model *predicts*.
If you would like to dive into the meaning of fit parameters within the model, other tools are available, including the [Statsmodels Python package](http://statsmodels.sourceforge.net/).
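If you do want standard errors or p-values for these fitted parameters, a minimal sketch using statsmodels (assuming the package is installed; it is not used elsewhere in this notebook) could look like this:
```
# Refit the same simple regression with statsmodels to get parameter uncertainties.
# X and y are the petal_width / petal_length frames defined earlier in this notebook.
import statsmodels.api as sm

X_const = sm.add_constant(X)            # add an explicit intercept column
ols_results = sm.OLS(y, X_const).fit()
print(ols_results.summary())            # coefficients with standard errors and p-values
```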
#### 5. Predict labels for unknown data
Once the model is trained, the main task of supervised machine learning is to evaluate it based on what it says about new data that was not part of the training set.
In Scikit-Learn, this can be done using the ``predict()`` method.
For the sake of this example, our "new data" will be a grid of `x` values, and we will ask what `y` values the model predicts:
```
xfit = np.linspace(0, 2.5)
xfit = pd.DataFrame(xfit)
xfit.shape
```
We have coerced these *x* values into a ``[n_samples, n_features]`` features matrix, after which we can feed it to the model:
```
yfit = model.predict(xfit)
```
Finally, let's visualize the results by plotting first the raw data, and then this model fit:
```
plt.scatter(X, y)
plt.plot(xfit, yfit, c='gray')
plt.xlabel('petal_width')
plt.ylabel('petal_length');
```
Typically the efficacy of the model is evaluated by comparing its results to some known baseline, as we will see in the next example.
### Supervised learning example: Iris classification
Let's take a look at another example of this process, using the Iris dataset we discussed earlier.
Our question will be this: given a model trained on a portion of the Iris data, how well can we predict the remaining labels?
For this task, we will use an extremely simple generative model known as Gaussian naive Bayes, which proceeds by assuming each class is drawn from an axis-aligned Gaussian distribution.
Because it is so fast and has no hyperparameters to choose, Gaussian naive Bayes is often a good model to use as a baseline classification, before exploring whether improvements can be found through more sophisticated models.
We would like to evaluate the model on data it has not seen before, and so we will split the data into a *training set* and a *testing set*.
This could be done by hand, but it is more convenient to use the ``train_test_split`` utility function:
```
from sklearn.model_selection import train_test_split
Xtrain, Xtest, ytrain, ytest = train_test_split(X_iris, y_iris, test_size=0.3, random_state=833)
```
With the data arranged, we can follow our recipe to predict the labels:
```
from sklearn.naive_bayes import GaussianNB # 1. choose model class
model = GaussianNB() # 2. instantiate model
model.fit(Xtrain, ytrain) # 3. fit model to data
y_model = model.predict(Xtest) # 4. predict on new data
```
Finally, we can use the ``accuracy_score`` utility to see the fraction of predicted labels that match their true value:
```
from sklearn.metrics import accuracy_score
accuracy_score(ytest, y_model)
```
With an accuracy topping 93%, we see that even this very naive classification algorithm is effective for this particular dataset!
To learn more about Gaussian naive Bayes check out [this YouTube video](https://www.youtube.com/watch?v=r1in0YNetG8).
# Your Turn
For this exercise we are going to make some predictions using Telco customer churn data. Our goal is to make a simple model that can predict whether a customer will churn or not based on the historical data. We will follow the steps above.
But first, let's load the data:
```
df = pd.read_csv('https://raw.githubusercontent.com/soltaniehha/Business-Analytics/master/data/Telco-Customer-Churn.csv')
df.head(3)
```
Column `TotalCharges` has 11 rows with an empty string (" "). Replace these values with `0`, as they represent new customers who haven't received a bill yet. Once you have replaced the values, convert the column to a `float32` data type.
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '09-02-TotalCharges')
```
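If you get stuck, a minimal sketch of one possible approach is below (it assumes the blanks are the single-space strings described above; the hint cell contains the intended solution):
```
# A sketch, not necessarily the hint's exact solution:
# replace the blank strings with 0, then cast the column to float32
df['TotalCharges'] = df['TotalCharges'].replace(' ', 0).astype('float32')
```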
If we check the `df.info()` now we should see that `TotalCharges` is now a `float32`:
```
df.info()
```
Before we get to splitting our dataset into train/test, we have to make sure all of our values are numerical, as most ML algorithms work with numerical values only.
### How to Convert Categorical Data to Numerical Data?
In order to do this we have to convert all of the categorical variables to numerical values. This can be done with a process called one-hot encoding. Take the "Churn" column as an example. We have two unique values: "Yes"/"No". One-hot encoding will create two variables, one called `Churn_Yes` and the other one `Churn_No`. We will go from
| Churn |
|--|
|Yes|
|No|
|Yes|
|...|
to
|Churn_Yes | Churn_No |
|--|--|
|1|0|
|0|1|
|1|0|
|...|...|
As you can see, having two variables is redundant since they mirror each other. So, in any one-hot encoding scenario, we need `n-1` variables for a categorical variable that had `n` categories.
Below, we will use a *pandas* function called `get_dummies()`. If we want *pandas* to automatically drop one of the extra variables for us, we use the `drop_first=True` argument.
```
churn_df = df.drop('customerID', axis=1) # dropping customerID as it doesn't have any predictive power
df_dummified = pd.get_dummies(churn_df, drop_first=True) # One-hot encoding
df_dummified.rename(columns={'Churn_Yes': 'Churn'}, inplace=True) # renaming Churn_Yes to Churn
df_dummified.head()
```
Using the `df_dummified` dataframe, create two dataframes for the features matrix and the target vector, and call them `X_df` and `y_df` respectively:
```
# Your answer goes here
# X_df
# Your answer goes here
# y_df
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '09-02-X_df-y_df')
```
Check their shape:
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '09-02-shape')
```
From `X_df` and `y_df` create the train/test splits.
* Set 30% of the data to test and the remainder to train.
* Use `random_state=833`, so you get the same result as in the notebook.
* Name the resulting objects: `Xtrain`, `Xtest`, `ytrain`, `ytest`.
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '09-02-split')
```
From `sklearn.naive_bayes` import `GaussianNB`:
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '09-02-import')
```
Instantiate a `GaussianNB` model and call it `model`:
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '09-02-model')
```
Fit model to data:
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '09-02-fit')
```
Now let's make some predictions. Use the `Xtest` feature dataframe to predict whether these customers are churning or not. Call the outcome (predictions) `y_model`:
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '09-02-predict')
```
Accuracy is not the most reliable metric when it comes to evaluating classification algorithms, but it's one of the simplest. Let's calculate it below:
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '09-02-accuracy')
```
### Is this model any good?
If we had simply predicted that no one will churn, what would the accuracy have been?
We would have correctly identified everyone who didn't churn, but missed everyone who actually did. Let's calculate the accuracy of this simplistic model below:
hint: all you need to work with is `ytest`
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '09-02-accuracy-base')
```
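A minimal sketch of the idea (assuming `ytest` holds the 0/1 `Churn` values from the split above): the accuracy of the "no one churns" model is just the fraction of zeros in `ytest`.
```
# A sketch: predicting 0 ("No churn") for everyone is right whenever ytest == 0
baseline_accuracy = (ytest == 0).mean()
baseline_accuracy
```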
So, after all, our ML model is not that great. But the scope of this exercise is not to fine-tune this model; it is to understand the pipeline and how to read the outcome. Let's continue with some simple questions.
How many people did we have in the test dataset?
Save it to a variable called `n_test`:
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '09-02-test-size')
```
How many of these customers churned?
Save it to a variable and call it `P`:
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '09-02-churned')
```
How many true positives did the model generate? (True positive = correctly identified in the "positive" class. Take "Yes" as the positive class).
Save it to a variable and call it `TP`:
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '09-02-TP')
```
What is the true positive rate (or sensitivity)?
**Sensitivity** (also called the **true positive rate (TPR)**, the recall, or probability of detection in some fields) measures the proportion of actual positives that are correctly identified as such (e.g., the percentage of sick people who are correctly identified as having the condition). ~Wikipedia
$TPR=\frac{TP}{P}$, where $TP$ is the number of true positives and $P$ is the count of all positives.
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '09-02-sensitivity')
```
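For reference, a minimal sketch of the computation (assuming `ytest` and `y_model` are the 0/1 labels and predictions from above, with 1 meaning "Yes"/churned):
```
import numpy as np  # in case it isn't already imported in this notebook
y_true = np.asarray(ytest)
y_pred = np.asarray(y_model)
TP = int(((y_pred == 1) & (y_true == 1)).sum())  # predicted churn and actually churned
P = int((y_true == 1).sum())                     # all actual churners
TP / P
```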
How many of the customers in the test set didn't churn?
Save it to `N`:
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '09-02-N')
```
How many true negatives did the model generate? (True negative = correctly rejected).
Save it to a variable and call it `TN`:
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '09-02-TN')
```
What is the true negative rate (or specificity)?
**Specificity** (also called the **true negative rate**) measures the proportion of actual negatives that are correctly identified as such (e.g., the percentage of healthy people who are correctly identified as not having the condition). ~Wikipedia
$TNR=\frac{TN}{N}$, where $TN$ is the number of true negatives and $N$ is the count of all negatives.
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '09-02-specificity')
```
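And the analogous sketch for specificity (same assumptions, reusing `y_true` and `y_pred` from the sensitivity sketch above):
```
TN = int(((y_pred == 0) & (y_true == 0)).sum())  # predicted no-churn and actually didn't churn
N = int((y_true == 0).sum())                     # all actual non-churners
TN / N
```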
You may check out [this Wikipedia page](https://en.wikipedia.org/wiki/Sensitivity_and_specificity) for more info on sensitivity and specificity.
<img src="https://github.com/soltaniehha/Business-Analytics/blob/master/figs/09-02-sensitivity.png?raw=true" width="400" align="center"/>
|
github_jupyter
|
```
given = """
Grey BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
Grey RIGHT RIGHT 2
Grey LEFT LEFT 2
BLACK RIGHT RIGHT 2
Grey LEFT LEFT 2
BLACK RIGHT RIGHT 2
Grey EMPTY EMPTY 4
Grey LEFT LEFT 3
BLACK TOP TOP 1
BLACK EMPTY EMPTY 5
Grey TOP TOP 3
Grey RIGHT RIGHT 5
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
BLACK EMPTY EMPTY 3
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
BLACK TOP TOP 4
Grey BOTTOM BOTTOM 2
Grey TOP TOP 5
BLACK RIGHT RIGHT 3
Grey LEFT LEFT 3
BLACK LEFT LEFT 3
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
BLACK TOP TOP 4
Grey BOTTOM BOTTOM 2
Grey LEFT LEFT 3
Grey BOTTOM BOTTOM 1
Grey EMPTY EMPTY 2
BLACK EMPTY EMPTY 4
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
BLACK TOP TOP 4
Grey BOTTOM BOTTOM 2
Grey LEFT LEFT 3
BLACK BOTTOM BOTTOM 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
BLACK TOP TOP 4
Grey BOTTOM BOTTOM 2
Grey LEFT LEFT 3
Grey LEFT LEFT 1
Grey EMPTY EMPTY 2
BLACK EMPTY EMPTY 4
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
BLACK TOP TOP 4
Grey BOTTOM BOTTOM 2
Grey LEFT LEFT 3
BLACK LEFT LEFT 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
BLACK TOP TOP 4
Grey BOTTOM BOTTOM 2
Grey RIGHT RIGHT 5
Grey LEFT LEFT 5
Grey LEFT LEFT 2
BLACK RIGHT RIGHT 5
Grey RIGHT RIGHT 5
BLACK RIGHT RIGHT 5
Grey RIGHT RIGHT 5
Grey LEFT LEFT 4
BLACK TOP TOP 2
BLACK RIGHT RIGHT 5
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey LEFT LEFT 3
Grey TOP TOP 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey LEFT LEFT 3
Grey EMPTY EMPTY 1
Grey EMPTY EMPTY 2
MAP CIRCLE
Grey LEFT LEFT 3
Grey EMPTY EMPTY 5
BLACK EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey LEFT LEFT 3
BLACK EMPTY EMPTY 1
BLACK EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
BLACK TOP TOP 4
Grey BOTTOM BOTTOM 2
Grey LEFT LEFT 3
Grey RIGHT RIGHT 1
Grey EMPTY EMPTY 2
BLACK EMPTY EMPTY 4
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
BLACK BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
Grey LEFT LEFT 3
BLACK RIGHT RIGHT 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
BLACK BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
Grey LEFT LEFT 3
Grey BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
Grey RIGHT RIGHT 2
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey BOTTOM BOTTOM 2
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
Grey EMPTY EMPTY 4
Grey LEFT LEFT 3
BLACK TOP TOP 1
BLACK EMPTY EMPTY 5
Grey TOP TOP 3
Grey BOTTOM BOTTOM 2
Grey TOP TOP 5
Grey LEFT LEFT 5
BLACK TOP TOP 4
Grey LEFT LEFT 2
BLACK LEFT LEFT 2
BLACK BOTTOM BOTTOM 5
BLACK TOP TOP 4
Grey BOTTOM BOTTOM 2
Grey TOP TOP 4
Grey TOP TOP 2
BLACK TOP TOP 5
BLACK TOP TOP 4
Grey LEFT LEFT 2
Grey BOTTOM BOTTOM 3
Grey RIGHT RIGHT 2
Grey TOP TOP 5
Grey BOTTOM BOTTOM 2
BLACK RIGHT RIGHT 4
Grey LEFT LEFT 2
Grey LEFT LEFT 3
BLACK LEFT LEFT 2
BLACK BOTTOM BOTTOM 5
BLACK RIGHT RIGHT 4
Grey LEFT LEFT 2
Grey LEFT LEFT 3
BLACK LEFT LEFT 2
BLACK BOTTOM BOTTOM 5
BLACK LEFT LEFT 2
BLACK BOTTOM BOTTOM 5
Grey TOP TOP 5
Grey LEFT LEFT 2
Grey RIGHT RIGHT 5
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey LEFT LEFT 2
Grey BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
Grey RIGHT RIGHT 2
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
Grey EMPTY EMPTY 4
Grey LEFT LEFT 3
MAP CIRCLE
BLACK EMPTY EMPTY 5
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
BLACK EMPTY EMPTY 3
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
BLACK RIGHT RIGHT 3
Grey RIGHT RIGHT 3
BLACK LEFT LEFT 3
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
Grey BOTTOM BOTTOM 1
Grey EMPTY EMPTY 2
BLACK EMPTY EMPTY 4
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
BLACK BOTTOM BOTTOM 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
Grey EMPTY EMPTY 3
BLACK TOP TOP 4
Grey BOTTOM BOTTOM 2
Grey LEFT LEFT 1
Grey EMPTY EMPTY 2
BLACK EMPTY EMPTY 4
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
BLACK LEFT LEFT 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
Grey TOP TOP 4
Grey TOP TOP 2
BLACK TOP TOP 5
Grey LEFT LEFT 2
Grey TOP TOP 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey EMPTY EMPTY 3
BLACK LEFT LEFT 5
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey EMPTY EMPTY 3
Grey EMPTY EMPTY 5
BLACK EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey EMPTY EMPTY 3
Grey EMPTY EMPTY 1
Grey EMPTY EMPTY 2
MAP CIRCLE
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
Grey EMPTY EMPTY 3
BLACK TOP TOP 4
Grey BOTTOM BOTTOM 2
BLACK EMPTY EMPTY 1
BLACK EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
Grey EMPTY EMPTY 3
BLACK TOP TOP 4
Grey BOTTOM BOTTOM 2
Grey RIGHT RIGHT 1
Grey EMPTY EMPTY 2
BLACK EMPTY EMPTY 4
Grey EMPTY EMPTY 3
BLACK TOP TOP 4
Grey EMPTY EMPTY 3
BLACK TOP TOP 4
Grey BOTTOM BOTTOM 2
BLACK RIGHT RIGHT 1
Grey EMPTY EMPTY 2
BLACK TOP TOP 3
Grey EMPTY EMPTY 3
BLACK BOTTOM BOTTOM 4
Grey EMPTY EMPTY 3
BLACK LEFT LEFT 4
Grey BOTTOM BOTTOM 2
BLACK RIGHT RIGHT 1
Grey EMPTY EMPTY 2
BLACK TOP TOP 3
Grey LEFT LEFT 2
BLACK LEFT LEFT 4
Grey LEFT LEFT 2
BLACK BOTTOM BOTTOM 4
Grey RIGHT RIGHT 1
Grey EMPTY EMPTY 2
BLACK EMPTY EMPTY 4
Grey EMPTY EMPTY 3
BLACK LEFT LEFT 4
Grey EMPTY EMPTY 3
BLACK BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
Grey RIGHT RIGHT 1
Grey EMPTY EMPTY 2
BLACK EMPTY EMPTY 4
Grey LEFT LEFT 2
BLACK BOTTOM BOTTOM 4
Grey LEFT LEFT 2
BLACK LEFT LEFT 4
Grey BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
Grey RIGHT RIGHT 2
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
Grey EMPTY EMPTY 4
Grey LEFT LEFT 3
BLACK TOP TOP 1
BLACK EMPTY EMPTY 5
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey LEFT LEFT 2
BLACK EMPTY EMPTY 3
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
BLACK RIGHT RIGHT 3
Grey RIGHT RIGHT 3
BLACK LEFT LEFT 3
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
Grey EMPTY EMPTY 3
BLACK TOP TOP 4
Grey BOTTOM BOTTOM 2
Grey LEFT LEFT 1
Grey EMPTY EMPTY 2
BLACK EMPTY EMPTY 4
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 1
Grey EMPTY EMPTY 2
BLACK TOP TOP 4
Grey EMPTY EMPTY 3
Grey LEFT LEFT 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey EMPTY EMPTY 3
BLACK BOTTOM BOTTOM 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey EMPTY EMPTY 3
Grey LEFT LEFT 5
Grey LEFT LEFT 2
Grey RIGHT RIGHT 2
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
BLACK LEFT LEFT 5
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey EMPTY EMPTY 3
Grey TOP TOP 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey EMPTY EMPTY 3
Grey EMPTY EMPTY 1
Grey EMPTY EMPTY 2
BLACK TOP TOP 3
Grey EMPTY EMPTY 3
Grey EMPTY EMPTY 5
BLACK EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey EMPTY EMPTY 3
BLACK EMPTY EMPTY 1
BLACK EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey EMPTY EMPTY 3
BLACK LEFT LEFT 4
Grey EMPTY EMPTY 3
BLACK BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
BLACK EMPTY EMPTY 1
BLACK EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey LEFT LEFT 2
BLACK LEFT LEFT 4
Grey LEFT LEFT 2
BLACK BOTTOM BOTTOM 4
Grey EMPTY EMPTY 5
BLACK EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey EMPTY EMPTY 3
BLACK LEFT LEFT 4
Grey EMPTY EMPTY 3
BLACK BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
Grey EMPTY EMPTY 5
BLACK EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey LEFT LEFT 2
BLACK LEFT LEFT 4
Grey LEFT LEFT 2
BLACK BOTTOM BOTTOM 4
Grey EMPTY EMPTY 5
BLACK EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
BLACK LEFT LEFT 3
Grey EMPTY EMPTY 5
BLACK EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey TOP TOP 5
Grey BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
MAP CIRCLE
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
Grey EMPTY EMPTY 4
Grey LEFT LEFT 3
BLACK TOP TOP 1
BLACK EMPTY EMPTY 5
Grey RIGHT RIGHT 4
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
BLACK EMPTY EMPTY 3
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
Grey EMPTY EMPTY 3
BLACK TOP TOP 4
Grey BOTTOM BOTTOM 2
BLACK RIGHT RIGHT 3
Grey RIGHT RIGHT 3
BLACK LEFT LEFT 3
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
Grey EMPTY EMPTY 3
BLACK TOP TOP 4
Grey BOTTOM BOTTOM 2
BLACK BOTTOM BOTTOM 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
Grey EMPTY EMPTY 3
BLACK TOP TOP 4
Grey BOTTOM BOTTOM 2
Grey BOTTOM BOTTOM 1
Grey EMPTY EMPTY 2
BLACK EMPTY EMPTY 4
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
Grey EMPTY EMPTY 3
BLACK TOP TOP 4
Grey BOTTOM BOTTOM 2
Grey LEFT LEFT 1
Grey EMPTY EMPTY 2
BLACK EMPTY EMPTY 4
Grey EMPTY EMPTY 3
BLACK BOTTOM BOTTOM 4
Grey EMPTY EMPTY 3
BLACK LEFT LEFT 4
Grey BOTTOM BOTTOM 2
BLACK BOTTOM BOTTOM 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey EMPTY EMPTY 3
BLACK LEFT LEFT 4
Grey EMPTY EMPTY 3
BLACK BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
Grey TOP TOP 4
Grey TOP TOP 2
BLACK TOP TOP 5
Grey RIGHT RIGHT 2
Grey TOP TOP 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey EMPTY EMPTY 3
BLACK LEFT LEFT 5
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey EMPTY EMPTY 3
Grey EMPTY EMPTY 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey EMPTY EMPTY 3
Grey EMPTY EMPTY 1
Grey EMPTY EMPTY 2
BLACK TOP TOP 3
Grey LEFT LEFT 2
BLACK BOTTOM BOTTOM 4
Grey LEFT LEFT 2
BLACK LEFT LEFT 4
Grey EMPTY EMPTY 1
Grey EMPTY EMPTY 2
Grey EMPTY EMPTY 1
Grey EMPTY EMPTY 2
Grey EMPTY EMPTY 1
Grey EMPTY EMPTY 2
Grey EMPTY EMPTY 1
Grey EMPTY EMPTY 2
Grey BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
Grey RIGHT RIGHT 2
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
Grey EMPTY EMPTY 4
Grey LEFT LEFT 3
BLACK TOP TOP 1
BLACK EMPTY EMPTY 5
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
BLACK EMPTY EMPTY 3
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
BLACK RIGHT RIGHT 3
Grey RIGHT RIGHT 3
BLACK LEFT LEFT 3
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
Grey BOTTOM BOTTOM 1
Grey EMPTY EMPTY 2
BLACK EMPTY EMPTY 4
Grey LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
BLACK BOTTOM BOTTOM 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
Grey LEFT LEFT 1
Grey EMPTY EMPTY 2
BLACK EMPTY EMPTY 4
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
BLACK LEFT LEFT 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
Grey TOP TOP 4
Grey TOP TOP 2
BLACK TOP TOP 5
Grey LEFT LEFT 2
Grey TOP TOP 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey EMPTY EMPTY 3
BLACK LEFT LEFT 5
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey EMPTY EMPTY 3
BLACK LEFT LEFT 4
Grey EMPTY EMPTY 3
BLACK BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
BLACK LEFT LEFT 5
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
BLACK LEFT LEFT 2
Grey TOP TOP 3
BLACK RIGHT RIGHT 4
Grey LEFT LEFT 2
Grey LEFT LEFT 5
BLACK TOP TOP 4
Grey LEFT LEFT 2
BLACK LEFT LEFT 2
Grey TOP TOP 3
BLACK RIGHT RIGHT 4
Grey LEFT LEFT 2
Grey TOP TOP 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey EMPTY EMPTY 3
BLACK LEFT LEFT 4
Grey EMPTY EMPTY 3
BLACK BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
MAP CIRCLE
BLACK TOP TOP 2
BLACK LEFT LEFT 3
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
Grey TOP TOP 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey LEFT LEFT 2
BLACK LEFT LEFT 4
Grey LEFT LEFT 2
BLACK BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
Grey RIGHT RIGHT 2
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
Grey EMPTY EMPTY 4
Grey LEFT LEFT 3
BLACK TOP TOP 1
BLACK EMPTY EMPTY 5
Grey RIGHT RIGHT 4
Grey EMPTY EMPTY 3
BLACK BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
BLACK EMPTY EMPTY 3
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
Grey EMPTY EMPTY 3
BLACK BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
BLACK RIGHT RIGHT 3
Grey RIGHT RIGHT 3
BLACK LEFT LEFT 3
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
Grey EMPTY EMPTY 3
BLACK BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
Grey BOTTOM BOTTOM 1
Grey EMPTY EMPTY 2
BLACK EMPTY EMPTY 4
Grey EMPTY EMPTY 3
BLACK BOTTOM BOTTOM 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey EMPTY EMPTY 3
Grey LEFT LEFT 1
Grey EMPTY EMPTY 2
BLACK EMPTY EMPTY 4
Grey EMPTY EMPTY 3
BLACK LEFT LEFT 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
Grey EMPTY EMPTY 3
Grey LEFT LEFT 1
Grey EMPTY EMPTY 2
BLACK EMPTY EMPTY 4
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
Grey LEFT LEFT 2
BLACK BOTTOM BOTTOM 4
BLACK LEFT LEFT 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
BLACK LEFT LEFT 2
Grey RIGHT RIGHT 4
Grey LEFT LEFT 2
BLACK BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
Grey RIGHT RIGHT 2
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
Grey EMPTY EMPTY 4
Grey LEFT LEFT 3
BLACK TOP TOP 1
BLACK EMPTY EMPTY 5
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
BLACK EMPTY EMPTY 3
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
BLACK RIGHT RIGHT 3
Grey RIGHT RIGHT 3
BLACK LEFT LEFT 3
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
Grey BOTTOM BOTTOM 1
Grey EMPTY EMPTY 2
BLACK EMPTY EMPTY 4
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
BLACK BOTTOM BOTTOM 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
Grey BOTTOM BOTTOM 1
Grey EMPTY EMPTY 2
MAP CIRCLE
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey LEFT LEFT 2
BLACK BOTTOM BOTTOM 4
BLACK BOTTOM BOTTOM 1
Grey EMPTY EMPTY 2
BLACK BOTTOM BOTTOM 3
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey LEFT LEFT 2
BLACK BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
Grey RIGHT RIGHT 2
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
Grey EMPTY EMPTY 5
Grey LEFT LEFT 3
BLACK TOP TOP 1
BLACK EMPTY EMPTY 5
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
BLACK EMPTY EMPTY 3
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
BLACK BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
BLACK RIGHT RIGHT 3
Grey RIGHT RIGHT 3
BLACK LEFT LEFT 3
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
BLACK BOTTOM BOTTOM 4
BLACK BOTTOM BOTTOM 2
BLACK RIGHT RIGHT 3
Grey RIGHT RIGHT 3
BLACK LEFT LEFT 3
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey LEFT LEFT 2
BLACK BOTTOM BOTTOM 4
Grey EMPTY EMPTY 4
Grey LEFT LEFT 3
BLACK TOP TOP 1
BLACK EMPTY EMPTY 5
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
Grey BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
Grey RIGHT RIGHT 2
BLACK LEFT LEFT 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
Grey EMPTY EMPTY 4
Grey LEFT LEFT 3
BLACK TOP TOP 1
BLACK EMPTY EMPTY 5
Grey TOP TOP 3
Grey LEFT LEFT 2
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
Grey RIGHT RIGHT 2
BLACK BOTTOM BOTTOM 2
Grey TOP TOP 3
Grey EMPTY EMPTY 3
Grey BOTTOM BOTTOM 5
Grey BOTTOM BOTTOM 2
Grey BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
Grey RIGHT RIGHT 2
BLACK BOTTOM BOTTOM 2
Grey BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
Grey RIGHT RIGHT 2
BLACK BOTTOM BOTTOM 2
Grey BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
Grey RIGHT RIGHT 2
Grey RIGHT RIGHT 2
Grey BOTTOM BOTTOM 4
Grey BOTTOM BOTTOM 2
"""
transcript = """
I AM SAM. I AM SAM. SAM I AM.
THAT SAM-I-AM! THAT SAM-I-AM! I DO NOT LIKE THAT SAM-I-AM!
DO WOULD YOU LIKE GREEN EGGS AND HAM?
I DO NOT LIKE THEM,SAM-I-AM.
I DO NOT LIKE GREEN EGGS AND HAM.
WOULD YOU LIKE THEM HERE OR THERE?
I WOULD NOT LIKE THEM HERE OR THERE.
I WOULD NOT LIKE THEM ANYWHERE.
I DO NOT LIKE GREEN EGGS AND HAM.
I DO NOT LIKE THEM, SAM-I-AM.
WOULD YOU LIKE THEM IN A HOUSE?
WOULD YOU LIKE THEN WITH A MOUSE?
I DO NOT LIKE THEM IN A HOUSE.
I DO NOT LIKE THEM WITH A MOUSE.
I DO NOT LIKE THEM HERE OR THERE.
I DO NOT LIKE THEM ANYWHERE.
I DO NOT LIKE GREEN EGGS AND HAM.
I DO NOT LIKE THEM, SAM-I-AM.
WOULD YOU EAT THEM IN A BOX?
WOULD YOU EAT THEM WITH A FOX?
NOT IN A BOX. NOT WITH A FOX.
NOT IN A HOUSE. NOT WITH A MOUSE.
I WOULD NOT EAT THEM HERE OR THERE.
I WOULD NOT EAT THEM ANYWHERE.
I WOULD NOT EAT GREEN EGGS AND HAM.
I DO NOT LIKE THEM, SAM-I-AM.
WOULD YOU? COULD YOU? IN A CAR?
EAT THEM! EAT THEM! HERE THEY ARE.
I WOULD NOT, COULD NOT, IN A CAR.
YOU MAY LIKE THEM. YOU WILL SEE.
YOU MAY LIKE THEM IN A TREE!
I WOULD NOT, COULD NOT IN A TREE.
NOT IN A CAR! YOU LET ME BE.
I DO NOT LIKE THEM IN A BOX.
I DO NOT LIKE THEM WITH A FOX.
I DO NOT LIKE THEM IN A HOUSE.
I DO NOT LIKE THEM WITH A MOUSE.
I DO NOT LIKE THEM HERE OR THERE.
I DO NOT LIKE THEM ANYWHERE.
I DO NOT LIKE GREEN EGGS AND HAM.
I DO NOT LIKE THEM, SAM-I-AM.
A TRAIN! A TRAIN! A TRAIN! A TRAIN!
COULD YOU, WOULD YOU ON A TRAIN?
NOT ON TRAIN! NOT IN A TREE!
NOT IN A CAR! SAM! LET ME BE!
I WOULD NOT, COULD NOT, IN A BOX.
I WOULD NOT, COULD NOT, WITH A FOX.
I WILL NOT EAT THEM IN A HOUSE.
I WILL NOT EAT THEM HERE OR THERE.
I WILL NOT EAT THEM ANYWHERE.
I DO NOT EAT GREEM EGGS AND HAM.
I DO NOT LIKE THEM, SAM-I-AM.
SAY! IN THE DARK? HERE IN THE DARK!
WOULD YOU, COULD YOU, IN THE DARK?
I WOULD NOT, COULD NOT, IN THE DARK.
WOULD YOU COULD YOU IN THE RAIN?
I WOULD NOT, COULD NOT IN THE RAIN.
NOT IN THE DARK. NOT ON A TRAIN.
NOT IN A CAR. NOT IN A TREE.
I DO NOT LIKE THEM, SAM, YOU SEE.
NOT IN A HOUSE. NOT IN A BOX.
NOT WITH A MOUSE. NOT WITH A FOX.
I WILL NOT EAT THEM HERE OR THERE.
I DO NOT LIKE THEM ANYWHERE!
YOU DO NOT LIKE GREEN EGGS AND HAM?
I DO NOT LIKE THEM, SAM-I-AM.
COULD YOU, WOULD YOU, WITH A GOAT?
I WOULD NOT, COULD NOT WITH A GOAT!
WOULD YOU, COULD YOU, ON A BOAT?
I COULD NOT, WOULD NOT, ON A BOAT.
I WILL NOT, WILL NOT, WITH A GOAT.
I WILL NOT EAT THEM IN THE RAIN.
NOT IN THE DARK! NOT IN A TREE!
NOT IN A CAR! YOU LET ME BE!
I DO NOT LIKE THEM IN A BOX.
I DO NOT LIKE THEM WITH A FOX.
I WILL NOT EAT THEM IN A HOUSE.
I DO NOT LIKE THEM WITH A MOUSE.
I DO NOT LIKE THEM HERE OR THERE.
I DO NOT LIKE THEM ANYWHERE!
I DO NOT LIKE GREEN EGGS AND HAM!
I DO NOT LIKE THEM, SAM-I-AM.
YOU DO NOT LIKE THEM. SO YOU SAY.
TRY THEM! TRY THEM! AND YOU MAY.
TRY THEM AND YOU MAY, I SAY.
sAM! IF YOU LET ME BE,
I WILL TRY THEM. YOU WILL SEE.
(... and he tries them ...)
SAY! I LIKE GREEN EGGS AND HAM!
I DO! I LIKE THEM, SAM-I-AM!
AND I WOULD EAT THEM IN A BOAT.
AND I WOULD EAT THEM WITH A GOAT...
AND I WILL EAT THEM, IN THE RAIN.
AND IN THE DARK. AND ON A TRAIN.
AND IN A CAR. AND IN A TREE.
THEY ARE SO GOOD, SO GOOD, YOU SEE!
SO I WILL EAT THEM IN A BOX.
AND I WILL EAT THEM WITH A FOX.
AND I WILL EAT THEM IN A HOUSE.
AND I WILL EAT THEM WITH A MOUSE.
AND I WILL EAT THEM HERE AND THERE.
SAY! I WILL EAT THEM ANYWHERE!
I DO SO LIKE GREEN EGGS AND HAM!
THANK YOU! THANK YOU, SAM I AM.
"""
VALUES = {
'GREY': 0,
'BLACK': 0,
'EMPTY': 0,
'TOP': 1,
'RIGHT': 2,
'BOTTOM': 3,
'LEFT': 4,
'1': 0,
'2': 1,
'3': 2,
'4': 3,
'5': 4,
}
def parse(txt):
    chunk = []
    result = [chunk]
    for line in txt.strip('\n').split('\n'):
        if 'MAP CIRCLE' in line:
            print('chunk length', len(chunk))
            chunk = []
            result.append(chunk)
            continue
        color, _, position, fill = line.upper().split('\t')
        chunk.append((color, position, fill))
        #chunk.append((VALUES[color], VALUES[position], VALUES[fill]))
    return result
import collections
parsed = parse(given)
ngram_qs = [
collections.deque([], maxlen=i) for i in range(1, 10)
]
counter = collections.Counter()
for chunk in parsed:
for ngram_q in ngram_qs:
ngram_q.clear()
for piece in chunk:
for ngram_q in ngram_qs:
ngram_q.append(''.join(map(str, piece)))
if len(ngram_q) == ngram_q.maxlen:
# print(ngram_q.maxlen, '\n', ngram_q)
counter[','.join(ngram_q)] += 1
for s, freq in sorted(counter.items(), key=lambda x: x[1], reverse=True):
print(str(freq).rjust(8), s)
if freq == 1:
break
N = 8
# ⇦ ⇨ ⇧ ⇩
# ⬅ ➡ ⬆ ⬇
VIZ = {
'BLACKTOP': '\t⬆',
'BLACKRIGHT': '\t➡',
'BLACKBOTTOM': '\t⬇',
'BLACKLEFT': '\t⬅',
'BLACKEMPTY': '\t⬤',
'GREYTOP': '\t⇧',
'GREYRIGHT': '\t⇨',
'GREYBOTTOM': '\t⇩',
'GREYLEFT': '\t⇦',
'GREYEMPTY': '\t◯',
# '1': '▕',
# '2': '▕▎',
# '3': '▕▍',
# '4': '▕▋',
# '5': '▕▉',
}
def print_lines(n_start, lines):
for offset, line in enumerate(lines):
n = n_start + offset
if n % N == 0:
nth = '*'
else:
nth = ' '
#if len(line) > 2:
# for k, v in VIZ.items():
# line = line.replace(k, v)
print(str(n).ljust(3), nth, line)
# NOTE: `buffer` and `SEQUENCES` are referenced in the loop below but are never
# defined in this notebook as saved; the two assignments here are assumed
# stand-ins (assumptions, not the original values) so the cell can run.
buffer = collections.deque([], maxlen=3)  # assumed sliding window of recent pieces
SEQUENCES = {}  # assumed mapping from comma-joined n-grams to decoded words
for chunk in parsed:
buffer.clear()
print('-' * 20)
for i, piece in enumerate(chunk):
if len(buffer) == buffer.maxlen:
word = ','.join(buffer)
if word in SEQUENCES:
print_lines(i - buffer.maxlen, ['%s = %s' % (word, SEQUENCES[word])] * 5)
buffer.clear()
buffer.append(''.join(map(str, piece))) # Don't lose this piece.
continue
print_lines(i - buffer.maxlen, [buffer[0]])
buffer.append(''.join(map(str, piece)))
"""
1BOTTOMBLACK
1BOTTOMGrey
1EMPTYBLACK
1EMPTYGrey
1LEFTBLACK
1LEFTGrey
1RIGHTBLACK
1RIGHTGrey
1TOPBLACK EGGS
1TOPGrey
2BOTTOMBLACK THAT
2BOTTOMGrey I
2EMPTYBLACK
2EMPTYGrey
2LEFTBLACK
2LEFTGrey YOU
2RIGHTBLACK THANK
2RIGHTGrey SAM
2TOPBLACK
2TOPGrey
3BOTTOMBLACK
3BOTTOMGrey
3EMPTYBLACK
3EMPTYGrey
3LEFTBLACK
3LEFTGrey AND
3RIGHTBLACK
3RIGHTGrey
3TOPBLACK
3TOPGrey
4BOTTOMBLACK
4BOTTOMGrey AM
4EMPTYBLACK
4EMPTYGrey HAM
4LEFTBLACK
4LEFTGrey
4RIGHTBLACK
4RIGHTGrey
4TOPBLACK
4TOPGrey
5BOTTOMBLACK
5BOTTOMGrey
5EMPTYBLACK
5EMPTYGrey
5LEFTBLACK
5LEFTGrey
5RIGHTBLACK
5RIGHTGrey
5TOPBLACK
5TOPGrey
"""
WORDS = {
'BLACKTOP1': 'EGGS',
'GREYLEFT3': 'AND',
'GREYEMPTY4': 'HAM',
'GREYEMPTY2': 'A',
'GREYEMPTY1': 'TRAIN',
'BLACKLEFT4': 'COULD',
'GREYLEFT2': 'YOU',
'BLACKBOTTOM4': 'WOULD',
'BLACKTOP3': 'ON',
'GREYEMPTY3': 'NOT',
'BLACKBOTTOM3': 'IN',
'BLACKLEFT5': 'TREE',
'GREYTOP1': 'CAR',
'GREYRIGHT2': 'SAM',
'BLACKTOP5': 'LET',
'GREYTOP2': 'ME',
'GREYTOP4': 'BE',
'GREYBOTTOM2': 'I',
'BLACKBOTTOM1': 'BOX',
'BLACKEMPTY4': 'IN',
'GREYLEFT1': 'FOX',
'BLACKTOP4': 'WILL',
'GREYRIGHT4': 'EAT',
'BLACKLEFT2': 'THEM',
'GREYBOTTOM1': 'HOUSE',
'BLACKLEFT3': 'HERE',
'GREYRIGHT3': 'OR',
'BLACKRIGHT3': 'THERE',
'BLACKEMPTY3': ''
}
for chunk in reversed(parsed):
for i, piece in enumerate(reversed(chunk)):
word = ''.join(map(str, piece))
if word in WORDS:
print(WORDS[word])
else:
print(word)
```
|
github_jupyter
|
# Demonstration of integrating POI Points to OSM road network
1. Use any method you like to get the sample [POI data](https://assets.onemap.sg/shp/supermarkets.zip), consisting of supermarkets from [OneMap SG](https://www.onemap.sg/).
2. Use [OSMnx](https://osmnx.readthedocs.io/en/stable/index.html) to download the pedestrian network from [OpenStreetMap](https://openstreetmap.org), we use a bounding box of Toa Payoh for the demo.
3. Save the network as `.shp` and read it in as two `GeoDataFrame`s: junctions as `nodes` and road segments as `edges`.
4. Integrate POIs into the network using the `connect_poi` function.
```
import os
import wget
import osmnx as ox
import geopandas as gpd
from toolbox import connect_poi
```
## 1. Prepare POIs
```
# get POI data
url = "https://assets.onemap.sg/shp/supermarkets.zip"
PATH = 'data/supermarkets.zip'
if os.path.exists(PATH):
print('File existed.')
else:
PATH = wget.download(url, PATH)
print('File downloaded.')
# load and subset the POI based on a bounding box
bbox = (103.8427, 1.3308, 103.8601, 1.3416) # set bbox of Toa Payoh
pois = gpd.read_file('supermarkets', vfs='zip://{}'.format(PATH), crs='epsg:3857')
pois = pois.to_crs(epsg=4326)
pois['lon'] = pois['geometry'].apply(lambda p: p.x)
pois['lat'] = pois['geometry'].apply(lambda p: p.y)
pois = pois[(pois['lon'] >= bbox[0]) & (pois['lon'] <= bbox[2]) &
(pois['lat'] >= bbox[1]) & (pois['lat'] <= bbox[3])]
pois['key'] = pois.index # set a primary key column
pois.head(3)
```
[NOTE] For use in pandana, you may want to ensure the key column of the input is numeric-only to avoid processing errors. Preferably use unique identifiers (int or str) only, and make sure they do not collide with the node key ('osmid' if you use OSM data) in the nodes GeoDataFrame.
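For example, one way to re-key the POIs with plain unique integers is sketched below (the offset is arbitrary and only meant to stay clear of typical OSM node ids; adjust to your data):
```
# A sketch: assign sequential integer keys with a large, arbitrary offset
pois = pois.reset_index(drop=True)
pois['key'] = pois.index + 990000000
```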
## 2. Prepare network
```
# get road network and save as .shp
G = ox.graph_from_bbox(bbox[3], bbox[1], bbox[2], bbox[0], network_type='walk')
ox.save_graph_shapefile(G, filepath='data/sample/', encoding='utf-8')
# load as GeoDataFrame
nodes = gpd.read_file('data/sample/nodes.shp')
edges = gpd.read_file('data/sample/edges.shp')
```
## 3. Integrate POIs and network
```
connect_poi?
# it's a one-liner, but is still at beta at the moment
new_nodes, new_edges = connect_poi(pois, nodes, edges, key_col='key', path=None)
```
## 4. Check output
1. First is an example of how an edge will be broken into segments when there is a POI to be linked onto it. This process accommodates multiple POIs. E.g., for 2 POIs projecting onto the same edge (but not overlapping, nor falling on either vertex of the edge), the edge will be replaced with 3 segments.
2. Then a figure illustrating how the new network looks after the update.
[NOTE] Note that the aggregated length of the segments will not exactly equal the length of the original edge, due to some edge cases that are not handled at the moment.
```
# original edge
edges[edges['from'] == 3370311549][['from', 'to', 'length']]
# new edges replacing the original (953, 954) and connecting the poi (964)
new_edges[(new_edges['from'] == 3370311549) |
(new_edges['from'] == 9990000005) |
(new_edges['to'] == 9990000005) ][['from', 'to', 'length']]
# output
poi_links = new_edges[new_edges['highway'] == 'projected_footway']
ax = edges.plot(linewidth=0.8, figsize=(18,10), label='Original Road Edges')
poi_links.plot(color='indianred', linewidth=2, ax=ax, label='New Connection Edges')
pois.plot(color='indianred', marker='.', markersize=200, ax=ax, label='POI')
ax.legend(loc=2, fontsize=18)
ax.set_title('The integrated network of supermarkets and road network at Toa Payoh', fontsize=22);
```
|
github_jupyter
|
```
from google.colab import drive
drive.mount('/content/drive')
# from google.colab import drive
# drive.mount('/content/drive')
!pwd
path = '/content/drive/MyDrive/Research/AAAI/cifar_new/k_001/sixth_run1_'
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from matplotlib import pyplot as plt
import copy
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
n_seed = 0
k = 0.001
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark= False
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=10, shuffle=False)
testloader = torch.utils.data.DataLoader(testset, batch_size=10, shuffle=False)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
foreground_classes = {'plane', 'car', 'bird'}
#foreground_classes = {'bird', 'cat', 'deer'}
background_classes = {'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'}
#background_classes = {'plane', 'car', 'dog', 'frog', 'horse','ship', 'truck'}
fg1,fg2,fg3 = 0,1,2
dataiter = iter(trainloader)
background_data=[]
background_label=[]
foreground_data=[]
foreground_label=[]
batch_size=10
for i in range(5000):
images, labels = dataiter.next()
for j in range(batch_size):
if(classes[labels[j]] in background_classes):
img = images[j].tolist()
background_data.append(img)
background_label.append(labels[j])
else:
img = images[j].tolist()
foreground_data.append(img)
foreground_label.append(labels[j])
foreground_data = torch.tensor(foreground_data)
foreground_label = torch.tensor(foreground_label)
background_data = torch.tensor(background_data)
background_label = torch.tensor(background_label)
def create_mosaic_img(bg_idx,fg_idx,fg):
"""
bg_idx : list of indexes of background_data[] to be used as background images in mosaic
fg_idx : index of image to be used as foreground image from foreground data
fg : at what position/index foreground image has to be stored out of 0-8
"""
image_list=[]
j=0
for i in range(9):
if i != fg:
image_list.append(background_data[bg_idx[j]])#.type("torch.DoubleTensor"))
j+=1
else:
image_list.append(foreground_data[fg_idx])#.type("torch.DoubleTensor"))
label = foreground_label[fg_idx]- fg1 # minus fg1 because our fore ground classes are fg1,fg2,fg3 but we have to store it as 0,1,2
#image_list = np.concatenate(image_list ,axis=0)
image_list = torch.stack(image_list)
return image_list,label
desired_num = 30000
mosaic_list_of_images =[] # list of mosaic images, each mosaic image is saved as list of 9 images
fore_idx =[] # list of indexes at which foreground image is present in a mosaic image i.e from 0 to 9
mosaic_label=[] # label of mosaic image = foreground class present in that mosaic
for i in range(desired_num):
np.random.seed(i)
bg_idx = np.random.randint(0,35000,8)
fg_idx = np.random.randint(0,15000)
fg = np.random.randint(0,9)
fore_idx.append(fg)
image_list,label = create_mosaic_img(bg_idx,fg_idx,fg)
mosaic_list_of_images.append(image_list)
mosaic_label.append(label)
plt.imshow(torch.transpose(mosaic_list_of_images[0][1],dim0= 0,dim1 = 2))
class MosaicDataset(Dataset):
"""MosaicDataset dataset."""
def __init__(self, mosaic_list_of_images, mosaic_label, fore_idx):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.mosaic = mosaic_list_of_images
self.label = mosaic_label
self.fore_idx = fore_idx
def __len__(self):
return len(self.label)
def __getitem__(self, idx):
return self.mosaic[idx] , self.label[idx], self.fore_idx[idx]
batch = 250
msd = MosaicDataset(mosaic_list_of_images, mosaic_label , fore_idx)
train_loader = DataLoader( msd,batch_size= batch ,shuffle=True)
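# Focus net (summary of the code below): it scores each of the 9 patches in a mosaic,
# softmaxes the scores into attention weights (alphas), and returns the alpha-weighted
# average of the per-patch feature maps, which the Classification net then classifies.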
class Focus(nn.Module):
def __init__(self):
super(Focus, self).__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=0,bias=False)
self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=0,bias=False)
self.conv3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=0,bias=False)
self.conv4 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=0,bias=False)
self.conv5 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=0,bias=False)
self.conv6 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1,bias=False)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.batch_norm1 = nn.BatchNorm2d(32,track_running_stats=False)
self.batch_norm2 = nn.BatchNorm2d(64,track_running_stats=False)
self.batch_norm3 = nn.BatchNorm2d(256,track_running_stats=False)
self.dropout1 = nn.Dropout2d(p=0.05)
self.dropout2 = nn.Dropout2d(p=0.1)
self.fc1 = nn.Linear(256,64,bias=False)
self.fc2 = nn.Linear(64, 32,bias=False)
self.fc3 = nn.Linear(32, 10,bias=False)
self.fc4 = nn.Linear(10, 1,bias=False)
torch.nn.init.xavier_normal_(self.conv1.weight)
torch.nn.init.xavier_normal_(self.conv2.weight)
torch.nn.init.xavier_normal_(self.conv3.weight)
torch.nn.init.xavier_normal_(self.conv4.weight)
torch.nn.init.xavier_normal_(self.conv5.weight)
torch.nn.init.xavier_normal_(self.conv6.weight)
torch.nn.init.xavier_normal_(self.fc1.weight)
torch.nn.init.xavier_normal_(self.fc2.weight)
torch.nn.init.xavier_normal_(self.fc3.weight)
torch.nn.init.xavier_normal_(self.fc4.weight)
def forward(self,z): #y is avg image #z batch of list of 9 images
y = torch.zeros([batch,256, 3,3], dtype=torch.float64)
x = torch.zeros([batch,9],dtype=torch.float64)
ftr = torch.zeros([batch,9,256,3,3])
y = y.to("cuda")
x = x.to("cuda")
ftr = ftr.to("cuda")
for i in range(9):
out,ftrs = self.helper(z[:,i])
#print(out.shape)
x[:,i] = out
ftr[:,i] = ftrs
log_x = F.log_softmax(x,dim=1) # log_alpha
x = F.softmax(x,dim=1)
for i in range(9):
x1 = x[:,i]
y = y + torch.mul(x1[:,None,None,None],ftr[:,i])
return x, y, log_x, #alpha, log_alpha, avg_data
def helper(self, x):
#x1 = x
#x1 =x
x = self.conv1(x)
x = F.relu(self.batch_norm1(x))
x = (F.relu(self.conv2(x)))
x = self.pool(x)
x = self.conv3(x)
x = F.relu(self.batch_norm2(x))
x = (F.relu(self.conv4(x)))
x = self.pool(x)
x = self.dropout1(x)
x = self.conv5(x)
x = F.relu(self.batch_norm3(x))
x = self.conv6(x)
x1 = F.tanh(x)
x = F.relu(x)
x = self.pool(x)
x = x.view(x.size(0), -1)
x = self.dropout2(x)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.dropout2(x)
x = F.relu(self.fc3(x))
x = self.fc4(x)
x = x[:,0]
# print(x.shape)
return x,x1
torch.manual_seed(n_seed)
focus_net = Focus().double()
focus_net = focus_net.to("cuda")
class Classification(nn.Module):
def __init__(self):
super(Classification, self).__init__()
self.conv1 = nn.Conv2d(in_channels=256, out_channels=128, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1)
self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, padding=1)
self.conv6 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2,padding=1)
self.batch_norm1 = nn.BatchNorm2d(128,track_running_stats=False)
self.batch_norm2 = nn.BatchNorm2d(256,track_running_stats=False)
self.batch_norm3 = nn.BatchNorm2d(512,track_running_stats=False)
self.dropout1 = nn.Dropout2d(p=0.05)
self.dropout2 = nn.Dropout2d(p=0.1)
self.global_average_pooling = nn.AvgPool2d(kernel_size=2)
self.fc1 = nn.Linear(512,128)
# self.fc2 = nn.Linear(128, 64)
# self.fc3 = nn.Linear(64, 10)
self.fc2 = nn.Linear(128, 3)
torch.nn.init.xavier_normal_(self.conv1.weight)
torch.nn.init.xavier_normal_(self.conv2.weight)
torch.nn.init.xavier_normal_(self.conv3.weight)
torch.nn.init.xavier_normal_(self.conv4.weight)
torch.nn.init.xavier_normal_(self.conv5.weight)
torch.nn.init.xavier_normal_(self.conv6.weight)
torch.nn.init.zeros_(self.conv1.bias)
torch.nn.init.zeros_(self.conv2.bias)
torch.nn.init.zeros_(self.conv3.bias)
torch.nn.init.zeros_(self.conv4.bias)
torch.nn.init.zeros_(self.conv5.bias)
torch.nn.init.zeros_(self.conv6.bias)
torch.nn.init.xavier_normal_(self.fc1.weight)
torch.nn.init.xavier_normal_(self.fc2.weight)
torch.nn.init.zeros_(self.fc1.bias)
torch.nn.init.zeros_(self.fc2.bias)
def forward(self, x):
x = self.conv1(x)
x = F.relu(self.batch_norm1(x))
x = (F.relu(self.conv2(x)))
x = self.pool(x)
x = self.conv3(x)
x = F.relu(self.batch_norm2(x))
x = (F.relu(self.conv4(x)))
x = self.pool(x)
x = self.dropout1(x)
x = self.conv5(x)
x = F.relu(self.batch_norm3(x))
x = (F.relu(self.conv6(x)))
x = self.pool(x)
#print(x.shape)
x = self.global_average_pooling(x)
x = x.squeeze()
#x = x.view(x.size(0), -1)
#print(x.shape)
x = self.dropout2(x)
x = F.relu(self.fc1(x))
#x = F.relu(self.fc2(x))
#x = self.dropout2(x)
#x = F.relu(self.fc3(x))
x = self.fc2(x)
return x
torch.manual_seed(n_seed)
classify = Classification().double()
classify = classify.to("cuda")
test_images = []  # list of mosaic images; each mosaic image is saved as a list of 9 images
fore_idx_test =[] #list of indexes at which foreground image is present in a mosaic image
test_label=[] # label of mosaic image = foreground class present in that mosaic
for i in range(10000):
np.random.seed(i+30000)
bg_idx = np.random.randint(0,35000,8)
fg_idx = np.random.randint(0,15000)
fg = np.random.randint(0,9)
fore_idx_test.append(fg)
image_list,label = create_mosaic_img(bg_idx,fg_idx,fg)
test_images.append(image_list)
test_label.append(label)
test_data = MosaicDataset(test_images,test_label,fore_idx_test)
test_loader = DataLoader( test_data,batch_size= batch ,shuffle=False)
criterion = nn.CrossEntropyLoss()
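# The loss defined below mixes the usual cross-entropy with the entropy of the attention
# weights: loss = (1-k)*CE + k*H(alpha). Minimizing the entropy term (k = 0.001 here)
# mildly encourages the alphas to concentrate on a single patch.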
def my_cross_entropy(x, y,alpha,log_alpha,k):
# log_prob = -1.0 * F.log_softmax(x, 1)
# loss = log_prob.gather(1, y.unsqueeze(1))
# loss = loss.mean()
loss = criterion(x,y)
#alpha = torch.clamp(alpha,min=1e-10)
b = -1.0* alpha * log_alpha
b = torch.mean(torch.sum(b,dim=1))
closs = loss
entropy = b
loss = (1-k)*loss + ((k)*b)
return loss,closs,entropy
import torch.optim as optim
# criterion_classify = nn.CrossEntropyLoss()
optimizer_focus = optim.Adam(focus_net.parameters(), lr=0.001)#, momentum=0.9)
optimizer_classify = optim.Adam(classify.parameters(), lr=0.001)#, momentum=0.9)
col1=[]
col2=[]
col3=[]
col4=[]
col5=[]
col6=[]
col7=[]
col8=[]
col9=[]
col10=[]
col11=[]
col12=[]
col13=[]
col14 = [] # train average sparsity
col15 = [] # test average sparsity
correct = 0
total = 0
count = 0
flag = 1
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
sparse_val = 0
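# Bookkeeping for the four outcome buckets: "focus true" means argmax(alpha) points at
# the true foreground patch, "pred true" means the classifier predicted the right label.
# sparse_val counts alphas above 0.01 per mosaic, used later as an average-sparsity proxy.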
focus_net.eval()
classify.eval()
with torch.no_grad():
for data in train_loader:
inputs, labels , fore_idx = data
inputs = inputs.double()
inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda")
alphas, avg_images,_ = focus_net(inputs)
# print(inputs.shape, alphas.shape, avg_images.shape)
outputs = classify(avg_images)
_, predicted = torch.max(outputs.data, 1)
sparse_val += torch.sum(torch.sum(alphas>0.01,dim=1)).item()
for j in range(labels.size(0)):
count += 1
focus = torch.argmax(alphas[j])
if alphas[j][focus] >= 0.5 :
argmax_more_than_half += 1
else:
argmax_less_than_half += 1
if(focus == fore_idx[j] and predicted[j] == labels[j]):
focus_true_pred_true += 1
elif(focus != fore_idx[j] and predicted[j] == labels[j]):
focus_false_pred_true += 1
elif(focus == fore_idx[j] and predicted[j] != labels[j]):
focus_true_pred_false += 1
elif(focus != fore_idx[j] and predicted[j] != labels[j]):
focus_false_pred_false += 1
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 30000 train images: %f %%' % ( 100 * correct / total))
print("total correct", correct)
print("total train set images", total)
print("focus_true_pred_true %d =============> FTPT : %f %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )
print("focus_false_pred_true %d =============> FFPT : %f %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )
print("focus_true_pred_false %d =============> FTPF : %f %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )
print("focus_false_pred_false %d =============> FFPF : %f %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )
print("Sparsity_Value %d =============> AVG Sparsity : %f " % (sparse_val,(sparse_val)/total))
print("argmax_more_than_half ==================> ",argmax_more_than_half)
print("argmax_less_than_half ==================> ",argmax_less_than_half)
print(count)
print("="*100)
col1.append(0)
col2.append(argmax_more_than_half)
col3.append(argmax_less_than_half)
col4.append(focus_true_pred_true)
col5.append(focus_false_pred_true)
col6.append(focus_true_pred_false)
col7.append(focus_false_pred_false)
col14.append(sparse_val)
correct = 0
total = 0
count = 0
flag = 1
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
sparse_val = 0
focus_net.eval()
classify.eval()
with torch.no_grad():
for data in test_loader:
inputs, labels , fore_idx = data
inputs = inputs.double()
inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda")
alphas, avg_images,_ = focus_net(inputs)
outputs = classify(avg_images)
_, predicted = torch.max(outputs.data, 1)
sparse_val += torch.sum(torch.sum(alphas>0.01,dim=1)).item()
for j in range(labels.size(0)):
focus = torch.argmax(alphas[j])
if alphas[j][focus] >= 0.5 :
argmax_more_than_half += 1
else:
argmax_less_than_half += 1
if(focus == fore_idx[j] and predicted[j] == labels[j]):
focus_true_pred_true += 1
elif(focus != fore_idx[j] and predicted[j] == labels[j]):
focus_false_pred_true += 1
elif(focus == fore_idx[j] and predicted[j] != labels[j]):
focus_true_pred_false += 1
elif(focus != fore_idx[j] and predicted[j] != labels[j]):
focus_false_pred_false += 1
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %f %%' % (100 * correct / total))
print("total correct", correct)
print("total test set images", total)
print("focus_true_pred_true %d =============> FTPT : %f %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )
print("focus_false_pred_true %d =============> FFPT : %f %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )
print("focus_true_pred_false %d =============> FTPF : %f %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )
print("focus_false_pred_false %d =============> FFPF : %f %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )
print("Sparsity_Value %d =============> AVG Sparsity : %f " % (sparse_val,(sparse_val)/total))
print("argmax_more_than_half ==================> ",argmax_more_than_half)
print("argmax_less_than_half ==================> ",argmax_less_than_half)
col8.append(argmax_more_than_half)
col9.append(argmax_less_than_half)
col10.append(focus_true_pred_true)
col11.append(focus_false_pred_true)
col12.append(focus_true_pred_false)
col13.append(focus_false_pred_false)
col15.append(sparse_val)
nos_epochs = 100
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
focus_net.train()
classify.train()
tr_loss = []
for epoch in range(nos_epochs): # loop over the dataset multiple times
focus_net.train()
classify.train()
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
sparse_val = 0
running_loss = 0.0
epoch_loss = []
cnt=0
iteration = desired_num // batch
#training data set
for i, data in enumerate(train_loader):
inputs , labels , fore_idx = data
inputs = inputs.double()
inputs, labels = inputs.to("cuda"), labels.to("cuda")
# zero the parameter gradients
optimizer_focus.zero_grad()
optimizer_classify.zero_grad()
alphas, avg_images,log_alphas = focus_net(inputs)
outputs = classify(avg_images)
# outputs, alphas, avg_images = classify(inputs)
_, predicted = torch.max(outputs.data, 1)
# print(outputs)
# print(outputs.shape,labels.shape , torch.argmax(outputs, dim=1))
loss,_,_ = my_cross_entropy(outputs, labels,alphas,log_alphas,k)
loss.backward()
optimizer_focus.step()
optimizer_classify.step()
running_loss += loss.item()
mini = 60
if cnt % mini == mini-1: # print every 40 mini-batches
print('[%d, %5d] loss: %.3f' %(epoch + 1, cnt + 1, running_loss / mini))
epoch_loss.append(running_loss/mini)
running_loss = 0.0
cnt=cnt+1
if epoch % 1 == 0:
sparse_val += torch.sum(torch.sum(alphas>0.01,dim=1)).item()
for j in range (batch):
focus = torch.argmax(alphas[j])
if(alphas[j][focus] >= 0.5):
argmax_more_than_half +=1
else:
argmax_less_than_half +=1
if(focus == fore_idx[j] and predicted[j] == labels[j]):
focus_true_pred_true += 1
elif(focus != fore_idx[j] and predicted[j] == labels[j]):
focus_false_pred_true +=1
elif(focus == fore_idx[j] and predicted[j] != labels[j]):
focus_true_pred_false +=1
elif(focus != fore_idx[j] and predicted[j] != labels[j]):
focus_false_pred_false +=1
tr_loss.append(np.mean(epoch_loss))
if epoch % 1 == 0:
col1.append(epoch+1)
col2.append(argmax_more_than_half)
col3.append(argmax_less_than_half)
col4.append(focus_true_pred_true)
col5.append(focus_false_pred_true)
col6.append(focus_true_pred_false)
col7.append(focus_false_pred_false)
col14.append(sparse_val)
#************************************************************************
#testing data set
focus_net.eval()
classify.eval()
with torch.no_grad():
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
sparse_val = 0
for data in test_loader:
inputs, labels , fore_idx = data
inputs = inputs.double()
inputs, labels = inputs.to("cuda"), labels.to("cuda")
alphas, avg_images,log_alphas = focus_net(inputs)
outputs = classify(avg_images)
#outputs, alphas, avg_images = classify(inputs)
_, predicted = torch.max(outputs.data, 1)
sparse_val += torch.sum(torch.sum(alphas>0.01,dim=1)).item()
for j in range (batch):
focus = torch.argmax(alphas[j])
if(alphas[j][focus] >= 0.5):
argmax_more_than_half +=1
else:
argmax_less_than_half +=1
if(focus == fore_idx[j] and predicted[j] == labels[j]):
focus_true_pred_true += 1
elif(focus != fore_idx[j] and predicted[j] == labels[j]):
focus_false_pred_true +=1
elif(focus == fore_idx[j] and predicted[j] != labels[j]):
focus_true_pred_false +=1
elif(focus != fore_idx[j] and predicted[j] != labels[j]):
focus_false_pred_false +=1
col8.append(argmax_more_than_half)
col9.append(argmax_less_than_half)
col10.append(focus_true_pred_true)
col11.append(focus_false_pred_true)
col12.append(focus_true_pred_false)
col13.append(focus_false_pred_false)
col15.append(sparse_val)
if(np.mean(epoch_loss) <= 0.05):
break;
print('Finished Training')
torch.save(focus_net.state_dict(),path+"weights_focus_0.pt")
torch.save(classify.state_dict(),path+"weights_classify_0.pt")
columns = ["epochs", "argmax > 0.5" ,"argmax < 0.5", "focus_true_pred_true", "focus_false_pred_true", "focus_true_pred_false", "focus_false_pred_false" ,"sparse_val"]
df_train = pd.DataFrame()
df_test = pd.DataFrame()
len(col1),col9
plt.plot(np.arange(1,epoch+2),tr_loss)
plt.xlabel("epochs", fontsize=14, fontweight = 'bold')
plt.ylabel("Loss", fontsize=14, fontweight = 'bold')
plt.title("Train Loss")
plt.grid()
plt.show()
np.save("train_loss.npy",{"training_loss":tr_loss})
df_train[columns[0]] = col1
df_train[columns[1]] = col2
df_train[columns[2]] = col3
df_train[columns[3]] = col4
df_train[columns[4]] = col5
df_train[columns[5]] = col6
df_train[columns[6]] = col7
df_train[columns[7]] = col14
df_test[columns[0]] = col1
df_test[columns[1]] = col8
df_test[columns[2]] = col9
df_test[columns[3]] = col10
df_test[columns[4]] = col11
df_test[columns[5]] = col12
df_test[columns[6]] = col13
df_test[columns[7]] = col15
df_train
df_train.to_csv(path+"_train.csv",index=False)
# plt.figure(12,12)
plt.plot(col1,col2, label='argmax > 0.5')
plt.plot(col1,col3, label='argmax < 0.5')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("training data")
plt.title("On Training set")
plt.show()
plt.figure(figsize=(6,5))
plt.plot(col1,np.array(col4)/300, label ="FTPT ")
plt.plot(col1,np.array(col5)/300, label ="FFPT ")
plt.plot(col1,np.array(col6)/300, label ="FTPF ")
plt.plot(col1,np.array(col7)/300, label ="FFPF ")
plt.title("On Training set")
#plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs", fontsize=14, fontweight = 'bold')
plt.ylabel("percentage train data", fontsize=14, fontweight = 'bold')
# plt.xlabel("epochs")
# plt.ylabel("training data")
plt.legend()
plt.savefig(path + "_train.png",bbox_inches="tight")
plt.savefig(path + "_train.pdf",bbox_inches="tight")
plt.grid()
plt.show()
plt.figure(figsize=(6,5))
plt.plot(col1,np.array(col14)/30000, label ="sparsity_val")
plt.title("On Training set")
#plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs", fontsize=14, fontweight = 'bold')
plt.ylabel("average sparsity value", fontsize=14, fontweight = 'bold')
# plt.xlabel("epochs")
# plt.ylabel("sparsity_value")
plt.savefig(path + "sparsity_train.png",bbox_inches="tight")
plt.savefig(path + "sparsity_train.pdf",bbox_inches="tight")
plt.grid()
plt.show()
df_test
df_test.to_csv(path+"_test.csv")
# plt.figure(12,12)
plt.plot(col1,col8, label='argmax > 0.5')
plt.plot(col1,col9, label='argmax < 0.5')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("Testing data")
plt.title("On Testing set")
plt.show()
plt.figure(figsize=(6,5))
plt.plot(col1,np.array(col10)/100, label ="FTPT ")
plt.plot(col1,np.array(col11)/100, label ="FFPT ")
plt.plot(col1,np.array(col12)/100, label ="FTPF ")
plt.plot(col1,np.array(col13)/100, label ="FFPF ")
plt.title("On Testing set")
#plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs", fontsize=14, fontweight = 'bold')
plt.ylabel("percentage test data", fontsize=14, fontweight = 'bold')
# plt.xlabel("epochs")
# plt.ylabel("training data")
plt.legend()
#plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
# plt.ylabel("Testing data")
plt.savefig(path + "_test.png",bbox_inches="tight")
plt.savefig(path + "_test.pdf",bbox_inches="tight")
plt.grid()
plt.show()
plt.figure(figsize=(6,5))
plt.plot(col1,np.array(col15)/10000, label ="sparsity_val")
plt.title("On Testing set")
#plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs", fontsize=14, fontweight = 'bold')
plt.ylabel("average sparsity value", fontsize=14, fontweight = 'bold')
plt.grid()
plt.savefig(path + "sparsity_test.png",bbox_inches="tight")
plt.savefig(path + "sparsity_test.pdf",bbox_inches="tight")
plt.show()
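# Final pass over the training set: break the results down into FTPT/FFPT/FTPF/FFPF
# (focus true/false vs. prediction true/false) and accumulate the average sparsity of the alphas.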
correct = 0
total = 0
count = 0
flag = 1
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
sparse_val = 0
focus_net.eval()
classify.eval()
with torch.no_grad():
for data in train_loader:
inputs, labels , fore_idx = data
inputs = inputs.double()
inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda")
alphas, avg_images,_ = focus_net(inputs)
outputs = classify(avg_images)
_, predicted = torch.max(outputs.data, 1)
sparse_val += torch.sum(torch.sum(alphas>0.01,dim=1)).item()
for j in range(labels.size(0)):
count += 1
focus = torch.argmax(alphas[j])
if alphas[j][focus] >= 0.5 :
argmax_more_than_half += 1
else:
argmax_less_than_half += 1
if(focus == fore_idx[j] and predicted[j] == labels[j]):
focus_true_pred_true += 1
elif(focus != fore_idx[j] and predicted[j] == labels[j]):
focus_false_pred_true += 1
elif(focus == fore_idx[j] and predicted[j] != labels[j]):
focus_true_pred_false += 1
elif(focus != fore_idx[j] and predicted[j] != labels[j]):
focus_false_pred_false += 1
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 30000 train images: %f %%' % ( 100 * correct / total))
print("total correct", correct)
print("total train set images", total)
print("focus_true_pred_true %d =============> FTPT : %f %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )
print("focus_false_pred_true %d =============> FFPT : %f %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )
print("focus_true_pred_false %d =============> FTPF : %f %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )
print("focus_false_pred_false %d =============> FFPF : %f %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )
print("Sparsity_Value %d =============> AVG Sparsity : %f " % (sparse_val,(sparse_val)/total))
print("argmax_more_than_half ==================> ",argmax_more_than_half)
print("argmax_less_than_half ==================> ",argmax_less_than_half)
print(count)
print("="*100)
correct = 0
total = 0
count = 0
flag = 1
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
sparse_val = 0
focus_net.eval()
classify.eval()
with torch.no_grad():
for data in test_loader:
inputs, labels , fore_idx = data
inputs = inputs.double()
inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda")
alphas, avg_images , _ = focus_net(inputs)
outputs = classify(avg_images)
_, predicted = torch.max(outputs.data, 1)
sparse_val += torch.sum(torch.sum(alphas>0.01,dim=1)).item()
for j in range(labels.size(0)):
focus = torch.argmax(alphas[j])
if alphas[j][focus] >= 0.5 :
argmax_more_than_half += 1
else:
argmax_less_than_half += 1
if(focus == fore_idx[j] and predicted[j] == labels[j]):
focus_true_pred_true += 1
elif(focus != fore_idx[j] and predicted[j] == labels[j]):
focus_false_pred_true += 1
elif(focus == fore_idx[j] and predicted[j] != labels[j]):
focus_true_pred_false += 1
elif(focus != fore_idx[j] and predicted[j] != labels[j]):
focus_false_pred_false += 1
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %f %%' % (
100 * correct / total))
print("total correct", correct)
print("total test set images", total)
print("focus_true_pred_true %d =============> FTPT : %f %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )
print("focus_false_pred_true %d =============> FFPT : %f %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )
print("focus_true_pred_false %d =============> FTPF : %f %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )
print("focus_false_pred_false %d =============> FFPF : %f %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )
print("Sparsity_Value %d =============> AVG Sparsity : %f " % (sparse_val,(sparse_val)/total))
print("argmax_more_than_half ==================> ",argmax_more_than_half)
print("argmax_less_than_half ==================> ",argmax_less_than_half)
correct = 0
total = 0
focus_net.eval()
classify.eval()
with torch.no_grad():
for data in train_loader:
inputs, labels , fore_idx = data
inputs = inputs.double()
inputs, labels = inputs.to("cuda"), labels.to("cuda")
alphas, avg_images,_ = focus_net(inputs)
outputs = classify(avg_images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 30000 train images: %f %%' % ( 100 * correct / total))
print("total correct", correct)
print("total train set images", total)
correct = 0
total = 0
focus_net.eval()
classify.eval()
with torch.no_grad():
for data in test_loader:
inputs, labels , fore_idx = data
inputs = inputs.double()
inputs, labels = inputs.to("cuda"), labels.to("cuda")
alphas, avg_images,_ = focus_net(inputs)
outputs = classify(avg_images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %f %%' % ( 100 * correct / total))
print("total correct", correct)
print("total test set images", total)
```
|
github_jupyter
|
```
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
# The folder where the big 3D arrays dumped in the previous exercise have been stored
data_root = 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data'
# The pickle file holding those big 3D arrays
pickle_file = 'notMNIST.pickle'
with open(data_root + '\\' + pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
image_size = 28
num_labels = 10
def reformat(dataset, labels):
dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
# Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
# It loads all the data into TensorFlow and build the computation graph corresponding to our training:
# With gradient descent training, even this much data is prohibitive.
# Subset the training data for faster turnaround.
train_subset = 10000
graph = tf.Graph()
with graph.as_default():
# Input data.
# Load the training, validation and test data into constants that are
# attached to the graph.
tf_train_dataset = tf.constant(train_dataset[:train_subset, :])
tf_train_labels = tf.constant(train_labels[:train_subset])
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
# These are the parameters that we are going to be training. The weight
# matrix will be initialized using random values following a (truncated)
# normal distribution. The biases get initialized to zero.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
# We multiply the inputs with the weight matrix, and add biases. We compute
# the softmax and cross-entropy (it's one operation in TensorFlow, because
# it's very common, and it can be optimized). We take the average of this
# cross-entropy across all training examples: that's our loss.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
# Optimizer.
# We are going to find the minimum of this loss using gradient descent.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
# These are not part of training, but merely here so that we can report
# accuracy figures as we train.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
num_steps = 10000  # increased from the 801 steps used in the original example
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
#it performs the train
with tf.Session(graph=graph) as session:
# This is a one-time operation which ensures the parameters get initialized as
# we described in the graph: random weights for the matrix, zeros for the
# biases.
tf.global_variables_initializer().run()
print('Tensorflow graph initialized')
for step in range(num_steps):
# Run the computations. We tell .run() that we want to run the optimizer,
# and get the loss value and the training predictions returned as numpy
# arrays.
_, l, predictions = session.run([optimizer, loss, train_prediction])
if (step % 100 == 0):
print('Loss at step %d: %f' % (step, l))
print('Training accuracy: %.1f%%' % accuracy(
predictions, train_labels[:train_subset, :]))
# Calling .eval() on valid_prediction is basically like calling run(), but
# just to get that one numpy array. Note that it recomputes all its graph
# dependencies.
print('Validation accuracy: %.1f%%' % accuracy(
valid_prediction.eval(), valid_labels))
print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
#TODO plot graph from accuracy data
# Let's now switch to stochastic gradient descent training instead, which is much faster.
batch_size = 128
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
num_steps = 10000
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
#TODO measure time
# Turn the logistic regression example with SGD into a 1-hidden layer neural network with rectified linear units nn.relu()
# and 1024 hidden nodes. This model should improve your validation / test accuracy.
#Do TODOs
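# A possible sketch of that 1-hidden-layer network (an illustration, not the only answer):
# it reuses batch_size, image_size, num_labels, valid_dataset and test_dataset defined above,
# and the same minibatch training loop as before can be run against this graph.
num_hidden = 1024
graph = tf.Graph()
with graph.as_default():
    tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)
    # Hidden layer parameters (affine transform followed by ReLU), then an output layer.
    w1 = tf.Variable(tf.truncated_normal([image_size * image_size, num_hidden]))
    b1 = tf.Variable(tf.zeros([num_hidden]))
    w2 = tf.Variable(tf.truncated_normal([num_hidden, num_labels]))
    b2 = tf.Variable(tf.zeros([num_labels]))
    hidden = tf.nn.relu(tf.matmul(tf_train_dataset, w1) + b1)
    logits = tf.matmul(hidden, w2) + b2
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
    optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
    # Predictions for reporting accuracy, applying the same hidden layer to each dataset.
    train_prediction = tf.nn.softmax(logits)
    valid_prediction = tf.nn.softmax(
        tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, w1) + b1), w2) + b2)
    test_prediction = tf.nn.softmax(
        tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, w1) + b1), w2) + b2)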
```
|
github_jupyter
|
## Fitting a diagonal covariance Gaussian mixture model to text data
In a previous assignment, we explored k-means clustering for a high-dimensional Wikipedia dataset. We can also model this data with a mixture of Gaussians, though with increasing dimension we run into two important issues associated with using a full covariance matrix for each component.
* Computational cost becomes prohibitive in high dimensions: score calculations have complexity cubic in the number of dimensions M if the Gaussian has a full covariance matrix.
* A model with many parameters requires more data: observe that a full covariance matrix for an M-dimensional Gaussian will have M(M+1)/2 parameters to fit. With the number of parameters growing roughly as the square of the dimension, it may quickly become impossible to find a sufficient amount of data to make good inferences.
Both of these issues are avoided if we require the covariance matrix of each component to be diagonal, as then it has only M parameters to fit and the score computation decomposes into M univariate score calculations. Recall from the lecture that the M-step for the full covariance is:
\begin{align*}
\hat{\Sigma}_k &= \frac{1}{N_k^{soft}} \sum_{i=1}^N r_{ik} (x_i-\hat{\mu}_k)(x_i - \hat{\mu}_k)^T
\end{align*}
Note that this is a square matrix with M rows and M columns, and the above equation implies that the (v, w) element is computed by
\begin{align*}
\hat{\Sigma}_{k, v, w} &= \frac{1}{N_k^{soft}} \sum_{i=1}^N r_{ik} (x_{iv}-\hat{\mu}_{kv})(x_{iw} - \hat{\mu}_{kw})
\end{align*}
When we assume that this is a diagonal matrix, then non-diagonal elements are assumed to be zero and we only need to compute each of the M elements along the diagonal independently using the following equation.
\begin{align*}
\hat{\sigma}^2_{k, v} &= \hat{\Sigma}_{k, v, v} \\
&= \frac{1}{N_k^{soft}} \sum_{i=1}^N r_{ik} (x_{iv}-\hat{\mu}_{kv})^2
\end{align*}
In this section, we will use an EM implementation to fit a Gaussian mixture model with **diagonal** covariances to a subset of the Wikipedia dataset. The implementation uses the above equation to compute each variance term.
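To make the update above concrete, here is a minimal dense-NumPy sketch of the diagonal-covariance M-step, written directly from the equation; it is only an illustration with assumed array shapes, not the sparse implementation provided later in this notebook.
```
import numpy as np

def m_step_diagonal_variances(X, resp, means, min_var=1e-10):
    """X: (N, M) data, resp: (N, K) responsibilities r_ik, means: (K, M) cluster means."""
    soft_counts = resp.sum(axis=0)                 # N_k^soft for each component
    K, M = resp.shape[1], X.shape[1]
    variances = np.zeros((K, M))
    for k in range(K):
        diff_sq = (X - means[k]) ** 2              # (x_iv - mu_kv)^2 for every i, v
        variances[k] = resp[:, k] @ diff_sq / soft_counts[k]
    return np.maximum(variances, min_var)          # guard against zero variances
```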
We'll begin by importing the dataset and coming up with a useful representation for each article. After running our algorithm on the data, we will explore the output to see whether we can give a meaningful interpretation to the fitted parameters in our model.
**Note to Amazon EC2 users**: To conserve memory, make sure to stop all the other notebooks before running this notebook.
## Import necessary packages
```
from __future__ import print_function # to conform python 2.x print to python 3.x
import turicreate
```
We also have a Python file containing implementations for several functions that will be used during the course of this assignment.
```
from em_utilities import *
```
## Load Wikipedia data and extract TF-IDF features
Load the Wikipedia data and transform each of the first 5000 documents into a TF-IDF representation.
```
wiki = turicreate.SFrame('people_wiki.sframe/').head(5000)
wiki['tf_idf'] = turicreate.text_analytics.tf_idf(wiki['text'])
```
Using a utility we provide, we will create a sparse matrix representation of the documents. This is the same utility function you used during the previous assignment on k-means with text data.
```
wiki = wiki.add_row_number()
tf_idf, map_word_to_index = sframe_to_scipy(wiki, 'tf_idf')
map_index_to_word = dict([[map_word_to_index[i], i] for i in map_word_to_index.keys()])
```
As in the previous assignment, we will normalize each document's TF-IDF vector to be a unit vector.
```
%%time
tf_idf = normalize(tf_idf)
```
We can check that the length (Euclidean norm) of each row is now 1.0, as expected.
```
for i in range(5):
doc = tf_idf[i]
print(np.linalg.norm(doc.todense()))
```
## EM in high dimensions
EM for high-dimensional data requires some special treatment:
* E step and M step must be vectorized as much as possible, as explicit loops are dreadfully slow in Python.
* All operations must be cast in terms of sparse matrix operations, to take advantage of computational savings enabled by sparsity of data.
* Initially, some words may be entirely absent from a cluster, causing the M step to produce zero mean and variance for those words. This means any data point with one of those words will have 0 probability of being assigned to that cluster since the cluster allows for no variability (0 variance) around that count being 0 (0 mean). Since there is a small chance for those words to later appear in the cluster, we instead assign a small positive variance (~1e-10). Doing so also prevents numerical overflow.
We provide the complete implementation for you in the file `em_utilities.py`. For those who are interested, you can read through the code to see how the sparse matrix implementation differs from the previous assignment.
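As a rough illustration of the second bullet above, the log-density of a document under a diagonal Gaussian can be evaluated without ever densifying the TF-IDF matrix by expanding the quadratic term. The sketch below is only illustrative and is not the actual `em_utilities.py` code.
```
import numpy as np

def diag_gaussian_logpdf(X, mean, var):
    """X: scipy sparse (N, M); mean, var: dense length-M arrays. Returns length-N log-densities."""
    M = X.shape[1]
    inv_var = 1.0 / var
    # (x - mu)^2 / var expanded so that only sparse-friendly products involving X are needed
    quad = X.multiply(X).dot(inv_var) - 2.0 * X.dot(mean * inv_var) + np.sum(mean**2 * inv_var)
    return -0.5 * (M * np.log(2.0 * np.pi) + np.sum(np.log(var)) + quad)
```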
You are expected to answer some quiz questions using the results of clustering.
**Initializing mean parameters using k-means**
Recall from the lectures that EM for Gaussian mixtures is very sensitive to the choice of initial means. With a bad initial set of means, EM may produce clusters that span a large area and are mostly overlapping. To eliminate such bad outcomes, we first produce a suitable set of initial means by using the cluster centers from running k-means. That is, we first run k-means and then take the final set of means from the converged solution as the initial means in our EM algorithm.
```
%%time
from sklearn.cluster import KMeans
np.random.seed(5)
num_clusters = 25
# Use scikit-learn's k-means to simplify workflow
#kmeans_model = KMeans(n_clusters=num_clusters, n_init=5, max_iter=400, random_state=1, n_jobs=-1) # uncomment to use parallelism -- may break on your installation
kmeans_model = KMeans(n_clusters=num_clusters, n_init=5, max_iter=400, random_state=1, n_jobs=1)
kmeans_model.fit(tf_idf)
centroids, cluster_assignment = kmeans_model.cluster_centers_, kmeans_model.labels_
means = [centroid for centroid in centroids]
```
**Initializing cluster weights**
We will initialize each cluster weight to be the proportion of documents assigned to that cluster by k-means above.
```
%%time
num_docs = tf_idf.shape[0]
weights = []
for i in range(num_clusters):
# Compute the number of data points assigned to cluster i:
num_assigned = np.sum(cluster_assignment == i) # YOUR CODE HERE
w = float(num_assigned) / num_docs
weights.append(w)
np.sum(cluster_assignment == 1)
```
**Initializing covariances**
To initialize our covariance parameters, we compute $\hat{\sigma}_{k, j}^2 = \frac{1}{|C_k|}\sum_{i \in C_k}(x_{i,j} - \hat{\mu}_{k, j})^2$ for each feature $j$, where $C_k$ is the set of documents assigned to cluster $k$ by k-means. For features with really tiny variances, we assign 1e-8 instead to prevent numerical instability. We do this computation in a vectorized fashion in the following code block.
```
covs = []
for i in range(num_clusters):
member_rows = tf_idf[cluster_assignment==i]
cov = (member_rows.multiply(member_rows) - 2*member_rows.dot(diag(means[i]))).sum(axis=0).A1 / member_rows.shape[0] \
+ means[i]**2
cov[cov < 1e-8] = 1e-8
covs.append(cov)
```
**Running EM**
Now that we have initialized all of our parameters, run EM.
```
out = EM_for_high_dimension(tf_idf, means, covs, weights, cov_smoothing=1e-10)
out['loglik']
```
## Interpret clustering results
In contrast to k-means, EM is able to explicitly model clusters of varying sizes and proportions. The relative magnitudes of the variances in the word dimensions tell us much about the nature of the clusters.
Write yourself a cluster visualizer as follows. Examining each cluster's mean vector, list the 5 words with the largest mean values (5 most common words in the cluster). For each word, also include the associated variance parameter (diagonal element of the covariance matrix).
A sample output may be:
```
==========================================================
Cluster 0: Largest mean parameters in cluster
Word Mean Variance
football 1.08e-01 8.64e-03
season 5.80e-02 2.93e-03
club 4.48e-02 1.99e-03
league 3.94e-02 1.08e-03
played 3.83e-02 8.45e-04
...
```
```
# Fill in the blanks
def visualize_EM_clusters(tf_idf, means, covs, map_index_to_word):
print('')
print('==========================================================')
num_clusters = len(means)
for c in range(num_clusters):
print('Cluster {0:d}: Largest mean parameters in cluster '.format(c))
print('\n{0: <12}{1: <12}{2: <12}'.format('Word', 'Mean', 'Variance'))
# The k'th element of sorted_word_ids should be the index of the word
# that has the k'th-largest value in the cluster mean. Hint: Use np.argsort().
sorted_word_ids = np.argsort(means[c])[::-1] # YOUR CODE HERE
for i in sorted_word_ids[:5]:
print('{0: <12}{1:<10.2e}{2:10.2e}'.format(map_index_to_word[i],
means[c][i],
covs[c][i]))
print('\n==========================================================')
'''By EM'''
visualize_EM_clusters(tf_idf, out['means'], out['covs'], map_index_to_word)
```
**Quiz Question**. Select all the topics that have a cluster in the model created above. [multiple choice]
- Baseball
- Basketball
- Soccer/Football
- Music
- Politics
- Law
- Finance
## Comparing to random initialization
Create variables for randomly initializing the EM algorithm. Complete the following code block.
```
np.random.seed(5) # See the note below to see why we set seed=5.
num_clusters = len(means)
num_docs, num_words = tf_idf.shape
random_means = []
random_covs = []
random_weights = []
for k in range(num_clusters):
# Create a numpy array of length num_words with random normally distributed values.
# Use the standard univariate normal distribution (mean 0, variance 1).
# YOUR CODE HERE
mean = np.random.normal(0, 1, size=num_words)
# Create a numpy array of length num_words with random values uniformly distributed between 1 and 5.
# YOUR CODE HERE
cov = np.random.uniform(1, 5, size=(num_words))
# Initially give each cluster equal weight.
# YOUR CODE HERE
weight = 1
random_means.append(mean)
random_covs.append(cov)
random_weights.append(weight)
```
**Quiz Question**: Try fitting EM with the random initial parameters you created above. (Use `cov_smoothing=1e-5`.) Store the result to `out_random_init`. What is the final loglikelihood that the algorithm converges to?
```
out_random_init = EM_for_high_dimension(tf_idf, random_means, random_covs, random_weights, cov_smoothing=1e-5)
print("{:e}".format(out_random_init['loglik'][-1]))
```
**Quiz Question:** Is the final loglikelihood larger or smaller than the final loglikelihood we obtained above when initializing EM with the results from running k-means?
```
out['loglik']
```
**Quiz Question**: For the above model, `out_random_init`, use the `visualize_EM_clusters` method you created above. Are the clusters more or less interpretable than the ones found after initializing using k-means?
```
# YOUR CODE HERE. Use visualize_EM_clusters, which will require you to pass in tf_idf and map_index_to_word.
visualize_EM_clusters(tf_idf, out_random_init['means'], out_random_init['covs'], map_index_to_word)
```
**Note**: Random initialization may sometimes produce a better fit than k-means initialization. We do not claim that random initialization is always worse. However, this section does illustrate that random initialization often produces much worse clustering than its k-means counterpart, which is why we provide the particular random seed (`np.random.seed(5)`).
## Takeaway
In this assignment we were able to apply the EM algorithm to a mixture of Gaussians model of text data. This was made possible by modifying the model to assume a diagonal covariance for each cluster, and by modifying the implementation to use a sparse matrix representation. In the second part you explored the role of k-means initialization on the convergence of the model as well as the interpretability of the clusters.
|
github_jupyter
|
# Text Data Explanation Benchmarking: Emotion Multiclass Classification
This notebook demonstrates how to use the benchmark utility to evaluate the performance of an explainer on text data. In this demo, we showcase the explanation performance of the Partition explainer on an emotion multiclass classification model. The evaluation metrics are "keep positive" and "keep negative", and the masker used is the Text masker.
The new benchmark utility uses the new API, which wraps the user-supplied model in a MaskedModel and evaluates it on masked versions of the inputs.
```
import copy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import shap.benchmark as benchmark
import shap
import scipy as sp
import nlp
import torch
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('max_colwidth', None)
```
### Load Data and Model
```
train, test = nlp.load_dataset("emotion", split = ["train", "test"])
data={'text':train['text'],
'emotion':train['label']}
data = pd.DataFrame(data)
tokenizer = AutoTokenizer.from_pretrained("nateraw/bert-base-uncased-emotion",use_fast=True)
model = AutoModelForSequenceClassification.from_pretrained("nateraw/bert-base-uncased-emotion")
```
### Class Label Mapping
```
# set mapping between label and id
id2label = model.config.id2label
label2id = model.config.label2id
labels = sorted(label2id, key=label2id.get)
```
### Define Score Function
```
def f(x):
tv = torch.tensor([tokenizer.encode(v, padding='max_length', max_length=128,truncation=True) for v in x])
attention_mask = (tv!=0).type(torch.int64)
outputs = model(tv,attention_mask=attention_mask)[0].detach().numpy()
scores = (np.exp(outputs).T / np.exp(outputs).sum(-1)).T
val = sp.special.logit(scores)
return val
```
### Create Explainer Object
```
explainer = shap.Explainer(f,tokenizer,output_names=labels)
```
### Run SHAP Explanation
```
shap_values = explainer(data['text'][0:20])
```
### Define Metrics (Sort Order & Perturbation Method)
```
sort_order = 'positive'
perturbation = 'keep'
```
### Benchmark Explainer
```
sequential_perturbation = benchmark.perturbation.SequentialPerturbation(explainer.model, explainer.masker, sort_order, perturbation)
xs, ys, auc = sequential_perturbation.model_score(shap_values, data['text'][0:20])
sequential_perturbation.plot(xs, ys, auc)
sort_order = 'negative'
perturbation = 'keep'
sequential_perturbation = benchmark.perturbation.SequentialPerturbation(explainer.model, explainer.masker, sort_order, perturbation)
xs, ys, auc = sequential_perturbation.model_score(shap_values, data['text'][0:20])
sequential_perturbation.plot(xs, ys, auc)
```
|
github_jupyter
|
# Unsupervised outliers detection (event detection)
```
import drama as drm
import numpy as np
import matplotlib.pylab as plt
from matplotlib import gridspec
from drama.outlier_finder import grid_run_drama
from keras.datasets import mnist
%matplotlib inline
n_try = 5
# MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
image_size = x_train.shape[1]
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
inlier_labels = [2,3,4,6,8]
outlier_labels = [5,0]
n_inliers = 500
n_outliers = 10
X = []
y = []
for i in inlier_labels:
filt = y_train==i
ns = np.sum(filt)
X.extend(x_train[filt][:n_inliers])
y.extend(n_inliers*[0])
for i in outlier_labels:
filt = y_train==i
ns = np.sum(filt)
X.extend(x_train[filt][:n_outliers])
y.extend(n_outliers*[1])
X = np.array(X)
y = np.array(y)
X.shape,y.shape
X = np.reshape(X, [-1, image_size*image_size])
lof_all = np.zeros((n_try,3))
ifr_all = np.zeros((n_try,3))
df = drm.sk_check(X.reshape(-1,784),X.reshape(-1,784),y,[1])
for i in range(n_try):
for j,scr in enumerate(['AUC','MCC','RWS']):
lof_all[i,j] = df[scr][0]
ifr_all[i,j] = df[scr][1]
df
```
# Outlier detection
```
metrics = ['cityblock', 'L2', 'L4', 'braycurtis', 'canberra', 'chebyshev',
'correlation', 'mahalanobis', 'wL2', 'wL4']
drt_list = ['DAE1D', 'DVAE1D']
result = []
for i in range(n_try):
# auc,mcc,rws,conf = grid_run_drama(X,y)
auc,mcc,rws,conf = grid_run_drama(X,y,
drt_list=drt_list,
metrics=metrics,
n_split=2)
arr = np.stack([auc,mcc,rws],axis=-1)
result.append(arr)
result = np.array(result)
drts = np.unique(conf[:,1])
metrs = np.unique(conf[:,2])
res = result.reshape(n_try,len(drt_list),len(metrics),-1)
drm.plot_table(np.mean(res,axis=0),drts,metrs)
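# For every (reduction method, metric) pair, count how many of the n_try runs beat
# both the LOF and isolation-forest baselines on AUC, MCC and RWS respectively.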
auc = np.sum((res[:, :, :, 0].T>lof_all[:, 0]) & (res[:, :, :, 0].T>ifr_all[:, 0]),axis=-1).T
mcc = np.sum((res[:, :, :, 1].T>lof_all[:, 1]) & (res[:, :, :, 1].T>ifr_all[:, 1]),axis=-1).T
rws = np.sum((res[:, :, :, 2].T>lof_all[:, 2]) & (res[:, :, :, 2].T>ifr_all[:, 2]),axis=-1).T
fig = plt.figure(figsize=(20,10))
plt.clf()
ax = fig.add_subplot(111)
ax.set_aspect('auto')
ax.imshow(auc, cmap=plt.cm.jet,interpolation='nearest')
width, height = auc.shape
for x in range(width):
for y in range(height):
ax.annotate('AUC: {:d}\n MCC: {:d}\n RWS: {:d}'.format(auc[x][y],mcc[x][y],rws[x][y]), xy=(y, x),
horizontalalignment='center',
verticalalignment='center',fontsize=18);
plt.xticks(range(len(metrs)),metrs,fontsize=15)
plt.yticks(range(len(drts)), drts,fontsize=15)
plt.title('Number of successes (LOF and i-forest) out of 20 data set',fontsize=25)
plt.annotate('** Colors depend on AUC.', (0,0), (0, -30), xycoords='axes fraction',
textcoords='offset points', va='top',fontsize=15)
# plt.savefig('AND_success.jpg',dpi=150,bbox_inches='tight')
inlier_labels = [1,2,3,4]
outlier_labels = [5]
n_inliers = 1000
n_outliers = 30
X = []
y = []
for i in inlier_labels:
filt = y_train==i
ns = np.sum(filt)
X.extend(x_train[filt][:n_inliers])
y.extend(n_inliers*[0])
for i in outlier_labels:
filt = y_train==i
ns = np.sum(filt)
X.extend(x_train[filt][:n_outliers])
y.extend(n_outliers*[1])
X = np.array(X)
y = np.array(y)
X = np.reshape(X, [-1, image_size*image_size, 1])
metrics = ['cityblock', 'L2', 'L4', 'braycurtis', 'canberra', 'chebyshev',
'correlation', 'mahalanobis', 'wL2', 'wL4']
drt_list = ['CAE1D', 'CVAE1D']
result = []
for i in range(n_try):
# auc,mcc,rws,conf = grid_run_drama(X,y)
auc,mcc,rws,conf = grid_run_drama(X,y,
drt_list=drt_list,
metrics=metrics,
n_split=2)
arr = np.stack([auc,mcc,rws],axis=-1)
result.append(arr)
result = np.array(result)
drts = np.unique(conf[:,1])
metrs = np.unique(conf[:,2])
res = result.reshape(n_try,len(drt_list),len(metrics),-1)
drm.plot_table(np.mean(res,axis=0),drts,metrs)
auc = np.sum((res[:, :, :, 0].T>lof_all[:, 0]) & (res[:, :, :, 0].T>ifr_all[:, 0]),axis=-1).T
mcc = np.sum((res[:, :, :, 1].T>lof_all[:, 1]) & (res[:, :, :, 1].T>ifr_all[:, 1]),axis=-1).T
rws = np.sum((res[:, :, :, 2].T>lof_all[:, 2]) & (res[:, :, :, 2].T>ifr_all[:, 2]),axis=-1).T
fig = plt.figure(figsize=(20,10))
plt.clf()
ax = fig.add_subplot(111)
ax.set_aspect('auto')
ax.imshow(auc, cmap=plt.cm.jet,interpolation='nearest')
width, height = auc.shape
for x in range(width):
for y in range(height):
ax.annotate('AUC: {:d}\n MCC: {:d}\n RWS: {:d}'.format(auc[x][y],mcc[x][y],rws[x][y]), xy=(y, x),
horizontalalignment='center',
verticalalignment='center',fontsize=18);
plt.xticks(range(len(metrs)),metrs,fontsize=15)
plt.yticks(range(len(drts)), drts,fontsize=15)
plt.title('Number of successes (LOF and i-forest) out of 20 data set',fontsize=25)
plt.annotate('** Colors depend on AUC.', (0,0), (0, -30), xycoords='axes fraction',
textcoords='offset points', va='top',fontsize=15)
# plt.savefig('AND_success.jpg',dpi=150,bbox_inches='tight')
inlier_labels = [1,2,3,4]
outlier_labels = [5]
n_inliers = 1000
n_outliers = 30
X = []
y = []
for i in inlier_labels:
filt = y_train==i
ns = np.sum(filt)
X.extend(x_train[filt][:n_inliers])
y.extend(n_inliers*[0])
for i in outlier_labels:
filt = y_train==i
ns = np.sum(filt)
X.extend(x_train[filt][:n_outliers])
y.extend(n_outliers*[1])
X = np.array(X)
y = np.array(y)
X = np.reshape(X, [-1, image_size, image_size, 1])
metrics = ['cityblock', 'L2', 'L4', 'braycurtis', 'canberra', 'chebyshev',
'correlation', 'mahalanobis', 'wL2', 'wL4']
drt_list = ['CAE2D', 'CVAE2D']
result = []
for i in range(n_try):
# auc,mcc,rws,conf = grid_run_drama(X,y)
auc,mcc,rws,conf = grid_run_drama(X,y,
drt_list=drt_list,
metrics=metrics,
n_split=2)
arr = np.stack([auc,mcc,rws],axis=-1)
result.append(arr)
result = np.array(result)
drts = np.unique(conf[:,1])
metrs = np.unique(conf[:,2])
res = result.reshape(n_try,len(drt_list),len(metrics),-1)
drm.plot_table(np.mean(res,axis=0),drts,metrs)
auc = np.sum((res[:, :, :, 0].T>lof_all[:, 0]) & (res[:, :, :, 0].T>ifr_all[:, 0]),axis=-1).T
mcc = np.sum((res[:, :, :, 1].T>lof_all[:, 1]) & (res[:, :, :, 1].T>ifr_all[:, 1]),axis=-1).T
rws = np.sum((res[:, :, :, 2].T>lof_all[:, 2]) & (res[:, :, :, 2].T>ifr_all[:, 2]),axis=-1).T
fig = plt.figure(figsize=(20,10))
plt.clf()
ax = fig.add_subplot(111)
ax.set_aspect('auto')
ax.imshow(auc, cmap=plt.cm.jet,interpolation='nearest')
width, height = auc.shape
for x in range(width):
for y in range(height):
ax.annotate('AUC: {:d}\n MCC: {:d}\n RWS: {:d}'.format(auc[x][y],mcc[x][y],rws[x][y]), xy=(y, x),
horizontalalignment='center',
verticalalignment='center',fontsize=18);
plt.xticks(range(len(metrs)),metrs,fontsize=15)
plt.yticks(range(len(drts)), drts,fontsize=15)
plt.title('Number of successes (LOF and i-forest) out of 20 data set',fontsize=25)
plt.annotate('** Colors depend on AUC.', (0,0), (0, -30), xycoords='axes fraction',
textcoords='offset points', va='top',fontsize=15)
# plt.savefig('AND_success.jpg',dpi=150,bbox_inches='tight')
```
|
github_jupyter
|
```
# Import conventions we'll be using here. See Part 1
import matplotlib
# matplotlib.use('nbagg')
import matplotlib.pyplot as plt
import numpy as np
```
# Limits, Legends, and Layouts
In this section, we'll focus on what happens around the edges of the axes: Ticks, ticklabels, limits, layouts, and legends.
# Limits and autoscaling
By default, Matplotlib will attempt to determine limits for you that encompass all the data you have plotted. This is the "autoscale" feature. For image plots, the limits are not padded, while plots such as scatter plots and bar plots are given some padding.
```
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=plt.figaspect(0.5))
ax1.plot([-10, -5, 0, 5, 10, 15], [-1.2, 2, 3.5, -0.3, -4, 1])
ax2.scatter([-10, -5, 0, 5, 10, 15], [-1.2, 2, 3.5, -0.3, -4, 1])
plt.show()
```
### `ax.margins(...)`
If you'd like to add a bit of "padding" to a plot, `ax.margins(<some_small_fraction>)` is a very handy way to do so. Instead of choosing "even-ish" numbers as min/max ranges for each axis, `margins` will make Matplotlib calculate the min/max of each axis by taking the range of the data and adding on a fractional amount of padding.
As an example:
```
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=plt.figaspect(0.5))
ax1.plot([-10, -5, 0, 5, 10, 15], [-1.2, 2, 3.5, -0.3, -4, 1])
ax2.scatter([-10, -5, 0, 5, 10, 15], [-1.2, 2, 3.5, -0.3, -4, 1])
ax1.margins(x=0.0, y=0.1) # 10% padding in the y-direction only
ax2.margins(0.05) # 5% padding in all directions
plt.show()
```
### `ax.axis(...)`
The `ax.axis(...)` method is a convenient way of controlling the axes limits and enabling/disabling autoscaling.
If you ever need to get all of the current plot limits, calling `ax.axis()` with no arguments will return the xmin/max/etc:
xmin, xmax, ymin, ymax = ax.axis()
If you'd like to manually set all of the x/y limits at once, you can use `ax.axis` for this, as well (note that we're calling it with a single argument that's a sequence, not 4 individual arguments):
ax.axis([xmin, xmax, ymin, ymax])
However, you'll probably use `axis` mostly with either the `"tight"` or `"equal"` options. There are other options as well; see the documentation for full details. In a nutshell, though:
* *tight*: Set axes limits to the exact range of the data
* *equal*: Set axes scales such that one cm/inch in the y-direction is the same as one cm/inch in the x-direction. In Matplotlib terms, this sets the aspect ratio of the plot to 1. That _doesn't_ mean that the axes "box" will be square.
And as an example:
```
fig, axes = plt.subplots(nrows=3)
for ax in axes:
ax.plot([-10, -5, 0, 5, 10, 15], [-1.2, 2, 3.5, -0.3, -4, 1])
axes[0].set_title('Normal Autoscaling', y=0.7, x=0.8)
axes[1].set_title('ax.axis("tight")', y=0.7, x=0.8)
axes[1].axis('tight')
axes[2].set_title('ax.axis("equal")', y=0.7, x=0.8)
axes[2].axis('equal')
plt.show()
```
### Manually setting only one limit
Another trick with limits is to specify only one end of a limit. When done **after** a plot is made, this has the effect of allowing the user to anchor that end while letting Matplotlib autoscale the rest.
```
# Good -- setting limits after plotting is done
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=plt.figaspect(0.5))
ax1.plot([-10, -5, 0, 5, 10, 15], [-1.2, 2, 3.5, -0.3, -4, 1])
ax2.scatter([-10, -5, 0, 5, 10, 15], [-1.2, 2, 3.5, -0.3, -4, 1])
ax1.set_ylim(bottom=-10)
ax2.set_xlim(right=25)
plt.show()
# Bad -- Setting limits before plotting is done
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=plt.figaspect(0.5))
ax1.set_ylim(bottom=-10)
ax2.set_xlim(right=25)
ax1.plot([-10, -5, 0, 5, 10, 15], [-1.2, 2, 3.5, -0.3, -4, 1])
ax2.scatter([-10, -5, 0, 5, 10, 15], [-1.2, 2, 3.5, -0.3, -4, 1])
plt.show()
```
# Legends
As you've seen in some of the examples so far, the X and Y axes can also be labeled, as well as the subplot itself via the title.
However, another thing you can label is the line/point/bar/etc that you plot. You can provide a label to your plot, which allows your legend to automatically build itself.
```
fig, ax = plt.subplots()
ax.plot([1, 2, 3, 4], [10, 20, 25, 30], label='Philadelphia')
ax.plot([1, 2, 3, 4], [30, 23, 13, 4], label='Boston')
ax.set(ylabel='Temperature (deg C)', xlabel='Time', title='A tale of two cities')
ax.legend()
plt.show()
```
In `classic` mode, legends will go in the upper right corner by default (you can control this with the `loc` kwarg). As of v2.0, by default Matplotlib will choose a location to avoid overlapping plot elements as much as possible. To force this option, you can pass in:
ax.legend(loc="best")
Also, if you happen to be plotting something that you do not want to appear in the legend, just set the label to "\_nolegend\_".
```
fig, ax = plt.subplots(1, 1)
ax.bar([1, 2, 3, 4], [10, 20, 25, 30], label="Foobar", align='center', color='lightblue')
ax.plot([1, 2, 3, 4], [10, 20, 25, 30], label="_nolegend_", marker='o', color='darkred')
ax.legend(loc='best')
plt.show()
```
# Exercise 4.1
Once again, let's use a bit of what we've learned. Try to reproduce the following figure:
<img src="images/exercise_4-1.png">
Hint: You'll need to combine `ax.axis(...)` and `ax.margins(...)`. Here's the data and some code to get you started:
```
import numpy as np
import matplotlib.pyplot as plt
t = np.linspace(0, 2 * np.pi, 150)
x1, y1 = np.cos(t), np.sin(t)
x2, y2 = 2 * x1, 2 * y1
colors = ['darkred', 'darkgreen']
# Try to plot the two circles, scale the axes as shown and add a legend
# Hint: it's easiest to combine `ax.axis(...)` and `ax.margins(...)` to scale the axes
%load solutions/4.1-legends_and_scaling.py
```
# Dealing with the boundaries: Layout, ticks, spines, etc
One key thing we haven't talked about yet is all of the annotation on the outside of the axes, the borders of the axes, and how to adjust the amount of space around the axes. We won't go over every detail, but this next section should give you a reasonable working knowledge of how to configure what happens around the edges of your axes.
## Ticks, Tick Lines, Tick Labels and Tickers
This is a constant source of confusion:
* A Tick is the *location* of a Tick Label.
* A Tick Line is the line that denotes the location of the tick.
* A Tick Label is the text that is displayed at that tick.
* A [`Ticker`](http://matplotlib.org/api/ticker_api.html#module-matplotlib.ticker) automatically determines the ticks for an Axis and formats the tick labels.
[`tick_params()`](https://matplotlib.org/api/axes_api.html#ticks-and-tick-labels) is often used to help configure your tickers.
```
fig, ax = plt.subplots()
ax.plot([1, 2, 3, 4], [10, 20, 25, 30])
# Manually set ticks and tick labels *on the x-axis* (note ax.xaxis.set, not ax.set!)
ax.xaxis.set(ticks=range(1, 5), ticklabels=[3, 100, -12, "foo"])
# Make the y-ticks a bit longer and go both in and out...
ax.tick_params(axis='y', direction='inout', length=10)
plt.show()
```
A commonly-asked question is "How do I plot categories?"
Starting in version 2.0 of Matplotlib, categorical data can be plotted just like any other data.
For example:
```
data = [('apples', 2), ('oranges', 3), ('peaches', 1)]
fruit, value = zip(*data)
fig, ax = plt.subplots()
ax.bar(fruit, value, align='center', color='gray')
plt.show()
```
## Subplot Spacing
The spacing between the subplots can be adjusted using [`fig.subplots_adjust()`](http://matplotlib.org/api/pyplot_api.html?#matplotlib.pyplot.subplots_adjust). Play around with the example below to see how the different arguments affect the spacing.
```
fig, axes = plt.subplots(2, 2, figsize=(9, 9))
fig.subplots_adjust(wspace=0.5, hspace=0.3,
left=0.125, right=0.9,
top=0.9, bottom=0.1)
plt.show()
```
A common "gotcha" is that the labels are not automatically adjusted to avoid overlapping those of another subplot. Matplotlib does not currently have any sort of robust layout engine, as it is a design decision to minimize the amount of "magical plotting". We intend to let users have complete, 100% control over their plots. LaTeX users would be quite familiar with the amount of frustration that can occur with automatic placement of figures in their documents.
That said, there have been some efforts to develop tools that users can use to help address the most common complaints. The "[Tight Layout](http://matplotlib.org/users/tight_layout_guide.html)" feature, when invoked, will attempt to resize margins and subplots so that nothing overlaps.
If you have multiple subplots, and want to avoid overlapping titles/axis labels/etc, `fig.tight_layout` is a great way to do so:
```
def example_plot(ax):
ax.plot([1, 2])
ax.set_xlabel('x-label', fontsize=16)
ax.set_ylabel('y-label', fontsize=8)
ax.set_title('Title', fontsize=24)
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
example_plot(ax4)
# Enable fig.tight_layout to compare...
fig.tight_layout()
plt.show()
```
## GridSpec
Under the hood, Matplotlib utilizes [`GridSpec`](http://matplotlib.org/api/gridspec_api.html) to lay out the subplots. While `plt.subplots()` is fine for simple cases, sometimes you will need more advanced subplot layouts. In such cases, you should use GridSpec directly. GridSpec is outside the scope of this tutorial, but it is handy to know that it exists. [Here](http://matplotlib.org/users/gridspec.html) is a guide on how to use it.
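As a quick illustration (a minimal sketch; see the linked guide for the full API), `GridSpec` lets a subplot span several cells of the layout grid:
```
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

fig = plt.figure()
gs = gridspec.GridSpec(2, 2)        # a 2x2 layout grid
ax_top = fig.add_subplot(gs[0, :])  # top row, spanning both columns
ax_bl = fig.add_subplot(gs[1, 0])   # bottom-left cell
ax_br = fig.add_subplot(gs[1, 1])   # bottom-right cell
plt.show()
```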
## Sharing axes
There will be times when you want the x axis and/or the y axis of your subplots to be "shared". Sharing an axis means that the axes in two or more subplots are tied together such that a change to one of them changes all of the other shared axes. This works very nicely with autoscaling arbitrary datasets that may have overlapping domains. Furthermore, when interacting with the plots (panning and zooming), all of the shared axes will pan and zoom automatically.
```
fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True, sharey=True)
ax1.plot([1, 2, 3, 4], [1, 2, 3, 4])
ax2.plot([3, 4, 5, 6], [6, 5, 4, 3])
plt.show()
```
## "Twinning" axes
Sometimes one may want to overlay two plots on the same axes, but the scales may be entirely different. You can simply treat them as separate plots, but then twin them.
```
fig, ax1 = plt.subplots(1, 1)
ax1.plot([1, 2, 3, 4], [1, 2, 3, 4])
ax2 = ax1.twinx()
ax2.scatter([1, 2, 3, 4], [60, 50, 40, 30])
ax1.set(xlabel='X', ylabel='First scale')
ax2.set(ylabel='Other scale')
plt.show()
```
# Axis Spines
Spines are the axis lines for a plot. Each plot can have four spines: "top", "bottom", "left" and "right". By default, they are set so that they frame the plot, but they can be individually positioned and configured via the [`set_position()`](http://matplotlib.org/api/spines_api.html#matplotlib.spines.Spine.set_position) method of the spine. Here are some different configurations.
```
fig, ax = plt.subplots()
ax.plot([-2, 2, 3, 4], [-10, 20, 25, 5])
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom') # no ticklines at the top
ax.spines['right'].set_visible(False)
ax.yaxis.set_ticks_position('left') # no ticklines on the right
# "outward"
# Move the two remaining spines "out" away from the plot by 10 points
#ax.spines['bottom'].set_position(('outward', 10))
#ax.spines['left'].set_position(('outward', 10))
# "data"
# Have the spines stay intersected at (0,0)
#ax.spines['bottom'].set_position(('data', 0))
#ax.spines['left'].set_position(('data', 0))
# "axes"
# Have the two remaining spines placed at a fraction of the axes
#ax.spines['bottom'].set_position(('axes', 0.75))
#ax.spines['left'].set_position(('axes', 0.3))
plt.show()
```
# Exercise 4.2
This one is a bit trickier. Once again, try to reproduce the figure below:
<img src="images/exercise_4-2.png">
A few key hints: The two subplots have no vertical space between them (this means that the `hspace` is `0`). Note that the bottom spine is at 0 in data coordinates and the tick lines are missing from the right and top sides.
Because you're going to be doing a lot of the same things to both subplots, to avoid repetitive code you might consider writing a function that takes an `Axes` object and makes the spine changes, etc., to it.
```
import matplotlib.pyplot as plt
import numpy as np
# Try to reproduce the figure shown in images/exercise_4.2.png
# This one is a bit trickier!
# Here's the data...
data = [('dogs', 4, 4), ('frogs', -3, 1), ('cats', 1, 5), ('goldfish', -2, 2)]
animals, friendliness, popularity = zip(*data)
%load solutions/4.2-spines_ticks_and_subplot_spacing.py
```
|
github_jupyter
|
## QE methods and QE_utils
In this tutorial, we will explore various methods needed to handle Quantum Espresso (QE) calculations - to run them, prepare input, and extract output. All that will be done with the help of the **QE_methods** and **QE_utils** modules, which contains the following functions:
**QE_methods**
* cryst2cart(a1,a2,a3,r)
* [Topic 2](#topic-2) read_qe_schema(filename, verbose=0)
* [Topic 3](#topic-3) read_qe_index(filename, orb_list, verbose=0)
* [Topic 4](#topic-4) read_qe_wfc_info(filename, verbose=0)
* [Topic 9](#topic-9) read_qe_wfc_grid(filename, verbose=0)
* [Topic 5](#topic-5) read_qe_wfc(filename, orb_list, verbose=0)
* read_md_data(filename)
* read_md_data_xyz(filename, PT, dt)
* read_md_data_xyz2(filename, PT)
* read_md_data_cell(filename)
* out2inp(out_filename,templ_filename,wd,prefix,t0,tmax,dt)
* out2pdb(out_filename,T,dt,pdb_prefix)
* out2xyz(out_filename,T,dt,xyz_filename)
* xyz2inp(out_filename,templ_filename,wd,prefix,t0,tmax,dt)
* get_QE_normal_modes(filename, verbosity=0)
* [Topic 1](#topic-1) run_qe(params, t, dirname0, dirname1)
* read_info(params)
* read_all(params)
* read_wfc_grid(params)
**QE_utils**
* get_value(params,key,default,typ)
* split_orbitals_energies(C, E)
* [Topic 7](#topic-7) merge_orbitals(Ca, Cb)
* post_process(coeff, ene, issoc)
* [Topic 6](#topic-6) orthogonalize_orbitals(C)
* [Topic 8](#topic-8) orthogonalize_orbitals2(Ca,Cb)
```
import os
import sys
import math
import copy
if sys.platform=="cygwin":
from cyglibra_core import *
elif sys.platform=="linux" or sys.platform=="linux2":
from liblibra_core import *
#from libra_py import *
from libra_py import units
from libra_py import QE_methods
from libra_py import QE_utils
from libra_py import scan
from libra_py import hpc_utils
from libra_py import data_read
from libra_py import data_outs
from libra_py import data_conv
from libra_py.workflows.nbra import step2
import py3Dmol # molecular visualization
import matplotlib.pyplot as plt # plots
%matplotlib inline
plt.rc('axes', titlesize=24) # fontsize of the axes title
plt.rc('axes', labelsize=20) # fontsize of the x and y labels
plt.rc('legend', fontsize=20) # legend fontsize
plt.rc('xtick', labelsize=16) # fontsize of the tick labels
plt.rc('ytick', labelsize=16) # fontsize of the tick labels
plt.rc('figure.subplot', left=0.2)
plt.rc('figure.subplot', right=0.95)
plt.rc('figure.subplot', bottom=0.13)
plt.rc('figure.subplot', top=0.88)
colors = {}
colors.update({"11": "#8b1a0e"}) # red
colors.update({"12": "#FF4500"}) # orangered
colors.update({"13": "#B22222"}) # firebrick
colors.update({"14": "#DC143C"}) # crimson
colors.update({"21": "#5e9c36"}) # green
colors.update({"22": "#006400"}) # darkgreen
colors.update({"23": "#228B22"}) # forestgreen
colors.update({"24": "#808000"}) # olive
colors.update({"31": "#8A2BE2"}) # blueviolet
colors.update({"32": "#00008B"}) # darkblue
colors.update({"41": "#2F4F4F"}) # darkslategray
clrs_index = ["11", "21", "31", "41", "12", "22", "32", "13","23", "14", "24"]
```
First, let's prepare the working directories and run a simple SCF calculation to generate the output files.
```
PWSCF = os.environ['PWSCF62']
# Setup the calculations
params = {}
# I run the calculations on laptop, so no BATCH system
params["BATCH_SYSTEM"] = None
# The number of processors to use
params["NP"] = 1
# The QE executable
params["EXE"] = F"{PWSCF}/pw.x"
# The executable to generate the wavefunction files
params["EXE_EXPORT"] = F"{PWSCF}/pw_export.x" #"/mnt/c/cygwin/home/Alexey-user/Soft/espresso/bin/pw_export.x"
# The type of the calculations to be performed - in this case only a single SCF with spin-polarization
params["nac_method"] = 1
# The prefix of the input file
params["prefix0"] = "x0.scf"
# Working directory - where all stuff happen
params["wd"] = os.getcwd()+"/wd"
# Remove the previous results and temporary working directory from the previous runs
os.system(F"rm -r {params['wd']}")
os.system(F"mkdir {params['wd']}")
# Copy the input files into the working directory
# also, notice how the SCF input file name has been changed
os.system(F"cp x0.scf.in {params['wd']}/x0.scf.0.in")
os.system(F"cp x0.exp.in {params['wd']}")
os.system(F"cp Li.pbe-sl-kjpaw_psl.1.0.0.UPF {params['wd']}")
os.system(F"cp H.pbe-rrkjus_psl.1.0.0.UPF {params['wd']}")
```
<a name="topic-1"></a>
### 1. run_qe(params, t, dirname0, dirname1)
Use it to actually run the calculations
Comment this out if you have already done the calculations
```
help(QE_methods.run_qe)
!pwd
os.chdir("wd")
QE_methods.run_qe(params, 0, "res", "res2")
os.chdir("../")
```
<a name="topic-2"></a>
### 2. read_qe_schema(filename, verbose=0)
Can be used to read the information about the completed run
```
pwd
info = QE_methods.read_qe_schema("wd/res/x0.save/data-file-schema.xml", verbose=0)
print(info)
nat = info["nat"]
R, F = info["coords"], info["forces"]
for at in range(nat):
print(F"Atom {at} \t {info['atom_labels'][at]} \t\
x={R.get(3*at+0):.5f}, y={R.get(3*at+1):.5f}, z={R.get(3*at+2):.5f}\
fx={F.get(3*at+0):.5f}, fy={F.get(3*at+1):.5f}, fz={F.get(3*at+2):.5f}")
```
<a name="topic-3"></a>
### 3. read_qe_index(filename, orb_list, verbose=0)
This function is analogous to **read_qe_schema** in many regards; it just extracts somewhat different information, including orbital energies.
One also needs to specify which energy levels to extract, so that information has to be known beforehand.
In this example, we have just 4 electrons, so:
1 - HOMO-1
2 - HOMO
3 - LUMO
4 - LUMO+1
Let's try just these 4 orbitals.
```
info2, all_e = QE_methods.read_qe_index("wd/res/x0.export/index.xml", [1,2,3,4], verbose=1)
print( info2)
print(all_e)
e_alp = all_e[0]
e_bet = all_e[1]
for i in range(4):
print(F"E_{i}^alpha = {e_alp.get(i,i).real:12.8f} \t E_{i}^beta = {e_bet.get(i,i).real:12.8f}")
```
<a name="topic-4"></a>
### 4. read_qe_wfc_info(filename, verbose=0)
Can be used to extract some descriptors of the wavefunctions produced
```
wfc_info1 = QE_methods.read_qe_wfc_info("wd/res/x0.export/wfc.1", verbose=1)
wfc_info2 = QE_methods.read_qe_wfc_info("wd/res/x0.export/wfc.2", verbose=1)
print(wfc_info1)
print(wfc_info2)
```
<a name="topic-5"></a>
### 5. read_qe_wfc(filename, orb_list, verbose=0)
Can be used to read in the actual wavefunctions produced
```
alpha = QE_methods.read_qe_wfc("wd/res/x0.export/wfc.1", [1,2,3,4], verbose=0)
beta = QE_methods.read_qe_wfc("wd/res/x0.export/wfc.2", [1,2,3,4], verbose=0)
print(alpha)
print(alpha.num_of_rows, alpha.num_of_cols)
print(beta)
print(beta.num_of_rows, beta.num_of_cols)
```
Orthogonality and normalization
Below we can see that the MO overlaps <alpha(i)|alpha(j)> are almost orthonormal - the diagonal elements are correctly 1.0,
but the off-diagonal elements are not quite 0.0.
The same is true for <beta(i)|beta(j)>.
However, there is no expectation of orthogonality or normalization across the two sets.
```
S_aa = alpha.H() * alpha
S_bb = beta.H() * beta
S_ab = alpha.H() * beta
def print_mat(X):
nr, nc = X.num_of_rows, X.num_of_cols
for i in range(nr):
line = ""
for j in range(nc):
line = line + "%8.5f " % (X.get(i,j).real)
print(line)
print("S_aa")
print_mat(S_aa)
print("S_bb")
print_mat(S_bb)
print("S_ab")
print_mat(S_ab)
```
<a name="topic-6"></a>
### 6. QE_utils.orthogonalize_orbitals(C)
Can be used to orthogonalize orbitals if they are not already orthogonal.
So let's transform the alpha and beta orbitals so that they become orthonormal within each set.
The resulting orbitals are still not orthonormal across the two sets.
```
alp = QE_utils.orthogonalize_orbitals(alpha)
bet = QE_utils.orthogonalize_orbitals(beta)
S_aa = alp.H() * alp
S_bb = bet.H() * bet
S_ab = alp.H() * bet
print("S_aa")
print_mat(S_aa)
print("S_bb")
print_mat(S_bb)
print("S_ab")
print_mat(S_ab)
```
<a name="topic-7"></a>
### 7. QE_utils.merge_orbitals(Ca, Cb)
Sometimes (usually in the non-collinear case), we want to have a single set of orbitals (many are nearly doubly degenerate), not just alpha and beta components. We can prepare the single set from the spinor components using this function. In this example, we are just going to mimic non-collinear SOC calculations, pretending that the alpha and beta orbital sets are the spinor components.
```
C = QE_utils.merge_orbitals(alpha, beta)
S = C.H() * C
print_mat(S)
```
<a name="topic-8"></a>
### 8. QE_utils.orthogonalize_orbitals2(Ca, Cb)
This is a special orthogonalization procedure - the one for 2-component spinors. The inputs are assumed to be the two components of each orbital. The orthogonalization works such that S_aa + S_bb = I.
```
alpha = QE_methods.read_qe_wfc("wd/res/x0.export/wfc.1", [1,2,3,4], verbose=0)
beta = QE_methods.read_qe_wfc("wd/res/x0.export/wfc.2", [1,2,3,4], verbose=0)
alp, bet = QE_utils.orthogonalize_orbitals2(alpha, beta)
S_aa = alp.H() * alp
S_bb = bet.H() * bet
print("S_aa")
print_mat(S_aa)
print("S_bb")
print_mat(S_bb)
print("S_aa + S_bb")
print_mat(S_aa + S_bb)
S_ab = alp.H() * bet
print("S_ab")
print_mat(S_ab)
```
<a name="topic-9"></a>
### 9. read_qe_wfc_grid(filename, verbose=0)
Can be used to read the grid points for the given PW representation.
```
G1 = QE_methods.read_qe_wfc_grid("wd/res/x0.export/grid.1", verbose=0)
print(len(G1))
for i in range(10):
print(F"{i} \t {G1[i].x} \t {G1[i].y} \t {G1[i].z}")
```
|
github_jupyter
|
# Naive-Bayes Classifier
```
# Baseline Naive-Bayes classifier
import sklearn
import numpy as np
import sklearn.datasets as skd
import ast
from sklearn.feature_extraction import DictVectorizer
from sklearn import linear_model
from sklearn import naive_bayes
from sklearn.metrics import precision_recall_fscore_support
from sklearn.neighbors import NearestCentroid
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics import confusion_matrix
from scipy.sparse import vstack
import matplotlib.pyplot as plt
import itertools
import pickle
file = open("mr_train.obj",'rb')
mr_train = pickle.load(file)
file.close()
file = open("mr_test.obj",'rb')
mr_test = pickle.load(file)
file.close()
file = open("mr_cv.obj",'rb')
mr_cv = pickle.load(file)
file.close()
'''
file = open("b_train.obj",'rb')
b = pickle.load(file)
file.close()
file = open("c_cv.obj",'rb')
c = pickle.load(file)
file.close()
file = open("d_test.obj",'rb')
d = pickle.load(file)
file.close()
'''
file = open("x_train.obj",'rb')
x_train = pickle.load(file)
file.close()
file = open("x_test.obj",'rb')
x_test = pickle.load(file)
file.close()
file = open("x_cv.obj",'rb')
x_cv = pickle.load(file)
file.close()
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
plt.figure(figsize=(20,10))
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45,fontsize=10)
plt.yticks(tick_marks, classes,fontsize=10)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
print("Training data has %d malware samples and %d features" % (x_train.shape[0], x_train.shape[1]))
print("Crossval data has %d malware samples and %d features" % (x_cv.shape[0], x_cv.shape[1]))
print("Test data has %d malware samples and %d features" % (x_test.shape[0], x_test.shape[1]))
print("Performing IG Feature selection...")
indices=np.argsort(np.asarray(x_train.sum(axis=0)).ravel(),axis=0)[::-1]
x_train_ig = x_train[:,indices]
x_cv_ig = x_cv[:,indices]
x_test_ig = x_test[:,indices]
print("Training NB Classifier with top 5000 IG features ...")
NB = sklearn.naive_bayes.MultinomialNB()
NB.fit(x_train_ig,mr_train.target)
print("Obtaining predictions on test data...")
y_pred_cv=NB.predict(x_cv_ig)
y_pred_test=NB.predict(x_test_ig)
prec_cv, rec_cv, fsc_cv, sup_cv = precision_recall_fscore_support(mr_cv.target, y_pred_cv, average='weighted')
prec_test, rec_test, fsc_test, sup_test = precision_recall_fscore_support(mr_test.target, y_pred_test, average='weighted')
print("Precision on crossval data is %.4f" % prec_cv)
print("Recall on crossval data is %.4f" % rec_cv)
print("Precision on test data is %.4f" % prec_test)
print("Recall on test data is %.4f" % rec_test)
#Distance measure to class centroids
#Confusion Matrices
print("Finding class centroids and computing distance of samples to centroids")
clf = NearestCentroid()
clf.fit(x_train_ig,mr_train.target)
dist_train = pairwise_distances(x_train_ig, clf.centroids_)
dist_test = pairwise_distances(x_test_ig, clf.centroids_)
print("Calculating drift_l2 thresholds...")
m = np.resize(np.array([]),8)
var = np.resize(np.array([]),8)
thresh = np.resize(np.array([]),8)
for i in range(8):
m[i] = np.mean(dist_train[np.where(np.argmin(dist_train,axis=1)==i)][:,i])
var[i] = np.sqrt(np.std(dist_train[np.where(np.argmin(dist_train,axis=1)==i)][:,i]))
thresh[i] = m[i]+var[i]
test_drift_l2 = np.resize(np.array([]),8)
test_total = np.resize(np.array([]),8)
test_d_per = np.resize(np.array([]),8)
"Calculating drift_l2 on test data with new classes..."
for r in range(8):
test_drift_l2[r]=sum(dist_test[np.where(np.argmin(dist_test,axis=1)==r)][:,r] > thresh[r])
test_total[r]= sum(np.argmin(dist_test,axis=1)==r)
if test_total[r]!=0:
test_d_per[r]=test_drift_l2[r]/test_total[r]
else:
        test_d_per[r]=np.nan
print("In test set there are %d drift_l2ed malware of a total of %d samples, total drift_l2 percentage is %.4f" % (sum(test_drift_l2), sum(test_total), sum(test_drift_l2)/sum(test_total)))
print("Selecting drift_l2ed malware samples from test set...")
ind_array_test = np.array([])
indices_test = np.array([])
for i in range(8):
ind_array_test = np.where(np.argmin(dist_test,axis=1)==i)
indices_test = np.append(indices_test,ind_array_test[0][dist_test[np.where(np.argmin(dist_test,axis=1)==i)][:,i] > thresh[i]])
print("Appending drift_l2ed malware samples from test set to training set, and re-labelling...")
x_train_drift_l2 = vstack([x_train_ig,x_test_ig[indices_test.astype(int)]])
mr_train_drift_l2_target = np.append(mr_train.target,mr_test.target[indices_test.astype(int)],axis=0)
print("Training drift_l2-Aware SVM classifier with new training set...")
NB_drift_l2 = sklearn.naive_bayes.MultinomialNB()
NB_drift_l2.fit(x_train_drift_l2,mr_train_drift_l2_target)
print("Computing predictions on test data with newly trained model...")
y_drift_l2 = NB_drift_l2.predict(x_test_ig)
prec_drift_l2, rec_drift_l2, fsc_drift_l2, sup_drift_l2 = precision_recall_fscore_support(mr_test.target,y_drift_l2, average='weighted')
print("Precision on test data with new classes with original model was %.4f" %prec_test)
print("Recall on test data with new classes with original model was %.4f" %rec_test)
print("Precision on test data with new classes with concept drift_l2-aware model %.4f" %prec_drift_l2)
print("Recall on test data with new classes with concept drift_l2-aware model %.4f" %rec_drift_l2)
# Computing intersections of flagged samples and actual new class samples for l2 centroid drift
intersection = np.intersect1d(np.sort(indices_test),np.flatnonzero(mr_test.target > 7))
print(intersection.shape)
print("Precision with mean + 1std threshold is %.4f, recall is %.4f, flagged samples are %d, percentage of samples flagged is %.4f and percentage of flagged samples that actually belong to new families is %.4f" % (prec_drift_l2,rec_drift_l2,len(indices_test),np.double(len(indices_test))/np.double(len(mr_test.data)),np.double(intersection.shape[0])/np.double(np.flatnonzero(mr_test.target > 7).size)))
#Confusion Matrices
plt.figure(figsize=(50,30))
print("Computing confusion matrices...")
cnf_matrix_cv = confusion_matrix(mr_cv.target, y_pred_cv)
cnf_matrix_test = confusion_matrix(mr_test.target,y_pred_test)
cnf_matrix_drift_l2 = confusion_matrix(mr_test.target,y_drift_l2)
print("Plotting confusion matrix for crossvalidation data")
np.set_printoptions(precision=2)
plt.figure()
plot_confusion_matrix(cnf_matrix_cv, classes=mr_cv.target_names,
title='Confusion matrix, without normalization, crossval data')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix_cv, classes=mr_cv.target_names, normalize=True,
title='Normalized confusion matrix, crossval data')
plt.show()
print("Plotting confusion matrix for test data in base model")
np.set_printoptions(precision=2)
plt.figure()
plot_confusion_matrix(cnf_matrix_test, classes=mr_test.target_names,
title='Confusion matrix, without normalization, test data')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix_test, classes=mr_test.target_names, normalize=True,
title='Normalized confusion matrix, test data')
plt.show()
print("Plotting confusion matrix for test data in drift-aware l2 model")
np.set_printoptions(precision=2)
plt.figure()
plot_confusion_matrix(cnf_matrix_drift_l2, classes=mr_test.target_names,
title='Confusion matrix, without normalization, l2 model')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix_drift_l2, classes=mr_test.target_names, normalize=True,
title='Normalized confusion matrix, l2 model')
plt.show()
from sklearn.metrics import confusion_matrix
C = confusion_matrix(mr_test.target, y_drift_l2)
# Normalize the confusion matrix so that each row (true class) sums to 1
Csum = np.sum(C,1)
C = C / Csum[:,None]
# Print the confusion matrix
print(np.array_str(C, precision=3, suppress_small=True))
plt.imshow(C, interpolation='none')
plt.colorbar()
```
|
github_jupyter
|
##### Copyright 2020 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# TensorFlow Addons Image: Operations
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://www.tensorflow.org/addons/tutorials/image_ops"><img src="https://www.tensorflow.org/images/tf_logo_32px.png"> TensorFlow.orgで表示</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/addons/tutorials/image_ops.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png"> Google Colab で実行</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/addons/tutorials/image_ops.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub でソースを表示{</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/addons/tutorials/image_ops.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">ノートブックをダウンロード/a0}</a></td>
</table>
## Overview
This notebook demonstrates several ways to use the image operations in TensorFlow Addons.
This example covers the following image operations:
- `tfa.image.mean_filter2d`
- `tfa.image.rotate`
- `tfa.image.transform`
- `tfa.image.random_hsv_in_yiq`
- `tfa.image.adjust_hsv_in_yiq`
- `tfa.image.dense_image_warp`
- `tfa.image.euclidean_dist_transform`
# Setup
```
!pip install -U tensorflow-addons
import tensorflow as tf
import numpy as np
import tensorflow_addons as tfa
import matplotlib.pyplot as plt
```
# Prepare and inspect images
## Download an image
```
img_path = tf.keras.utils.get_file('tensorflow.png','https://tensorflow.org/images/tf_logo.png')
```
## Inspect the image
### The TensorFlow icon
```
img_raw = tf.io.read_file(img_path)
img = tf.io.decode_image(img_raw)
img = tf.image.convert_image_dtype(img, tf.float32)
img = tf.image.resize(img, [500,500])
plt.title("TensorFlow Logo with shape {}".format(img.shape))
_ = plt.imshow(img)
```
### Create a black-and-white version
```
bw_img = 1.0 - tf.image.rgb_to_grayscale(img)
plt.title("Mask image with shape {}".format(bw_img.shape))
_ = plt.imshow(bw_img[...,0], cmap='gray')
```
# Play with tfa.image
## Mean filtering
Mean filtering is a filtering technique often used to remove noise from images and signals. The idea is to run through the image pixel by pixel and replace each pixel with the average value of its neighboring pixels.
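To make the idea concrete, here is a tiny hand-rolled sketch of a 3x3 mean filter on a small NumPy array (purely illustrative; `tfa.image.mean_filter2d` below does this efficiently and also handles the image borders):
```
import numpy as np

a = np.arange(25, dtype=np.float32).reshape(5, 5)
smoothed = np.zeros_like(a)
for i in range(1, 4):
    for j in range(1, 4):
        # Replace each interior pixel with the mean of its 3x3 neighborhood
        smoothed[i, j] = a[i-1:i+2, j-1:j+2].mean()
print(smoothed)
```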
```
mean = tfa.image.mean_filter2d(img, filter_shape=11)
_ = plt.imshow(mean)
```
## Rotate
This operation rotates the given image by an angle (in radians) supplied by the user.
```
rotate = tfa.image.rotate(img, tf.constant(np.pi/8))
_ = plt.imshow(rotate)
```
## Transform
This operation transforms the given image on the basis of a transform vector supplied by the user.
```
transform = tfa.image.transform(img, [1.0, 1.0, -250, 0.0, 1.0, 0.0, 0.0, 0.0])
_ = plt.imshow(transform)
```
## Random HSV in YIQ
This operation changes the color scale of a given RGB image to YIQ, but here the delta hue and saturation values are picked randomly from the given ranges.
```
delta = 0.5
lower_saturation = 0.1
upper_saturation = 0.9
lower_value = 0.2
upper_value = 0.8
rand_hsvinyiq = tfa.image.random_hsv_in_yiq(img, delta, lower_saturation, upper_saturation, lower_value, upper_value)
_ = plt.imshow(rand_hsvinyiq)
```
## Adjust HSV in YIQ
This operation changes the color scale of a given RGB image to YIQ, but here, instead of being chosen randomly, the delta hue and saturation values are inputs from the user.
```
delta = 0.5
saturation = 0.3
value = 0.6
adj_hsvinyiq = tfa.image.adjust_hsv_in_yiq(img, delta, saturation, value)
_ = plt.imshow(adj_hsvinyiq)
```
## Dense image warp
This operation performs a non-linear warp of any image specified by a flow field of offset vectors (here, for example, random values are used).
```
input_img = tf.image.convert_image_dtype(tf.expand_dims(img, 0), tf.dtypes.float32)
flow_shape = [1, input_img.shape[1], input_img.shape[2], 2]
init_flows = np.float32(np.random.normal(size=flow_shape) * 2.0)
dense_img_warp = tfa.image.dense_image_warp(input_img, init_flows)
dense_img_warp = tf.squeeze(dense_img_warp, 0)
_ = plt.imshow(dense_img_warp)
```
## Euclidean distance transform
This operation updates each pixel value with the Euclidean distance from the foreground pixels to the background pixels.
- Note: it only accepts a binary image and produces a transformed image as the result. If a different kind of image is given, the result will be an image with a single value.
```
gray = tf.image.convert_image_dtype(bw_img,tf.uint8)
# The op expects a batch of images, so add a batch dimension
gray = tf.expand_dims(gray, 0)
eucid = tfa.image.euclidean_dist_transform(gray)
eucid = tf.squeeze(eucid, (0, -1))
_ = plt.imshow(eucid, cmap='gray')
```
|
github_jupyter
|
# Logistic Regression with a Neural Network mindset
Welcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning.
**Instructions:**
- Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so.
**You will learn to:**
- Build the general architecture of a learning algorithm, including:
- Initializing parameters
- Calculating the cost function and its gradient
- Using an optimization algorithm (gradient descent)
- Gather all three functions above into a main model function, in the right order.
## 1 - Packages ##
First, let's run the cell below to import all the packages that you will need during this assignment.
- [numpy](https://www.numpy.org/) is the fundamental package for scientific computing with Python.
- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.
- [matplotlib](http://matplotlib.org) is a famous library to plot graphs in Python.
- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.
```
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset
%matplotlib inline
```
## 2 - Overview of the Problem set ##
**Problem Statement**: You are given a dataset ("data.h5") containing:
- a training set of m_train images labeled as cat (y=1) or non-cat (y=0)
- a test set of m_test images labeled as cat or non-cat
- each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px).
You will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat.
Let's get more familiar with the dataset. Load the data by running the following code.
```
# Loading the data (cat/non-cat)
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
```
We added "_orig" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).
Each line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the `index` value and re-run to see other images.
```
# Example of a picture
index = 25
plt.imshow(train_set_x_orig[index])
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.")
```
Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs.
**Exercise:** Find the values for:
- m_train (number of training examples)
- m_test (number of test examples)
- num_px (= height = width of a training image)
Remember that `train_set_x_orig` is a numpy-array of shape (m_train, num_px, num_px, 3). For instance, you can access `m_train` by writing `train_set_x_orig.shape[0]`.
```
### START CODE HERE ### (≈ 3 lines of code)
m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig.shape[1]
### END CODE HERE ###
print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
```
**Expected Output for m_train, m_test and num_px**:
<table style="width:15%">
<tr>
<td>**m_train**</td>
<td> 209 </td>
</tr>
<tr>
<td>**m_test**</td>
<td> 50 </td>
</tr>
<tr>
<td>**num_px**</td>
<td> 64 </td>
</tr>
</table>
For convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $*$ num_px $*$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be m_train (respectively m_test) columns.
**Exercise:** Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num\_px $*$ num\_px $*$ 3, 1).
A trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$*$c$*$d, a) is to use:
```python
X_flatten = X.reshape(X.shape[0], -1).T # X.T is the transpose of X
```
```
# Reshape the training and test examples
### START CODE HERE ### (≈ 2 lines of code)
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
### END CODE HERE ###
print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0]))
```
**Expected Output**:
<table style="width:35%">
<tr>
<td>**train_set_x_flatten shape**</td>
<td> (12288, 209)</td>
</tr>
<tr>
<td>**train_set_y shape**</td>
<td>(1, 209)</td>
</tr>
<tr>
<td>**test_set_x_flatten shape**</td>
<td>(12288, 50)</td>
</tr>
<tr>
<td>**test_set_y shape**</td>
<td>(1, 50)</td>
</tr>
<tr>
<td>**sanity check after reshaping**</td>
<td>[17 31 56 22 33]</td>
</tr>
</table>
To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255.
One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you subtract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. But for picture datasets, it is simpler and more convenient, and works almost as well, to just divide every row of the dataset by 255 (the maximum value of a pixel channel).
<!-- During the training of your model, you're going to multiply weights and add biases to some initial inputs in order to observe neuron activations. Then you backpropogate with the gradients to train the model. But, it is extremely important for each feature to have a similar range such that our gradients don't explode. You will see that more in detail later in the lectures. !-->
Let's standardize our dataset.
```
train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
```
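For completeness, a rough sketch of the mean/standard-deviation alternative mentioned above would look like this (not used in this assignment; the variable names are only illustrative):
```
# Center and standardize using the statistics of the whole training array
mu = np.mean(train_set_x_flatten)
sigma = np.std(train_set_x_flatten)
train_set_x_std = (train_set_x_flatten - mu) / sigma
test_set_x_std = (test_set_x_flatten - mu) / sigma
```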
<font color='blue'>
**What you need to remember:**
Common steps for pre-processing a new dataset are:
- Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...)
- Reshape the datasets such that each example is now a vector of size (num_px \* num_px \* 3, 1)
- "Standardize" the data
## 3 - General Architecture of the learning algorithm ##
It's time to design a simple algorithm to distinguish cat images from non-cat images.
You will build a Logistic Regression, using a Neural Network mindset. The following Figure explains why **Logistic Regression is actually a very simple Neural Network!**
<img src="images/LogReg_kiank.png" style="width:650px;height:400px;">
**Mathematical expression of the algorithm**:
For one example $x^{(i)}$:
$$z^{(i)} = w^T x^{(i)} + b \tag{1}$$
$$\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\tag{2}$$
$$ \mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \log(a^{(i)}) - (1-y^{(i)} ) \log(1-a^{(i)})\tag{3}$$
The cost is then computed by summing over all training examples:
$$ J = \frac{1}{m} \sum_{i=1}^m \mathcal{L}(a^{(i)}, y^{(i)})\tag{6}$$
**Key steps**:
In this exercise, you will carry out the following steps:
- Initialize the parameters of the model
- Learn the parameters for the model by minimizing the cost
- Use the learned parameters to make predictions (on the test set)
- Analyse the results and conclude
## 4 - Building the parts of our algorithm ##
The main steps for building a Neural Network are:
1. Define the model structure (such as number of input features)
2. Initialize the model's parameters
3. Loop:
- Calculate current loss (forward propagation)
- Calculate current gradient (backward propagation)
- Update parameters (gradient descent)
You often build 1-3 separately and integrate them into one function we call `model()`.
### 4.1 - Helper functions
**Exercise**: Using your code from "Python Basics", implement `sigmoid()`. As you've seen in the figure above, you need to compute $sigmoid( w^T x + b) = \frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions. Use np.exp().
```
# GRADED FUNCTION: sigmoid
def sigmoid(z):
"""
Compute the sigmoid of z
Arguments:
z -- A scalar or numpy array of any size.
Return:
s -- sigmoid(z)
"""
### START CODE HERE ### (≈ 1 line of code)
s = 1 / (1 + np.exp(-z))
### END CODE HERE ###
return s
print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2]))))
```
**Expected Output**:
<table>
<tr>
<td>**sigmoid([0, 2])**</td>
<td> [ 0.5 0.88079708]</td>
</tr>
</table>
### 4.2 - Initializing parameters
**Exercise:** Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation.
```
# GRADED FUNCTION: initialize_with_zeros
def initialize_with_zeros(dim):
"""
This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.
Argument:
dim -- size of the w vector we want (or number of parameters in this case)
Returns:
w -- initialized vector of shape (dim, 1)
b -- initialized scalar (corresponds to the bias)
"""
### START CODE HERE ### (≈ 1 line of code)
w = np.zeros(shape=(dim, 1))
b = 0
### END CODE HERE ###
assert(w.shape == (dim, 1))
assert(isinstance(b, float) or isinstance(b, int))
return w, b
dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
```
**Expected Output**:
<table style="width:15%">
<tr>
<td> ** w ** </td>
<td> [[ 0.]
[ 0.]] </td>
</tr>
<tr>
<td> ** b ** </td>
<td> 0 </td>
</tr>
</table>
For image inputs, w will be of shape (num_px $\times$ num_px $\times$ 3, 1).
### 4.3 - Forward and Backward propagation
Now that your parameters are initialized, you can do the "forward" and "backward" propagation steps for learning the parameters.
**Exercise:** Implement a function `propagate()` that computes the cost function and its gradient.
**Hints**:
Forward Propagation:
- You get X
- You compute $A = \sigma(w^T X + b) = (a^{(1)}, a^{(2)}, ..., a^{(m-1)}, a^{(m)})$
- You calculate the cost function: $J = -\frac{1}{m}\sum_{i=1}^{m}y^{(i)}\log(a^{(i)})+(1-y^{(i)})\log(1-a^{(i)})$
Here are the two formulas you will be using:
$$ \frac{\partial J}{\partial w} = \frac{1}{m}X(A-Y)^T\tag{7}$$
$$ \frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^m (a^{(i)}-y^{(i)})\tag{8}$$
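As a side note on where formulas (7) and (8) come from (a standard derivation, not part of the graded code): for the sigmoid activation combined with this cost, the per-example derivative with respect to $z^{(i)} = w^T x^{(i)} + b$ simplifies to
$$ \frac{\partial \mathcal{L}(a^{(i)}, y^{(i)})}{\partial z^{(i)}} = a^{(i)} - y^{(i)}$$
Applying the chain rule through $z^{(i)}$ and averaging over the $m$ examples then gives $\frac{\partial J}{\partial w} = \frac{1}{m}X(A-Y)^T$ and $\frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^m (a^{(i)}-y^{(i)})$, i.e. exactly formulas (7) and (8).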
```
# GRADED FUNCTION: propagate
def propagate(w, b, X, Y):
"""
Implement the cost function and its gradient for the propagation explained above
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)
Return:
cost -- negative log-likelihood cost for logistic regression
dw -- gradient of the loss with respect to w, thus same shape as w
db -- gradient of the loss with respect to b, thus same shape as b
Tips:
- Write your code step by step for the propagation. np.log(), np.dot()
"""
m = X.shape[1]
# FORWARD PROPAGATION (FROM X TO COST)
### START CODE HERE ### (≈ 2 lines of code)
A = sigmoid(np.dot(w.T,X) + b)# compute activation
cost = (- 1 / m) * np.sum(Y * np.log(A) + (1 - Y) * (np.log(1 - A)))# compute cost
### END CODE HERE ###
# BACKWARD PROPAGATION (TO FIND GRAD)
### START CODE HERE ### (≈ 2 lines of code)
dw = (1 / m) * np.dot(X, (A - Y).T)
db = (1 / m) * np.sum(A- Y)
### END CODE HERE ###
assert(dw.shape == w.shape)
assert(db.dtype == float)
cost = np.squeeze(cost)
assert(cost.shape == ())
grads = {"dw": dw,
"db": db}
return grads, cost
w, b, X, Y = np.array([[1.],[2.]]), 2., np.array([[1.,2.,-1.],[3.,4.,-3.2]]), np.array([[1,0,1]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))
```
**Expected Output**:
<table style="width:50%">
<tr>
<td> ** dw ** </td>
<td> [[ 0.99845601]
[ 2.39507239]]</td>
</tr>
<tr>
<td> ** db ** </td>
<td> 0.00145557813678 </td>
</tr>
<tr>
<td> ** cost ** </td>
<td> 5.801545319394553 </td>
</tr>
</table>
### 4.4 - Optimization
- You have initialized your parameters.
- You are also able to compute a cost function and its gradient.
- Now, you want to update the parameters using gradient descent.
**Exercise:** Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. For a parameter $\theta$, the update rule is $ \theta = \theta - \alpha \text{ } d\theta$, where $\alpha$ is the learning rate.
```
# GRADED FUNCTION: optimize
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
"""
This function optimizes w and b by running a gradient descent algorithm
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of shape (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- True to print the loss every 100 steps
Returns:
params -- dictionary containing the weights w and bias b
grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.
Tips:
You basically need to write down two steps and iterate through them:
1) Calculate the cost and the gradient for the current parameters. Use propagate().
2) Update the parameters using gradient descent rule for w and b.
"""
costs = []
for i in range(num_iterations):
# Cost and gradient calculation (≈ 1-4 lines of code)
### START CODE HERE ###
grads, cost = propagate(w, b, X, Y)
### END CODE HERE ###
# Retrieve derivatives from grads
dw = grads["dw"]
db = grads["db"]
# update rule (≈ 2 lines of code)
### START CODE HERE ###
w = w - learning_rate * dw
b = b - learning_rate * db
### END CODE HERE ###
# Record the costs
if i % 100 == 0:
costs.append(cost)
# Print the cost every 100 training iterations
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
params = {"w": w,
"b": b}
grads = {"dw": dw,
"db": db}
return params, grads, costs
params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
```
**Expected Output**:
<table style="width:40%">
<tr>
<td> **w** </td>
<td>[[ 0.19033591]
[ 0.12259159]] </td>
</tr>
<tr>
<td> **b** </td>
<td> 1.92535983008 </td>
</tr>
<tr>
<td> **dw** </td>
<td> [[ 0.67752042]
[ 1.41625495]] </td>
</tr>
<tr>
<td> **db** </td>
<td> 0.219194504541 </td>
</tr>
</table>
**Exercise:** The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the `predict()` function. There are two steps to computing predictions:
1. Calculate $\hat{Y} = A = \sigma(w^T X + b)$
2. Convert the entries of a into 0 (if activation <= 0.5) or 1 (if activation > 0.5), storing the predictions in a vector `Y_prediction`. If you wish, you can use an `if`/`else` statement in a `for` loop (though there is also a way to vectorize this, sketched below).
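For reference, a minimal sketch of the vectorized alternative (not required for the graded cell below) could be as simple as:
```
# Vectorized thresholding: compare all activations to 0.5 at once
Y_prediction = (A > 0.5).astype(float)
```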
```
# GRADED FUNCTION: predict
def predict(w, b, X):
'''
Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Returns:
Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
'''
m = X.shape[1]
Y_prediction = np.zeros((1,m))
w = w.reshape(X.shape[0], 1)
# Compute vector "A" predicting the probabilities of a cat being present in the picture
### START CODE HERE ### (≈ 1 line of code)
A = sigmoid(np.dot(w.T, X) + b)
### END CODE HERE ###
for i in range(A.shape[1]):
# Convert probabilities A[0,i] to actual predictions p[0,i]
### START CODE HERE ### (≈ 4 lines of code)
Y_prediction[0, i] = 1 if A[0, i] > 0.5 else 0
### END CODE HERE ###
assert(Y_prediction.shape == (1, m))
return Y_prediction
w = np.array([[0.1124579],[0.23106775]])
b = -0.3
X = np.array([[1.,-1.1,-3.2],[1.2,2.,0.1]])
print ("predictions = " + str(predict(w, b, X)))
```
**Expected Output**:
<table style="width:30%">
<tr>
<td>
**predictions**
</td>
<td>
[[ 1. 1. 0.]]
</td>
</tr>
</table>
<font color='blue'>
**What to remember:**
You've implemented several functions that:
- Initialize (w,b)
- Optimize the loss iteratively to learn parameters (w,b):
- computing the cost and its gradient
- updating the parameters using gradient descent
- Use the learned (w,b) to predict the labels for a given set of examples
## 5 - Merge all functions into a model ##
You will now see how the overall model is structured by putting together all the building blocks (functions implemented in the previous parts) together, in the right order.
**Exercise:** Implement the model function. Use the following notation:
- Y_prediction_test for your predictions on the test set
- Y_prediction_train for your predictions on the train set
- w, costs, grads for the outputs of optimize()
```
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
"""
Builds the logistic regression model by calling the function you've implemented previously
Arguments:
X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
print_cost -- Set to true to print the cost every 100 iterations
Returns:
d -- dictionary containing information about the model.
"""
### START CODE HERE ###
# initialize parameters with zeros (≈ 1 line of code)
w, b = initialize_with_zeros(X_train.shape[0])
# Gradient descent (≈ 1 line of code)
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
# Retrieve parameters w and b from dictionary "parameters"
w = parameters["w"]
b = parameters["b"]
# Predict test/train set examples (≈ 2 lines of code)
Y_prediction_test = predict(w, b, X_test)
Y_prediction_train = predict(w, b, X_train)
### END CODE HERE ###
# Print train/test Errors
print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
d = {"costs": costs,
"Y_prediction_test": Y_prediction_test,
"Y_prediction_train" : Y_prediction_train,
"w" : w,
"b" : b,
"learning_rate" : learning_rate,
"num_iterations": num_iterations}
return d
```
Run the following cell to train your model.
```
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)
```
**Expected Output**:
<table style="width:40%">
<tr>
<td> **Cost after iteration 0 ** </td>
<td> 0.693147 </td>
</tr>
<tr>
<td> <center> $\vdots$ </center> </td>
<td> <center> $\vdots$ </center> </td>
</tr>
<tr>
<td> **Train Accuracy** </td>
<td> 99.04306220095694 % </td>
</tr>
<tr>
<td>**Test Accuracy** </td>
<td> 70.0 % </td>
</tr>
</table>
**Comment**: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test accuracy is 70%. It is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. But no worries, you'll build an even better classifier next week!
Also, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the `index` variable) you can look at predictions on pictures of the test set.
```
# Example of a picture that was wrongly classified.
index = 1
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + classes[d["Y_prediction_test"][0,index]].decode("utf-8") + "\" picture.")
```
Let's also plot the cost function and the gradients.
```
# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
```
**Interpretation**:
You can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting.
## 6 - Further analysis (optional/ungraded exercise) ##
Congratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\alpha$.
#### Choice of learning rate ####
**Reminder**:
In order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may "overshoot" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate.
Let's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the `learning_rates` variable to contain, and see what happens.
```
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
print ("learning rate is: " + str(i))
models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
print ('\n' + "-------------------------------------------------------" + '\n')
for i in learning_rates:
plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations (hundreds)')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
```
**Interpretation**:
- Different learning rates give different costs and thus different predictions results.
- If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost).
- A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy.
- In deep learning, we usually recommend that you:
- Choose the learning rate that better minimizes the cost function.
- If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.)
## 7 - Test with your own image (optional/ungraded exercise) ##
Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Change your image's name in the following code
4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
```
## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "my_image.jpg" # change this to the name of your image file
## END CODE HERE ##
# We preprocess the image to fit your algorithm.
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
image = image/255.
my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T
my_predicted_image = predict(d["w"], d["b"], my_image)
plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
```
<font color='blue'>
**What to remember from this assignment:**
1. Preprocessing the dataset is important.
2. You implemented each function separately: initialize(), propagate(), optimize(). Then you built a model().
3. Tuning the learning rate (which is an example of a "hyperparameter") can make a big difference to the algorithm. You will see more examples of this later in this course!
Finally, if you'd like, we invite you to try different things on this Notebook. Make sure you submit before trying anything. Once you submit, things you can play with include:
- Play with the learning rate and the number of iterations
- Try different initialization methods and compare the results
- Test other preprocessings (center the data, or divide each row by its standard deviation)
Bibliography:
- http://www.wildml.com/2015/09/implementing-a-neural-network-from-scratch/
- https://stats.stackexchange.com/questions/211436/why-do-we-normalize-images-by-subtracting-the-datasets-image-mean-and-not-the-c
|
github_jupyter
|
# ACA-Py & ACC-Py Basic Template
## Copy this template into the root folder of your notebook workspace to get started
### Imports
```
from aries_cloudcontroller import AriesAgentController
import os
from termcolor import colored
```
### Initialise the Agent Controller
```
api_key = os.getenv("ACAPY_ADMIN_API_KEY")
admin_url = os.getenv("ADMIN_URL")
print(f"Initialising a controller with admin api at {admin_url} and an api key of {api_key}")
agent_controller = AriesAgentController(admin_url,api_key)
```
### Start a Webhook Server
```
webhook_port = int(os.getenv("WEBHOOK_PORT"))
webhook_host = "0.0.0.0"
await agent_controller.init_webhook_server(webhook_host, webhook_port)
print(f"Listening for webhooks from agent at http://{webhook_host}:{webhook_port}")
```
## Register Agent Event Listeners
You can see some examples within the webhook_listeners recipe. Copy any relevant cells across and fill in additional logic as needed.
```
listeners = []
## YOUR LISTENERS HERE
# Receive connection messages
def connections_handler(payload):
state = payload['state']
connection_id = payload["connection_id"]
their_role = payload["their_role"]
routing_state = payload["routing_state"]
print("----------------------------------------------------------")
print("Connection Webhook Event Received")
print("Connection ID : ", connection_id)
print("State : ", state)
print("Routing State : ", routing_state)
print("Their Role : ", their_role)
print("----------------------------------------------------------")
if state == "active":
# Your business logic
print(colored("Connection ID: {0} is now active.".format(connection_id), "green", attrs=["bold"]))
connection_listener = {
"handler": connections_handler,
"topic": "connections"
}
listeners.append(connection_listener)
def issuer_handler(payload):
connection_id = payload['connection_id']
exchange_id = payload['credential_exchange_id']
state = payload['state']
role = payload['role']
print("\n---------------------------------------------------\n")
print("Handle Issue Credential Webhook")
print(f"Connection ID : {connection_id}")
print(f"Credential exchange ID : {exchange_id}")
print("Agent Protocol Role : ", role)
print("Protocol State : ", state )
print("\n---------------------------------------------------\n")
if state == "offer_sent":
proposal = payload["credential_proposal_dict"]
attributes = proposal['credential_proposal']['attributes']
print(f"Offering credential with attributes : {attributes}")
## YOUR LOGIC HERE
elif state == "request_received":
print("Request for credential received")
## YOUR LOGIC HERE
elif state == "credential_sent":
print("Credential Sent")
## YOUR LOGIC HERE
issuer_listener = {
"topic": "issue_credential",
"handler": issuer_handler
}
listeners.append(issuer_listener)
agent_controller.register_listeners(listeners)
```
## Store Issuing Schema and Cred Def Identifiers
If you intend for this agent to issue credentials you should first initialise your agent as an issuer and author the relevant identifiers to the public ledger. The issuer_initialisation recipe notebook can be duplicated and used as a starting point.
Once schema and cred def identifiers are created copy across and store in variables as illustrated in the cell below. Be sure to use unique names for each variable.
```
schema_id='ABsZzHjqQSfKUCEquCaAkN:2:aries_playground:0.0.1'
cred_def_id='ABsZzHjqQSfKUCEquCaAkN:3:CL:9916:default'
# %store <schema_id>
# %store <cred_def_id>
```
## Load any Identifiers from Store
If you are writing your logic across multiple notebooks, which I have found can make it easier to break things up, then rather than defining the schema and cred def identifiers every time it can be easier to load them from the jupyter store. Note: this assumes they have been written to the store in a previous notebook during the time the current docker containers have been running.
```
# %store -r <schema_id>
# %store -r <cred_def_id>
```
## Establish Connection with the Holder (PORT 8889)
Before you can issue a credential you must first establish a connection across which the credential will be issued to a holder. (see recipes/connection)
```
# Alias for invited connection
alias = None
auto_accept = "true"
# Use public DID?
public = "false"
# Should this invitation be usable by multiple invitees?
multi_use = "false"
invitation_response = await agent_controller.connections.create_invitation(alias, auto_accept, public, multi_use)
# Is equivalent to the above. The remaining arguments are optional
# invitation_response = await agent_controller.connections.create_invitation()
# You will use this identifier to issue a credential across this connection
connection_id = invitation_response["connection_id"]
```
## Copy Invitation Object to Holder Notebook
```
invitation = invitation_response["invitation"]
## Copy this output
print(invitation)
```
## OPTIONAL: Display Invite as QR Code
This is useful if you wish to issue a credential to a mobile wallet.
```
import qrcode
# Link for connection invitation
invitation_url = invitation_response["invitation_url"]
# Creating an instance of qrcode
qr = qrcode.QRCode(
version=1,
box_size=5,
border=5)
qr.add_data(invitation_url)
qr.make(fit=True)
img = qr.make_image(fill='black', back_color='white')
img
```
## Populate Credential Attributes
Before you can issue a credential, you must define the values that will be issued in this credential. The attribute names **MUST** match those in the schema identified by the <schema_id> value.
Make sure to change all code enclosed with <>.
```
comment=input("Please enter some comment: ")
credential_attributes = [
{"name": "comment", "value": comment},
]
print(credential_attributes)
```
## Send Credential
This is the easiest way to issue a credential because it automates the rest of the protocol steps.
Note: The `connection_id` must be in the active state before a credential can be sent.
```
# Do you want the ACA-Py instance to trace it's processes (for testing/timing analysis)
trace = False
comment = ""
# Remove credential record after issued?
auto_remove = True
# Change <schema_id> and <cred_def_id> to the correct pair. The cred_def_id must identify a definition for which your agent holds the corresponding private issuing key.
send_cred_response = await agent_controller.issuer.send_credential(connection_id, schema_id, cred_def_id, credential_attributes, comment, auto_remove, trace)
# Note last three args are optional.
# await agent_controller.issuer.send_credential(connection_id, <schema_id>, <cred_def_id, credential_attributes)
```
## Now Request a Proof of this Credential Through the Verifier Notebook (Port 8891)
## Terminate Controller
Whenever you have finished with this notebook, be sure to terminate the controller. This is especially important if your business logic runs across multiple notebooks.
```
await agent_controller.terminate()
```
|
github_jupyter
|
# Assignment 3: Design and train a KNN algorithm to classify images.
## example1:
```
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
k=7
test_num=int(input('Please enter the number of test samples: '))
# Load the TFRecord training data
reader = tf.TFRecordReader()
filename_queue = tf.train.string_input_producer(["/home/srhyme/ML project/DS/train.tfrecords"])
_, example = reader.read(filename_queue)
features = tf.parse_single_example(
example,features={
'image_raw': tf.FixedLenFeature([], tf.string),
'pixels': tf.FixedLenFeature([], tf.int64),
'label': tf.FixedLenFeature([], tf.int64),
})
train_images = tf.decode_raw(features['image_raw'], tf.uint8)
train_labels = tf.cast(features['label'], tf.int32)
train_pixels = tf.cast(features['pixels'], tf.int32)
# Load the TFRecord test data
reader = tf.TFRecordReader()
filename_queue = tf.train.string_input_producer(["/home/srhyme/ML project/DS/test.tfrecords"])
_, example = reader.read(filename_queue)
features = tf.parse_single_example(
example,features={
'image_raw': tf.FixedLenFeature([], tf.string),
'pixels': tf.FixedLenFeature([], tf.int64),
'label': tf.FixedLenFeature([], tf.int64),
})
test_images = tf.decode_raw(features['image_raw'], tf.uint8)
test_labels = tf.cast(features['label'], tf.int32)
test_pixels = tf.cast(features['pixels'], tf.int32)
tri_list=[]
tei_list=[]
trl_list=[]
tel_list=[]
# Convert the data types stored in the TFRecord files
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(sess.run(train_pixels)):
image,label=sess.run([train_images,train_labels])
tri_list.append(image)
trl=np.zeros((1,10))
trl[0][label]=1
trl_list.append(trl[0])
train_labels=np.array(trl_list)
train_images=np.array(tri_list)
    print('Training set loaded')
for i in range(test_num):
image,label=sess.run([test_images,test_labels])
tei_list.append(image)
tel=np.zeros((1,10))
tel[0][label]=1
tel_list.append(tel[0])
test_labels=np.array(tel_list)
test_images=np.array(tei_list)
    print('Test set loaded')
sess.close()
x_train = tf.placeholder(tf.float32)
x_test = tf.placeholder(tf.float32)
y_train = tf.placeholder(tf.float32)
# Euclidean distance
euclidean_distance = tf.sqrt(tf.reduce_sum(tf.square(x_train - x_test), 1))
# Indices of the k nearest samples
_, nearest_index = tf.nn.top_k(-euclidean_distance, k)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
predicted_num = 0
    # Predict each test image
for i in range(test_images.shape[0]):
        # Label indices of the k nearest samples
nearest_index_res = sess.run(
nearest_index,
feed_dict={
x_train: train_images,
y_train: train_labels,
x_test: test_images[i]})
        # Labels of the k nearest samples
nearest_label = []
for j in range(k):
nearest_label.append(list(train_labels[nearest_index_res[j]]))
predicted_class = sess.run(tf.argmax(tf.reduce_sum(nearest_label, 0), 0))
true_class = sess.run(tf.argmax(test_labels[i]))
if predicted_class == true_class:
predicted_num += 1
if i % 100 == 0:
print('step is %d accuracy is %.4f' % (i, predicted_num / (i+1)))
print('accuracy is %.4f' % (predicted_num / test_num))
```
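For comparison, here is a minimal NumPy-only sketch of the same k-nearest-neighbour voting scheme. It assumes `train_images`, `train_labels`, `test_images` and `test_labels` are already plain NumPy arrays with one-hot labels, as produced above, and avoids running a TensorFlow op per sample:
```
def knn_predict(train_images, train_labels, test_image, k=7):
    # Squared Euclidean distance from one test image to every training image
    dists = np.sum((train_images.astype(np.float32) - test_image.astype(np.float32)) ** 2, axis=1)
    nearest = np.argsort(dists)[:k]
    # Sum the one-hot labels of the k nearest neighbours and take the majority class
    return np.argmax(train_labels[nearest].sum(axis=0))

correct = sum(
    knn_predict(train_images, train_labels, test_images[i]) == np.argmax(test_labels[i])
    for i in range(len(test_images))
)
print('accuracy is %.4f' % (correct / len(test_images)))
```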
## example2:
```
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
from collections import Counter
# Load the training set data
reader = tf.TFRecordReader()
filename_queue = tf.train.string_input_producer(["/home/srhyme/ML project/DS/train.tfrecords"])
_, example = reader.read(filename_queue)
features = tf.parse_single_example(
example,features={
'image_raw': tf.FixedLenFeature([], tf.string),
'pixels': tf.FixedLenFeature([], tf.int64),
'label': tf.FixedLenFeature([], tf.int64),
})
train_images = tf.decode_raw(features['image_raw'], tf.uint8)
train_labels = tf.cast(features['label'], tf.int32)
train_pixels = tf.cast(features['pixels'], tf.int32)
# Load the test set data
reader = tf.TFRecordReader()
filename_queue = tf.train.string_input_producer(["/home/srhyme/ML project/DS/test.tfrecords"])
_, example = reader.read(filename_queue)
features = tf.parse_single_example(
example,features={
'image_raw': tf.FixedLenFeature([], tf.string),
'pixels': tf.FixedLenFeature([], tf.int64),
'label': tf.FixedLenFeature([], tf.int64),
})
test_images = tf.decode_raw(features['image_raw'], tf.uint8)
test_labels = tf.cast(features['label'], tf.int32)
test_pixels = tf.cast(features['pixels'], tf.int32)
# Set up variables
testnum=int(input('Please enter the number of test samples: '))
k=5
correct_probability=testnum
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
test_num=sess.run(test_pixels)
train_num=sess.run(train_pixels)
    # Build a list of training labels for easy indexing
c_labels=[]
for n in range(train_num):
train_label=sess.run(train_labels)
c_labels.append(train_label)
    # Build a list of test labels for easy indexing
g_labels=[]
for n in range(test_num):
test_label=sess.run(test_labels)
g_labels.append(test_label)
    for i in range(testnum):  # number of test samples
test_image=sess.run(test_images)
        # Build the distance list for this test sample
min_label=[]
        for j in range(train_num):  # number of training samples
train_image=sess.run(train_images)
euclidean_distance =np.sqrt(np.sum(np.square(train_image - test_image)))
min_label.append(-euclidean_distance)
        # Find the positions of the k nearest points
min_labels=tf.constant([min_label])
_, nearest_index = tf.nn.top_k(min_labels, k)
        # Build the label list of the k nearest points
nearest_label=[]
near=nearest_index
for m in range(k):
            nearest_label.append(c_labels[sess.run(near[0,m])])  # look up the label at that position in the training labels
        # Determine the label predicted by KNN for this test sample
nearset_dict=Counter(nearest_label)
key_list=[]
value_list=[]
for key,value in nearset_dict.items():
key_list.append(key)
value_list.append(value)
max_value=max(value_list)
get_value_index = value_list.index(max_value)
guess = key_list[get_value_index]
        # Check correctness
correct=g_labels[i]
if correct != guess:
correct_probability=correct_probability - 1
    print('Accuracy:',(correct_probability/testnum))
```
|
github_jupyter
|
# Continuous Delivery Explained
> "An introduction to the devops practice of CI/CD."
- toc: false
- branch: master
- badges: true
- comments: true
- categories: [devops, continuous-delivery]
- image: images/copied_from_nb/img/devops/feedback-cycle.png

> *I wrote this back in September 2014 and never published it, but since it's an introductory piece it stands its ground, so let this serve as an initial post…*
## CD in a Nutshell
A typical mission statement for Continuous Delivery is this…
> *Our highest priority is to satisfy the customer,*
> *through early and **continuous delivery** of valuable software.*
Continuous Delivery strives to improve the process of software delivery, by applying Continuous Deployment paired with automated testing and Continuous Integration. The goal is creating software developed to a high standard and easily packaged and deployed to test environments, resulting in the ability to rapidly, reliably and repeatedly push out enhancements and bug fixes to customers in small increments, at low risk and with minimal manual overhead.
CD is effective because it facilitates an explorative approach by providing real, valuable measurements of the output of the process, and feeding those results back into the process. It's the next logical step after applying Agile principles to development, by expanding the scope to the whole software life-cycle and all involved parties, from inception to going live and then maintaining the product for a substantial amount of time in fast-paced iterations.
## Some More Details
Continuous Delivery means that your software is production-ready from day one of your project (even when it's not “feature complete”), and that you can release to users on demand at the push of a button. There are several practices and patterns that enable this, but the foundation is formed in particular by excellent configuration management, continuous integration, and comprehensive automated testing at all levels. The key pattern is the deployment pipeline, which is effectively the extension of continuous integration out to production, whereby every check-in produces a release candidate which is assessed for its fitness to be released to production through a series of automated and then manual tests.
In order to be able to perform these validations against every build, your regression tests must be automated — both at the unit and acceptance level. Humans then perform tasks such as exploratory testing, usability testing, and showcases as later validations against builds that have already passed the automated tests. Builds can be deployed automatically on demand to testing, staging and production environments by the people authorized to do so — note that this means deployments are triggered by humans and performed by machines.
Through these practices, teams can get fast feedback on whether the software being delivered is useful, reduce the risk of release, and achieve a much more predictable, reliable process for software delivery. The backbone of CD is a culture in which everybody, if somehow involved in the delivery process, collaborates throughout the life-cycle of the product — developers, testers, infrastructure, operators, DBAs, managers, and customers alike.
## Where to Go From Here?
Here are some resources for diving deeper into the topic:
- [Jez Humble's Blog · Continuous Delivery](https://continuousdelivery.com/about/)
- [CD Foundation](https://cd.foundation/) – A Neutral Home for the Next Generation of Continuous Delivery Collaboration.
- [IT Revolution DevOps Blog](https://itrevolution.com/devops-blog/)
- [Devops Weekly Mailing List](https://www.devopsweekly.com/) (by [@garethr](https://twitter.com/garethr))
- [Team Topologies](https://teamtopologies.com/)
> 👍 *Credits:* [Devops-toolchain](https://commons.wikimedia.org/wiki/File:Devops-toolchain.svg)
|
github_jupyter
|
# Content-based recommender using Deep Structured Semantic Model
An example of how to build a Deep Structured Semantic Model (DSSM) for incorporating complex content-based features into a recommender system. See [Learning Deep Structured Semantic Models for Web Search using Clickthrough Data](https://www.microsoft.com/en-us/research/publication/learning-deep-structured-semantic-models-for-web-search-using-clickthrough-data/). This example does not attempt to provide a data source or train a model, but merely shows how to structure a complex DSSM network.
```
import warnings
import mxnet as mx
from mxnet import gluon, nd, autograd, sym
import numpy as np
from sklearn.random_projection import johnson_lindenstrauss_min_dim
# Define some constants
max_user = int(1e5)
title_vocab_size = int(3e4)
query_vocab_size = int(3e4)
num_samples = int(1e4)
hidden_units = 128
epsilon_proj = 0.25
ctx = mx.gpu() if mx.context.num_gpus() > 0 else mx.cpu()
```
## Bag of words random projection
A previous version of this example contained a bag-of-words random projection example; it is kept here for reference but not used in the next example.
Random projection is a dimensionality reduction technique that guarantees that the pairwise distances between your original data points are preserved within a certain bound.
What is even more interesting is that the dimension to project onto in order to guarantee that bound does not depend on the original number of dimensions, but solely on the total number of data points.
You can see more explanation [in this blog post](http://jasonpunyon.com/blog/2017/12/02/fun-with-random-numbers-random-projection/)
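For reference, the bound implemented by scikit-learn's `johnson_lindenstrauss_min_dim` (as stated in the scikit-learn documentation; double-check against your installed version) is

$$ n_{\text{components}} \;\geq\; \frac{4 \, \ln(n_{\text{samples}})}{\varepsilon^{2}/2 - \varepsilon^{3}/3} $$

which indeed depends only on the number of samples $n_{\text{samples}}$ and the allowed distortion $\varepsilon$, not on the original dimensionality.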
```
proj_dim = johnson_lindenstrauss_min_dim(num_samples, epsilon_proj)
print("To keep a distance disruption ~< {}% of our {} samples we need to randomly project to at least {} dimensions".format(epsilon_proj*100, num_samples, proj_dim))
class BagOfWordsRandomProjection(gluon.HybridBlock):
def __init__(self, vocab_size, output_dim, random_seed=54321, pad_index=0):
"""
:param int vocab_size: number of element in the vocabulary
:param int output_dim: projection dimension
:param int random_seed: seed to use to guarantee the same projection
:param int pad_index: index of the vocabulary used for padding sentences
"""
super(BagOfWordsRandomProjection, self).__init__()
self._vocab_size = vocab_size
self._output_dim = output_dim
proj = self._random_unit_vecs(vocab_size=vocab_size, output_dim=output_dim, random_seed=random_seed)
# we set the projection of the padding word to 0
proj[pad_index, :] = 0
self.proj = self.params.get_constant('proj', value=proj)
def _random_unit_vecs(self, vocab_size, output_dim, random_seed):
rs = np.random.RandomState(seed=random_seed)
W = rs.normal(size=(vocab_size, output_dim))
Wlen = np.linalg.norm(W, axis=1)
W_unit = W / Wlen[:,None]
return W_unit
def hybrid_forward(self, F, x, proj):
"""
:param nd or sym F:
:param nd.NDArray x: index of tokens
returns the sum of the projected embeddings of each token
"""
embedded = F.Embedding(x, proj, input_dim=self._vocab_size, output_dim=self._output_dim)
return embedded.sum(axis=1)
bowrp = BagOfWordsRandomProjection(1000, 20)
bowrp.initialize()
bowrp(mx.nd.array([[10, 50, 100], [5, 10, 0]]))
```
With padding:
```
bowrp(mx.nd.array([[10, 50, 100, 0], [5, 10, 0, 0]]))
```
# Content-based recommender / ranking system using DSSM
For example in the search result ranking problem:
You have users, that have performed text-based searches. They were presented with results, and selected one of them.
Results are composed of a title and an image.
Your positive examples will be the clicked items in the search results, and the negative examples are sampled from the non-clicked examples.
The network will jointly learn embeddings for the user and query text, making up the "Query", and for the title and image, making up the "Item", and learn how similar they are.
After training, you can index the embeddings of your items and do a kNN search against your query embeddings, using the cosine similarity, to return ranked items.
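A minimal sketch of that retrieval step with plain NumPy; `item_embeddings` and `query_embedding` are hypothetical placeholders standing in for the outputs of the trained item and query branches:

```
# Hypothetical example: rank pre-computed item embeddings against one query embedding
item_embeddings = np.random.randn(1000, 128)   # (num_items, embedding_dim), placeholder values
query_embedding = np.random.randn(128)         # (embedding_dim,), placeholder values

# cosine similarity between the query and every item
item_norms = np.linalg.norm(item_embeddings, axis=1)
query_norm = np.linalg.norm(query_embedding)
cosine_sim = item_embeddings.dot(query_embedding) / (item_norms * query_norm + 1e-9)

# indices of the 10 most similar items, best first
top_k = np.argsort(-cosine_sim)[:10]
```

In practice the item embeddings would be pre-computed and stored in an approximate nearest-neighbour index rather than scanned exhaustively.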
```
proj_dim = 128
class DSSMRecommenderNetwork(gluon.HybridBlock):
def __init__(self, query_vocab_size, proj_dim, max_user, title_vocab_size, hidden_units, random_seed=54321, p=0.5):
super(DSSMRecommenderNetwork, self).__init__()
with self.name_scope():
# User/Query pipeline
self.user_embedding = gluon.nn.Embedding(max_user, proj_dim)
self.user_mlp = gluon.nn.Dense(hidden_units, activation="relu")
# Instead of bag of words, we use learned embeddings + stacked biLSTM average
self.query_text_embedding = gluon.nn.Embedding(query_vocab_size, proj_dim)
self.query_lstm = gluon.rnn.LSTM(hidden_units, 2, bidirectional=True)
self.query_text_mlp = gluon.nn.Dense(hidden_units, activation="relu")
self.query_dropout = gluon.nn.Dropout(p)
self.query_mlp = gluon.nn.Dense(hidden_units, activation="relu")
# Item pipeline
# Instead of bag of words, we use learned embeddings + stacked biLSTM average
self.title_embedding = gluon.nn.Embedding(title_vocab_size, proj_dim)
self.title_lstm = gluon.rnn.LSTM(hidden_units, 2, bidirectional=True)
self.title_mlp = gluon.nn.Dense(hidden_units, activation="relu")
# You could use another CNN backbone (e.g. VGG) here; this example uses resnet18
self.image_embedding = gluon.model_zoo.vision.resnet18_v2(pretrained=False).features
self.image_mlp = gluon.nn.Dense(hidden_units, activation="relu")
self.item_dropout = gluon.nn.Dropout(p)
self.item_mlp = gluon.nn.Dense(hidden_units, activation="relu")
def hybrid_forward(self, F, user, query_text, title, image):
# Query
user = self.user_embedding(user)
user = self.user_mlp(user)
query_text = self.query_text_embedding(query_text)
query_text = self.query_lstm(query_text.transpose((1,0,2)))
# average the states
query_text = query_text.mean(axis=0)
query_text = self.query_text_mlp(query_text)
query = F.concat(user, query_text)
query = self.query_dropout(query)
query = self.query_mlp(query)
# Item
title_text = self.title_embedding(title)
title_text = self.title_lstm(title_text.transpose((1,0,2)))
# average the states
title_text = title_text.mean(axis=0)
title_text = self.title_mlp(title_text)
image = self.image_embedding(image)
image = self.image_mlp(image)
item = F.concat(title_text, image)
item = self.item_dropout(item)
item = self.item_mlp(item)
# Cosine Similarity
query = query.expand_dims(axis=2)
item = item.expand_dims(axis=2)
sim = F.batch_dot(query, item, transpose_a=True) / (query.norm(axis=1) * item.norm(axis=1) + 1e-9).expand_dims(axis=2)
return sim.squeeze(axis=2)
network = DSSMRecommenderNetwork(
query_vocab_size,
proj_dim,
max_user,
title_vocab_size,
hidden_units
)
network.initialize(mx.init.Xavier(), ctx)
# Load pre-trained resnet18 weights
with network.name_scope():
network.image_embedding = gluon.model_zoo.vision.resnet18_v2(pretrained=True, ctx=ctx).features
```
It is quite hard to visualize the network since it is relatively complex, but you can see the two-pronged structure and the resnet18 branch.
```
mx.viz.plot_network(network(
mx.sym.var('user'), mx.sym.var('query_text'), mx.sym.var('title'), mx.sym.var('image')),
shape={'user': (1,1), 'query_text': (1,30), 'title': (1,30), 'image': (1,3,224,224)},
node_attrs={"fixedsize":"False"})
```
We can print the summary of the network using dummy data. We can see it is already training on 32M parameters!
```
user = mx.nd.array([[200], [100]], ctx)
query = mx.nd.array([[10, 20, 0, 0, 0], [40, 50, 0, 0, 0]], ctx) # Example of an encoded text
title = mx.nd.array([[10, 20, 0, 0, 0], [40, 50, 0, 0, 0]], ctx) # Example of an encoded text
image = mx.nd.random.uniform(shape=(2,3, 224,224), ctx=ctx) # Example of an encoded image
network.summary(user, query, title, image)
network(user, query, title, image)
```
The output is the similarity. If we wanted to train it on real data, we would need to minimize the cosine loss, 1 - cosine_similarity.
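A minimal sketch of what one training step could look like, assuming batches of `(user, query_text, title, image, label)` where `label` is 1 for a clicked item and 0 for a sampled negative; these names and the data pipeline are assumptions for illustration, not part of the original example:

```
# Sketch only: one gradient step on the cosine loss (placeholder data pipeline)
trainer = gluon.Trainer(network.collect_params(), 'adam', {'learning_rate': 1e-3})

def train_step(user, query_text, title, image, label):
    # label is assumed to have shape (batch_size, 1): 1.0 for clicks, 0.0 for sampled negatives
    with autograd.record():
        sim = network(user, query_text, title, image)       # shape (batch_size, 1)
        # push the similarity towards 1 for positives and down to 0 for negatives
        loss = label * (1 - sim) + (1 - label) * nd.relu(sim)
        loss = loss.mean()
    loss.backward()
    trainer.step(user.shape[0])
    return loss.asscalar()
```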
|
github_jupyter
|
# Plots for logistic regression, consistent vs inconsistent noiseless AT, increasing epsilon
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches
import dotenv
import pandas as pd
import mlflow
import plotly
import plotly.graph_objects as go
import plotly.express as px
import plotly.subplots
import plotly.io as pio
import typing
import os
import shutil
import sys
import warnings
EXPORT = False
SHOW_TITLES = not EXPORT
EXPORT_DIR_NAME = 'eps_increase_separate_legend'
EXPERIMENT_NAME = 'logistic_regression_inconsistent_consistent_increase_epsilon'
# Load environment variables
dotenv.load_dotenv()
# Enable loading of the project module
MODULE_DIR = os.path.join(os.path.abspath(os.path.join(os.path.curdir, os.path.pardir, os.pardir)), 'src')
sys.path.append(MODULE_DIR)
%load_ext autoreload
%autoreload 2
import interpolation_robustness as ir
FIGURE_SIZE = (2.75, 1.4)
LEGEND_FIGURE_SIZE = (2.75, 0.7)
LEGEND_FONT_SIZE = ir.plots.FONT_SIZE_SMALL_PT
ir.plots.setup_matplotlib(show_titles=SHOW_TITLES)
if EXPORT:
EXPORT_DIR = os.path.join(ir.util.REPO_ROOT_DIR, 'logs', f'export_{EXPORT_DIR_NAME}')
print('Using export directory', EXPORT_DIR)
if os.path.exists(EXPORT_DIR):
shutil.rmtree(EXPORT_DIR)
os.makedirs(EXPORT_DIR)
def export_fig(fig: plt.Figure, filename: str):
# If export is disabled then do nothing
if EXPORT:
export_path = os.path.join(EXPORT_DIR, filename)
fig.savefig(export_path)
print('Exported figure at', export_path)
```
## Load experiment data
```
client = mlflow.tracking.MlflowClient()
experiment = client.get_experiment_by_name(EXPERIMENT_NAME)
runs = mlflow.search_runs(
experiment.experiment_id
)
runs = runs.set_index('run_id', drop=False) # set index, but keep column to not break stuff depending on it
# Convert some parameters to numbers and sort accordingly
runs['params.data_dim'] = runs['params.data_dim'].astype(int)
runs['params.data_num_train_samples'] = runs['params.data_num_train_samples'].astype(int)
runs['params.train_attack_epsilon'] = runs['params.train_attack_epsilon'].astype(float)
runs['params.test_attack_epsilon'] = runs['params.test_attack_epsilon'].astype(float)
runs['params.l2_lambda'] = runs['params.l2_lambda'].astype(float)
runs['params.label_noise'] = runs['params.label_noise'].astype(float)
runs['metrics.train_robust_risk'] = 1.0 - runs['metrics.train_robust_accuracy']
assert runs['params.l2_lambda'].eq(0).all()
runs['metrics.train_robust_log_loss'] = runs['metrics.training_loss']
runs = runs.sort_values(['params.label_noise'], ascending=True)
print('Loaded', len(runs), 'runs')
assert runs['status'].eq('FINISHED').all()
assert runs['params.label_noise'].eq(0).all()
grouping_keys = ['params.data_dim', 'params.l2_lambda', 'params.train_consistent_attacks', 'params.train_attack_epsilon', 'params.data_num_train_samples']
aggregate_metrics = ('metrics.true_robust_risk', 'metrics.train_robust_log_loss')
runs_agg = runs.groupby(grouping_keys, as_index=False).aggregate({metric: ['mean', 'std'] for metric in aggregate_metrics})
```
## Plots
```
robust_consistent_color_idx = 1
robust_inconsistent_color_idx = 2
population_linestyle = ir.plots.LINESTYLE_MAP[0]
training_linestyle = ir.plots.LINESTYLE_MAP[2]
BASELINE_LAMBDA = 0
warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)
fig, ax = plt.subplots(figsize=FIGURE_SIZE)
target_num_samples = 1000
current_runs = runs_agg[runs_agg['params.data_num_train_samples'] == target_num_samples]
assert current_runs['params.l2_lambda'].eq(0).all()
data_dim, = current_runs['params.data_dim'].unique()
current_consistent_runs = current_runs[current_runs['params.train_consistent_attacks'] == 'True']
current_inconsistent_runs = current_runs[current_runs['params.train_consistent_attacks'] == 'False']
ax.errorbar(
current_consistent_runs['params.train_attack_epsilon'],
current_consistent_runs[('metrics.true_robust_risk', 'mean')],
yerr=current_consistent_runs[('metrics.true_robust_risk', 'std')],
label=fr'Robust risk, cons.',
c=f'C{robust_consistent_color_idx}',
ls=population_linestyle,
zorder=2
)
ax.errorbar(
current_inconsistent_runs['params.train_attack_epsilon'],
current_inconsistent_runs[('metrics.true_robust_risk', 'mean')],
yerr=current_inconsistent_runs[('metrics.true_robust_risk', 'std')],
label=fr'Robust risk, incons.',
c=f'C{robust_inconsistent_color_idx}',
ls=population_linestyle,
zorder=2
)
ax.errorbar(
current_consistent_runs['params.train_attack_epsilon'],
current_consistent_runs[('metrics.train_robust_log_loss', 'mean')],
yerr=current_consistent_runs[('metrics.train_robust_log_loss', 'std')],
label=fr'Training loss, cons.',
c=f'C{robust_consistent_color_idx}',
ls=training_linestyle,
zorder=2
)
ax.errorbar(
current_inconsistent_runs['params.train_attack_epsilon'],
current_inconsistent_runs[('metrics.train_robust_log_loss', 'mean')],
yerr=current_inconsistent_runs[('metrics.train_robust_log_loss', 'std')],
label=fr'Training loss, incons.',
c=f'C{robust_inconsistent_color_idx}',
ls=training_linestyle,
zorder=2
)
ax.set_xlabel('Train and test $\epsilon$')
ax.set_ylim(bottom=-0.005)
ax.set_xlim(left=-0.001)
if SHOW_TITLES:
fig.suptitle(f'Consistent vs inconsistent AT, fixed d {data_dim} and n {target_num_samples}')
export_fig(fig, f'eps_increase.pdf')
plt.show()
# Legend
legend_fig = plt.figure(figsize=LEGEND_FIGURE_SIZE)
handles, labels = ax.get_legend_handles_labels()
ir.plots.errorbar_legend(
legend_fig,
handles,
labels,
loc='center',
ncol=2,
mode='expand',
frameon=True,
fontsize=LEGEND_FONT_SIZE,
borderpad=0.7
)
export_fig(legend_fig, f'eps_increase_legend.pdf')
```
|
github_jupyter
|
# RNN - LSTM - Toxic Comments
A corpus of manually labeled comments, classified by type of toxicity, is available on Kaggle. We will aim to do a binary classification of whether a comment is toxic or not.
Approach:
- Learning Embedding with the Task
- LSTM
- BiLSTM
```
import numpy as np
import pandas as pd
import keras
import matplotlib.pyplot as plt
%matplotlib inline
import vis
```
### Get the Data
Uncomment these shell lines to get the data
```
# !wget http://bit.do/deep_toxic_train -P data/
# !mv data/deep_toxic_train data/train.zip
df = pd.read_csv("data/train.zip")
df.head()
```
### Import the required libraries
```
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
```
### Create the Input & Output Data
```
train_sentences = df["comment_text"]
train_sentences.head()
```
**Pre-processing the train data**
- Tokenization: "This is an apple" -> ["This", "is", "an", "apple"]
- Indexing: {0: "This", 1: "is", 2: "an", 3: "apple"}
- Index Representation: [0, 1, 2, 3]
```
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
# Tokenizer
max_words = 20000
tokenizer = Tokenizer(num_words=max_words, oov_token='UNK')
tokenizer.fit_on_texts(list(train_sentences))
```
Tokenizer Fix from https://github.com/keras-team/keras/issues/8092
```
tokenizer.word_index = {e:i for e,i in tokenizer.word_index.items() if i <= max_words} # <= because tokenizer is 1 indexed
tokenizer.word_index[tokenizer.oov_token] = max_words + 1
# Index Representation
tokenized_train = tokenizer.texts_to_sequences(train_sentences)
# Selecting Padding
# find length of each sentence and plot the length
number_of_words = [len(comment) for comment in tokenized_train]
plt.hist(number_of_words, bins = np.arange(0, 500, 10));
# Padding to make it uniform
maxlen = 200
X = pad_sequences(tokenized_train, maxlen = maxlen)
labels = df.iloc[:,2].values
# Baseline Benchmark
1 - df.iloc[:,2].sum()/df.iloc[:,2].count()
from keras.utils import to_categorical
y = to_categorical(labels)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
```
### Step 2: Create the Model Architecture
```
from keras.models import Sequential
from keras.layers import Dense, LSTM, Embedding, Dropout, Bidirectional
model = Sequential()
model.add(Embedding(max_words, output_dim=128, mask_zero=True))
model.add(LSTM(60))
model.add(Dropout(0.1))
model.add(Dense(2, activation='sigmoid'))
```
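The approach list at the top also mentions a BiLSTM. A minimal variant of the same architecture using the `Bidirectional` wrapper already imported above would look like this (a sketch only; it is not trained in this notebook):

```
# BiLSTM variant of the model above (sketch, not trained here)
bi_model = Sequential()
bi_model.add(Embedding(max_words, output_dim=128, mask_zero=True))
bi_model.add(Bidirectional(LSTM(60)))
bi_model.add(Dropout(0.1))
bi_model.add(Dense(2, activation='sigmoid'))
```

It can be compiled and fit exactly like the model in the next step.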
### Step 3: Compile the Model & Fit on the Data
```
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
output = model.fit(X_train, y_train, batch_size=128, epochs=5, validation_split=0.2)
```
### Step 4: Evaluate the Model
```
vis.metrics(output.history)
score = model.evaluate(X_test, y_test, verbose=1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
```
### Step 5: Visualise evaluation & Make a prediction
```
predict_classes = model.predict_classes(X_test)
actual_classes = np.dot(y_test,np.array([[0],[1]])).reshape(-1)
pd.crosstab(actual_classes, predict_classes)
```
|
github_jupyter
|
## Mask Adaptivity Detection Using YOLO
Masks became an essential accessory after COVID-19. Most countries are making face masks mandatory to avail of services like transport and fuel, and for any sort of outside activity. It has become essential to keep track of how well the crowd has adopted masks. This notebook contains the implementation of a face mask adaptivity tracker using YOLO.

```
import warnings
import numpy as np
import argparse
import time
import cv2
import os
warnings.filterwarnings("ignore")
```
### Prepare DarkNet Environment
```
# Create DarkNet Environment
def prepare_environment():
os.environ['PATH'] += ':/usr/local/cuda/bin'
!rm -fr darknet
!git clone https://github.com/AlexeyAB/darknet/
!apt install gcc-5 g++-5 -y
!ln -s /usr/bin/gcc-5 /usr/local/cuda/bin/gcc
!ln -s /usr/bin/g++-5 /usr/local/cuda/bin/g++
%cd darknet
!sed -i 's/GPU=0/GPU=1/g' Makefile
!sed -i 's/OPENCV=0/OPENCV=1/g' Makefile
!make
# get yolov3 weights
#!wget https://pjreddie.com/media/files/darknet53.conv.74.weights
!chmod a+x ./darknet
!apt install ffmpeg libopencv-dev libgtk-3-dev python-numpy python3-numpy libdc1394-22 libdc1394-22-dev libjpeg-dev libtiff5-dev libavcodec-dev libavformat-dev libswscale-dev libxine2-dev libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev libv4l-dev libtbb-dev qtbase5-dev libfaac-dev libmp3lame-dev libopencore-amrnb-dev libopencore-amrwb-dev libtheora-dev libvorbis-dev libxvidcore-dev x264 v4l-utils unzip
status='Completed'
return status
prepare_environment()
from google.colab import drive
drive.mount('/content/drive')
os.listdir('/content/drive/My Drive/darknet/YOLO_Custom')
```
### Get Tiny YOLO Weight (Skip if Resuming Training)
```
!wget --header="Host: pjreddie.com" --header="User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36" --header="Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9" --header="Accept-Language: en-US,en;q=0.9" --header="Referer: https://github.com/AlexeyAB/darknet" --header="Cookie: __utma=134107727.1364647705.1589636782.1589689587.1589901067.3; __utmz=134107727.1589901067.3.3.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided)" --header="Connection: keep-alive" "https://pjreddie.com/media/files/yolov3-tiny.weights" -c -O 'yolov3-tiny.weights'
!./darknet partial cfg/yolov3-tiny.cfg yolov3-tiny.weights yolov3-tiny.conv.15 15
```
### Copy Required Files from Drive
```
# Copy files from Google Drive to the VM local filesystem
!cp -r "/content/drive/My Drive/darknet/YOLO_Custom" /content/darknet/YOLO_Custom
```
### Train
Use the command below to train YOLO:
`!./darknet detector train data_file_path config_file_path training_weights_path log_path`
To train YOLOv3 instead of tiny YOLO, replace the following files:
use ***yolov3_train_cfg*** instead of ***yolov3-tiny_obj_train.cfg***
use ***yolov3_test_cfg*** instead of ***yolov3-tiny_obj_test.cfg*** for testing purposes.
Also swap the tiny-YOLO weights for the YOLOv3 weights (a rough sketch of the resulting command is shown after the training command below).
```
!./darknet detector train "/content/darknet/YOLO_Custom/obj.data" "/content/darknet/YOLO_Custom/yolov3-tiny_obj_train.cfg" "/content/darknet/yolov3-tiny.conv.15" "train.log" -dont_show
```
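Following the notes above, a rough sketch of the corresponding full-YOLOv3 training command; the ***yolov3_train_cfg*** file name is taken from the note above and the darknet53 weights from the commented download in the environment setup, so adjust both paths to whatever your setup actually contains:

```
# Sketch only: train full YOLOv3 instead of tiny YOLO (file names assumed from the notes above)
!./darknet detector train "/content/darknet/YOLO_Custom/obj.data" "/content/darknet/YOLO_Custom/yolov3_train_cfg" "/content/darknet/darknet53.conv.74.weights" "train.log" -dont_show
```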
### Utility Functions
```
# Define thresholds for confidence score and non-max suppression here
confthres=0.2
nmsthres=0.1
path="./"
def get_labels(label_dir):
return open(label_dir).read().split('\n')
def get_colors(LABELS):
# initialize a list of colors to represent each possible class label
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),dtype="uint8")
return COLORS
def get_weights(weights_path):
# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([yolo_path, weights_path])
return weightsPath
def get_config(config_path):
configPath = os.path.sep.join([yolo_path, config_path])
return configPath
def load_model(configpath,weightspath):
# load our YOLO object detector trained on COCO dataset (80 classes)
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configpath, weightspath)
return net
#https://medium.com/analytics-vidhya/object-detection-using-yolo-v3-and-deploying-it-on-docker-and-minikube-c1192e81ae7a
def get_predection(image,net,LABELS,COLORS):
(H, W) = image.shape[:2]
# determine only the *output* layer names that we need from YOLO
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# construct a blob from the input image and then perform a forward
# pass of the YOLO object detector, giving us our bounding boxes and
# associated probabilities
blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
layerOutputs = net.forward(ln)
#print(layerOutputs[0])
end = time.time()
# show timing information on YOLO
#print("[INFO] YOLO took {:.6f} seconds".format(end - start))
# initialize our lists of detected bounding boxes, confidences, and
# class IDs, respectively
boxes = []
confidences = []
classIDs = []
num_class_0 = 0
num_class_1 = 0
# loop over each of the layer outputs
for output in layerOutputs:
# loop over each of the detections
for detection in output:
# extract the class ID and confidence (i.e., probability) of
# the current object detection
scores = detection[5:]
#print(detection)
classID = np.argmax(scores)
# print(classID)
confidence = scores[classID]
# filter out weak predictions by ensuring the detected
# probability is greater than the minimum probability
if confidence > confthres:
# scale the bounding box coordinates back relative to the
# size of the image, keeping in mind that YOLO actually
# returns the center (x, y)-coordinates of the bounding
# box followed by the boxes' width and height
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
# use the center (x, y)-coordinates to derive the top and
# and left corner of the bounding box
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
# update our list of bounding box coordinates, confidences,
# and class IDs
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
classIDs.append(classID)
if(classID==0):
num_class_0 +=1
elif(classID==1):
num_class_1 +=1
# apply non-maxima suppression to suppress weak, overlapping bounding
# boxes
idxs = cv2.dnn.NMSBoxes(boxes, confidences, confthres,
nmsthres)
if(num_class_0>0 or num_class_1>0):
index= num_class_0/(num_class_0+num_class_1)
else:
index=-1
#print(index,)
# ensure at least one detection exists
if len(idxs) > 0:
# loop over the indexes we are keeping
for i in idxs.flatten():
# extract the bounding box coordinates
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
# draw a bounding box rectangle and label on the image
color = [int(c) for c in COLORS[classIDs[i]]]
cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
text = "{}".format(LABELS[classIDs[i]])
#print(boxes)
#print(classIDs)
#cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,0.5, color=(69, 60, 90), thickness=2)
cv2.rectangle(image, (x, y-5), (x+62, y-15), color, cv2.FILLED)
cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,0.5, (0,0,0), 1)
if(index!=-1 and index<.50):
cv2.rectangle(image, (40, 46), (220, 16), (0,0,255), cv2.FILLED)
cv2.putText(image,'Mask Adaptivity: POOR',(40,40),cv2.FONT_HERSHEY_SIMPLEX,0.5, (0,255,255), 1)
elif(index>=.50 and index<.70):
cv2.rectangle(image, (40, 46), (255, 16), (0, 165, 255), cv2.FILLED)
cv2.putText(image,'Mask Adaptivity: MODERATE',(40,40),cv2.FONT_HERSHEY_SIMPLEX,0.5, (0,0,0), 1)
elif(index>=0.70):
cv2.rectangle(image, (40, 46), (220, 16), (42,236,42), cv2.FILLED)
cv2.putText(image,'Mask Adaptivity: HIGH',(40,40),cv2.FONT_HERSHEY_SIMPLEX,0.5, (0,0,0), 1)
return image
# Method to predict Image
def predict_image(img_path):
image = cv2.imread(img_path)
nets=load_model(CFG,Weights)
#Colors=get_colors(Lables)
Colors=[(42,236,42),(0,0,255)]
res=get_predection(image,nets,Lables,Colors)
# image=cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
# show the output image
cv2_imshow(res)
cv2.waitKey()
# Method to predict Video
def predict_video(input_path,output_path):
vid = cv2.VideoCapture(input_path)
op=output_path
height, width = None, None
writer = None
# load the model and set the per-class box colours (same values as in predict_image)
nets = load_model(CFG,Weights)
Colors = [(42,236,42),(0,0,255)]
print('[Info] processing video (it may take several minutes to run)..')
while True:
grabbed, frame = vid.read()
# Checking if the complete video is read
if not grabbed:
break
if width is None or height is None:
height, width = frame.shape[:2]
frame=get_predection(frame,nets,Lables,Colors)
if writer is None:
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
writer = cv2.VideoWriter(op, fourcc, 27,(frame.shape[1], frame.shape[0]), True)
writer.write(frame)
print ("[INFO] Cleaning up...")
writer.release()
vid.release()
print ("[INFO] Prediction Completed.")
# This will not work in Colab, as Colab can't access local hardware
import time
def predict_web_cam():
# open the default webcam (only works when running locally, not in Colab)
stream = cv2.VideoCapture(0)
# load the model and set the per-class box colours (same values as in predict_image)
nets = load_model(CFG,Weights)
Colors = [(42,236,42),(0,0,255)]
while True:
# Capture frame-by-frame
grabbed, frame = stream.read()
if not grabbed:
break
# Run detection
start = time.time()
output_image = get_predection(frame,nets,Lables,Colors)
end = time.time()
print("Inference time: {:.2f}s".format(end - start))
# Display the resulting frame
cv2.imshow('Web Cam', output_image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
stream.release()
cv2.destroyAllWindows()
```
### Set the Variables (Must before Prediction)
```
# Set the pah for test config file, label directory and weights
CFG='/content/darknet/YOLO_Custom/yolov3-tiny_obj_test.cfg'
label_dir='/content/darknet/YOLO_Custom/obj.names'
#cfgpath=test_config_path
Weights='/content/darknet/YOLO_Custom/yolov3-tiny_obj_train_tiny8.weights'
Lables=get_labels(label_dir)
```
### Predict Image
```
from google.colab.patches import cv2_imshow
img_path='/content/buckey.jpg'
predict_image(img_path)
```
### Predict Video
```
input_path='/content/in.mp4'
output_path='/content/out.mp4'
predict_video(input_path,output_path)
```
|
github_jupyter
|
## Precision-Recall Curves in Multiclass
For multiclass classification, we have 2 options:
- determine a PR curve for each class.
- determine the overall PR curve as the micro-average of all classes
Let's see how to do both.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_wine
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
# to convert the 1-D target vector in to a matrix
from sklearn.preprocessing import label_binarize
from sklearn.metrics import precision_recall_curve
from yellowbrick.classifier import PrecisionRecallCurve
```
## Load data (multiclass)
```
# load data
data = load_wine()
data = pd.concat([
pd.DataFrame(data.data, columns=data.feature_names),
pd.DataFrame(data.target, columns=['target']),
], axis=1)
data.head()
# target distribution:
# multiclass and (fairly) balanced
data.target.value_counts(normalize=True)
# separate dataset into train and test
X_train, X_test, y_train, y_test = train_test_split(
data.drop(labels=['target'], axis=1), # drop the target
data['target'], # just the target
test_size=0.3,
random_state=0)
X_train.shape, X_test.shape
# the target is a vector with the 3 classes
y_test[0:10]
```
## Train ML models
The dataset we are using is extremely simple, so I am intentionally creating weak models: few, very shallow trees for the random forest and few iterations for the logit. This is so that we can get the most out of the PR curves by inspecting them visually.
### Random Forests
The Random Forests in sklearn are not trained as a 1 vs Rest. So in order to produce a 1 vs rest probability vector for each class, we need to wrap this estimator with another one from sklearn:
- [OneVsRestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html)
```
# set up the model, wrapped by the OneVsRestClassifier
rf = OneVsRestClassifier(
RandomForestClassifier(
n_estimators=10, random_state=39, max_depth=1, n_jobs=4,
)
)
# train the model
rf.fit(X_train, y_train)
# produce the predictions (as probabilities)
y_train_rf = rf.predict_proba(X_train)
y_test_rf = rf.predict_proba(X_test)
# note that the predictions are an array of 3 columns
# first column: the probability of an observation of being of class 0
# second column: the probability of an observation of being of class 1
# third column: the probability of an observation of being of class 2
y_test_rf[0:10, :]
pd.DataFrame(y_test_rf).sum(axis=1)[0:10]
# The final prediction is the class with the biggest probability
rf.predict(X_test)[0:10]
```
### Logistic Regression
The logistic regression supports 1 vs rest automatically through its multi_class parameter:
```
# set up the model
logit = LogisticRegression(
random_state=0, multi_class='ovr', max_iter=10,
)
# train
logit.fit(X_train, y_train)
# obtain the probabilities
y_train_logit = logit.predict_proba(X_train)
y_test_logit = logit.predict_proba(X_test)
# note that the predictions are an array of 3 columns
# first column: the probability of an observation of being of class 0
# second column: the probability of an observation of being of class 1
# third column: the probability of an observation of being of class 2
y_test_logit[0:10, :]
# The final prediction is the class with the biggest probability
logit.predict(X_test)[0:10]
```
## Precision-Recall Curve
### Per class with Sklearn
```
# with label_binarize we transform the target vector
# into a multi-label matrix, so that it matches the
# outputs of the models
# then we have 1 class per column
y_test = label_binarize(y_test, classes=[0, 1, 2])
y_test[0:10, :]
# now we determine the precision and recall at different thresholds
# considering only the probability vector for class 2 and the true
# target for class 2
# so we treat the problem as class 2 vs rest
p, r, thresholds = precision_recall_curve(y_test[:, 2], y_test_rf[:, 2])
# precision values
p
# recall values
r
# thresholds examined
thresholds
```
Go ahead and examine the precision and recall for the other classes and see how these values change.
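For example, repeating the call above for class 0 only requires selecting a different column of the binarized target and of the probability matrix:

```
# class 0 vs rest, using the arrays defined above
p0, r0, thresholds0 = precision_recall_curve(y_test[:, 0], y_test_rf[:, 0])
p0, r0
```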
```
# now let's do these for all classes and capture the results in
# dictionaries, so we can plot the values afterwards
# determine the Precision and recall
# at various thresholds of probability
# in a 1 vs all fashion, for each class
precision_rf = dict()
recall_rf = dict()
# for each class
for i in range(3):
# determine precision and recall at various thresholds
# in a 1 vs all fashion
precision_rf[i], recall_rf[i], _ = precision_recall_curve(
y_test[:, i], y_test_rf[:, i])
precision_rf
# plot the curves for each class
for i in range(3):
plt.plot(recall_rf[i], precision_rf[i], label='class {}'.format(i))
plt.xlabel("recall")
plt.ylabel("precision")
plt.legend(loc="best")
plt.title("precision vs. recall curve")
plt.show()
# and now for the logistic regression
precision_lg = dict()
recall_lg = dict()
# for each class
for i in range(3):
# determine precision and recall at various thresholds
# in a 1 vs all fashion
precision_lg[i], recall_lg[i], _ = precision_recall_curve(
y_test[:, i], y_test_logit[:, i])
plt.plot(recall_lg[i], precision_lg[i], label='class {}'.format(i))
plt.xlabel("recall")
plt.ylabel("precision")
plt.legend(loc="best")
plt.title("precision vs. recall curve")
plt.show()
# and now, just because it is a bit difficult to compare
# between models, we plot the PR curves class by class,
# but the 2 models in the same plot
# for each class
for i in range(3):
plt.plot(recall_lg[i], precision_lg[i], label='logit class {}'.format(i))
plt.plot(recall_rf[i], precision_rf[i], label='rf class {}'.format(i))
plt.xlabel("recall")
plt.ylabel("precision")
plt.legend(loc="best")
plt.title("precision vs. recall curve for class{}".format(i))
plt.show()
```
We see that the Random Forest does a better job for all classes.
### Micro-average with sklearn
In order to do this, we concatenate all the probability vectors one after the other, and we do the same with the true values.
```
# probability vectors for all classes in 1-d vector
y_test_rf.ravel()
# see that the unravelled prediction vector has 3 times the size
# of the original target
len(y_test), len(y_test_rf.ravel())
# A "micro-average": quantifying score on all classes jointly
# for random forests
precision_rf["micro"], recall_rf["micro"], _ = precision_recall_curve(
y_test.ravel(), y_test_rf.ravel(),
)
# for logistic regression
precision_lg["micro"], recall_lg["micro"], _ = precision_recall_curve(
y_test.ravel(), y_test_logit.ravel(),
)
# now we plot them next to each other
i = "micro"
plt.plot(recall_lg[i], precision_lg[i], label='logit micro {}')
plt.plot(recall_rf[i], precision_rf[i], label='rf micro {}')
plt.xlabel("recall")
plt.ylabel("precision")
plt.legend(loc="best")
plt.title("precision vs. recall curve for class{}".format(i))
plt.show()
```
## Yellowbrick
### Per class with Yellowbrick
https://www.scikit-yb.org/en/latest/api/classifier/prcurve.html
**Note:**
In the cells below, we are passing to the Yellowbrick classes a model that is already fit. When we fit() the Yellowbrick class, it will check whether the model is already fit, in which case it will do nothing.
If we pass a model that is not fit, and a multiclass target, Yellowbrick will wrap the model automatically with a 1 vs Rest classifier.
Check Yellowbrick's documentation for more details.
```
visualizer = PrecisionRecallCurve(
rf, per_class=True, cmap="cool", micro=False,
)
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
visualizer.show() # Finalize and show the figure
visualizer = PrecisionRecallCurve(
logit, per_class=True, cmap="cool", micro=False,
)
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
visualizer.show() # Finalize and show the figure
```
### Micro-average with Yellowbrick
```
visualizer = PrecisionRecallCurve(
rf, cmap="cool", micro=True,
)
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
visualizer.show() # Finalize and show the figure
visualizer = PrecisionRecallCurve(
logit, cmap="cool", micro=True,
)
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
visualizer.show() # Finalize and show the figure
```
That's all for PR curves
|
github_jupyter
|
## A motivating example: harmonic oscillator
### created by Yuying Liu, 11/02/2019
```
# imports
import os
import sys
import torch
import numpy as np
import scipy as sp
from scipy import integrate
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_toolkits.mplot3d import Axes3D
import warnings
warnings.filterwarnings('ignore')
module_path = os.path.abspath(os.path.join('../../src/'))
if module_path not in sys.path:
sys.path.append(module_path)
import VaryingStepRKNN as net
# # # # # # # # # # # # # # # # # #
# global constants, paths, etc. #
# # # # # # # # # # # # # # # # # #
load_data = False
load_model = False
data_dir = '../data/VaryingStep/Linear/'
model_dir = '../../model/VaryingStep/Linear/'
```
### simulation
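For reference, the system integrated below is the harmonic oscillator written in first-order form,

$$\dot{x} = A x, \qquad A = \begin{bmatrix} 0 & -1 \\ 1 & 0 \end{bmatrix},$$

whose trajectories are pure rotations of the initial condition,

$$x(t) = \begin{bmatrix} \cos t & -\sin t \\ \sin t & \cos t \end{bmatrix} x(0),$$

i.e. circles traversed with period $2\pi$. This matches the matrix `A` defined in the cell below.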
```
np.random.seed(2)
train_steps = 10000
test_steps = 2**15
n_train = 500
n_test = 50
n_steps = 5
dt = 0.002;
n = 2
A = np.array([[0, -1], [1, 0]])
def harmonic_oscillator_rhs(x):
# rhs of harmonic oscillator
return A.dot(x)
if load_data:
# # # # # # # # # # # # # # # # # # # #
# load simulated data from directory #
# # # # # # # # # # # # # # # # # # # #
train_data = np.load(os.path.join(data_dir, 'trainBig.npy'))
test_data = np.load(os.path.join(data_dir, 'testBig.npy'))
n_train = train_data.shape[0]
n_test = test_data.shape[0]
else:
# # # # # # # # # # # # # # # # #
# simulate harmonic oscillator #
# # # # # # # # # # # # # # # # #
# simulate training trials
train_data = np.zeros((n_train, train_steps, n))
print('generating training trials ...')
for i in tqdm(range(n_train)):
x_init = np.random.uniform(-1, 1, n)
t = np.linspace(0, (train_steps-1)*dt, train_steps)
sol = sp.integrate.solve_ivp(lambda _, x: harmonic_oscillator_rhs(x), [0, (train_steps-1)*dt], x_init, t_eval=t)
train_data[i, :, :] = sol.y.T
# simulate test trials
test_data = np.zeros((n_test, test_steps, n))
print('generating testing trials ...')
for i in tqdm(range(n_test)):
x_init = np.random.uniform(-1, 1, n)
t = np.linspace(0, (test_steps-1)*dt, test_steps)
sol = sp.integrate.solve_ivp(lambda _, x: harmonic_oscillator_rhs(x), [0, (test_steps-1)*dt], x_init, t_eval=t)
test_data[i, :, :] = sol.y.T
# save data
np.save(os.path.join(data_dir, 'trainBig.npy'), train_data)
np.save(os.path.join(data_dir, 'testBig.npy'), test_data)
```
### visualize & load data
```
# load the data to dataset object
datasets = list()
step_sizes = list()
print('Dt\'s: ')
for i in range(2, 10):
step_size = 2**i
print(step_size * dt)
step_sizes.append(step_size)
datasets.append(net.VaryingStepDataSet(train_data, test_data, dt, n_steps=n_steps, step_size=step_size))
# # # # # # # # #
# visualization #
# # # # # # # # #
# visualize time series & samples
t = np.linspace(0, (test_steps-1)*dt, test_steps)
fig = plt.figure(figsize=(12, 8))
gs = gridspec.GridSpec(nrows=3, ncols=2, hspace=0.5)
ax0 = fig.add_subplot(gs[0, :])
ax0.plot(t, test_data[0, :, :], linewidth=2.0)
ax0.set_title('sampled trajectory', fontsize=20)
ax0.tick_params(axis='both', which='major', labelsize=15)
ax1 = fig.add_subplot(gs[1:, 0])
ax2 = fig.add_subplot(gs[1:, 1])
for i in range(n_train):
ax1.scatter(datasets[0].train_ys[i, 0:100, 0], datasets[0].train_ys[i, 0:100, 1], marker='.', s=30)
ax2.scatter(datasets[-1].train_ys[i, 0:100, 0], datasets[-1].train_ys[i, 0:100, 1], marker='.', s=30)
ax1.set_title('high res samples', fontsize=20)
ax1.tick_params(axis='both', which='major', labelsize=15)
ax2.set_title('low res samples', fontsize=20)
ax2.tick_params(axis='both', which='major', labelsize=15)
```
### RKNN with varying timesteps
```
models = list()
max_epoch=10000
if load_model:
# load the model
for step_size in step_sizes:
models.append(torch.load(os.path.join(model_dir, 'model_D{}.pt'.format(step_size)), map_location='cpu'))
# fix model consistencies trained on gpus (optional)
for model in models:
model.device = 'cpu'
model._modules['vector_field']._modules['activation'] = torch.nn.ReLU()
else:
for (step_size, dataset) in zip(step_sizes, datasets):
# set up the network
model = net.VaryingTimeStepper(arch=[2, 20, 40, 40, 20, 2], dt=dt)
# training
print('training model_D{} ...'.format(step_size))
model.train_net(dataset, max_epoch=max_epoch, batch_size=n_train, lr=1e-3,
model_path=os.path.join(model_dir, 'model_D{}.pt'.format(step_size)))
models.append(model)
# uniscale forecast
n_steps = 1000
preds_mse = list()
criterion = torch.nn.MSELoss(reduction='none')
for (model, dataset) in tqdm(zip(models, datasets)):
y_preds, _ = model.forecast(dataset.test_x, n_steps=n_steps)
preds_mse.append(criterion(dataset.test_ys[:, 0:n_steps, :], y_preds).mean(-1))
# visualize forecasting error at each time step
fig = plt.figure(figsize=(20, 6))
t = [dt*step for step in range(n_steps)]
colors=iter(plt.cm.rainbow(np.linspace(0, 1, 8)))
for k in range(len(preds_mse)):
err = preds_mse[k]
mean = err.mean(0).detach().numpy()
rgb = next(colors)
plt.plot(t, np.log(mean), linestyle='-', color=rgb, linewidth=3.0, label='$\Delta\ t$={}'.format(step_sizes[k]*dt))
plt.legend(fontsize=20, loc='upper right')
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
# multiscale forecast
n_steps = 1000
criterion = torch.nn.MSELoss(reduction='none')
y_preds, _ = net.multiscale_forecast(datasets[0].test_x, n_steps=n_steps, models=models[3:])
multiscale_preds_mse = criterion(datasets[0].test_ys[:, 0:n_steps, :], y_preds).mean(-1)
# visualize multiscale forecasting error at each time step
fig = plt.figure(figsize=(20, 6))
t = [dt*step for step in range(n_steps)]
colors=iter(plt.cm.rainbow(np.linspace(0, 1, 8)))
multiscale_err = multiscale_preds_mse.mean(0).detach().numpy()
for k in range(len(preds_mse)):
err = preds_mse[k]
mean = err.mean(0).detach().numpy()
rgb = next(colors)
plt.plot(t, np.log(mean), linestyle='-', color=rgb, linewidth=3.0, alpha=0.5, label='$\Delta\ t$={}'.format(step_sizes[k]*dt))
plt.plot(t, np.log(multiscale_err), linestyle='-', color='k', linewidth=3.0, label='multiscale')
plt.legend(fontsize=20, loc='upper right')
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
```
### Table 1
```
diff_scale_mse = torch.zeros(len(models)+1, len(step_sizes))
for i in range(len(models)):
for j in range(len(step_sizes)):
step_size = step_sizes[j]
diff_scale_mse[i, j] = preds_mse[i][:, 0:5*step_size:step_size].mean()
for j in range(len(step_sizes)):
step_size = step_sizes[j]
diff_scale_mse[-1, j] = multiscale_preds_mse[:, 0:5*step_size:step_size].mean()
np.set_printoptions(precision=2, suppress=False)
print(diff_scale_mse.detach().numpy())
```
### Flow maps
```
xvalues, yvalues = np.meshgrid(np.arange(-1.0,1.0,0.02), np.arange(-1.0,1.0,0.02))
inits = np.stack([xvalues, yvalues], 2)
t = [0.] + [10 * dt * step_size for step_size in step_sizes]
flow = np.zeros((inits.shape[0], inits.shape[1], len(t), 2))
for i in range(inits.shape[0]):
for j in range(inits.shape[1]):
init = inits[i, j]
sol = sp.integrate.solve_ivp(lambda _, x: harmonic_oscillator_rhs(x), [0, 10*step_sizes[-1]*dt], init, t_eval=t)
flow[i, j, :] = sol.y.T
to_plots = list()
vmin = float('inf')
vmax = float('-inf')
for i in range(1, len(t)):
vmin = min(np.min(flow[:, :, i, 1] - flow[:, :, 0, 1]), vmin)
vmax = max(np.max(flow[:, :, i, 1] - flow[:, :, 0, 1]), vmax)
to_plots.append((flow[:, :, i, 1] - flow[:, :, 0, 1]))
for im in to_plots:
plt.figure(figsize=(10, 8))
plt.imshow(im, extent=[-1,1,-1,1], vmin=vmin, vmax=vmax)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
# plt.figure(figsize=(10, 8))
# plt.imshow(im, extent=[-1,1,-1,1], vmin=vmin, vmax=vmax)
# plt.xticks(fontsize=20)
# plt.yticks(fontsize=20)
# cbar = plt.colorbar()
# cbar.ax.tick_params(labelsize=20)
```
|
github_jupyter
|
Decoding with ANOVA + SVM: face vs house in the Haxby dataset
===============================================================
This example does a simple but efficient decoding on the Haxby dataset:
using a feature selection, followed by an SVM.
```
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
%matplotlib inline
```
Retrieve the files of the Haxby dataset
----------------------------------------
```
from nilearn import datasets
# By default 2nd subject will be fetched
haxby_dataset = datasets.fetch_haxby()
# print basic information on the dataset
print('Mask nifti image (3D) is located at: %s' % haxby_dataset.mask)
print('Functional nifti image (4D) is located at: %s' %
haxby_dataset.func[0])
```
Load the behavioral data
-------------------------
```
import pandas as pd
# Load target information as string and give a numerical identifier to each
behavioral = pd.read_csv(haxby_dataset.session_target[0], sep=" ")
conditions = behavioral['labels']
# Restrict the analysis to faces and places
condition_mask = behavioral['labels'].isin(['face', 'house'])
conditions = conditions[condition_mask]
# Confirm that we now have 2 conditions
print(conditions.unique())
# Record these as an array of sessions, with fields
# for condition (face or house) and run
session = behavioral[condition_mask].to_records(index=False)
print(session.dtype.names)
```
Prepare the fMRI data: smooth and apply the mask
-------------------------------------------------
```
from nilearn.input_data import NiftiMasker
mask_filename = haxby_dataset.mask
# For decoding, standardizing is often very important
# note that we are also smoothing the data
masker = NiftiMasker(mask_img=mask_filename, smoothing_fwhm=4,
standardize=True, memory="nilearn_cache", memory_level=1)
func_filename = haxby_dataset.func[0]
X = masker.fit_transform(func_filename)
# Apply our condition_mask
X = X[condition_mask]
```
Build the decoder
------------------
Define the prediction function to be used.
Here we use a Support Vector Classification, with a linear kernel
```
from sklearn.svm import SVC
svc = SVC(kernel='linear')
# Define the dimension reduction to be used.
# Here we use a classical univariate feature selection based on F-test,
# namely Anova. When doing full-brain analysis, it is better to use
# SelectPercentile, keeping 5% of voxels
# (because it is independent of the resolution of the data).
from sklearn.feature_selection import SelectPercentile, f_classif
feature_selection = SelectPercentile(f_classif, percentile=5)
# We have our classifier (SVC) and our feature selection (SelectPercentile), and now,
# we can plug them together in a *pipeline* that performs the two operations
# successively:
from sklearn.pipeline import Pipeline
anova_svc = Pipeline([('anova', feature_selection), ('svc', svc)])
```
Fit the decoder and predict
----------------------------
```
anova_svc.fit(X, conditions)
y_pred = anova_svc.predict(X)
```
Obtain prediction scores via cross validation
-----------------------------------------------
```
from sklearn.model_selection import LeaveOneGroupOut, cross_val_score
# Define the cross-validation scheme used for validation.
# Here we use a LeaveOneGroupOut cross-validation on the session group
# which corresponds to a leave-one-session-out
cv = LeaveOneGroupOut()
# Compute the prediction accuracy for the different folds (i.e. session)
cv_scores = cross_val_score(anova_svc, X, conditions, cv=cv, groups=session)
# Return the corresponding mean prediction accuracy
classification_accuracy = cv_scores.mean()
# Print the results
print("Classification accuracy: %.4f / Chance level: %f" %
(classification_accuracy, 1. / len(conditions.unique())))
# Classification accuracy: 0.70370 / Chance level: 0.5000
```
Visualize the results
----------------------
Look at the SVC's discriminating weights
```
coef = svc.coef_
# reverse feature selection
coef = feature_selection.inverse_transform(coef)
# reverse masking
weight_img = masker.inverse_transform(coef)
# Use the mean image as a background to avoid relying on anatomical data
from nilearn import image
mean_img = image.mean_img(func_filename)
# Create the figure
from nilearn.plotting import plot_stat_map, show
plot_stat_map(weight_img, mean_img, title='SVM weights')
# Saving the results as a Nifti file may also be important
weight_img.to_filename('haxby_face_vs_house.nii')
show()
```
***Exercise:***
What do you see?
|
github_jupyter
|
# Assignment 3: Question Answering
Welcome to this week's assignment of course 4. In this assignment you will explore question answering. You will implement the "Text to Text Transfer from Transformers" model (better known as T5). Since you implemented transformers from scratch last week, you will now be able to use them.
<img src = "qa.png">
## Outline
- [Overview](#0)
- [Part 0: Importing the Packages](#0)
- [Part 1: C4 Dataset](#1)
- [1.1 Pre-Training Objective](#1.1)
- [1.2 Process C4](#1.2)
- [1.2.1 Decode to natural language](#1.2.1)
- [1.3 Tokenizing and Masking](#1.3)
- [Exercise 01](#ex01)
- [1.4 Creating the Pairs](#1.4)
- [Part 2: Transformer](#2)
- [2.1 Transformer Encoder](#2.1)
- [2.1.1 The Feedforward Block](#2.1.1)
- [Exercise 02](#ex02)
- [2.1.2 The Encoder Block](#2.1.2)
- [Exercise 03](#ex03)
- [2.1.3 The Transformer Encoder](#2.1.3)
- [Exercise 04](#ex04)
<a name='0'></a>
### Overview
This assignment will be different from the two previous ones. Due to memory and time constraints of this environment you will not be able to train a model and use it for inference. Instead you will create the necessary building blocks for the transformer encoder model and will use a pretrained version of the same model in two ungraded labs after this assignment.
After completing these 3 (1 graded and 2 ungraded) labs you will:
* Implement the code necessary for Bidirectional Encoder Representations from Transformers (BERT).
* Understand how the C4 dataset is structured.
* Use a pretrained model for inference.
* Understand how the "Text to Text Transfer from Transformers" or T5 model works.
<a name='0'></a>
# Part 0: Importing the Packages
```
import ast
import string
import textwrap
import itertools
import numpy as np
import trax
from trax import layers as tl
from trax.supervised import decoding
# Will come handy later.
wrapper = textwrap.TextWrapper(width=70)
# Set random seed
np.random.seed(42)
```
<a name='1'></a>
## Part 1: C4 Dataset
The [C4](https://www.tensorflow.org/datasets/catalog/c4) is a huge data set. For the purpose of this assignment you will use a few examples out of it which are present in `data.txt`. C4 is based on the [common crawl](https://commoncrawl.org/) project. Feel free to read more on their website.
Run the cell below to see what the examples look like.
```
# load example jsons
example_jsons = list(map(ast.literal_eval, open('data.txt')))
# Printing the examples to see how the data looks like
for i in range(5):
print(f'example number {i+1}: \n\n{example_jsons[i]} \n')
```
Notice the `b` before each string? This means that the data comes as bytes rather than strings. Byte strings are essentially sequences of bytes, so for the rest of the assignment the name `strings` will be used to describe the data.
To check this run the following cell:
```
type(example_jsons[0].get('text'))
```
<a name='1.1'></a>
### 1.1 Pre-Training Objective
**Note:** The word "mask" will be used throughout this assignment in context of hiding/removing word(s)
You will be implementing the BERT loss as shown in the following image.
<img src = "loss.png" width="600" height = "400">
Assume you have the following text: <span style = "color:blue"> **Thank you <span style = "color:red">for inviting </span> me to your party <span style = "color:red">last</span> week** </span>
Now as input you will mask the words in red in the text:
<span style = "color:blue"> **Input:**</span> Thank you **X** me to your party **Y** week.
<span style = "color:blue">**Output:**</span> The model should predict the words(s) for **X** and **Y**.
**Z** is used to represent the end.
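To make the format concrete, here is the same example written out as plain strings; this is an illustration only, since the real pipeline you implement below works on token ids and sentinel ids from the vocabulary:

```
# Illustration only: the masking objective on the example sentence above
sentence     = "Thank you for inviting me to your party last week"
masked_input = "Thank you X me to your party Y week"
target       = "X for inviting Y last Z"
```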
<a name='1.2'></a>
### 1.2 Process C4
C4 only has the plain string `text` field, so you will tokenize and have `inputs` and `targets` out of it for supervised learning. Given your inputs, the goal is to predict the targets during training.
You will now take the `text` and convert it to `inputs` and `targets`.
```
# Grab text field from dictionary
natural_language_texts = [example_json['text'] for example_json in example_jsons]
# First text example
natural_language_texts[4]
```
<a name='1.2.1'></a>
#### 1.2.1 Decode to natural language
The following functions will help you `detokenize` and `tokenize` the text data.
The `sentencepiece` vocabulary was used to convert from text to ids. This vocabulary file is loaded and used in this helper functions.
`natural_language_texts` has the text from the examples we gave you.
Run the cells below to see what is going on.
```
# Special tokens
PAD, EOS, UNK = 0, 1, 2
def detokenize(np_array):
return trax.data.detokenize(
np_array,
vocab_type='sentencepiece',
vocab_file='sentencepiece.model',
vocab_dir='.')
def tokenize(s):
# The trax.data.tokenize function operates on streams,
# that's why we have to create 1-element stream with iter
# and later retrieve the result with next.
return next(trax.data.tokenize(
iter([s]),
vocab_type='sentencepiece',
vocab_file='sentencepiece.model',
vocab_dir='.'))
# printing the encoding of each word to see how subwords are tokenized
tokenized_text = [(tokenize(word).tolist(), word) for word in natural_language_texts[0].split()]
print(tokenized_text, '\n')
# We can see that detokenize successfully undoes the tokenization
print(f"tokenized: {tokenize('Beginners')}\ndetokenized: {detokenize(tokenize('Beginners'))}")
```
As you can see above, you were able to take a piece of string and tokenize it.
Now you will create `input` and `target` pairs that will allow you to train your model. T5 uses the ids at the end of the vocab file as sentinels. For example, it will replace:
- `vocab_size - 1` by `<Z>`
- `vocab_size - 2` by `<Y>`
- and so forth.
Each sentinel is assigned a character so that the decoded text is easier to read.
The `pretty_decode` function below, which you will use in a bit, helps in handling the type when decoding. Take a look and try to understand what the function is doing.
Notice that:
```python
string.ascii_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
```
**NOTE:** Targets may contain more sentinels than the 52 we replace here, but this is just to give you an idea of how it works.
```
vocab_size = trax.data.vocab_size(
vocab_type='sentencepiece',
vocab_file='sentencepiece.model',
vocab_dir='.')
def get_sentinels(vocab_size=vocab_size, display=False):
sentinels = {}
for i, char in enumerate(reversed(string.ascii_letters), 1):
decoded_text = detokenize([vocab_size - i])
# Sentinels, ex: <Z> - <a>
sentinels[decoded_text] = f'<{char}>'
if display:
print(f'The sentinel is <{char}> and the decoded token is:', decoded_text)
return sentinels
sentinels = get_sentinels(vocab_size, display=True)
def pretty_decode(encoded_str_list, sentinels=sentinels):
# If already a string, just do the replacements.
if isinstance(encoded_str_list, (str, bytes)):
for token, char in sentinels.items():
encoded_str_list = encoded_str_list.replace(token, char)
return encoded_str_list
# We need to decode and then prettyfy it.
return pretty_decode(detokenize(encoded_str_list))
pretty_decode("I want to dress up as an Intellectual this halloween.")
```
The functions above make your `inputs` and `targets` more readable. For example, you might see something like this once you implement the masking function below.
- <span style="color:red"> Input sentence: </span> Younes and Lukasz were working together in the lab yesterday after lunch.
- <span style="color:red">Input: </span> Younes and Lukasz **Z** together in the **Y** yesterday after lunch.
- <span style="color:red">Target: </span> **Z** were working **Y** lab.
<a name='1.3'></a>
### 1.3 Tokenizing and Masking
You will now implement the `tokenize_and_mask` function. This function will allow you to tokenize and mask input words with a noise probability. We usually mask 15% of the words.
<a name='ex01'></a>
### Exercise 01
```
# UNQ_C1
# GRADED FUNCTION: tokenize_and_mask
def tokenize_and_mask(text, vocab_size=vocab_size, noise=0.15,
randomizer=np.random.uniform, tokenize=tokenize):
"""Tokenizes and masks a given input.
Args:
text (str or bytes): Text input.
vocab_size (int, optional): Size of the vocabulary. Defaults to vocab_size.
noise (float, optional): Probability of masking a token. Defaults to 0.15.
randomizer (function, optional): Function that generates random values. Defaults to np.random.uniform.
tokenize (function, optional): Tokenizer function. Defaults to tokenize.
Returns:
tuple: Tuple of lists of integers associated to inputs and targets.
"""
# current sentinel number (starts at 0)
cur_sentinel_num = 0
# inputs
inps = []
# targets
targs = []
### START CODE HERE (REPLACE INSTANCES OF 'None' WITH YOUR CODE) ###
# prev_no_mask is True if the previous token was NOT masked, False otherwise
# set prev_no_mask to True
prev_no_mask = None
# loop through tokenized `text`
for token in tokenize(text):
# check if the `noise` is greater than a random value (weighted coin flip)
if randomizer() < noise:
# check to see if the previous token was not masked
if prev_no_mask==True: # add new masked token at end_id
# number of masked tokens increases by 1
cur_sentinel_num += None
# compute `end_id` by subtracting current sentinel value out of the total vocabulary size
end_id = None - None
# append `end_id` at the end of the targets
targs.append(None)
# append `end_id` at the end of the inputs
inps.append(None)
# append `token` at the end of the targets
targs.append(None)
# set prev_no_mask accordingly
prev_no_mask = None
else: # don't have two masked tokens in a row
# append `token ` at the end of the inputs
inps.append(None)
# set prev_no_mask accordingly
prev_no_mask = None
### END CODE HERE ###
return inps, targs
# Some logic to mock a np.random value generator
# Needs to be in the same cell for it to always generate same output
def testing_rnd():
def dummy_generator():
vals = np.linspace(0, 1, 10)
cyclic_vals = itertools.cycle(vals)
for _ in range(100):
yield next(cyclic_vals)
dumr = itertools.cycle(dummy_generator())
def dummy_randomizer():
return next(dumr)
return dummy_randomizer
input_str = natural_language_texts[0]
print(f"input string:\n\n{input_str}\n")
inps, targs = tokenize_and_mask(input_str, randomizer=testing_rnd())
print(f"tokenized inputs:\n\n{inps}\n")
print(f"targets:\n\n{targs}")
```
#### **Expected Output:**
```CPP
b'Beginners BBQ Class Taking Place in Missoula!\nDo you want to get better at making delicious BBQ? You will have the opportunity, put this on your calendar now. Thursday, September 22nd join World Class BBQ Champion, Tony Balay from Lonestar Smoke Rangers. He will be teaching a beginner level class for everyone who wants to get better with their culinary skills.\nHe will teach you everything you need to know to compete in a KCBS BBQ competition, including techniques, recipes, timelines, meat selection and trimming, plus smoker and fire information.\nThe cost to be in the class is $35 per person, and for spectators it is free. Included in the cost will be either a t-shirt or apron and you will be tasting samples of each meat that is prepared.'
tokenized inputs:
[31999, 15068, 4501, 3, 12297, 3399, 16, 5964, 7115, 31998, 531, 25, 241, 12, 129, 394, 44, 492, 31997, 58, 148, 56, 43, 8, 1004, 6, 474, 31996, 39, 4793, 230, 5, 2721, 6, 1600, 1630, 31995, 1150, 4501, 15068, 16127, 6, 9137, 2659, 5595, 31994, 782, 3624, 14627, 15, 12612, 277, 5, 216, 31993, 2119, 3, 9, 19529, 593, 853, 21, 921, 31992, 12, 129, 394, 28, 70, 17712, 1098, 5, 31991, 3884, 25, 762, 25, 174, 12, 214, 12, 31990, 3, 9, 3, 23405, 4547, 15068, 2259, 6, 31989, 6, 5459, 6, 13618, 7, 6, 3604, 1801, 31988, 6, 303, 24190, 11, 1472, 251, 5, 37, 31987, 36, 16, 8, 853, 19, 25264, 399, 568, 31986, 21, 21380, 7, 34, 19, 339, 5, 15746, 31985, 8, 583, 56, 36, 893, 3, 9, 3, 31984, 9486, 42, 3, 9, 1409, 29, 11, 25, 31983, 12246, 5977, 13, 284, 3604, 24, 19, 2657, 31982]
targets:
[31999, 12847, 277, 31998, 9, 55, 31997, 3326, 15068, 31996, 48, 30, 31995, 727, 1715, 31994, 45, 301, 31993, 56, 36, 31992, 113, 2746, 31991, 216, 56, 31990, 5978, 16, 31989, 379, 2097, 31988, 11, 27856, 31987, 583, 12, 31986, 6, 11, 31985, 26, 16, 31984, 17, 18, 31983, 56, 36, 31982, 5]
```
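If you want to check your reasoning against the expected output above, one possible completion of the masking logic is sketched below as a standalone reference function (only a sketch of the intended logic; the graded cell above still needs to be filled in on its own):
```
# Reference sketch (not the graded cell): mask tokens with probability `noise`,
# starting a new sentinel id (counting down from vocab_size - 1) whenever a new
# masked span begins, as described in the instructions above.
import numpy as np

def tokenize_and_mask_sketch(text, vocab_size, tokenize, noise=0.15,
                             randomizer=np.random.uniform):
    cur_sentinel_num = 0          # number of sentinels used so far
    prev_no_mask = True           # True if the previous token was NOT masked
    inps, targs = [], []
    for token in tokenize(text):
        if randomizer() < noise:          # mask this token
            if prev_no_mask:              # start of a new masked span
                cur_sentinel_num += 1
                end_id = vocab_size - cur_sentinel_num
                targs.append(end_id)      # sentinel goes to both streams
                inps.append(end_id)
            targs.append(token)           # the masked token only goes to the targets
            prev_no_mask = False
        else:
            inps.append(token)            # unmasked tokens stay in the inputs
            prev_no_mask = True
    return inps, targs
```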
You will now use the inputs and the targets from the `tokenize_and_mask` function you implemented above. Take a look at the masked sentence using your `inps` and `targs` from the sentence above.
```
print('Inputs: \n\n', pretty_decode(inps))
print('\nTargets: \n\n', pretty_decode(targs))
```
<a name='1.4'></a>
### 1.4 Creating the Pairs
You will now create pairs using your dataset. You will iterate over your data and create (inp, targ) pairs using the functions that we have given you.
```
# Apply tokenize_and_mask
inputs_targets_pairs = [tokenize_and_mask(text) for text in natural_language_texts]
def display_input_target_pairs(inputs_targets_pairs):
for i, inp_tgt_pair in enumerate(inputs_targets_pairs, 1):
inps, tgts = inp_tgt_pair
inps, tgts = pretty_decode(inps), pretty_decode(tgts)
print(f'[{i}]\n\n'
f'inputs:\n{wrapper.fill(text=inps)}\n\n'
f'targets:\n{wrapper.fill(text=tgts)}\n\n\n\n')
display_input_target_pairs(inputs_targets_pairs)
```
<a name='2'></a>
# Part 2: Transformer
We now load a Transformer model checkpoint that has been pre-trained using the above C4 dataset and decode from it. This will save you a lot of time compared with training the model yourself. Later in this notebook, we will show you how to fine-tune your model.
<img src = "fulltransformer.png" width="300" height="600">
Start by loading in the model. We copy the checkpoint to a local directory for speed; otherwise initialization takes a very long time. Last week you implemented the decoder part of the transformer. Now you will implement the encoder part. Concretely, you will implement the following.
<img src = "encoder.png" width="300" height="600">
<a name='2.1'></a>
### 2.1 Transformer Encoder
You will now implement the transformer encoder. Concretely you will implement two functions. The first function is `FeedForwardBlock`.
<a name='2.1.1'></a>
#### 2.1.1 The Feedforward Block
The `FeedForwardBlock` function is an important one so you will start by implementing it. To do so, you need to return a list of the following:
- [`tl.LayerNorm()`](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.normalization.LayerNorm) = layer normalization.
- [`tl.Dense(d_ff)`](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.core.Dense) = fully connected layer.
- [`activation`](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.activation_fns.Relu) = an activation function such as ReLU, tanh, or sigmoid.
- `dropout_middle` = we gave you this function (don't worry about its implementation).
- [`tl.Dense(d_model)`](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.core.Dense) = fully connected layer with same dimension as the model.
- `dropout_final` = we gave you this function (don't worry about its implementation).
You can always take a look at [trax documentation](https://trax-ml.readthedocs.io/en/latest/) if needed.
**Instructions**: Implement the feedforward part of the transformer. You will be returning a list.
<a name='ex02'></a>
### Exercise 02
```
# UNQ_C2
# GRADED FUNCTION: FeedForwardBlock
def FeedForwardBlock(d_model, d_ff, dropout, dropout_shared_axes, mode, activation):
"""Returns a list of layers implementing a feed-forward block.
Args:
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
dropout: float: dropout rate (how much to drop out)
dropout_shared_axes: list of integers, axes to share dropout mask
mode: str: 'train' or 'eval'
activation: the non-linearity in feed-forward layer
Returns:
A list of layers which maps vectors to vectors.
"""
dropout_middle = tl.Dropout(rate=dropout,
shared_axes=dropout_shared_axes,
mode=mode)
dropout_final = tl.Dropout(rate=dropout,
shared_axes=dropout_shared_axes,
mode=mode)
### START CODE HERE (REPLACE INSTANCES OF 'None' WITH YOUR CODE) ###
ff_block = [
# trax Layer normalization
None,
# trax Dense layer using `d_ff`
None,
# activation() layer - you need to call (use parentheses) this func!
None,
# dropout middle layer
None,
# trax Dense layer using `d_model`
None,
# dropout final layer
None,
]
### END CODE HERE ###
return ff_block
# Print the block layout
feed_forward_example = FeedForwardBlock(d_model=512, d_ff=2048, dropout=0.8, dropout_shared_axes=0, mode = 'train', activation = tl.Relu)
print(feed_forward_example)
```
#### **Expected Output:**
```CPP
[LayerNorm, Dense_2048, Relu, Dropout, Dense_512, Dropout]
```
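For comparison with the expected layout above, one completion of the feedforward block is sketched here as a separate reference function (a sketch only, mirroring the graded `FeedForwardBlock`; the graded cell should still be completed in place):
```
# Reference sketch mirroring the graded FeedForwardBlock (not the graded cell itself).
from trax import layers as tl

def feed_forward_block_sketch(d_model, d_ff, dropout, dropout_shared_axes, mode, activation):
    dropout_middle = tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
    dropout_final = tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
    return [
        tl.LayerNorm(),      # normalize the incoming activations
        tl.Dense(d_ff),      # expand to the feed-forward width
        activation(),        # instantiate the activation layer (note the parentheses)
        dropout_middle,      # dropout between the two dense layers
        tl.Dense(d_model),   # project back to the model width
        dropout_final,       # final dropout
    ]
```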
<a name='2.1.2'></a>
#### 2.1.2 The Encoder Block
The encoder block will use the `FeedForwardBlock`.
You will have to build two residual connections. Inside the first residual connection you will have the `tl.LayerNorm()`, `attention`, and `dropout_` layers. The second residual connection will have the `feed_forward`.
You will also need to implement `feed_forward`, `attention` and `dropout_` blocks.
So far you haven't seen the [`tl.Attention()`](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.attention.Attention) and [`tl.Residual()`](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.combinators.Residual) layers so you can check the docs by clicking on them.
<a name='ex03'></a>
### Exercise 03
```
# UNQ_C3
# GRADED FUNCTION: EncoderBlock
def EncoderBlock(d_model, d_ff, n_heads, dropout, dropout_shared_axes,
mode, ff_activation, FeedForwardBlock=FeedForwardBlock):
"""
Returns a list of layers that implements a Transformer encoder block.
The input to the layer is a pair, (activations, mask), where the mask was
created from the original source tokens to prevent attending to the padding
part of the input.
Args:
d_model (int): depth of embedding.
d_ff (int): depth of feed-forward layer.
n_heads (int): number of attention heads.
dropout (float): dropout rate (how much to drop out).
dropout_shared_axes (int): axes on which to share dropout mask.
mode (str): 'train' or 'eval'.
ff_activation (function): the non-linearity in feed-forward layer.
FeedForwardBlock (function): A function that returns the feed forward block.
Returns:
list: A list of layers that maps (activations, mask) to (activations, mask).
"""
### START CODE HERE (REPLACE INSTANCES OF 'None' WITH YOUR CODE) ###
# Attention block
attention = tl.Attention(
# Use dimension of the model
d_feature=None,
# Set it equal to number of attention heads
n_heads=None,
# Set it equal `dropout`
dropout=None,
# Set it equal `mode`
mode=None
)
# Call the function `FeedForwardBlock` (implemented before) and pass in the parameters
feed_forward = FeedForwardBlock(
None,
None,
None,
None,
None,
None
)
# Dropout block
dropout_ = tl.Dropout(
# set it equal to `dropout`
rate=None,
# set it equal to the axes on which to share dropout mask
shared_axes=None,
# set it equal to `mode`
mode=None
)
encoder_block = [
# add `Residual` layer
tl.Residual(
# add norm layer
None,
# add attention
None,
# add dropout
None,
),
# add another `Residual` layer
tl.Residual(
# add feed forward
None,
),
]
### END CODE HERE ###
return encoder_block
# Print the block layout
encoder_example = EncoderBlock(d_model=512, d_ff=2048, n_heads=6, dropout=0.8, dropout_shared_axes=0, mode = 'train', ff_activation=tl.Relu)
print(encoder_example)
```
#### **Expected Output:**
```CPP
[Serial_in2_out2[
Branch_in2_out3[
None
Serial_in2_out2[
LayerNorm
Serial_in2_out2[
Dup_out2
Dup_out2
Serial_in4_out2[
Parallel_in3_out3[
Dense_512
Dense_512
Dense_512
]
PureAttention_in4_out2
Dense_512
]
]
Dropout
]
]
Add_in2
], Serial[
Branch_out2[
None
Serial[
LayerNorm
Dense_2048
Relu
Dropout
Dense_512
Dropout
]
]
Add_in2
]]
```
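One way to fill in the blanks that is consistent with the expected layout above is sketched below as a separate reference function. It reuses the `FeedForwardBlock` defined earlier and is only a sketch, not the graded cell itself:
```
# Reference sketch mirroring the graded EncoderBlock (not the graded cell itself).
from trax import layers as tl

def encoder_block_sketch(d_model, d_ff, n_heads, dropout, dropout_shared_axes,
                         mode, ff_activation, FeedForwardBlock=FeedForwardBlock):
    attention = tl.Attention(d_feature=d_model, n_heads=n_heads,
                             dropout=dropout, mode=mode)
    feed_forward = FeedForwardBlock(d_model, d_ff, dropout,
                                    dropout_shared_axes, mode, ff_activation)
    dropout_ = tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
    return [
        tl.Residual(          # first residual: norm -> attention -> dropout
            tl.LayerNorm(),
            attention,
            dropout_,
        ),
        tl.Residual(          # second residual: the feed-forward block
            feed_forward,
        ),
    ]
```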
<a name='2.1.3'></a>
### 2.1.3 The Transformer Encoder
Now that you have implemented the `EncoderBlock`, it is time to build the full encoder. BERT, or Bidirectional Encoder Representations from Transformers, is one such encoder.
You will implement its core code in the function below by using the functions you have coded so far.
The model takes in many hyperparameters, such as the `vocab_size`, the number of classes, the dimension of your model, etc. You want to build a generic function that will take in many parameters, so you can use it later. At the end of the day, anyone can just load in an API and call transformer, but we think it is important to make sure you understand how it is built. Let's get started.
**Instructions:** For this encoder you will need a `positional_encoder` first (which is already provided) followed by `n_layers` encoder blocks, which are the same encoder blocks you previously built. Once you store the `n_layers` `EncoderBlock` in a list, you are going to encode a `Serial` layer with the following sublayers:
- [`tl.Branch`](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.combinators.Branch): helps with the branching and has the following sublayers:
- `positional_encoder`.
- [`tl.PaddingMask()`](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.attention.PaddingMask): layer that maps integer sequences to padding masks.
- Your list of `EncoderBlock`s
- [`tl.Select([0], n_in=2)`](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.combinators.Select): Copies, reorders, or deletes stack elements according to indices.
- [`tl.LayerNorm()`](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.normalization.LayerNorm).
- [`tl.Mean()`](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.core.Mean): Mean along the first axis.
- `tl.Dense()` with n_units set to n_classes.
- `tl.LogSoftmax()`
Please refer to the [trax documentation](https://trax-ml.readthedocs.io/en/latest/) for further information.
<a name='ex04'></a>
### Exercise 04
```
# UNQ_C4
# GRADED FUNCTION: TransformerEncoder
def TransformerEncoder(vocab_size=vocab_size,
n_classes=10,
d_model=512,
d_ff=2048,
n_layers=6,
n_heads=8,
dropout=0.1,
dropout_shared_axes=None,
max_len=2048,
mode='train',
ff_activation=tl.Relu,
EncoderBlock=EncoderBlock):
"""
Returns a Transformer encoder model.
The input to the model is a tensor of tokens.
Args:
vocab_size (int): vocab size. Defaults to vocab_size.
n_classes (int): how many classes on output. Defaults to 10.
d_model (int): depth of embedding. Defaults to 512.
d_ff (int): depth of feed-forward layer. Defaults to 2048.
n_layers (int): number of encoder/decoder layers. Defaults to 6.
n_heads (int): number of attention heads. Defaults to 8.
dropout (float): dropout rate (how much to drop out). Defaults to 0.1.
dropout_shared_axes (int): axes on which to share dropout mask. Defaults to None.
max_len (int): maximum symbol length for positional encoding. Defaults to 2048.
mode (str): 'train' or 'eval'. Defaults to 'train'.
ff_activation (function): the non-linearity in feed-forward layer. Defaults to tl.Relu.
EncoderBlock (function): Returns the encoder block. Defaults to EncoderBlock.
Returns:
trax.layers.combinators.Serial: A Transformer model as a layer that maps
from a tensor of tokens to activations over a set of output classes.
"""
positional_encoder = [
tl.Embedding(vocab_size, d_model),
tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode),
tl.PositionalEncoding(max_len=max_len)
]
### START CODE HERE (REPLACE INSTANCES OF 'None' WITH YOUR CODE) ###
# Use the function `EncoderBlock` (implemented above) and pass in the parameters over `n_layers`
encoder_blocks = [None for _ in range(None)]
# Assemble and return the model.
return tl.Serial(
# Encode
tl.Branch(
# Use `positional_encoder`
None,
# Use trax padding mask
None,
),
# Use `encoder_blocks`
None,
# Use select layer
None,
# Use trax layer normalization
None,
# Map to output categories.
# Use trax mean. set axis to 1
None,
# Use trax Dense using `n_classes`
None,
# Use trax log softmax
None,
)
### END CODE HERE ###
# Run this cell to see the structure of your model
# Only 1 layer is used to keep the output readable
TransformerEncoder(n_layers=1)
```
#### **Expected Output:**
```CPP
Serial[
Branch_out2[
[Embedding_32000_512, Dropout, PositionalEncoding]
PaddingMask(0)
]
Serial_in2_out2[
Branch_in2_out3[
None
Serial_in2_out2[
LayerNorm
Serial_in2_out2[
Dup_out2
Dup_out2
Serial_in4_out2[
Parallel_in3_out3[
Dense_512
Dense_512
Dense_512
]
PureAttention_in4_out2
Dense_512
]
]
Dropout
]
]
Add_in2
]
Serial[
Branch_out2[
None
Serial[
LayerNorm
Dense_2048
Relu
Dropout
Dense_512
Dropout
]
]
Add_in2
]
Select[0]_in2
LayerNorm
Mean
Dense_10
LogSoftmax
]
```
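A completion consistent with the model structure above is sketched here as a separate reference function, reusing the `EncoderBlock` defined earlier (only a sketch; the graded cell should still be filled in directly):
```
# Reference sketch mirroring the graded TransformerEncoder (not the graded cell itself).
from trax import layers as tl

def transformer_encoder_sketch(vocab_size, n_classes=10, d_model=512, d_ff=2048,
                               n_layers=6, n_heads=8, dropout=0.1,
                               dropout_shared_axes=None, max_len=2048, mode='train',
                               ff_activation=tl.Relu, EncoderBlock=EncoderBlock):
    positional_encoder = [
        tl.Embedding(vocab_size, d_model),
        tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode),
        tl.PositionalEncoding(max_len=max_len),
    ]
    encoder_blocks = [EncoderBlock(d_model, d_ff, n_heads, dropout,
                                   dropout_shared_axes, mode, ff_activation)
                      for _ in range(n_layers)]
    return tl.Serial(
        tl.Branch(
            positional_encoder,   # embeddings + positional encoding
            tl.PaddingMask(),     # mask derived from the padding tokens
        ),
        encoder_blocks,           # n_layers encoder blocks
        tl.Select([0], n_in=2),   # keep the activations, drop the mask
        tl.LayerNorm(),
        tl.Mean(axis=1),          # average over the sequence dimension
        tl.Dense(n_classes),
        tl.LogSoftmax(),
    )
```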
**NOTE: Congratulations! You have completed all of the graded functions of this assignment.** Since the rest of the assignment takes a lot of time and memory to run, we are providing some extra ungraded labs for you to see this model in action.
**Keep it up!**
To see this model in action, continue to the next two ungraded labs. **We strongly recommend trying the Colab versions of them, as they will yield a much smoother experience.** The links to the Colabs can be found within the ungraded labs; if you already know how to open files within Colab, here are some shortcuts (if not, head to the ungraded labs, which contain some extra instructions):
[BERT Loss Model Colab](https://drive.google.com/file/d/1EHAbMnW6u-GqYWh5r3Z8uLbz4KNpKOAv/view?usp=sharing)
[T5 SQuAD Model Colab](https://drive.google.com/file/d/1c-8KJkTySRGqCx_JjwjvXuRBTNTqEE0N/view?usp=sharing)
|
github_jupyter
|
```
#default_exp dataset_torch
```
# dataset_torch
> Module to load the slates dataset into a PyTorch Dataset and DataLoaders with default train/valid/test splits.
```
#export
import torch
import recsys_slates_dataset.data_helper as data_helper
from torch.utils.data import Dataset, DataLoader
import json
import numpy as np
import logging
logging.basicConfig(format='%(asctime)s %(message)s', level='INFO')
class SequentialDataset(Dataset):
''' A Pytorch Dataset for the FINN Recsys Slates Dataset.
Attributes:
data: [Dict] A dictionary with tensors of the dataset. First dimension in each tensor must be the batch dimension. Requires the keys "click" and "slate". Additional elements can be added.
sample_candidate_items: [int] Number of negative item examples sampled from the item universe for each interaction. If positive, the dataset provides an additional dictionary item "allitem". Often also called uniform candidate sampling. See Eide et al. 2021 for more information.
'''
def __init__(self, data, sample_candidate_items=0):
self.data = data
self.num_items = self.data['slate'].max()+1
self.sample_candidate_items = sample_candidate_items
self.mask2ind = {'train' : 1, 'valid' : 2, 'test' : 3}
logging.info(
"Loading dataset with slate size={} and number of negative samples={}"
.format(self.data['slate'].size(), self.sample_candidate_items))
# Performs some checks on the dataset to make sure it is valid:
assert "slate" in data.keys(), "Slate tensor is not in dataset. This is required."
assert "click" in data.keys(), "Click tensor is not in dataset. This is required."
assert all([val.size(0)==data['slate'].size(0) for key, val in data.items()]), "Not all data tensors have the same batch dimension"
def __getitem__(self, idx):
batch = {key: val[idx] for key, val in self.data.items()}
if self.sample_candidate_items:
# Sample actions uniformly (3 is the first non-special item)
batch['allitem'] = torch.randint(
size=(batch['click'].size(0), self.sample_candidate_items),
low=3, high=self.num_items, device = batch['click'].device
)
return batch
def __len__(self):
return len(self.data['click'])
#export
def load_dataloaders(data_dir= "dat",
batch_size=1024,
num_workers= 0,
sample_candidate_items=False,
valid_pct= 0.05,
test_pct= 0.05,
t_testsplit= 5,
limit_num_users=None,
seed=0):
"""
Loads PyTorch dataloaders to be used in training. If used with standard settings, the train/valid/test split is equivalent to Eide et al. 2021.
Attributes:
data_dir: [str] Where to download and store data if not already downloaded.
batch_size: [int] Batch size given by dataloaders.
num_workers: [int] How many threads should be used to prepare batches of data.
sample_candidate_items: [int] Number of negative item examples sampled from the item universe for each interaction. If positive, the dataset provides an additional dictionary item "allitem". Often also called uniform candidate sampling. See Eide et al. 2021 for more information.
valid_pct: [float] Percentage of users allocated to validation dataset.
test_pct: [float] Percentage of users allocated to test dataset.
t_testsplit: [int] For users allocated to validation and test datasets, how many initial interactions should be part of the training dataset.
limit_num_users: [int] For debugging purposes, only return some users.
seed: [int] Seed used to sample users/items.
"""
logging.info("Download data if not in data folder..")
data_helper.download_data_files(data_dir=data_dir)
logging.info('Load data..')
with np.load("{}/data.npz".format(data_dir)) as data_np:
data = {key: torch.tensor(val) for key, val in data_np.items()}
if limit_num_users is not None:
logging.info("Limiting dataset to only return the first {} users.".format(limit_num_users))
data = {key : val[:limit_num_users] for key, val in data.items()}
with open('{}/ind2val.json'.format(data_dir), 'rb') as handle:
# Use string2int object_hook found here: https://stackoverflow.com/a/54112705
ind2val = json.load(
handle,
object_hook=lambda d: {
int(k) if k.lstrip('-').isdigit() else k: v
for k, v in d.items()
}
)
num_users = len(data['click'])
num_validusers = int(num_users * valid_pct)
num_testusers = int(num_users * test_pct)
torch.manual_seed(seed)
perm_user = torch.randperm(num_users)
valid_user_idx = perm_user[:num_validusers]
test_user_idx = perm_user[num_validusers:(num_validusers+num_testusers)]
train_user_idx = perm_user[(num_validusers+num_testusers):]
# Split dictionary into train/valid/test with a phase mask that shows which interactions are in different sets
# (as some users have both train and valid data)
data_train = data
data_train['phase_mask'] = torch.ones_like(data['click']).bool()
data_train['phase_mask'][test_user_idx,t_testsplit:]=False
data_train['phase_mask'][valid_user_idx,t_testsplit:]=False
data_valid = {key: val[valid_user_idx] for key, val in data.items()}
data_valid['phase_mask'] = torch.zeros_like(data_valid['click']).bool()
data_valid['phase_mask'][:,t_testsplit:] = True
data_test = {key: val[test_user_idx] for key, val in data.items()}
data_test['phase_mask'] = torch.zeros_like(data_test['click']).bool()
data_test['phase_mask'][:,t_testsplit:] = True
data_dicts = {
"train" : data_train,
"valid" : data_valid,
"test" : data_test}
datasets = {
phase : SequentialDataset(data, sample_candidate_items)
for phase, data in data_dicts.items()
}
# Build dataloaders for each data subset:
dataloaders = {
phase: DataLoader(ds, batch_size=batch_size, shuffle=(phase=="train"), num_workers=num_workers)
for phase, ds in datasets.items()
}
for key, dl in dataloaders.items():
logging.info(
"In {}: num_users: {}, num_batches: {}".format(key, len(dl.dataset), len(dl))
)
# Load item attributes:
with np.load('{}/itemattr.npz'.format(data_dir), mmap_mode=None) as itemattr_file:
itemattr = {key : val for key, val in itemattr_file.items()}
return ind2val, itemattr, dataloaders
#slow
ind2val, itemattr, dataloaders = load_dataloaders()
```
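As a quick sanity check of what the loaders return, here is a minimal consumption sketch. It uses only the `click`, `slate` and `phase_mask` keys described above; exact tensor shapes depend on the dataset.
```
#slow
# Minimal sketch: inspect one training batch from the dataloaders created above.
batch = next(iter(dataloaders['train']))
clicks = batch['click']        # clicked item ids per interaction
slates = batch['slate']        # items presented in each slate
mask = batch['phase_mask']     # True where the interaction belongs to this phase
train_clicks = clicks[mask]    # e.g. restrict a loss computation to the training phase
print(clicks.shape, slates.shape, mask.shape)
```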
|
github_jupyter
|
# Collaborative filtering on the MovieLense Dataset
###### This notebook is based on part of Chapter 9 of [BigQuery: The Definitive Guide](https://www.oreilly.com/library/view/google-bigquery-the/9781492044451/ "http://shop.oreilly.com/product/0636920207399.do") by Lakshmanan and Tigani.
### MovieLens dataset
To illustrate recommender systems in action, let’s use the MovieLens dataset. This is a dataset of movie reviews released by GroupLens, a research lab in the Department of Computer Science and Engineering at the University of Minnesota, through funding by the US National Science Foundation.
Download the data and load it as a BigQuery table using:
```
import os
PROJECT = "your-project-here" # REPLACE WITH YOUR PROJECT ID
BUCKET = "your-bucket-here" # REPLACE WITH YOUR BUCKET NAME
REGION = "us-central1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# Do not change these
os.environ["PROJECT"] = PROJECT
os.environ["BUCKET"] = BUCKET
os.environ["REGION"] = REGION
os.environ["TFVERSION"] = "1.14"
%%bash
rm -r bqml_data
mkdir bqml_data
cd bqml_data
curl -O 'http://files.grouplens.org/datasets/movielens/ml-20m.zip'
unzip ml-20m.zip
bq --location=US mk --dataset \
--description 'Movie Recommendations' \
$PROJECT:movielens
bq --location=US load --source_format=CSV \
--autodetect movielens.ratings ml-20m/ratings.csv
bq --location=US load --source_format=CSV \
--autodetect movielens.movies_raw ml-20m/movies.csv
```
## Exploring the data
Two tables should now be available in <a href="https://console.cloud.google.com/bigquery">BigQuery</a>.
Collaborative filtering provides a way to generate product recommendations for users, or user targeting for products. The starting point is a table, <b>movielens.ratings</b>, with three columns: a user id, an item id, and the rating that the user gave the product. This table can be sparse -- users don’t have to rate all products. Then, based on just the ratings, the technique finds similar users and similar products and determines the rating that a user would give an unseen product. Then, we can recommend the products with the highest predicted ratings to users, or target products at users with the highest predicted ratings.
```
%%bigquery --project $PROJECT
SELECT *
FROM movielens.ratings
LIMIT 10
```
A quick exploratory query yields that the dataset consists of over 138 thousand users, nearly 27 thousand movies, and a little more than 20 million ratings, confirming that the data has been loaded successfully.
```
%%bigquery --project $PROJECT
SELECT
COUNT(DISTINCT userId) numUsers,
COUNT(DISTINCT movieId) numMovies,
COUNT(*) totalRatings
FROM movielens.ratings
```
On examining the first few movies using the following query, we can see that the genres column is a formatted string:
```
%%bigquery --project $PROJECT
SELECT *
FROM movielens.movies_raw
WHERE movieId < 5
```
We can parse the genres into an array and rewrite the table as follows:
```
%%bigquery --project $PROJECT
CREATE OR REPLACE TABLE movielens.movies AS
SELECT * REPLACE(SPLIT(genres, "|") AS genres)
FROM movielens.movies_raw
%%bigquery --project $PROJECT
SELECT *
FROM movielens.movies
WHERE movieId < 5
```
## Matrix factorization
Matrix factorization is a collaborative filtering technique that relies on factorizing the ratings matrix into two sets of vectors called the user factors and the item factors. A user's factor vector is a low-dimensional representation of its user_id, and an item's factor vector similarly represents its item_id.
We can create the recommender model using (<b>Optional</b>, takes 30 minutes. Note: we have a model we already trained if you want to skip this step):
```
%%bigquery --project $PROJECT
CREATE OR REPLACE MODEL movielens.recommender
options(model_type='matrix_factorization',
user_col='userId', item_col='movieId', rating_col='rating')
AS
SELECT
userId, movieId, rating
FROM movielens.ratings
%%bigquery --project $PROJECT
SELECT *
-- Note: remove cloud-training-demos if you are using your own model:
FROM ML.TRAINING_INFO(MODEL `cloud-training-demos.movielens.recommender`)
```
Note that we create a model as usual, except that the model_type is matrix_factorization and that we have to identify which columns play what roles in the collaborative filtering setup.
What did you get? Our model took an hour to train, and the training loss starts out extremely bad and gets driven down to near zero over the next four iterations:
<table>
<tr>
<th>Iteration</th>
<th>Training Data Loss</th>
<th>Evaluation Data Loss</th>
<th>Duration (seconds)</th>
</tr>
<tr>
<td>4</td>
<td>0.5734</td>
<td>172.4057</td>
<td>180.99</td>
</tr>
<tr>
<td>3</td>
<td>0.5826</td>
<td>187.2103</td>
<td>1,040.06</td>
</tr>
<tr>
<td>2</td>
<td>0.6531</td>
<td>4,758.2944</td>
<td>219.46</td>
</tr>
<tr>
<td>1</td>
<td>1.9776</td>
<td>6,297.2573</td>
<td>1,093.76</td>
</tr>
<tr>
<td>0</td>
<td>63,287,833,220.5795</td>
<td>168,995,333.0464</td>
<td>1,091.21</td>
</tr>
</table>
However, the evaluation data loss is quite high, and much higher than the training data loss. This indicates that overfitting is happening, and so we need to add some regularization. Let’s do that next. Note the added l2_reg=0.2 (<b>Optional</b>, takes 30 minutes):
```
%%bigquery --project $PROJECT
CREATE OR REPLACE MODEL movielens.recommender_l2
options(model_type='matrix_factorization',
user_col='userId', item_col='movieId',
rating_col='rating', l2_reg=0.2)
AS
SELECT
userId, movieId, rating
FROM movielens.ratings
%%bigquery --project $PROJECT
SELECT *
-- Note: remove cloud-training-demos if you are using your own model:
FROM ML.TRAINING_INFO(MODEL `cloud-training-demos.movielens.recommender_l2`)
```
Now, we get faster convergence (three iterations instead of five), and a lot less overfitting. Here are our results:
<table>
<tr>
<th>Iteration</th>
<th>Training Data Loss</th>
<th>Evaluation Data Loss</th>
<th>Duration (seconds)</th>
</tr>
<tr>
<td>2</td>
<td>0.6509</td>
<td>1.4596</td>
<td>198.17</td>
</tr>
<tr>
<td>1</td>
<td>1.9829</td>
<td>33,814.3017</td>
<td>1,066.06</td>
</tr>
<tr>
<td>0</td>
<td>481,434,346,060.7928</td>
<td>2,156,993,687.7928</td>
<td>1,024.59</td>
</tr>
</table>
By default, BigQuery sets the number of factors to be the log2 of the number of rows. In our case, since we have 20 million rows in the table, the number of factors would have been chosen to be 24. As with the number of clusters in K-Means clustering, this is a reasonable default but it is often worth experimenting with a number about 50% higher (36) and a number that is about a third lower (16):
```
%%bigquery --project $PROJECT
CREATE OR REPLACE MODEL movielens.recommender_16
options(model_type='matrix_factorization',
user_col='userId', item_col='movieId',
rating_col='rating', l2_reg=0.2, num_factors=16)
AS
SELECT
userId, movieId, rating
FROM movielens.ratings
%%bigquery --project $PROJECT
SELECT *
-- Note: remove cloud-training-demos if you are using your own model:
FROM ML.TRAINING_INFO(MODEL `cloud-training-demos.movielens.recommender_16`)
```
When we did that, we discovered that the evaluation loss was lower (0.97) with num_factors=16 than with num_factors=36 (1.67) or num_factors=24 (1.45). We could continue experimenting, but we are likely to see diminishing returns with further experimentation. So, let’s pick this as the final matrix factorization model and move on.
## Making recommendations
With the trained model, we can now provide recommendations. For example, let’s find the best comedy movies to recommend to the user whose userId is 903. In the query below, we are calling ML.PREDICT passing in the trained recommendation model and providing a set of movieId and userId to carry out the predictions on. In this case, it’s just one userId (903), but all movies whose genre includes Comedy.
```
%%bigquery --project $PROJECT
SELECT * FROM
ML.PREDICT(MODEL `cloud-training-demos.movielens.recommender_16`, (
SELECT
movieId, title, 903 AS userId
FROM movielens.movies, UNNEST(genres) g
WHERE g = 'Comedy'
))
ORDER BY predicted_rating DESC
LIMIT 5
```
## Filtering out already rated movies
Of course, this includes movies the user has already seen and rated in the past. Let’s remove them:
```
%%bigquery --project $PROJECT
SELECT * FROM
ML.PREDICT(MODEL `cloud-training-demos.movielens.recommender_16`, (
WITH seen AS (
SELECT ARRAY_AGG(movieId) AS movies
FROM movielens.ratings
WHERE userId = 903
)
SELECT
movieId, title, 903 AS userId
FROM movielens.movies, UNNEST(genres) g, seen
WHERE g = 'Comedy' AND movieId NOT IN UNNEST(seen.movies)
))
ORDER BY predicted_rating DESC
LIMIT 5
```
For this user, this happens to yield the same set of movies -- the top predicted ratings didn’t include any of the movies the user has already seen.
## Customer targeting
In the previous section, we looked at how to identify the top-rated movies for a specific user. Sometimes, we have a product and have to find the customers who are likely to appreciate it. Suppose, for example, we wish to get more reviews for movieId=96481 which has only one rating and we wish to send coupons to the 5 users who are likely to rate it the highest. We can identify those users using:
```
%%bigquery --project $PROJECT
SELECT * FROM
ML.PREDICT(MODEL `cloud-training-demos.movielens.recommender_16`, (
WITH allUsers AS (
SELECT DISTINCT userId
FROM movielens.ratings
)
SELECT
96481 AS movieId,
(SELECT title FROM movielens.movies WHERE movieId=96481) title,
userId
FROM
allUsers
))
ORDER BY predicted_rating DESC
LIMIT 5
```
### Batch predictions for all users and movies
What if we wish to carry out predictions for every user and movie combination? Instead of having to pull distinct users and movies as in the previous query, a convenience function is provided to carry out batch predictions for all movieId and userId encountered during training. A limit is applied here; otherwise, all user-movie predictions would be returned and would crash the notebook.
```
%%bigquery --project $PROJECT
SELECT *
FROM ML.RECOMMEND(MODEL `cloud-training-demos.movielens.recommender_16`)
LIMIT 10
```
As seen in a section above, it is possible to filter out movies the user has already seen and rated in the past. The reason already seen movies aren’t filtered out by default is that there are situations (think of restaurant recommendations, for example) where it is perfectly expected that we would need to recommend restaurants the user has liked in the past.
Copyright 2019 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
|
github_jupyter
|
```
import neuroglancer
# Use this in IPython to allow external viewing
# neuroglancer.set_server_bind_address(bind_address='192.168.158.128',
# bind_port=80)
from nuggt.utils import ngutils
viewer = neuroglancer.Viewer()
viewer
import numpy as np
import zarr
import os
# working_dir = '/media/jswaney/Drive/Justin/coregistration/whole_brain_tde'
working_dir = '/home/jswaney/coregistration'
fixed_path = 'fixed/zarr_stack/8_8_8'
moving_path = 'moving/zarr_stack/8_8_8'
fixed_store = zarr.NestedDirectoryStore(os.path.join(working_dir, fixed_path))
moving_store = zarr.NestedDirectoryStore(os.path.join(working_dir, moving_path))
fixed_img = zarr.open(fixed_store, mode='r')
moving_img = zarr.open(moving_store, mode='r')
print(fixed_img.shape)
print(moving_img.shape)
normalization = 2000
def plot_image(img, viewer, layer, shader):
with viewer.txn() as txn:
source = neuroglancer.LocalVolume(img.astype(np.float32))
txn.layers[layer] = neuroglancer.ImageLayer(source=source, shader=shader)
def plot_fixed(fixed_img, viewer):
fixed_shader = ngutils.red_shader % (1 / normalization)
plot_image(fixed_img, viewer, 'fixed', fixed_shader)
def plot_moving(moving_img, viewer):
moving_shader = ngutils.green_shader % (1 / normalization)
plot_image(moving_img, viewer, 'moving', moving_shader)
def plot_both(fixed_img, moving_img, viewer):
plot_fixed(fixed_img, viewer)
plot_moving(moving_img, viewer)
plot_both(fixed_img, moving_img, viewer)
```
# Downsampling Zarr
```
from phathom.io.zarr import downsample_zarr
factors = (8, 8, 8)
output_path = os.path.join(working_dir, 'fixed/zarr_down8')
nb_workers = 1
downsample_zarr(fixed_img, factors, output_path, nb_workers)
factors = (8, 8, 8)
output_path = os.path.join(working_dir, 'moving/zarr_down8')
nb_workers = 1
downsample_zarr(moving_img, factors, output_path, nb_workers)
```
# Downsampling ndarray
```
from skimage.measure import block_reduce
factors = (16, 16, 16)
def downsample_mean(img, factors):
return block_reduce(img, factors, np.mean, 0)
def downsample_max(img, factors):
return block_reduce(img, factors, np.max, 0)
fixed_downsample = downsample_mean(fixed_img, factors)
moving_downsample = downsample_mean(moving_img, factors)
plot_both(fixed_downsample, moving_downsample, viewer)
```
# Gaussian smoothing
```
from skimage.filters import gaussian
sigma = 1.0
fixed_smooth = gaussian(fixed_downsample, sigma, preserve_range=True)
moving_smooth = gaussian(moving_downsample, sigma, preserve_range=True)
plot_both(fixed_smooth, moving_smooth, viewer)
```
# Destriping
```
import pystripe
import multiprocessing
import tqdm
bandwidth = [64, 64]
wavelet = 'db4'
def _filter_streaks(img):
return pystripe.filter_streaks(img, sigma=bandwidth, wavelet=wavelet)
with multiprocessing.Pool(12) as pool:
rf = list(tqdm.tqdm(pool.imap(_filter_streaks, fixed_smooth), total=len(fixed_smooth)))
rm = list(tqdm.tqdm(pool.imap(_filter_streaks, moving_smooth), total=len(moving_smooth)))
fixed_destripe = np.array(rf).T
moving_destripe = np.array(rm).T
with multiprocessing.Pool(12) as pool:
rf = list(tqdm.tqdm(pool.imap(_filter_streaks, fixed_destripe), total=len(fixed_smooth)))
rm = list(tqdm.tqdm(pool.imap(_filter_streaks, moving_destripe), total=len(moving_smooth)))
fixed_destripe = np.array(rf).T
moving_destripe = np.array(rm).T
plot_both(fixed_destripe, moving_destripe, viewer)
```
# Rigid transformation
```
from phathom.registration import coarse, pcloud
from phathom import utils
from scipy.ndimage import map_coordinates
t = np.array([0, 0, 0])
thetas = np.array([np.pi/4, 0, 0])
def rigid_warp(img, t, thetas, center, output_shape):
r = pcloud.rotation_matrix(thetas)
idx = np.indices(output_shape)
pts = np.reshape(idx, (idx.shape[0], idx.size//idx.shape[0])).T
warped_pts = coarse.rigid_transformation(t, r, pts, center)
interp_values = map_coordinates(img, warped_pts.T)
transformed = np.reshape(interp_values, output_shape)
return transformed
transformed = rigid_warp(fixed_downsample,
t,
thetas,
np.zeros(3),
moving_downsample.shape)
plot_fixed(transformed, viewer)
from scipy.ndimage.measurements import center_of_mass
def center_mass(img):
return np.asarray(center_of_mass(img))
fixed_com = center_mass(fixed_downsample)
moving_com = center_mass(moving_downsample)
print(fixed_com)
print(moving_com)
```
# Optimization
```
def ncc(fixed, transformed, nonzero=False):
if nonzero:
idx = np.where(transformed)
a = fixed[idx]
b = transformed[idx]
else:
a = fixed
b = transformed
return np.sum((a-a.mean())*(b-b.mean())/((a.size-1)*a.std()*b.std()))
def ssd(fixed, transformed):
return np.mean((fixed-transformed)**2)
def registration_objective(x, fixed_img, moving_img, t):
transformed_img = rigid_warp(moving_img,
t=t,
thetas=x,
center=fixed_com,
output_shape=moving_img.shape)
    return ssd(fixed_img, transformed_img)  # compare the warped image against the fixed image (fixed_img was previously unused)
def callback(x, f, accept):
pass
from scipy.optimize import basinhopping
niter = 4
t_star = moving_com-fixed_com
bounds = [(0, np.pi/2) for _ in range(3)]
res = basinhopping(registration_objective,
x0=np.zeros(3),
niter=niter,
T=1.0,
stepsize=1.0,
interval=5,
minimizer_kwargs={
'method': 'L-BFGS-B',
# 'method': 'Nelder-Mead',
'args': (fixed_smooth,
moving_smooth,
t_star),
'bounds': bounds,
'tol': 0.01,
'options': {'disp': False}
},
disp=True)
theta_star = res.x
print(res)
registered = rigid_warp(fixed_smooth, t_star, theta_star, fixed_com, moving_destripe.shape)
plot_fixed(registered, viewer)
```
# Contour
```
import matplotlib.pyplot as plt
plt.hist(fixed_downsample.ravel(), bins=100)
plt.xlim([0, 1000])
plt.ylim([0, 100000])
plt.show()
plt.hist(moving_downsample.ravel(), bins=100)
plt.xlim([0, 1000])
plt.ylim([0, 100000])
plt.show()
threshold = 150
fixed_mask = fixed_downsample > threshold
moving_mask = moving_downsample > threshold
plot_both(1000*fixed_mask, 1000*moving_mask, viewer)
```
# Convex hull
```
from skimage.morphology import convex_hull_image
import tqdm
fixed_hull = np.zeros_like(fixed_mask)
for i, f in enumerate(tqdm.tqdm(fixed_mask)):
if not np.all(f == 0):
fixed_hull[i] = convex_hull_image(f)
moving_hull = np.zeros_like(moving_mask)
for i, m in enumerate(tqdm.tqdm(moving_mask)):
if not np.all(m == 0):
moving_hull[i] = convex_hull_image(m)
plot_both(1000*fixed_hull, 1000*moving_hull, viewer)
from scipy.ndimage.morphology import distance_transform_edt
fixed_distance = distance_transform_edt(fixed_mask)
moving_distance = distance_transform_edt(moving_mask)
plot_both(100*fixed_distance, 100*moving_distance, viewer)
niter = 3
from scipy.optimize import basinhopping
fixed_com = center_mass(fixed_mask)
moving_com = center_mass(moving_mask)
t0 = moving_com-fixed_com
bounds = [(-s, s) for s in moving_distance.shape]+[(-np.pi, np.pi) for _ in range(3)]
# bounds = [(-np.pi, np.pi) for _ in range(3)]
def absolute_difference(img1, img2):
return np.mean(np.abs(img1-img2))
def registration_objective(x, fixed_img, moving_img):
transformed_img = rigid_warp(moving_img,
t= x[:3],
thetas= x[3:],
center=fixed_com,
output_shape=fixed_img.shape)
return absolute_difference(fixed_img, transformed_img)
# return ssd(fixed_img, transformed_img)
def callback(x, f, accept):
print(x)
res = basinhopping(registration_objective,
x0=np.concatenate((t0, np.zeros(3))),
niter=niter,
T=0.5,
stepsize=0.5,
interval=5,
minimizer_kwargs={
'method': 'L-BFGS-B',
'args': (fixed_distance,
moving_distance),
'bounds': bounds,
'tol': 0.001,
'options': {'disp': False}
},
callback=callback,
disp=True)
t_star = res.x[:3]
theta_star = res.x[3:]
print(res)
reg_distance = rigid_warp(moving_distance,
t_star,
theta_star,
fixed_com,
moving_distance.shape)
plot_moving(100*reg_distance, viewer)
```
Sum of squared differences seems to provide slightly better registration than Normalized cross-correlation in the case of distance transformed convex hulls. This might be because NCC is indifferent to intensity difference and only considers correlations in the intensities, whereas SSD will penalize for any difference in intensity. In a multi-modal setting, this is usually not desired, but since we are dealing with the same brain in both images, the overall shape (and therefore distance transforms) should take similar values (not just correlated).
Also, it was necessary to include the translation component in the optimization procedure because our center-of-mass estimate for the center of rotation is not accurate. This causes the optimization for our rigid transformation to be partially constrained to inaccurate values, making it hard to converge to the correct rotation.
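To make the NCC/SSD point concrete, here is a small illustrative sketch using the `ncc` and `ssd` helpers defined earlier (not part of the registration pipeline): scaling an image's intensities leaves NCC essentially unchanged but produces a large SSD.
```
# Illustrative sketch: NCC ignores a global intensity scaling, SSD does not.
rng = np.random.RandomState(0)
img = rng.rand(32, 32, 32)
scaled = 2.0 * img                 # same structure, different intensities
print(ncc(img, scaled))            # ~1.0: perfectly correlated
print(ssd(img, scaled))            # > 0: penalizes the intensity change
```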
# Coarse Registration
```
registered = rigid_warp(moving_downsample,
t_star,
theta_star,
fixed_com,
moving_downsample.shape)
plot_both(fixed_downsample, registered, viewer)
```
We need to convert the downsampled transformation into an appropriate transformation for the original-resolution image. The rotation matrix is scale invariant, but we need to make sure the center of rotation and the translation are upsampled by the same amount that we downsampled.
```
print('Converged rigid transformation for downsampled image')
print('Rotation (deg):', theta_star*180/np.pi)
print('Center (px):', fixed_com)
print('Translation (px):', t_star)
fixed_fullres_path = os.path.join(working_dir, 'fixed/zarr_stack/8_8_8')
fixed_fullres_store = zarr.NestedDirectoryStore(fixed_fullres_path)
fixed_fullres = zarr.open(fixed_fullres_store, mode='r')
theta = theta_star
true_factors = np.array(fixed_fullres.shape) / np.array(fixed_downsample.shape)
t, center = coarse._scale_rigid_params(t_star,
fixed_com,
true_factors)
print('Converged rigid transformation for original image')
print('Rotation (deg):', theta*180/np.pi)
print('Center (px):', center)
print('Translation (px):', t)
plot_both(fixed_img, moving_img, viewer)
registered = rigid_warp(moving_img,
t,
theta,
center,
fixed_img.shape)
plot_moving(registered, viewer)
np.save('rigid_8_8_8.npy', registered)
```
# Save the transformation
```
from phathom.utils import pickle_save
transformation_dict = {'t': t,
'center': center,
'theta': theta}
pickle_save(os.path.join(working_dir, 'rigid_transformation.pkl'),
transformation_dict)
from phathom.utils import pickle_load
transformation_dict = pickle_load(os.path.join(working_dir, 'rigid_transformation.pkl'))
transformation_dict
```
|
github_jupyter
|
```
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import warnings
warnings.filterwarnings('ignore')
df1 = pd.read_csv('monday.csv', sep = ";")
df2 = pd.read_csv('tuesday.csv', sep = ";")
df3 = pd.read_csv('wednesday.csv', sep = ";")
df4 = pd.read_csv('thursday.csv', sep = ";")
df5 = pd.read_csv('friday.csv', sep = ";")
df = pd.concat([df1, df2, df3, df4, df5], ignore_index=True)
df_unique = pd.read_csv('df_unique.csv', index_col=None)
df_unique.drop('Unnamed: 0', axis=1, inplace=True)
# 5. Simulate a single customer
# 5.1 create transition probability matrix
# create table with transition for each timestamp
# two columns (from-->to)
# concat/append this journey for each customer
# count individual transitions from each location
# (from fruit -> drinks (87 people in total)
# (from fruit -> spices (45 people in total) ...
# those are the probabilities for each location)
df_journey = df_unique.groupby(['customer_unique', 'location', 'date']).count().sort_values(by = ['customer_unique', 'date'])
entry_point = df_journey.reset_index(1).index.get_level_values(level=0).unique()
entry_list= []
for i in entry_point:
entry_list.append(df_journey.reset_index(1).xs(i)['location'][0])
df_journey.reset_index(inplace=True)
# count entry location
import collections
entry_dict = collections.Counter(entry_list)
entry_p = []
for i in list(entry_dict.values()):
entry_p.append(i/(sum(entry_dict.values())))
states_ = ['fruit', 'dairy', 'spices', 'drinks']
entry_dic = zip(states_,entry_p)
list(entry_dic)
sum(entry_p)
df_trans = df_journey[['location']]
df_trans['_to'] = df_journey[['location']].shift(periods=-1, fill_value='checkout')
indexlist = df_trans[df_trans['location'] == 'checkout'].index
df_trans.drop(indexlist, inplace=True)
df_trans['transition'] = df_trans['location'] + "_" + df_trans['_to']
df_trans
labels, trans = pd.factorize(df_trans['transition'])
df_trans['trans_fac'] = labels
len(labels)
trans_dict = df_trans['transition'].value_counts(ascending=True)
values = [value for (key, value) in sorted(trans_dict.items())]
keys = [key for (key, value) in sorted(trans_dict.items())]
sorted_trans_dict = dict(zip(keys, values))
sorted_trans_dict
df_unique.head()
trans_matrix = pd.DataFrame.from_dict({'entrance': [0,0,0,0,0,0],
'fruit': [0,0,0,0,0,0],
'spices': [0,0,0,0,0,0],
'dairy': [0,0,0,0,0,0],
'drinks': [0,0,0,0,0,0],
'checkout': [0,0,0,0,0,0]})
trans_matrix.set_index([pd.Index(['entrance', 'fruit', 'spices', 'dairy', 'drinks', 'checkout'])], inplace=True)
trans_matrix
for f in sorted_trans_dict.items():
origin = str(f[0].split('_')[0])
dest = str(f[0].split('_')[1])
count_ = int(f[1])
trans_matrix.loc[origin][dest] = count_
row_sum = trans_matrix.sum(axis=1)
prob_matrix = trans_matrix.T/row_sum
prob_matrix.loc['checkout']['checkout']=1
#prob_matrix.loc['entrance']['entrance']=1
prob_matrix
prob_matrix.fillna(0, inplace=True)
prob_dict = dict(prob_matrix)
# check:
prob_matrix.T.loc['spices'].sum()
prob_matrix
# add 0 probability for going to checkout from entrance
entry_p.append(0)
entry_p.insert(0,0)
entry_p
# adding entrance probability
prob_matrix['entrance'] = entry_p
```
```
prob_matrix
# Average number of steps during journey
# counting every stage (including ideling customers)
(df_journey.shape[0]-1)/df_journey['customer_unique'].nunique()
df_journey['customer_unique'].nunique()
len(set(df_journey['customer_unique'].values))
# counting every different stage
df.shape[0]/df_journey['customer_unique'].nunique()
# calculate probabilities at entrance
# by counting first stage of all unique customers
# cal. percentage value
# use this number in markov.next_state('XXX') function
x = prob_dict.keys()
list(x)[5]
```
_______________________________
```
class Customer(object):
def __init__(self, transition_prob):
"""
Initialize the MarkovChain instance.
Parameters
----------
transition_prob: dict
A dict object representing the transition
probabilities in Markov Chain.
Should be of the form:
{'state1': {'state1': 0.1, 'state2': 0.4},
'state2': {...}}
"""
self.transition_prob = transition_prob
self.states = list(transition_prob.keys())
self.first_state = 'entrance'
def next_state(self, current_state):
return np.random.choice(
self.states,
p=[self.transition_prob[current_state][next_state]
for next_state in self.states])
def generate_states(self, current_state='entrance', no=50):
future_states = []
for i in range(no):
next_state = self.next_state(current_state)
future_states.append(next_state)
current_state = next_state
if future_states[-1] == self.states[-1]:
break
return future_states
```
```
markov = Customer(prob_dict)
markov.generate_states()
markov.transition_prob
```
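As a quick sanity check of the transition matrix, we can simulate a batch of customers with the `markov` object above and count how often each section shows up (a small sketch; counts will vary between runs):
```
# Sketch: simulate many customer journeys and count section visits.
from collections import Counter

visit_counts = Counter()
for _ in range(1000):
    journey = markov.generate_states('entrance')   # states until 'checkout'
    visit_counts.update(journey)
visit_counts
```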
|
github_jupyter
|
# Example 1: How to Generate Synthetic Data (MarginalSynthesizer)
In this notebook we show you how to create a simple synthetic dataset.
# Environment
## Library Imports
```
import numpy as np
import pandas as pd
from pathlib import Path
import os
import sys
```
## Jupyter-specific Imports and Settings
```
# set printing options
np.set_printoptions(threshold=sys.maxsize)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.expand_frame_repr', False)
# Display all cell outputs
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
from IPython import get_ipython
ipython = get_ipython()
# autoreload extension
if 'autoreload' not in ipython.extension_manager.loaded:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
from importlib import reload
```
## Import Synthesizer
For this example we use the MarginalSynthesizer algorithm. As the name suggests, this algorithm generates data via the marginal distributions of each column in the input dataset. In other words, the output synthetic data will have similar counts for each column but the statistical patterns between columns are likely not preserved. While this method is rather naive, it will work with data of any shape or size - and run relatively quickly as well.
```
from synthesis.synthesizers import MarginalSynthesizer
```
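Conceptually, marginal synthesis amounts to sampling each column independently from its empirical distribution. The snippet below is only an illustration of that idea (a naive sketch for intuition, not the library's actual implementation):
```
# Naive conceptual sketch of per-column (marginal) sampling -- for intuition only.
import numpy as np
import pandas as pd

def naive_marginal_sample(df, n=None, seed=0):
    rng = np.random.RandomState(seed)
    n = n or len(df)
    out = {}
    for col in df.columns:
        counts = df[col].value_counts(normalize=True)   # empirical marginal
        out[col] = rng.choice(counts.index, size=n, p=counts.values)
    return pd.DataFrame(out)
```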
# Synthetic Data Generation
Let's load a dataset to see how the generation process works.
In this case, we will use the adult dataset - source: https://archive.ics.uci.edu/ml/datasets/adult
```
df_original = pd.read_csv('../data/original/adult.csv')
df_original.head()
```
We will now import our synthesizer and fit it on the input data.
Additionally we can specify the 'epsilon' value, which according to the definition of differential privacy is used to quantify the privacy risk posed by releasing statistics computed on sensitive data. More on that here: https://www.cis.upenn.edu/~aaroth/Papers/privacybook.pdf
In short, a lower value of epsilon will result in more randomness, and vice versa.
```
epsilon = 1 # set to float(np.inf) if you'd like to compute without differential privacy.
synthesizer = MarginalSynthesizer(epsilon=epsilon)
synthesizer.fit(df_original)
```
After our synthesizer has fitted the structure of the original data source, we can now use it to generate a new dataset.
```
# we can specify the number of records by sample(n_records=...),
# default it generates the same number of records as the input data
df_synthetic = synthesizer.sample()
```
We have now obtained a new dataset that looks very similar to the original one.
```
df_synthetic.head()
```
# Evaluation
We can see that the synthetic data has a similar structure to the original. We can also evaluate whether it has retained the statistical distributions of the original data. We use the SyntheticDataEvaluator class to compare the synthetic data to the original by applying various metrics.
Note: for more elaborate evaluation techniques we refer to the example notebook on 'evaluating synthetic data'.
```
from synthesis.evaluation import SyntheticDataEvaluator
evaluator = SyntheticDataEvaluator()
evaluator.fit(df_original, df_synthetic)
evaluator.score()
evaluator.plot()
```
Observe that the marginal distributions are preserved quite well - especially for columns with low dimensionality. When using differentially private algorithms (like MarginalSynthesizer), it is advised to reduce the dimensionality of the original data by generalizing columns; a small example follows below.
Also observe that the last plot shows the synthetic data did not capture any of the correlations in the original data. This is expected, as MarginalSynthesizer synthesizes each column independently.
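For instance, a high-cardinality numeric column could be binned before fitting, which typically gives more reliable per-category counts under differential privacy. A hypothetical sketch (it assumes the adult data has an `age` column; adjust the column name and bins to your data):
```
# Hypothetical sketch: generalize a numeric column by binning before synthesis.
df_generalized = df_original.copy()
df_generalized['age'] = pd.cut(df_generalized['age'],
                               bins=[0, 25, 45, 65, 120],
                               labels=['<=25', '26-45', '46-65', '65+'])

synthesizer_gen = MarginalSynthesizer(epsilon=epsilon)
synthesizer_gen.fit(df_generalized)
df_synthetic_gen = synthesizer_gen.sample()
```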
# Conclusion
We hope that gave you a quick introduction on synthetic data generation. Now go try it on your own data!
In the next example notebook we show how a more sophisticated algorithm is able to preserve statistical patterns between columns in the original data.
|
github_jupyter
|
# Simplifying Codebases
Param's just a Python library, and so anything you can do with Param you can do "manually". So, why use Param?
The most immediate benefit to using Param is that it allows you to greatly simplify your codebases, making them much more clear, readable, and maintainable, while simultaneously providing robust handling against error conditions.
Param does this by letting a programmer explicitly declare the types and values of parameters accepted by the code. Param then ensures that only suitable values of those parameters ever make it through to the underlying code, removing the need to handle any of those conditions explicitly.
To see how this works, let's create a Python class with some attributes without using Param:
```
class OrdinaryClass(object):
def __init__(self, a=2, b=3, title="sum"):
self.a = a
self.b = b
self.title = title
def __call__(self):
return self.title + ": " + str(self.a + self.b)
```
As this is just standard Python, we can of course instantiate this class, modify its variables, and call it:
```
o1 = OrdinaryClass(b=4, title="Sum")
o1.a=4
o1()
```
The same code written using Param would look like:
```
import param
class ParamClass(param.Parameterized):
a = param.Integer(2, bounds=(0,1000), doc="First addend")
b = param.Integer(3, bounds=(0,1000), doc="Second addend")
title = param.String(default="sum", doc="Title for the result")
def __call__(self):
return self.title + ": " + str(self.a + self.b)
o2 = ParamClass(b=4, title="Sum")
o2()
```
As you can see, the Parameters here are used precisely like normal attributes once they are defined, so the code for `__call__` and for invoking the constructor are the same in both cases. It's thus generally quite straightforward to migrate an existing class into Param. So, why do that?
Well, with fewer lines of code than the ordinary class, you've now unlocked a whole wealth of features and better behavior! For instance, what happens if a user tries to supply some inappropriate data? With Param, such errors will be caught immediately:
```
with param.exceptions_summarized():
o3 = ParamClass()
o3.b = -5
```
Of course, you could always add more code to an ordinary Python class to check for errors like that, but it quickly gets unwieldy:
```
class OrdinaryClass2(object):
def __init__(self, a=2, b=3, title="sum"):
if type(a) is not int:
raise ValueError("'a' must be an integer")
if type(b) is not int:
raise ValueError("'b' must be an integer")
if a<0:
raise ValueError("'a' must be at least `0`")
if b<0:
raise ValueError("'b' must be at least `0`")
if type(title) is not str:
raise ValueError("'title' must be a string")
self.a = a
self.b = b
self.title = title
def __call__(self):
return self.title + ": " + str(self.a + self.b)
with param.exceptions_summarized():
OrdinaryClass2(a="f")
```
Unfortunately, catching errors in the constructor like that won't help if someone modifies the attribute directly, which won't be detected as an error:
```
o4 = OrdinaryClass2()
o4.a = "four"
```
Python will happily accept this incorrect value and will continue processing. It may only be much later, in a very different part of your code, that you see a mysterious error message that's then very difficult to relate back to the actual problem you need to fix:
```
with param.exceptions_summarized():
o4()
```
Here there's no problem with the code in the cell above; `o4()` is fully valid Python; the real problem is in the preceding cell, which could have been in a completely different file or library. The error message is also obscure and confusing at this level, because the user of `o4` may have no idea why strings and integers are getting concatenated.
To get a better error message, you _could_ move those checks into the `__call__` method, which would make sure that errors are always eventually detected:
```
class OrdinaryClass3(object):
def __init__(self, a=2, b=3, title="sum"):
self.a = a
self.b = b
self.title = title
def __call__(self):
if type(self.a) is not int:
raise ValueError("'a' must be an integer")
if type(self.b) is not int:
raise ValueError("'b' must be an integer")
if self.a<0:
raise ValueError("'a' must be at least `0`")
if self.b<0:
raise ValueError("'b' must be at least `0`")
if type(self.title) is not str:
raise ValueError("'title' must be a string")
return self.title + ": " + str(self.a + self.b)
o5 = OrdinaryClass3()
o5.a = "four"
with param.exceptions_summarized():
o5()
```
But you'd now have to check for errors in _every_ _single_ _method_ that might use those parameters. Worse, you still only detect the problem very late, far from where it was first introduced. Any distance between the error and the error report makes it much more difficult to address, as the user then has to track down where in the code `a` might have gotten set to a non-integer.
With Param you can catch such problems at their start, as soon as an incorrect value is provided, when it is still simple to detect and correct it. To get those same features in hand-written Python code, you would need to provide explicit getters and setters, which is made easier with Python properties and decorators, but is still quite unwieldy:
```
class OrdinaryClass4(object):
def __init__(self, a=2, b=3, title="sum"):
self.a = a
self.b = b
self.title = title
@property
def a(self): return self.__a
@a.setter
def a(self, a):
if type(a) is not int:
raise ValueError("'a' must be an integer")
if a < 0:
raise ValueError("'a' must be at least `0`")
self.__a = a
@property
def b(self): return self.__b
@b.setter
def b(self, b):
if type(b) is not int:
raise ValueError("'a' must be an integer")
if b < 0:
raise ValueError("'a' must be at least `0`")
self.__b = b
@property
def title(self): return self.__title
    @title.setter
    def title(self, title):
        if type(title) is not str:
            raise ValueError("'title' must be a string")
        self.__title = title
def __call__(self):
return self.title + ": " + str(self.a + self.b)
o5=OrdinaryClass4()
o5()
with param.exceptions_summarized():
o5=OrdinaryClass4()
o5.b=-6
```
Note that this code has an easily overlooked mistake in it, reporting `a` rather than `b` as the problem. This sort of error is extremely common in copy-pasted validation code of this type, because tests rarely exercise all of the error conditions involved.
As you can see, even getting close to the automatic validation already provided by Param requires 8 methods and >30 highly repetitive lines of code, even when using relatively esoteric Python features like properties and decorators, and still doesn't yet implement other Param features like automatic documentation, attribute inheritance, or dynamic values. With Param, the corresponding `ParamClass` code only requires 6 lines and no fancy techniques beyond Python classes. Most importantly, the Param version lets readers and program authors focus directly on what this code actually does, which is to compute a function from three provided parameters:
```
class ParamClass(param.Parameterized):
a = param.Integer(2, bounds=(0,1000), doc="First addend")
b = param.Integer(3, bounds=(0,1000), doc="Second addend")
title = param.String(default="sum", doc="Title for the result")
def __call__(self):
return self.title + ": " + str(self.a + self.b)
```
Even a quick skim of this code reveals what parameters are available, what values they will accept, what the default values are, and how those parameters will be used in the method. Plus the actual code of the method stands out immediately, as all the code is either parameters or actual functionality. In contrast, users of OrdinaryClass3 will have to read through dozens of lines of code to discern even basic information about usage, or else authors of the code will need to create and maintain docstrings that may or may not match the actual code over time and will further increase the amount of text to write and maintain.
## Programming contracts
If you think about the examples above, you can see how Param makes it simple for programmers to make a contract with their users, being explicit and clear what will be accepted and rejected, while also allowing programmers to make safe assumptions about what inputs the code may ever receive. There is no need for `__call__` _ever_ to check for the type of one of its parameters, whether it's in the range allowed, or any other property that can be enforced by Param. Your custom code can then be much more linear and straightforward, getting right to work with the actual task at hand, without having to have reams of `if` statements and `asserts()` that disrupt the flow of the source file and make the reader get sidetracked in error-handling code. Param lets you once and for all declare what this code accepts, which is both clear documentation to the user and a guarantee that the programmer can forget about any other possible value a user might someday supply.
Crucially, these contracts apply not just between the user and a given piece of code, but also between components of the system itself. When validation code is expensive, as in ordinary Python, programmers will typically do it only at the edges of the system, where input from the user is accepted. But expressing types and ranges is so easy in Param, it can be done for any major component in the system. The Parameter list declares very clearly what that component accepts, which lets the code for that component ignore all potential inputs that are disallowed by the Parameter specifications, while correctly advertising to the rest of the codebase what inputs are allowed. Programmers can thus focus on their particular components of interest, knowing precisely what inputs will ever be let through, without having to reason about the flow of configuration and data throughout the whole system.
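As a small illustration of such a contract between components (a sketch, not taken from the Param documentation), an upstream component can declare exactly what it guarantees, and a downstream component can then use those values without re-checking them:
```
import param

class Settings(param.Parameterized):
    """Upstream component: guarantees a rate in [0, 1] and a positive size."""
    rate = param.Number(0.1, bounds=(0, 1), doc="Sampling rate")
    size = param.Integer(10, bounds=(1, None), doc="Number of items")

class Consumer(param.Parameterized):
    """Downstream component: relies on Settings' declared bounds, no extra checks."""
    repeats = param.Integer(3, bounds=(1, None), doc="How many passes to run")

    def run(self, settings):
        # No validation needed: any Settings instance satisfies its own contract.
        return settings.rate * settings.size * self.repeats

print(Consumer(repeats=2).run(Settings(rate=0.5, size=4)))
```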
Without Param, you should expect Python code to be full of confusing error checking and handling of different input types, while still only catching a small fraction of the possible incorrect inputs that could be provided. But Param-based code should be dramatically easier to read, easier to maintain, easier to develop, and nearly bulletproof against mistaken or even malicious usage.
|
github_jupyter
|