path (stringlengths 7–265) | concatenated_notebook (stringlengths 46–17M)
---|---|
notebooks/generate_readme_plots.ipynb | ###Markdown
Setup
###Code
# use full window width
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
import os
os.chdir('..')
from matplotlib import pyplot as plt
import numpy as np
import virl
###Output
_____no_output_____
###Markdown
Generate Reward Plots
###Code
s = np.array([0., 0, 0, 0]) # epidemic state
c = 1. # infection rate damping
env = virl.Epidemic(stochastic=False, noisy=False)
fig, axes = plt.subplots(1, 3, figsize=(3*8, 6))
for k in range(4):
a = [env._reward(np.array([0, i/100, 0, 0]), env.actions[k]) for i in range(100)]
axes[0].plot(np.arange(100)/100, a, label=f'action {k}')
axes[0].set_ylabel('reward')
axes[0].set_xlabel('fraction of population infected')
y = axes[0].get_ylim()
infected = [0.01, 0.1]
plot = ['center', 'right']
for i in range(2):
x = infected[i]
b = [env._reward([0,x, 0,0], env.actions[a]) for a in range(4)]
axes[0].plot([x, x], y, '--', alpha=0.75, label=f'see {plot[i]} plot')
axes[i+1].bar(np.arange(4), height=b)
axes[i+1].set_xticks([0, 1, 2, 3])
axes[i+1].set_xlabel('action')
axes[i+1].set_ylabel('reward')
axes[i+1].set_title(f'Reward at {(x*100):.0f}% population infected')
axes[0].legend()
plt.savefig(dpi=300, fname='reward.png')
###Output
_____no_output_____
###Markdown
Generate Problem ID plots
###Code
fig, ax = plt.subplots(figsize=(8, 6))
for i in range(10):
env = virl.Epidemic(problem_id=i)
states = []
rewards = []
done = False
s = env.reset()
states.append(s)
while not done:
s, r, done, info = env.step(action=0) # deterministic agent
states.append(s)
rewards.append(r)
ax.plot(np.array(states)[:,1], label=f'problem_id={i}')
ax.set_xlabel('weeks since start of epidemic')
ax.set_ylabel('Number of Infectious persons')
ax.set_title('Simulation of problem_ids without intervention')
ax.legend()
plt.savefig(dpi=300, fname='problem_id.png')
###Output
_____no_output_____
###Markdown
Generate Noisy Observation Plot
###Code
env = virl.Epidemic(problem_id=0, noisy=True)
states = []
rewards = []
done = False
s = env.reset()
states.append(s)
while not done:
s, r, done, info = env.step(action=0) # deterministic agent
states.append(s)
rewards.append(r)
fig, ax = plt.subplots(figsize=(8, 6))
labels = ['susceptibles', 'infectious', 'quarantined', 'recovereds']
states = np.array(states)
for i in range(4):
ax.plot(states[:,i], label=labels[i]);
ax.set_xlabel('weeks since start of epidemic')
ax.set_ylabel('State s(t)')
ax.set_title('Problem 0 with noisy observations without intervention')
ax.legend()
plt.savefig(dpi=300, fname='noisy.png')
###Output
_____no_output_____
###Markdown
Generate stochastic sample simulations
###Code
fig, ax = plt.subplots(figsize=(8, 6))
for i in range(10):
env = virl.Epidemic(stochastic=True)
states = []
rewards = []
done = False
s = env.reset()
states.append(s)
while not done:
s, r, done, info = env.step(action=0) # deterministic agent
states.append(s)
rewards.append(r)
ax.plot(np.array(states)[:,1], label=f'draw {i}')
ax.set_xlabel('weeks since start of epidemic')
ax.set_ylabel('Number of Infectious persons')
ax.set_title('Simulation of 10 stochastic episodes without intervention')
ax.legend()
plt.savefig(dpi=300, fname='stochastic.png')
###Output
_____no_output_____ |
python/irc_channel_classifier.ipynb | ###Markdown
IRC Channel Classifier
###Code
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import umap
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from mpl_toolkits.mplot3d import Axes3D
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import kneighbors_graph
import os
project_dir = '/Users/preneond/Documents/Work/Stratosphere/IRC-Research/IRC-Behavioral-Analysis/'
# project_dir = '/home/prenek/IRC-Behavioral-Analysis/'
log_dir = os.path.join(project_dir, 'zeek/logs/')
out_dir = os.path.join(project_dir, 'python/out/')
data = pd.read_csv(os.path.join(out_dir, 'irc_channel_features_all.csv'))
data.drop(['Unnamed: 0'],axis=1,inplace=True)
data.head()
X = data.iloc[:, 2:-3]
y = data.iloc[:, -1]
X['lang'] = X['lang'].astype('category').cat.codes
X = X.apply(lambda x: x.fillna(x.mean()),axis=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=0.3, random_state=1)
###Output
_____no_output_____
###Markdown
Experiment 1 - Unbalanced Dataset. Keep the same malicious/benign ratio when splitting into train/validation/test sets, no matter how few malicious samples there are; to run this experiment, comment out the code in Experiment 2.
###Code
# X, _X = X[:16], X[16:]
# y, _y = y[:16], y[16:]
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=0.3, random_state=1)
X_test_big = X_test#np.concatenate([X_test, _X])
y_test_big = y_test#np.concatenate([y_test, _y])
# X_test_big = np.concatenate([X_test, _X])
# y_test_big = np.concatenate([y_test, _y])
###Output
_____no_output_____
###Markdown
Experiment 2 - Balanced Dataset. Down-sample the majority class.
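The cell below only reports the class counts; the down-sampling step itself is not shown in this run (the second run of this notebook balances the classes with imblearn's RandomOverSampler instead). As a hedged sketch of how the majority class could be down-sampled before re-splitting, using names of my own (`rus`, `X_bal`, `y_bal`) rather than the notebook's:
###Code
# Illustrative only: random under-sampling of the majority (benign) class with imblearn.
from imblearn.under_sampling import RandomUnderSampler
from sklearn.model_selection import train_test_split

rus = RandomUnderSampler(random_state=0)
X_bal, y_bal = rus.fit_resample(X, y)
X_train_bal, X_test_bal, y_train_bal, y_test_bal = train_test_split(
    X_bal, y_bal, stratify=y_bal, test_size=0.3, random_state=1)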
###Code
n1 = y.shape[0]
n2 = y[y==0].shape[0]
n3 = y[y==1].shape[0]
# # showing examples of data
print('X: Info - \t Number of samples:\t\t{}\n\t\t Number of benign samples:\t{} \n\t\t Number of malicious samples\t{}\n'.format(n1,n2,n3))
n1 = y_train.shape[0]
n2 = y_train[y_train==0].shape[0]
n3 = y_train[y_train==1].shape[0]
# # showing examples of data
print('X_train: Info - Number of samples:\t\t{}\n\t\t Number of benign samples:\t{} \n\t\t Number of malicious samples\t{}\n'.format(n1,n2,n3))
# n1 = y_val.shape[0]
# n2 = y_val[y_val==0].shape[0]
# n3 = y_val[y_val==1].shape[0]
# # # showing examples of data
# print('X_val: Info - Number of samples:\t\t{}\n\t\t Number of benign samples:\t{} \n\t\t Number of malicious samples\t{}\n'.format(n1,n2,n3))
n1 = y_test.shape[0]
n2 = y_test[y_test==0].shape[0]
n3 = y_test[y_test==1].shape[0]
# # showing examples of data
print('X_test: Info - Number of samples:\t\t{}\n\t\t Number of benign samples:\t{} \n\t\t Number of malicious samples\t{}'.format(n1,n2,n3))
sc = StandardScaler()
X = sc.fit_transform(X)
X_test = sc.transform(X_test)
X_train = sc.transform(X_train)
X_test_big = sc.transform(X_test_big)
import random
def pick_color(n=1):
colors = ["blue","black","brown","red","yellow","green","orange","beige","turquoise","pink"]
random.shuffle(colors)
return colors[:n]
###Output
_____no_output_____
###Markdown
PCA
###Code
pca = PCA(n_components=2)
_pca = pca.fit(X)
X_pca_train = pca.transform(X_train)
X_pca = _pca.transform(X)
lw = 2
# increase fig size when the point annotation is enabled
plt.figure(figsize=(10,10))
plt.title('Principal Component Analysis')
group_offset = 0
for color, i, target_name in zip(['red','blue'], data.malicious.unique(), ['malicious','non-malicious']):
_pca_data_x = X_pca[y == i, 0]
_pca_data_y = X_pca[y == i, 1]
_pca_df = pd.DataFrame({
'x': _pca_data_x,
'y': _pca_data_y,
'group': list(range(group_offset, _pca_data_x.shape[0]+group_offset))
})
group_offset += _pca_data_x.shape[0]
p1 = sns.scatterplot(x='x',y='y',data=_pca_df, color=color, alpha=.8,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.show()
###Output
_____no_output_____
###Markdown
T-SNE
###Code
# 2D
print('t-SNE 2D...')
X_tsne_2d = TSNE(n_components=2,verbose=0).fit_transform(X)
# 3D
print('t-SNE 3D...')
X_tsne_3d = TSNE(n_components=3,verbose=0).fit_transform(X)
print('Done.')
df_tsne_2d = pd.DataFrame({
'x': X_tsne_2d[:,0],
'y':X_tsne_2d[:,1],
'label': y,
'group': list(range(X_tsne_2d.shape[0]))
})
df_arr = []
for l in data.malicious.unique():
df_arr.append(df_tsne_2d.where(df_tsne_2d.label==l))
plt.figure(figsize=(10,10))
plt.title('t-SNE')
for df, l, c in zip(df_arr,['malicious','non-malicious'], ["red","blue"]):
sns.scatterplot(
x='x',y='y',
color=c,
data=df,
label=l,
alpha=1)
plt.xlabel('')
plt.ylabel('')
plt.show()
df_tsne_3d = pd.DataFrame({
'x': X_tsne_3d[:,0],
'y': X_tsne_3d[:,1],
'z': X_tsne_3d[:,2],
'label': y,
'group': list(range(X_tsne_3d.shape[0]))
})
df_arr = []
for l in data.malicious.unique():
df_arr.append(df_tsne_3d.where(df_tsne_3d.label==l))
fig = plt.figure(figsize=(10,10))
fig.suptitle('t-SNE')
ax = Axes3D(fig)
for df, l, c in zip(df_arr,['malicious','non-malicious'], ['red','blue']):
ax.scatter(df.x, df.y, df.z, c=c, marker='o', alpha=0.8, label=l)
ax.legend()
plt.show()
y = y.astype('category').cat.codes
y_train = y_train.astype('category').cat.codes
y_test = y_test.astype('category').cat.codes
###Output
_____no_output_____
###Markdown
UMAP UMAP - supervised
###Code
umap_emb = umap.UMAP(n_neighbors=5).fit(X_train, y=y_train).transform(X)
df_umap = pd.DataFrame({
'x': umap_emb[:,0],
'y': umap_emb[:,1],
'label': y,
# 'group': list(range(umap_emb.shape[0]))
})
df_arr = []
for l in y.unique():
df_arr.append(df_umap.where(df_umap.label==l))
plt.figure(figsize=(10,10))
plt.title('UMAP')
for df, l, c in zip(df_arr, ['malicious','non-malicious'], ['red','blue']):
sns.scatterplot(
x='x',y='y',
color=c,
data=df,
label=l,
alpha=1)
###Output
_____no_output_____
###Markdown
UMAP - unsupervised
###Code
umap_emb = umap.UMAP(n_neighbors=5).fit_transform(X)
df_umap = pd.DataFrame({
'x': umap_emb[:,0],
'y': umap_emb[:,1],
'label': y,
'group': list(range(umap_emb.shape[0]))
})
df_arr = []
for l in y.unique():
df_arr.append(df_umap.where(df_umap.label==l))
plt.figure(figsize=(10,10))
plt.title('UMAP')
for df, l, c in zip(df_arr, ['malicious','non-malicious'], ['red','blue']):
sns.scatterplot(
x='x',y='y',
color=c,
data=df,
label=l,
alpha=1)
###Output
_____no_output_____
###Markdown
Unsupervised Learning: K-Means. Determine the optimal number of clusters for k-means.
###Code
sse = []
for k in range(1,15):
    km = KMeans(n_clusters=k, init='k-means++', n_init=50, random_state=0, tol=1.0e-9, verbose=0)
    km = km.fit(X)
    sse.append(km.inertia_)
# Note: the inertia (SSE) decreases monotonically with k, so np.argmin(sse) always points to the
# largest k tried (and is a 0-based index, not the k value itself); the elbow plot below is the
# better guide for choosing k.
print('optimal k is: ', np.argmin(sse))
plt.plot(range(1,15), sse, 'bx-')
plt.xlabel('k')
plt.ylabel('Sum_of_squared_distances')
plt.title('Elbow Method For Optimal k')
plt.show()
kmeans = KMeans(n_clusters=2, init='k-means++', n_init=50,random_state=0 , tol=1.0e-9, verbose=0)
kmeans.fit(X_train, y_train.values)
y_pred_kmeans = kmeans.predict(X_test_big)
print('K-Means accuracy:\t{}%'.format(round(accuracy_score(y_test_big, y_pred_kmeans)*1e02,2)))
print('\tprecision:\t{}%'.format(round(precision_score(y_test_big, y_pred_kmeans)*1e02,2)))
print('\trecall:\t\t{}%'.format(round(recall_score(y_test_big, y_pred_kmeans)*1e02,2)))
print('\tf1-score:\t{}%'.format(round(f1_score(y_test_big, y_pred_kmeans)*1e02,2)))
###Output
K-Means accuracy: 72.0%
precision: 14.29%
recall: 50.0%
f1-score: 22.22%
###Markdown
K-Means - PCA embedded space
###Code
kmeans = KMeans(n_clusters=len(y_train.unique()), init='k-means++', n_init=50,random_state=0 , tol=1.0e-9, verbose=0)
kmeans.fit(X_pca)
y_pred_kmeans = kmeans.predict(X_pca)
print('K-Means-PCA: accuracy:\t{}%'.format(round(accuracy_score(y, y_pred_kmeans)*1e02,2)))
print('\tprecision:\t{}%'.format(round(precision_score(y, y_pred_kmeans)*1e02,2)))
print('\trecall:\t\t{}%'.format(round(recall_score(y, y_pred_kmeans)*1e02,2)))
print('\tf1-score:\t{}%'.format(round(f1_score(y, y_pred_kmeans)*1e02,2)))
###Output
K-Means-PCA: accuracy: 80.25%
precision: 0.0%
recall: 0.0%
f1-score: 0.0%
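One caveat when reading the 0% precision/recall figures in these clustering cells: KMeans assigns arbitrary cluster ids, so scoring them directly against the class labels can report zero even when the clusters separate the classes reasonably well. A common remedy, shown as a sketch that is not part of the original notebook (`align_cluster_labels` is a name I introduce), is to match cluster ids to labels before scoring, e.g. via the Hungarian algorithm:
###Code
import numpy as np
from scipy.optimize import linear_sum_assignment
from sklearn.metrics import confusion_matrix

def align_cluster_labels(y_true, y_cluster):
    # Permute the arbitrary cluster ids onto class labels so that the number of
    # matching (label, cluster) pairs is maximised.
    cm = confusion_matrix(y_true, y_cluster)
    label_ids, cluster_ids = linear_sum_assignment(-cm)  # maximise agreement
    mapping = dict(zip(cluster_ids, label_ids))
    return np.array([mapping[c] for c in y_cluster])

# Usage: pass align_cluster_labels(y, y_pred_kmeans) to accuracy_score / precision_score etc.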
###Markdown
K-Means - t-SNE 2D embedded space
###Code
tsne2d_train, tsne2d_test = train_test_split(df_tsne_2d,stratify=y, test_size=0.3, random_state=0)
kmeans = KMeans(n_clusters=len(y_train.unique()), init='k-means++', n_init=50,random_state=0 , tol=1.0e-9, verbose=0)
kmeans.fit(np.column_stack([tsne2d_train['x'], tsne2d_train['y']]))
y_tsne_2d_pred_kmeans = kmeans.predict(np.column_stack([tsne2d_test['x'],tsne2d_test['y']]))
print('K-Means-tsne 2D: accuracy:\t{}%'.format(round(accuracy_score(y_test, y_tsne_2d_pred_kmeans)*1e02,2)))
print('\tprecision:\t{}%'.format(round(precision_score(y_test,y_tsne_2d_pred_kmeans)*1e02,2)))
print('\trecall:\t\t{}%'.format(round(recall_score(y_test,y_tsne_2d_pred_kmeans)*1e02,2)))
print('\tf1-score:\t{}%'.format(round(f1_score(y_test,y_tsne_2d_pred_kmeans)*1e02,2)))
###Output
K-Means-tsne 2D: accuracy: 48.0%
precision: 7.69%
recall: 50.0%
f1-score: 13.33%
###Markdown
K-Means - t-SNE 3D embedded space
###Code
tsne3d_train, tsne3d_test = train_test_split(df_tsne_3d, stratify=y, test_size=0.3, random_state=0)
kmeans = KMeans(n_clusters=len(y.unique()), init='k-means++', n_init=50,random_state=0 , tol=1.0e-9, verbose=0)
kmeans.fit(np.column_stack([tsne3d_train['x'], tsne3d_train['y'],tsne3d_train['z']]), tsne3d_train['label'])
y_tsne_3d_pred_kmeans = kmeans.predict(np.column_stack([tsne3d_test['x'],tsne3d_test['y'], tsne3d_test['z']]))
print('K-Means tsne 3D accuracy:\t{}%'.format(round(accuracy_score(y_test, y_tsne_3d_pred_kmeans)*1e02,2)))
print('\tprecision:\t{}%'.format(round(precision_score(y_test,y_tsne_3d_pred_kmeans)*1e02,2)))
print('\trecall:\t\t{}%'.format(round(recall_score(y_test,y_tsne_3d_pred_kmeans)*1e02,2)))
print('\tf1-score:\t{}%'.format(round(f1_score(y_test,y_tsne_3d_pred_kmeans)*1e02,2)))
###Output
K-Means tsne 3D accuracy: 92.0%
precision: 0.0%
recall: 0.0%
f1-score: 0.0%
###Markdown
K-NN
###Code
from irc_utils import compute_score
# Create KNN classifier
knn = KNeighborsClassifier(n_neighbors = 3)
# Fit the classifier to the data
knn.fit(X_train,y_train)
y_pred_knn = knn.predict(X_test_big)
print('K-NN accuracy:\t{}%'.format(round(accuracy_score(y_test_big, y_pred_knn)*1e02,2)))
print('\tprecision:\t{}%'.format(round(precision_score(y_test_big,y_pred_knn)*1e02,2)))
print('\trecall:\t\t{}%'.format(round(recall_score(y_test_big,y_pred_knn)*1e02,2)))
print('\tf1-score:\t{}%'.format(round(f1_score(y_test_big,y_pred_knn)*1e02,2)))
X_train.shape
###Output
_____no_output_____
###Markdown
K-NN on PCA embedded space
###Code
knn_tsne2d = KNeighborsClassifier(n_neighbors = 3)
# Fit the classifier to the data
knn_tsne2d.fit(X_pca_train, y_train)
y_pred_knn_pca = knn_tsne2d.predict(X_pca)
print('K-NN PCA accuracy:\t{}%'.format(round(accuracy_score(y, y_pred_knn_pca)*1e02,2)))
print('\tprecision:\t{}%'.format(round(precision_score(y,y_pred_knn_pca)*1e02,2)))
print('\trecall:\t\t{}%'.format(round(recall_score(y,y_pred_knn_pca)*1e02,2)))
print('\tf1-score:\t{}%'.format(round(f1_score(y,y_pred_knn_pca)*1e02,2)))
###Output
K-NN PCA accuracy: 88.89%
precision: 0.0%
recall: 0.0%
f1-score: 0.0%
###Markdown
K-NN on t-SNE 2D-embedded space
###Code
tsne2d_train, tsne2d_test = train_test_split(df_tsne_2d, stratify=y, test_size=0.3, random_state=0)
knn_tsne2d = KNeighborsClassifier(n_neighbors = 3)
# Fit the classifier to the data
knn_tsne2d.fit(np.column_stack([df_tsne_2d['x'],df_tsne_2d['y']]),y)
y_pred_knn_tsne2d = knn_tsne2d.predict(np.column_stack([df_tsne_2d['x'],df_tsne_2d['y']]))
print('K-NN t-SNE 2D accuracy:\t{}%'.format(round(accuracy_score(y, y_pred_knn_tsne2d)*1e02,2)))
print('\tprecision:\t{}%'.format(round(precision_score(y,y_pred_knn_tsne2d)*1e02,2)))
print('\trecall:\t\t{}%'.format(round(recall_score(y,y_pred_knn_tsne2d)*1e02,2)))
print('\tf1-score:\t{}%'.format(round(f1_score(y,y_pred_knn_tsne2d)*1e02,2)))
###Output
K-NN t-SNE 2D accuracy: 92.59%
precision: 75.0%
recall: 37.5%
f1-score: 50.0%
###Markdown
K-NN on t-SNE 3D-embedded space
###Code
tsne3d_train, tsne3d_test = train_test_split(df_tsne_3d, stratify=y, test_size=0.3, random_state=0)
knn_tsne3d = KNeighborsClassifier(n_neighbors = 3)
# Fit the classifier to the data
knn_tsne3d.fit(np.column_stack([tsne3d_train['x'],tsne3d_train['y'], tsne3d_train['z']]), y_train)
y_pred_knn_tsne3d = knn_tsne3d.predict(np.column_stack([tsne3d_test['x'],tsne3d_test['y'],tsne3d_test['z']]))
print('K-NN t-SNE 3D accuracy:\t{}%'.format(round(accuracy_score(y_test, y_pred_knn_tsne3d)*1e02,2)))
print('\tprecision:\t{}%'.format(round(precision_score(y_test,y_pred_knn_tsne3d)*1e02,2)))
print('\trecall:\t\t{}%'.format(round(recall_score(y_test,y_pred_knn_tsne3d)*1e02,2)))
print('\tf1-score:\t{}%'.format(round(f1_score(y_test,y_pred_knn_tsne3d)*1e02,2)))
###Output
K-NN t-SNE 3D accuracy: 92.0%
precision: 0.0%
recall: 0.0%
f1-score: 0.0%
###Markdown
Hierarchical Clustering
###Code
from sklearn.cluster import AgglomerativeClustering
cluster = AgglomerativeClustering(n_clusters=len(y.unique()), affinity='euclidean', linkage='ward')
y_pred_knn_cluster = cluster.fit_predict(X)
print('AgglomerativeClustering accuracy:\t{}%'.format(round(accuracy_score(y, y_pred_knn_cluster)*1e02,2)))
print('\tprecision:\t{}%'.format(round(precision_score(y,y_pred_knn_cluster)*1e02,2)))
print('\trecall:\t\t{}%'.format(round(recall_score(y,y_pred_knn_cluster)*1e02,2)))
print('\tf1-score:\t{}%'.format(round(f1_score(y,y_pred_knn_cluster)*1e02,2)))
###Output
AgglomerativeClustering accuracy: 38.27%
precision: 12.5%
recall: 87.5%
f1-score: 21.88%
###Markdown
Supervised Learning: Linear Regression
###Code
from sklearn.linear_model import SGDClassifier
# C, kernel, gamma = clf.best_params_['C'], clf.best_params_['kernel'], clf.best_params_['gamma']
# Note: despite the section title, SGDClassifier fits a linear classifier (hinge loss by
# default, i.e. a linear SVM trained with SGD), not a regression model.
linreg = SGDClassifier()
linreg.fit(X_train, y_train)
y_pred = linreg.predict(X_test_big)
print('Linear Regression accuracy:\t{}%'.format(round(accuracy_score(y_test_big, y_pred)*1e02,2)))
print('\tprecision:\t{}%'.format(round(precision_score(y_test_big,y_pred)*1e02,2)))
print('\trecall:\t\t{}%'.format(round(recall_score(y_test_big,y_pred)*1e02,2)))
print('\tf1-score:\t{}%'.format(round(f1_score(y_test_big,y_pred)*1e02,2)))
###Output
Linear Regression accuracy: 80.0%
precision: 20.0%
recall: 50.0%
f1-score: 28.57%
###Markdown
Logistic Regression
###Code
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
param_grid = [
{'classifier' : [LogisticRegression()],
'classifier__penalty' : ['l1', 'l2'],
'classifier__C' : np.logspace(-4, 4, 20)
}
]
# Create grid search object
pipe = Pipeline([('classifier' , LogisticRegression())])
clf = GridSearchCV(pipe, param_grid = param_grid, cv = 5, verbose=True, n_jobs=-1)
# Fit on data
best_clf = clf.fit(X_train, y_train)
logreg_model = best_clf.best_params_['classifier']
best_clf.best_params_
y_pred = logreg_model.predict(X_test_big)
print('Logistic Regression accuracy:\t{}%'.format(round(accuracy_score(y_test_big, y_pred)*1e02,2)))
print('\tprecision:\t{}%'.format(round(precision_score(y_test_big,y_pred)*1e02,2)))
print('\trecall:\t\t{}%'.format(round(recall_score(y_test_big,y_pred)*1e02,2)))
print('\tf1-score:\t{}%'.format(round(f1_score(y_test_big,y_pred)*1e02,2)))
train_ratio = np.arange(0.6,0.95,0.005)
n_samples = len(X)
train_size = []#list(map(lambda x: round(x*n_samples), train_ratio))
score_trn_list = []
score_tst_list = []
for trn_ratio in train_ratio:
print(y)
X_tmp, _, y_tmp, _ = train_test_split(X, y, train_size=trn_ratio, stratify=y, random_state=0)
train_size.append(len(X_tmp))
X_train2, X_test2, y_train2, y_test2 = train_test_split(X_tmp, y_tmp, stratify=y_tmp, train_size=0.8, random_state=0)
logreg = LogisticRegression()
logreg.fit(X_train2, y_train2)
y_pred_trn = logreg.predict(X_train2)
y_pred_tst = logreg.predict(X_test2)
score_trn = f1_score(y_train2, y_pred_trn)
score_tst = f1_score(y_test2, y_pred_tst)
score_trn_list.append(score_trn)
score_tst_list.append(score_tst)
print('shape: {},{}'.format(len(score_trn_list), score_trn_list[0]))
score_trn_list = np.stack(score_trn_list, axis=0)
score_tst_list = np.stack(score_tst_list, axis=0)
from irc_utils import exponential_moving_average
trn_score_f1 = exponential_moving_average(score_trn_list, 0.1)
tst_score_f1 = exponential_moving_average(score_tst_list, 0.1)
plt.title("F1 per samples")
plt.xlabel('Number of samples')
plt.ylabel('F1')
plt.plot(train_size, trn_score_f1, label='Train F1',color='navy')
plt.plot(train_size, tst_score_f1, label='Test F1',color="darkorange")
plt.legend(loc="best")
plt.ylim(0,1.05)
plt.show()
###Output
_____no_output_____
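exponential_moving_average is imported from the project's irc_utils module, which is not included in this notebook. For readability, here is a hypothetical stand-in consistent with how it is used above (smoothing the F1-vs-training-size curves with smoothing factor 0.1); the real helper may differ in detail:
###Code
import numpy as np

def exponential_moving_average(values, alpha):
    # Exponentially weighted moving average of a 1-D score series; smaller alpha = heavier smoothing.
    values = np.asarray(values, dtype=float)
    smoothed = np.empty_like(values)
    smoothed[0] = values[0]
    for i in range(1, len(values)):
        smoothed[i] = alpha * values[i] + (1 - alpha) * smoothed[i - 1]
    return smoothed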
###Markdown
SVM
###Code
from sklearn.svm import SVC
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-2, 1e-3, 1e-4, 1e-5],
'C': [0.001, 0.10, 0.1, 10, 25, 50, 100, 1000]},
{'kernel': ['sigmoid'], 'gamma': [1e-2, 1e-3, 1e-4, 1e-5],
'C': [0.001, 0.10, 0.1, 10, 25, 50, 100, 1000]},
{'kernel': ['linear'], 'C': [0.001, 0.10, 0.1, 10, 25, 50, 100, 1000]}
]
# print("# Tuning hyper-parameters for %s" % score)
# print()
clf = GridSearchCV(SVC(C=1), tuned_parameters,cv=3)
clf.fit(X, y)
clf.best_params_
C, kernel, gamma = clf.best_params_['C'], clf.best_params_['kernel'], clf.best_params_['gamma']
my_svm = SVC(C=C, kernel=kernel, gamma=gamma, probability=True, verbose=True)
my_svm.fit(X_train, y_train)
y_pred = my_svm.predict(X_test_big)
print('SVM accuracy:\t{}%'.format(round(accuracy_score(y_test_big, y_pred)*1e02,2)))
print('\tprecision:\t{}%'.format(round(precision_score(y_test_big,y_pred)*1e02,2)))
print('\trecall:\t\t{}%'.format(round(recall_score(y_test_big,y_pred)*1e02,2)))
print('\tf1-score:\t{}%'.format(round(f1_score(y_test_big,y_pred)*1e02,2)))
train_ratio = np.arange(0.5,1,0.005)
n_samples = len(X)
train_size = []#list(map(lambda x: round(x*n_samples), train_ratio))
score_trn_list = []
score_tst_list = []
for trn_ratio in train_ratio:
X_tmp, _, y_tmp, _ = train_test_split(X, y, train_size=trn_ratio, random_state=0)
train_size.append(len(X_tmp))
X_train2, X_test2, y_train2, y_test2 = train_test_split(X_tmp, y_tmp, train_size=0.8, random_state=0)
svc = SVC()
svc.fit(X_train2, y_train2)
y_pred_trn = svc.predict(X_train2)
y_pred_tst = svc.predict(X_test2)
score_trn = f1_score(y_train2, y_pred_trn)
score_tst = f1_score(y_test2, y_pred_tst)
score_trn_list.append(score_trn)
score_tst_list.append(score_tst)
print('shape: {},{}'.format(len(score_trn_list), score_trn_list[0]))
score_trn_list = np.stack(score_trn_list, axis=0)
score_tst_list = np.stack(score_tst_list, axis=0)
trn_score_f1 = exponential_moving_average(score_trn_list, 0.1)
tst_score_f1 = exponential_moving_average(score_tst_list, 0.1)
plt.title("F1 per samples")
plt.xlabel('Number of samples')
plt.ylabel('F1')
plt.plot(train_size, trn_score_f1, label='Train F1',color='navy')
plt.plot(train_size, tst_score_f1, label='Test F1',color="darkorange")
plt.legend(loc="best")
plt.ylim(0,1.05)
plt.show()
###Output
_____no_output_____
###Markdown
Random Forest
###Code
from sklearn.model_selection import validation_curve
from sklearn.ensemble import RandomForestClassifier
param_range = range(1,10)
train_scores, test_scores = validation_curve(
RandomForestClassifier(),
X = X_train, y = y_train,
param_name = 'n_estimators',
param_range = param_range, cv = 3)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with RandomForestClassifier")
plt.xlabel('Num estimators')
plt.ylabel('Accuracy')
plt.ylim(0, 1.05)
lw = 2
plt.plot(param_range, train_scores_mean, label="Training score",
color="navy", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2,
color="navy", lw=lw)
plt.plot(param_range, test_scores_mean, label="Test score",
color="darkorange", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2,
color="darkorange", lw=lw)
plt.legend(loc="best")
plt.show()
###Output
_____no_output_____
###Markdown
Exhaustive Grid Search
###Code
from sklearn.model_selection import GridSearchCV
n_estimators = [1,5,10,15, 20]
max_depth = [1,2,3,4,5,10]
min_samples_split = [2,3,4,5,10,15,20]
min_samples_leaf = [1,2,3,4,5,10]
hyperF = dict(n_estimators = n_estimators, max_depth = max_depth,
min_samples_split = min_samples_split,
min_samples_leaf = min_samples_leaf)
gridF = GridSearchCV(RandomForestClassifier(), hyperF, cv = 3, verbose = 1, n_jobs=-1)
bestF = gridF.fit(X_train, y_train)
print('''Best parameters: \n
- max_depth: {} \n
- min_samples_leaf: {} \n
- min_samples_split: {} \n
- n_estimators: {}'''.format(bestF.best_params_['max_depth'],
bestF.best_params_['min_samples_leaf'],
bestF.best_params_['min_samples_split'],
bestF.best_params_['n_estimators']))
max_depth = bestF.best_params_['max_depth']
min_samples_leaf = bestF.best_params_['min_samples_leaf']
min_samples_split = bestF.best_params_['min_samples_split']
n_estimators = bestF.best_params_['n_estimators']
model = RandomForestClassifier(max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
min_samples_split=min_samples_split,
n_estimators=n_estimators)
model.fit(X_train, y_train)
y_pred = model.predict(X_test_big)
print('Random Forest accuracy:\t{}%'.format(round(accuracy_score(y_test_big, y_pred)*1e02,2)))
print('\tprecision:\t{}%'.format(round(precision_score(y_test_big,y_pred)*1e02,2)))
print('\trecall:\t\t{}%'.format(round(recall_score(y_test_big,y_pred)*1e02,2)))
print('\tf1-score:\t{}%'.format(round(f1_score(y_test_big,y_pred)*1e02,2)))
print(model.feature_importances_)
train_ratio = np.arange(0.5,1,0.01)
n_samples = len(X)
train_size = []#list(map(lambda x: round(x*n_samples), train_ratio))
score_trn_list = []
score_tst_list = []
for trn_ratio in train_ratio:
X_tmp, _, y_tmp, _ = train_test_split(X, y, train_size=trn_ratio, random_state=0)
train_size.append(len(X_tmp))
X_train, X_test, y_train, y_test = train_test_split(X_tmp, y_tmp, train_size=0.7, random_state=0)
model = RandomForestClassifier(max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
min_samples_split=min_samples_split,
n_estimators=n_estimators)
model.fit(X_train, y_train)
y_pred_trn = model.predict(X_train)
y_pred_tst = model.predict(X_test)
score_trn = f1_score(y_train, y_pred_trn)
score_tst = f1_score(y_test, y_pred_tst)
score_trn_list.append(score_trn)
score_tst_list.append(score_tst)
print('shape: {},{}'.format(len(score_trn_list), score_trn_list[0]))
score_trn_list = np.stack(score_trn_list, axis=0 )
score_tst_list = np.stack(score_tst_list, axis=0 )
trn_score_f1 = exponential_moving_average(score_trn_list, 0.1)
tst_score_f1 = exponential_moving_average(score_tst_list, 0.1)
plt.title("F1 per samples")
plt.xlabel('Number of samples')
plt.ylabel('F1')
plt.plot(train_size, trn_score_f1, label='Train F1',color='navy')
plt.plot(train_size, tst_score_f1, label='Test F1',color="darkorange")
plt.legend(loc="best")
plt.ylim(0, 1.05)
plt.show()
###Output
_____no_output_____
###Markdown
Feature Importance
###Code
print(model.feature_importances_)
###Output
[0.50707965 0. 0. 0. 0. 0.
0.37426133 0.09181537 0. 0.02684366]
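The raw importance array printed above is hard to interpret without the feature names. A small helper in the spirit of what the second run of this notebook does (pairing each importance with its column name, assuming X was built from data.columns[2:-3] as in the cell that created it) could look like:
###Code
import numpy as np

# Rank the features of the fitted RandomForest `model` by importance, using the original column names.
feature_names = list(data.columns[2:-3])
order = np.argsort(model.feature_importances_)[::-1]
for rank, idx in enumerate(order, start=1):
    print('{}. {}: {:.4f}'.format(rank, feature_names[idx], model.feature_importances_[idx]))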
###Markdown
XGBoost Validation curve
###Code
import xgboost as xgb
from sklearn.model_selection import validation_curve
param_range = range(1,10)
train_scores, test_scores = validation_curve(
xgb.XGBClassifier(),
X = X_train, y = y_train,
param_name = 'n_estimators',
param_range = param_range, cv = 3)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with XGBoost Classifier")
plt.xlabel('Num estimators')
plt.ylabel('Accuracy')
plt.ylim(0, 1.05)
lw = 2
plt.plot(param_range, train_scores_mean, label="Training score",
color="navy", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2,
color="navy", lw=lw)
plt.plot(param_range, test_scores_mean, label="Test score",
color="darkorange", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2,
color="darkorange", lw=lw)
plt.legend(loc="best")
plt.show()
###Output
_____no_output_____
###Markdown
Exhaustive Grid Search
###Code
from sklearn.model_selection import GridSearchCV
parameters = {
'max_depth': [2, 3, 4, 5],
'n_estimators': [5, 10, 25],
'gamma': [0, 0.1, 0.2],
'min_child_weight': [0, 0.5, 1],
'colsample_bytree': [0.6, 0.8, 1],
'reg_alpha': [1e-2, 1e-1, 1e1,10],
'reg_lambda': [1e-2, 1e-1, 1e1,10],
}
clf = GridSearchCV(xgb.XGBClassifier(), parameters, scoring = 'accuracy', cv = 3, verbose = 1, n_jobs=-1)
clf.fit(X_train, y_train)
p = clf.best_params_
clf.best_params_
xgb_model = xgb.XGBClassifier(colsample_bytree=0.6, gamma=0, max_depth=2,
min_child_weight=0, n_estimators=10, reg_alpha=0.01,reg_lambda=0.01)
xgb_model.fit(X_train, y_train)
y_pred = xgb_model.predict(X_test)
print('XGBoost accuracy:\t{}%'.format(round(accuracy_score(y_test, y_pred)*1e02,2)))
print('\tprecision:\t{}%'.format(round(precision_score(y_test,y_pred)*1e02,2)))
print('\trecall:\t\t{}%'.format(round(recall_score(y_test,y_pred)*1e02,2)))
print('\tf1-score:\t{}%'.format(round(f1_score(y_test,y_pred)*1e02,2)))
train_ratio = np.arange(0.5,0.9,0.005)
n_samples = len(X)
train_size = []#list(map(lambda x: round(x*n_samples), train_ratio))
score_trn_list = []
score_tst_list = []
for trn_ratio in train_ratio:
X_tmp, _, y_tmp, _ = train_test_split(X, y, stratify=y, train_size=trn_ratio, random_state=0)
train_size.append(len(X_tmp))
X_train, X_test, y_train, y_test = train_test_split(X_tmp, y_tmp, stratify=y_tmp, train_size=0.7, random_state=0)
model = xgb.XGBClassifier(colsample_bytree=0.6, gamma=0, max_depth=2,
min_child_weight=0, n_estimators=10, reg_alpha=0.01,reg_lambda=0.01)
model.fit(X_train, y_train)
y_pred_trn = model.predict(X_train)
y_pred_tst = model.predict(X_test)
score_trn = f1_score(y_train, y_pred_trn)
score_tst = f1_score(y_test, y_pred_tst)
score_trn_list.append(score_trn)
score_tst_list.append(score_tst)
print('shape: {},{}'.format(len(score_trn_list), score_trn_list[0]))
score_trn_list = np.stack(score_trn_list, axis=0 )
score_tst_list = np.stack(score_tst_list, axis=0 )
trn_score_f1 = exponential_moving_average(score_trn_list, 0.1)
tst_score_f1 = exponential_moving_average(score_tst_list, 0.1)
plt.title("F1 per samples")
plt.xlabel('Number of samples')
plt.ylabel('F1')
plt.plot(train_size, trn_score_f1, label='Train F1',color='navy')
plt.plot(train_size, tst_score_f1, label='Test F1',color="darkorange")
plt.legend(loc="best")
plt.ylim(0, 1.05)
plt.show()
from xgboost import plot_importance
plot_importance(xgb_model)
###Output
_____no_output_____
###Markdown
IRC Channel Classifier
###Code
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import umap
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from mpl_toolkits.mplot3d import Axes3D
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import kneighbors_graph
import os
project_dir = '/Users/preneond/Documents/Work/Stratosphere/IRC-Research/IRC-Behavioral-Analysis/'
# project_dir = '/home/prenek/IRC-Behavioral-Analysis/'
log_dir = os.path.join(project_dir, 'zeek/logs/')
out_dir = os.path.join(project_dir, 'python/out/')
data = pd.read_csv(os.path.join(out_dir, 'irc_channel_features_all.csv'))
data.drop(['Unnamed: 0'],axis=1,inplace=True)
data.head()
X = data.iloc[:, 2:-3]
y = data.iloc[:, -1]
X['lang'] = X['lang'].astype('category').cat.codes
X = X.apply(lambda x: x.fillna(x.mean()),axis=0)
X.columns
###Output
_____no_output_____
###Markdown
Experiment 1 - Unbalanced Dataset. Keep the same malicious/benign ratio when splitting into train/validation/test sets, no matter how few malicious samples there are; to run this experiment, comment out the code in Experiment 2.
###Code
from collections import Counter
print('Original dataset shape %s' % Counter(y))
from imblearn.over_sampling import RandomOverSampler
ros = RandomOverSampler(random_state=0)
X, y = ros.fit_resample(X, y)
print(sorted(Counter(y).items()))
# from imblearn.under_sampling import RandomUnderSampler
# ros = RandomUnderSampler(random_state=0)
# X, y = ros.fit_resample(X, y)
# from collections import Counter
# print(sorted(Counter(y).items()))
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=0.4, random_state=1)
###Output
Original dataset shape Counter({0: 73, 1: 8})
[(0, 73), (1, 73)]
###Markdown
Experiment 2 - Balanced Dataset. Down-sample the majority class.
###Code
n1 = y.shape[0]
n2 = y[y==0].shape[0]
n3 = y[y==1].shape[0]
# # showing examples of data
print('X: Info - \t Number of samples:\t\t{}\n\t\t Number of benign samples:\t{} \n\t\t Number of malicious samples\t{}\n'.format(n1,n2,n3))
n1 = y_train.shape[0]
n2 = y_train[y_train==0].shape[0]
n3 = y_train[y_train==1].shape[0]
# # showing examples of data
print('X_train: Info - Number of samples:\t\t{}\n\t\t Number of benign samples:\t{} \n\t\t Number of malicious samples\t{}\n'.format(n1,n2,n3))
# n1 = y_val.shape[0]
# n2 = y_val[y_val==0].shape[0]
# n3 = y_val[y_val==1].shape[0]
# # # showing examples of data
# print('X_val: Info - Number of samples:\t\t{}\n\t\t Number of benign samples:\t{} \n\t\t Number of malicious samples\t{}\n'.format(n1,n2,n3))
n1 = y_test.shape[0]
n2 = y_test[y_test==0].shape[0]
n3 = y_test[y_test==1].shape[0]
# # showing examples of data
print('X_test: Info - Number of samples:\t\t{}\n\t\t Number of benign samples:\t{} \n\t\t Number of malicious samples\t{}'.format(n1,n2,n3))
# from sklearn.preprocessing import MinMaxScaler
# sc = MinMaxScaler()
# X = sc.fit_transform(X)
# X_test = sc.transform(X_test)
# X_train = sc.transform(X_train)
# X_test_big = sc.transform(X_test_big)
import random
def pick_color(n=1):
colors = ["blue","black","brown","red","yellow","green","orange","beige","turquoise","pink"]
random.shuffle(colors)
return colors[:n]
###Output
_____no_output_____
###Markdown
PCA
###Code
pca = PCA(n_components=2)
_pca = pca.fit(X)
X_pca_train = pca.transform(X_train)
X_pca = _pca.transform(X)
lw = 2
# increase fig size when the point annotation is enabled
plt.figure(figsize=(10,10))
plt.title('Principal Component Analysis')
group_offset = 0
for color, i, target_name in zip(['red','blue'], data.malicious.unique(), ['malicious','non-malicious']):
_pca_data_x = X_pca[y == i, 0]
_pca_data_y = X_pca[y == i, 1]
_pca_df = pd.DataFrame({
'x': _pca_data_x,
'y': _pca_data_y,
'group': list(range(group_offset, _pca_data_x.shape[0]+group_offset))
})
group_offset += _pca_data_x.shape[0]
p1 = sns.scatterplot(x='x',y='y',data=_pca_df, color=color, alpha=.3,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.show()
###Output
_____no_output_____
###Markdown
T-SNE
###Code
# 2D
print('t-SNE 2D...')
X_tsne_2d = TSNE(n_components=2,verbose=0).fit_transform(X)
# 3D
print('t-SNE 3D...')
X_tsne_3d = TSNE(n_components=3,verbose=0).fit_transform(X)
print('Done.')
df_tsne_2d = pd.DataFrame({
'x': X_tsne_2d[:,0],
'y':X_tsne_2d[:,1],
'label': y,
'group': list(range(X_tsne_2d.shape[0]))
})
df_arr = []
for l in data.malicious.unique():
df_arr.append(df_tsne_2d.where(df_tsne_2d.label==l))
plt.figure(figsize=(10,10))
plt.title('t-SNE')
for df, l, c in zip(df_arr,['malicious','non-malicious'], ["red","blue"]):
sns.scatterplot(
x='x',y='y',
color=c,
data=df,
label=l,
alpha=1)
plt.xlabel('')
plt.ylabel('')
plt.show()
df_tsne_3d = pd.DataFrame({
'x': X_tsne_3d[:,0],
'y': X_tsne_3d[:,1],
'z': X_tsne_3d[:,2],
'label': y,
'group': list(range(X_tsne_3d.shape[0]))
})
df_arr = []
for l in data.malicious.unique():
df_arr.append(df_tsne_3d.where(df_tsne_3d.label==l))
fig = plt.figure(figsize=(10,10))
fig.suptitle('t-SNE')
ax = Axes3D(fig)
for df, l, c in zip(df_arr,['malicious','non-malicious'], ['red','blue']):
ax.scatter(df.x, df.y, df.z, c=c, marker='o', alpha=0.8, label=l)
ax.legend()
plt.show()
y = y.astype('category').cat.codes
y_train = y_train.astype('category').cat.codes
y_test = y_test.astype('category').cat.codes
###Output
_____no_output_____
###Markdown
UMAP UMAP - supervised
###Code
umap_emb = umap.UMAP(n_neighbors=5).fit(X_train, y=y_train).transform(X)
df_umap = pd.DataFrame({
'x': umap_emb[:,0],
'y': umap_emb[:,1],
'label': y,
# 'group': list(range(umap_emb.shape[0]))
})
df_arr = []
for l in y.unique():
df_arr.append(df_umap.where(df_umap.label==l))
plt.figure(figsize=(10,10))
plt.title('UMAP - Supervised')
for df, l, c in zip(df_arr, ['malicious','non-malicious'], ['red','blue']):
sns.scatterplot(
x='x',y='y',
color=c,
data=df,
label=l,
alpha=1)
###Output
_____no_output_____
###Markdown
UMAP - unsupervised
###Code
umap_emb = umap.UMAP(n_neighbors=5).fit_transform(X)
df_umap = pd.DataFrame({
'x': umap_emb[:,0],
'y': umap_emb[:,1],
'label': y,
'group': list(range(umap_emb.shape[0]))
})
df_arr = []
for l in y.unique():
df_arr.append(df_umap.where(df_umap.label==l))
plt.figure(figsize=(10,10))
plt.title('UMAP - Unsupervised')
for df, l, c in zip(df_arr, ['malicious','non-malicious'], ['red','blue']):
sns.scatterplot(
x='x',y='y',
color=c,
data=df,
label=l,
alpha=1)
###Output
_____no_output_____
###Markdown
Unsupervised Learning: K-Means. Determine the optimal number of clusters for k-means.
###Code
sse = []
for k in range(1,15):
km = KMeans(n_clusters=k, init='k-means++', n_init=50,random_state=0 , tol=1.0e-9, verbose=0)
km = km.fit(X)
sse.append(km.inertia_)
print('optimal k is: ', np.argmin(sse))
plt.plot(range(1,15), sse, 'bx-')
plt.xlabel('k')
plt.ylabel('Sum_of_squared_distances')
plt.title('Elbow Method For Optimal k')
plt.show()
from irc_utils import compute_score
kmeans = KMeans(n_clusters=2, init='k-means++', n_init=50,random_state=0 , tol=1.0e-9, verbose=0)
kmeans.fit(X)
y_pred_kmeans = kmeans.predict(X)
print(compute_score(y, y_pred_kmeans))
###Output
Confusion matrix:
[[72 1]
[73 0]]
Sensitivity(=Recall) TPR = TP / (TP + FN): 0.0
Specificity SPC = TN / (FP + TN): 0.9863
Precision PPV = TP / (TP + FP): 0.0
Negative Predictive Value NPV = TN / (TN + FN): 0.4966
False Positive Rate FPR = FP / (FP + TN)): 0.0137
False Discovery rate FDR = FP / (FP + TP): 1.0
False Negative rate FNR = FN / (FN + TP): 1.0
Accuraccy ACC = (TP + TN) / (P + N): 0.4932
F1-score F1 = 2TP / (2TP + FP + FN): 0.0
[0.0, 0.9863, 0.0, 0.4966, 0.0137, 1.0, 1.0, 0.4932, 0.0]
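compute_score comes from the project's irc_utils module, which is not shown in this notebook. Judging from the report printed above (a confusion matrix followed by TPR, SPC, PPV, NPV, FPR, FDR, FNR, ACC and F1), a rough, hypothetical re-implementation could look like the sketch below; the real helper also prints the labelled lines, which are omitted here:
###Code
from sklearn.metrics import confusion_matrix

def compute_score(y_true, y_pred):
    # Hypothetical sketch of irc_utils.compute_score, inferred from its printed output.
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    tpr = tp / (tp + fn) if tp + fn else 0.0   # sensitivity / recall
    spc = tn / (fp + tn) if fp + tn else 0.0   # specificity
    ppv = tp / (tp + fp) if tp + fp else 0.0   # precision
    npv = tn / (tn + fn) if tn + fn else 0.0
    fpr = fp / (fp + tn) if fp + tn else 0.0
    fdr = fp / (fp + tp) if fp + tp else 0.0
    fnr = fn / (fn + tp) if fn + tp else 0.0
    acc = (tp + tn) / (tp + tn + fp + fn)
    f1  = 2 * tp / (2 * tp + fp + fn) if 2 * tp + fp + fn else 0.0
    return [round(float(v), 4) for v in (tpr, spc, ppv, npv, fpr, fdr, fnr, acc, f1)]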
###Markdown
K-NN
###Code
from irc_utils import compute_score
# Create KNN classifier
knn = KNeighborsClassifier(n_neighbors = 3)
# Fit the classifier to the data
knn.fit(X_train,y_train)
y_pred_knn = knn.predict(X_test)
print(compute_score(y_test, y_pred_knn))
X_train.shape
###Output
_____no_output_____
###Markdown
Hierarchical Clustering
###Code
from sklearn.cluster import AgglomerativeClustering
cluster = AgglomerativeClustering(n_clusters=len(y.unique()), affinity='euclidean', linkage='ward')
y_pred_knn_cluster = cluster.fit_predict(X)
print(compute_score(y, y_pred_knn_cluster))
# print('AgglomerativeClustering accuracy:\t{}%'.format(round(accuracy_score(y, y_pred_knn_cluster)*1e02,2)))
# print('\tprecision:\t{}%'.format(round(precision_score(y,y_pred_knn_cluster)*1e02,2)))
# print('\trecall:\t\t{}%'.format(round(recall_score(y,y_pred_knn_cluster)*1e02,2)))
# print('\tf1-score:\t{}%'.format(round(f1_score(y,y_pred_knn_cluster)*1e02,2)))
###Output
Confusion matrix:
[[7 1]
[6 2]]
Sensitivity(=Recall) TPR = TP / (TP + FN): 0.25
Specificity SPC = TN / (FP + TN): 0.875
Precision PPV = TP / (TP + FP): 0.6667
Negative Predictive Value NPV = TN / (TN + FN): 0.5385
False Positive Rate FPR = FP / (FP + TN)): 0.125
False Discovery rate FDR = FP / (FP + TP): 0.3333
False Negative rate FNR = FN / (FN + TP): 0.75
Accuraccy ACC = (TP + TN) / (P + N): 0.5625
F1-score F1 = 2TP / (2TP + FP + FN): 0.3636
[0.25, 0.875, 0.6667, 0.5385, 0.125, 0.3333, 0.75, 0.5625, 0.3636]
###Markdown
Supervised Learning: Linear Regression
###Code
from sklearn.linear_model import SGDClassifier
# C, kernel, gamma = clf.best_params_['C'], clf.best_params_['kernel'], clf.best_params_['gamma']
linreg = SGDClassifier()
linreg.fit(X_train, y_train)
y_pred = linreg.predict(X_test)
print(compute_score(y_test, y_pred))
###Output
Confusion matrix:
[[2 1]
[3 1]]
Sensitivity(=Recall) TPR = TP / (TP + FN): 0.25
Specificity SPC = TN / (FP + TN): 0.6667
Precision PPV = TP / (TP + FP): 0.5
Negative Predictive Value NPV = TN / (TN + FN): 0.4
False Positive Rate FPR = FP / (FP + TN)): 0.3333
False Discovery rate FDR = FP / (FP + TP): 0.5
False Negative rate FNR = FN / (FN + TP): 0.75
Accuraccy ACC = (TP + TN) / (P + N): 0.4286
F1-score F1 = 2TP / (2TP + FP + FN): 0.3333
[0.25, 0.6667, 0.5, 0.4, 0.3333, 0.5, 0.75, 0.4286, 0.3333]
###Markdown
Logistic Regression
###Code
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
param_grid = [
{'classifier' : [LogisticRegression()],
'classifier__penalty' : ['l1', 'l2'],
'classifier__C' : np.logspace(-4, 4, 20)
}
]
# Create grid search object
pipe = Pipeline([('classifier' , LogisticRegression())])
clf = GridSearchCV(pipe, param_grid = param_grid, cv = 5, verbose=True, n_jobs=-1, scoring='f1')
# Fit on data
best_clf = clf.fit(X_train, y_train)
logreg_model = best_clf.best_params_['classifier']
best_clf.best_params_
logreg_model.fit(X_train,y_train)
y_pred = logreg_model.predict(X_test)
print(compute_score(y_test,y_pred))
train_ratio = np.arange(0.6,0.94,0.005)
n_samples = len(X)
train_size = []#list(map(lambda x: round(x*n_samples), train_ratio))
score_trn_list = []
score_tst_list = []
for trn_ratio in train_ratio:
X_tmp, _, y_tmp, _ = train_test_split(X, y, train_size=trn_ratio, stratify=y, random_state=0)
train_size.append(len(X_tmp))
X_train2, X_test2, y_train2, y_test2 = train_test_split(X_tmp, y_tmp, stratify=y_tmp, train_size=0.8, random_state=0)
logreg = LogisticRegression()
logreg.fit(X_train2, y_train2)
y_pred_trn = logreg.predict(X_train2)
y_pred_tst = logreg.predict(X_test2)
score_trn = f1_score(y_train2, y_pred_trn)
score_tst = f1_score(y_test2, y_pred_tst)
score_trn_list.append(score_trn)
score_tst_list.append(score_tst)
print('shape: {},{}'.format(len(score_trn_list), score_trn_list[0]))
score_trn_list = np.stack(score_trn_list, axis=0)
score_tst_list = np.stack(score_tst_list, axis=0)
from irc_utils import exponential_moving_average
trn_score_f1 = exponential_moving_average(score_trn_list, 0.1)
tst_score_f1 = exponential_moving_average(score_tst_list, 0.1)
plt.title("F1 per samples")
plt.xlabel('Number of samples')
plt.ylabel('F1')
plt.plot(train_size, trn_score_f1, label='Train F1',color='navy')
plt.plot(train_size, tst_score_f1, label='Test F1',color="darkorange")
plt.legend(loc="best")
plt.ylim(0,1.05)
plt.show()
###Output
_____no_output_____
###Markdown
SVM
###Code
from sklearn.svm import SVC
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-2, 1e-3, 1e-4, 1e-5],
'C': [0.001, 0.10, 0.1, 10, 25, 50, 100]},
{'kernel': ['sigmoid'], 'gamma': [1e-2, 1e-3, 1e-4, 1e-5],
'C': [0.001, 0.10, 0.1, 10, 25, 50, 100]},
# {'kernel': ['linear'], 'C': [0.001, 0.10, 0.1, 10, 25, 50, 100]}
]
# print("# Tuning hyper-parameters for %s" % score)
# print()
clf = GridSearchCV(SVC(C=1), tuned_parameters,cv=3, n_jobs=-1, scoring='f1')
clf.fit(X, y)
clf.best_params_
C, kernel, gamma = clf.best_params_['C'], clf.best_params_['kernel'], clf.best_params_['gamma']
my_svm = SVC(C=C, kernel=kernel, gamma=gamma, probability=True, verbose=True)
my_svm.fit(X_train, y_train)
y_pred = my_svm.predict(X_test)
print(compute_score(y_test, y_pred))
train_ratio = np.arange(0.5,1,0.005)
n_samples = len(X)
train_size = []#list(map(lambda x: round(x*n_samples), train_ratio))
score_trn_list = []
score_tst_list = []
for trn_ratio in train_ratio:
X_tmp, _, y_tmp, _ = train_test_split(X, y, train_size=trn_ratio, random_state=0)
train_size.append(len(X_tmp))
X_train2, X_test2, y_train2, y_test2 = train_test_split(X_tmp, y_tmp, train_size=0.8, random_state=0)
svc = SVC(C=C, kernel=kernel, gamma=gamma)
svc.fit(X_train2, y_train2)
y_pred_trn = svc.predict(X_train2)
y_pred_tst = svc.predict(X_test2)
score_trn = f1_score(y_train2, y_pred_trn)
score_tst = f1_score(y_test2, y_pred_tst)
score_trn_list.append(score_trn)
score_tst_list.append(score_tst)
print('shape: {},{}'.format(len(score_trn_list), score_trn_list[0]))
score_trn_list = np.stack(score_trn_list, axis=0)
score_tst_list = np.stack(score_tst_list, axis=0)
trn_score_f1 = exponential_moving_average(score_trn_list, 0.1)
tst_score_f1 = exponential_moving_average(score_tst_list, 0.1)
plt.title("F1 per samples")
plt.xlabel('Number of samples')
plt.ylabel('F1')
plt.plot(train_size, trn_score_f1, label='Train F1',color='navy')
plt.plot(train_size, tst_score_f1, label='Test F1',color="darkorange")
plt.legend(loc="best")
plt.ylim(0,1.05)
plt.show()
###Output
_____no_output_____
###Markdown
Random Forest
###Code
from sklearn.model_selection import validation_curve
from sklearn.ensemble import RandomForestClassifier
param_range = range(1,10)
train_scores, test_scores = validation_curve(
RandomForestClassifier(),
X = X_train, y = y_train,
param_name = 'n_estimators',
param_range = param_range, cv = 3, scoring='f1')
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with RandomForestClassifier")
plt.xlabel('Num estimators')
plt.ylabel('Accuracy')
plt.ylim(0, 1.05)
lw = 2
plt.plot(param_range, train_scores_mean, label="Training score",
color="navy", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2,
color="navy", lw=lw)
plt.plot(param_range, test_scores_mean, label="Test score",
color="darkorange", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2,
color="darkorange", lw=lw)
plt.legend(loc="best")
plt.show()
###Output
_____no_output_____
###Markdown
Exhaustive Grid Search
###Code
from sklearn.model_selection import GridSearchCV
n_estimators = [1,2,3,4,5,10,15, 20]
max_depth = [1,2,3,4,5,10]
min_samples_split = [2,3,4,5,10,15,20]
min_samples_leaf = [1,2,3,4,5,6,7,8,9,10,12,14]
hyperF = dict(n_estimators = n_estimators, max_depth = max_depth,
min_samples_split = min_samples_split,
min_samples_leaf = min_samples_leaf)
gridF = GridSearchCV(RandomForestClassifier(), hyperF, cv = 3, verbose = 1, n_jobs=-1, scoring='f1')
bestF = gridF.fit(X_train, y_train)
print('''Best parameters: \n
- max_depth: {} \n
- min_samples_leaf: {} \n
- min_samples_split: {} \n
- n_estimators: {}'''.format(bestF.best_params_['max_depth'],
bestF.best_params_['min_samples_leaf'],
bestF.best_params_['min_samples_split'],
bestF.best_params_['n_estimators']))
max_depth = bestF.best_params_['max_depth']
min_samples_leaf = bestF.best_params_['min_samples_leaf']
min_samples_split = bestF.best_params_['min_samples_split']
n_estimators = bestF.best_params_['n_estimators']
model = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth, min_samples_leaf=min_samples_leaf, min_samples_split=min_samples_split)
# y_pred_best = None
# f1_best = 0
# for i in range(1000):
# model.fit(X_train, y_train)
# y_pred = model.predict(X_test_big)
# if f1_score(y_test_big,y_pred) > f1_best:
# f1_best = f1_score(y_test_big,y_pred)
# y_pred_best = y_pred
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(compute_score(y_test, y_pred))
###Output
Confusion matrix:
[[3 0]
[3 1]]
Sensitivity(=Recall) TPR = TP / (TP + FN): 0.25
Specificity SPC = TN / (FP + TN): 1.0
Precision PPV = TP / (TP + FP): 1.0
Negative Predictive Value NPV = TN / (TN + FN): 0.5
False Positive Rate FPR = FP / (FP + TN)): 0.0
False Discovery rate FDR = FP / (FP + TP): 0.0
False Negative rate FNR = FN / (FN + TP): 0.75
Accuraccy ACC = (TP + TN) / (P + N): 0.5714
F1-score F1 = 2TP / (2TP + FP + FN): 0.4
[0.25, 1.0, 1.0, 0.5, 0.0, 0.0, 0.75, 0.5714, 0.4]
###Markdown
Feature Importance
###Code
importances = model.feature_importances_
std = np.std([0.3*tree.feature_importances_ for tree in model.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
indices_names = list(map(lambda i: data.columns[i+2], indices))
# Print the feature ranking
print("Feature ranking:")
for f in range(X_train.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
fig = plt.figure(figsize=(15,10))
plt.title("Random Forest- Feature importance")
plt.bar(range(X_train.shape[1]), importances[indices],
color="dodgerblue", yerr=std[indices], ecolor='r',capsize=5, align="center")
plt.xticks(range(X_train.shape[1]), indices_names, size='small', rotation=90)
plt.xlim([-1, X_train.shape[1]])
plt.ylim([0,1])
plt.savefig('plot_importances.pdf', format='pdf', bbox_inches='tight')
train_ratio = np.arange(0.5,0.94,0.005)
n_samples = len(X)
train_size = []#list(map(lambda x: round(x*n_samples), train_ratio))
score_trn_list = []
score_tst_list = []
for trn_ratio in train_ratio:
X_tmp, _, y_tmp, _ = train_test_split(X, y, train_size=trn_ratio, stratify=y, random_state=0)
train_size.append(len(X_tmp))
X_train, X_test, y_train, y_test = train_test_split(X_tmp, y_tmp, stratify=y_tmp, test_size=0.2, random_state=0)
model = RandomForestClassifier(max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
min_samples_split=min_samples_split)
model.fit(X_train, y_train)
y_pred_trn = model.predict(X_train)
y_pred_tst = model.predict(X_test)
score_trn = f1_score(y_train, y_pred_trn)
score_tst = f1_score(y_test, y_pred_tst)
score_trn_list.append(score_trn)
score_tst_list.append(score_tst)
print('shape: {},{}'.format(len(score_trn_list), score_trn_list[0]))
score_trn_list = np.stack(score_trn_list, axis=0 )
score_tst_list = np.stack(score_tst_list, axis=0 )
trn_score_f1 = exponential_moving_average(score_trn_list, 0.1)
tst_score_f1 = exponential_moving_average(score_tst_list, 0.1)
plt.title("F1 per samples")
plt.xlabel('Number of samples')
plt.ylabel('F1')
plt.plot(train_size, trn_score_f1, label='Train F1',color='navy')
plt.plot(train_size, tst_score_f1, label='Test F1',color="darkorange")
plt.legend(loc="best")
plt.ylim(0, 1.05)
plt.show()
###Output
_____no_output_____
###Markdown
XGBoost Validation curve
###Code
import xgboost as xgb
from sklearn.model_selection import validation_curve
param_range = range(1,10)
train_scores, test_scores = validation_curve(
xgb.XGBClassifier(),
X = X_train, y = y_train,
param_name = 'n_estimators',
param_range = param_range, cv = 3, scoring='f1')
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with XGBoost Classifier")
plt.xlabel('Num estimators')
plt.ylabel('Accuracy')
plt.ylim(0, 1.05)
lw = 2
plt.plot(param_range, train_scores_mean, label="Training score",
color="navy", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2,
color="navy", lw=lw)
plt.plot(param_range, test_scores_mean, label="Test score",
color="darkorange", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2,
color="darkorange", lw=lw)
plt.legend(loc="best")
plt.show()
###Output
_____no_output_____
###Markdown
Exhaustive Grid Search
###Code
from sklearn.model_selection import GridSearchCV
parameters = {
'max_depth': [2, 3, 4, 5],
'n_estimators': [5, 10, 25],
'gamma': [0, 0.1, 0.2],
'min_child_weight': [0, 0.5, 1],
'colsample_bytree': [0.6, 0.8, 1],
'reg_alpha': [1e-2, 1e-1, 1e1,10],
'reg_lambda': [1e-2, 1e-1, 1e1,10],
}
clf = GridSearchCV(xgb.XGBClassifier(), parameters, scoring = 'f1', cv = 3, verbose = 1, n_jobs=-1)
clf.fit(X_train, y_train)
p = clf.best_params_
clf.best_params_
xgb_model = xgb.XGBClassifier(colsample_bytree=0.6,
gamma=0,
max_depth=2,
min_child_weight=0,
n_estimators=25,
reg_alpha=0.01,
reg_lambda=0.01)
xgb_model.fit(X_train, y_train)
y_pred = xgb_model.predict(X_test)
print(compute_score(y_test, y_pred))
train_ratio = np.arange(0.6,0.95,0.005)
n_samples = len(X)
train_size = []#list(map(lambda x: round(x*n_samples), train_ratio))
score_trn_list = []
score_tst_list = []
for trn_ratio in train_ratio:
X_tmp, _, y_tmp, _ = train_test_split(X, y, stratify=y, train_size=trn_ratio, random_state=0)
train_size.append(len(X_tmp))
X_train, X_test, y_train, y_test = train_test_split(X_tmp, y_tmp, stratify=y_tmp, train_size=0.7, random_state=0)
model = xgb.XGBClassifier(colsample_bytree=0.6, gamma=0, max_depth=2,
min_child_weight=0, n_estimators=25, reg_alpha=0.01, reg_lambda=0.01)
model.fit(X_train, y_train)
y_pred_trn = model.predict(X_train)
y_pred_tst = model.predict(X_test)
score_trn = f1_score(y_train, y_pred_trn)
score_tst = f1_score(y_test, y_pred_tst)
score_trn_list.append(score_trn)
score_tst_list.append(score_tst)
print('shape: {},{}'.format(len(score_trn_list), score_trn_list[0]))
score_trn_list = np.stack(score_trn_list, axis=0 )
score_tst_list = np.stack(score_tst_list, axis=0 )
trn_score_f1 = exponential_moving_average(score_trn_list, 0.1)
tst_score_f1 = exponential_moving_average(score_tst_list, 0.1)
plt.title("F1 per samples")
plt.xlabel('Number of samples')
plt.ylabel('F1')
plt.plot(train_size, trn_score_f1, label='Train F1',color='navy')
plt.plot(train_size, tst_score_f1, label='Test F1',color="darkorange")
plt.legend(loc="best")
plt.ylim(0, 1.05)
plt.show()
from xgboost import plot_importance
xgb_model.get_booster().feature_names = list(data.columns[2:-3])
plot_importance(xgb_model,grid=False, show_values=False)
plt.show()
###Output
_____no_output_____ |
experiments/07_inferno_interp_0a.ipynb | ###Markdown
INFERNO loss

> API details.
###Code
#hide
from nbdev.showdoc import *
from pytorch_inferno.model_wrapper import ModelWrapper
from pytorch_inferno.callback import *
from pytorch_inferno.data import get_paper_data
from pytorch_inferno.plotting import *
from pytorch_inferno.inference import *
from pytorch_inferno.utils import *
from fastcore.all import partialler
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
from typing import *
from collections import OrderedDict
from fastcore.all import store_attr
from abc import abstractmethod
import torch.nn.functional as F
from torch import optim, autograd, nn, Tensor
import torch
from torch.distributions import Normal
###Output
_____no_output_____
###Markdown
Network
###Code
bs = 2000
data, test = get_paper_data(200000, bs=bs, n_test=1000000)
# export
class VariableSoftmax(nn.Softmax):
def __init__(self, temp:float=1, dim:int=-1):
super().__init__(dim=dim)
self.temp = temp
def forward(self, x:Tensor) -> Tensor: return super().forward(x/self.temp)
x = torch.randn((1,10))
VariableSoftmax(0.1)(x), VariableSoftmax(0.5)(x), VariableSoftmax(1)(x)
net = nn.Sequential(nn.Linear(3,100), nn.ReLU(),
nn.Linear(100,100),nn.ReLU(),
nn.Linear(100,10), VariableSoftmax(0.1))
init_net(net)
###Output
_____no_output_____
###Markdown
Loss
###Code
x,y,w = next(iter(data.trn_dl))
preds = net(x)
assert preds.shape == (bs,10)
def to_shape(p:Tensor) -> Tensor:
f = p.sum(0)
f = f/f.sum()
return f
m = y.squeeze()==0
f_s = to_shape(preds[~m])
f_b = to_shape(preds[m])
plt.plot(to_np(f_s))
plt.plot(to_np(f_b))
###Output
_____no_output_____
###Markdown
Minimise width

with torch.no_grad():
    u,d = [],[]
    b = x[m]
    b[:,0] += 0.2
    u.append(to_shape(net(b)))
    b[:,0] -= 0.2
    b[:,2] *= 3.5/3
    u.append(to_shape(net(b)))
    b[:,2] /= 3.5/3
    b[:,0] -= 0.2
    d.append(to_shape(net(b)))
    b[:,0] += 0.2
    b[:,2] *= 2.5/3
    d.append(to_shape(net(b)))
    b[:,2] /= 2.5/3
    b_up,b_dw = torch.stack(u),torch.stack(d)

nll,alpha = calc_profile(f_s=f_s, f_b_nom=f_b, f_b_up=b_up, f_b_dw=b_dw, verbose=False,
                         n=1050, mu_scan=torch.linspace(20,80,61), true_mu=50)
nll
plot_likelihood(to_np(nll-nll[nll==nll].min()))
nll = nll-nll[nll==nll].min()-0.5
nll
plot_likelihood(to_np(nll))
nll.max()-nll.min()

def get_diff_width(nll:Tensor, mu_scan:np.ndarray) -> Tensor:
    def lin_root(nll0,nll1,mu0,mu1):
        a = (nll1-nll0)/(mu1-mu0)
        b = nll1-(a*mu1)
        return -b/a
    u,r,last_mu,last_nll = True,torch.zeros((2)),mu_scan[0],nll[0]
    for mu,l in zip(mu_scan[1:],nll[1:]):
        if u and l < 0:
            r[0] = lin_root(last_nll,l,last_mu,mu)
            u = False
        elif not u and l > 0:
            r[1] = lin_root(last_nll,l,last_mu,mu)
            break
        if l == l: last_mu,last_nll = mu,l
    return r[1]-r[0]

w = get_diff_width(nll, mu_scan=np.linspace(20,80,61)); w

class AbsInferno(AbsCallback):
    def __init__(self, n:int, mu_scan:Tensor, true_mu:int, n_steps:int=100, lr:float=0.1):
        super().__init__()
        store_attr()

    def on_train_begin(self) -> None:
        r'''Fake loss function, callback computes loss in `on_forwards_end`'''
        self.wrapper.loss_func = lambda x,y: None
        self.profiler = partialler(calc_profile, n=self.n, mu_scan=to_device(self.mu_scan, self.wrapper.device),
                                   true_mu=self.true_mu, n_steps=self.n_steps, lr=self.lr, verbose=False)

    @staticmethod
    def _to_shape(p:Tensor) -> Tensor:
        f = p.sum(0)
        f = f + 1e-7
        f = f/f.sum()
        return f

    @abstractmethod
    def _get_up_down(self, x:Tensor) -> Tuple[Tensor,Tensor]: pass

    def _get_diff_width(self, nll:Tensor) -> Tensor:
        def lin_root(nll0,nll1,mu0,mu1):
            a = (nll1-nll0)/(mu1-mu0)
            b = nll1-(a*mu1)
            return -b/a
        u,r,last_mu,last_nll = True,torch.zeros((2)),self.mu_scan[0],nll[0]
        for mu,l in zip(self.mu_scan[1:],nll[1:]):
            if u and l < 0:
                r[0] = lin_root(last_nll,l,last_mu,mu)
                u = False
            elif not u and l > 0:
                r[1] = lin_root(last_nll,l,last_mu,mu)
                break
            if l == l: last_mu,last_nll = mu,l
        return r[1]-r[0]

    def on_forwards_end(self) -> None:
        # Get sig. & bkg. shapes
        b = self.wrapper.y.squeeze()==0
        f_s = self._to_shape(self.wrapper.y_pred[~b])
        f_b = self._to_shape(self.wrapper.y_pred[b])
        f_b_up,f_b_dw = self._get_up_down(self.wrapper.x[b])
        # Compute nll
        nll,_ = self.profiler(f_s=f_s, f_b_nom=f_b, f_b_up=f_b_up, f_b_dw=f_b_dw)
        try:
            nll = nll-nll[nll==nll].min()-0.5
        except RuntimeError:
            print(nll, self.wrapper.y_pred)
        w = self._get_diff_width(nll)
        print(w)
        print(self.wrapper.model[4].weight)
        self.wrapper.loss_val = torch.clamp_min(w, 0)

class PaperInferno(AbsInferno):
    def __init__(self, n:int=1050, mu_scan:Tensor=torch.linspace(20,80,61), true_mu:int=50, n_steps:int=100, lr:float=0.1):
        super().__init__(n=n, mu_scan=mu_scan, true_mu=true_mu, n_steps=n_steps, lr=lr)

    def _get_up_down(self, x:Tensor) -> Tuple[Tensor,Tensor]:
        with torch.no_grad():
            u,d = [],[]
            x[:,0] += 0.2
            u.append(self._to_shape(self.wrapper.model(x)))
            x[:,0] -= 0.2
            x[:,2] *= 3.5/3
            u.append(self._to_shape(self.wrapper.model(x)))
            x[:,2] /= 3.5/3
            x[:,0] -= 0.2
            d.append(self._to_shape(self.wrapper.model(x)))
            x[:,0] += 0.2
            x[:,2] *= 2.5/3
            d.append(self._to_shape(self.wrapper.model(x)))
            x[:,2] /= 2.5/3
        return torch.stack(u),torch.stack(d)

net = nn.Sequential(nn.Linear(3,100), nn.ReLU(),
                    nn.Linear(100,100),nn.ReLU(),
                    nn.Linear(100,10), VariableSoftmax(0.1))
init_net(net)
model = ModelWrapper(net)
model.fit(200, data=data, opt=partialler(optim.SGD,lr=1e-6), loss=nn.BCELoss(),
          cbs=[PaperInferno(),LossTracker(),EarlyStopping(5),GradClip(1e-5)])
model.save('weights/Inferno_Test.h5')
###Code
class AbsInferno(AbsCallback):
def __init__(self, n:int, mu_scan:Tensor, true_mu:float, aug_alpha:bool=False, n_alphas:int=0, n_steps:int=100, lr:float=0.1):
super().__init__()
store_attr()
def on_train_begin(self) -> None:
self.wrapper.loss_func = None # Ensure loss function is skipped, callback computes loss value in `on_forwards_end`
for c in self.wrapper.cbs:
if hasattr(c, 'loss_is_meaned'): c.loss_is_meaned = False # Ensure that average losses are correct
@staticmethod
def to_shape(p:Tensor) -> Tensor:
f = p.sum(0)+1e-7
return f/f.sum()
@abstractmethod
def _get_up_down(self, x:Tensor) -> Tuple[Tensor,Tensor]: pass
def get_ikk(self, f_s:Tensor, f_b_nom:Tensor, f_b_up:Tensor, f_b_dw:Tensor) -> Tensor:
if self.aug_alpha: alpha = torch.randn((self.n_alphas+1), requires_grad=True, device=self.wrapper.device)
else: alpha = torch.zeros((self.n_alphas+1), requires_grad=True, device=self.wrapper.device)
with torch.no_grad():
alpha /= 10
alpha[0] += self.true_mu
get_nll = partialler(calc_nll, s_true=self.true_mu, b_true=self.n-self.true_mu,
f_s=f_s, f_b_nom=f_b_nom[None,:], f_b_up=f_b_up, f_b_dw=f_b_dw)
if self.aug_alpha: # Alphas carry noise, optimise via Newton
for i in range(self.n_steps): # Newton optimise nuisances & mu
nll = get_nll(s_exp=alpha[0], alpha=alpha[1:])
g,h = calc_grad_hesse(nll, alpha)
s = torch.clamp(self.lr*g.detach()@torch.inverse(h), -100, 100)
alpha = alpha-s
nll = get_nll(s_exp=alpha[0], alpha=alpha[1:])
_,h = calc_grad_hesse(nll, alpha, create_graph=True)
ikk = torch.inverse(h)[0,0]
return ikk
def on_forwards_end(self) -> None:
b = self.wrapper.y.squeeze() == 0
        f_s = self.to_shape(self.wrapper.y_pred[~b])
        f_b = self.to_shape(self.wrapper.y_pred[b])
f_b_up,f_b_dw = self._get_up_down(self.wrapper.x[b])
self.wrapper.loss_val = self.get_ikk(f_s=f_s, f_b_nom=f_b, f_b_up=f_b_up, f_b_dw=f_b_dw)
class PaperInferno(AbsInferno):
def __init__(self, r_mods:Optional[Tuple[float,float]]=(-0.2,0.2), l_mods:Optional[Tuple[float,float]]=(2.5,3.5), l_init:float=3,
n:int=1050, mu_scan:Tensor=torch.linspace(20,80,61), true_mu:int=50, aug_alpha:bool=False, n_steps:int=10, lr:float=0.1):
super().__init__(n=n, mu_scan=mu_scan, true_mu=true_mu, aug_alpha=aug_alpha, n_alphas=(r_mods is not None)+(l_mods is not None), n_steps=n_steps, lr=lr)
self.r_mods,self.l_mods,self.l_init = r_mods,l_mods,l_init
def on_train_begin(self) -> None:
if self.r_mods is not None:
self.r_mod_t = (torch.zeros(1,3, device=self.wrapper.device),torch.zeros(1,3, device=self.wrapper.device))
self.r_mod_t[0][0,0] = self.r_mods[0]
self.r_mod_t[1][0,0] = self.r_mods[1]
if self.l_mods is not None:
self.l_mod_t = (torch.ones(1,3, device=self.wrapper.device),torch.ones(1,3, device=self.wrapper.device))
self.l_mod_t[0][0,2] = self.l_mods[0]/self.l_init
self.l_mod_t[1][0,2] = self.l_mods[1]/self.l_init
def _get_up_down(self, x:Tensor) -> Tuple[Tensor,Tensor]:
if self.r_mods is None and self.l_mods is None: return None,None
u,d = [],[]
if self.r_mods is not None:
x = x+self.r_mod_t[0]
d.append(self.to_shape(self.wrapper.model(x)))
x = x+self.r_mod_t[1]-self.r_mod_t[0]
u.append(self.to_shape(self.wrapper.model(x)))
x = x-self.r_mod_t[1]
if self.l_mods is not None:
x = x*self.l_mod_t[0]
d.append(self.to_shape(self.wrapper.model(x)))
x = x*self.l_mod_t[1]/self.l_mod_t[0]
u.append(self.to_shape(self.wrapper.model(x)))
x = x/self.l_mod_t[1]
return torch.stack(u),torch.stack(d)
net = nn.Sequential(nn.Linear(3,100), nn.ReLU(),
nn.Linear(100,100),nn.ReLU(),
nn.Linear(100,10), VariableSoftmax(0.1))
init_net(net)
model = ModelWrapper(net)
model.fit(200, data=data, opt=partialler(optim.SGD,lr=1e-6), loss=None,
cbs=[PaperInferno(aug_alpha=True, n_steps=10, r_mods=None, l_mods=None),LossTracker(),SaveBest('weights/best_ii0a.h5'),EarlyStopping(10)])
model.save('weights/Inferno_Test_interp_bm0a.h5')
model.load('weights/Inferno_Test_interp_bm0a.h5')
###Output
_____no_output_____
###Markdown
Results
###Code
# export
class InfernoPred(PredHandler):
def get_preds(self) -> np.ndarray: return np.argmax(self.preds, 1)#/len(self.wrapper.model[-2].weight)
###Output
_____no_output_____
###Markdown
BM 0
###Code
preds = model._predict_dl(test, pred_cb=InfernoPred())
df = pd.DataFrame({'pred':preds})
df['gen_target'] = test.dataset.y
df.head()
plot_preds(df, bin_edges=np.linspace(0,10,11))
bin_preds(df)
df.head()
f_s,f_b = get_shape(df,1),get_shape(df,0)
f_s.sum(), f_b.sum()
f_s, f_b
asimov = (50*f_s)+(1000*f_b)
asimov, asimov.sum()
n = 1050
x = np.linspace(20,80,61)
y = np.zeros_like(x)
for i,m in enumerate(x):
pois = torch.distributions.Poisson((m*f_s)+(1000*f_b))
y[i] = -pois.log_prob(asimov).sum()
y
y_tf2 = np.array([31.626238,31.466385,31.313095,31.166267,31.025808,30.891619,30.76361
,30.641693,30.525778,30.415783,30.31162,30.213215,30.120483,30.033348
,29.951736,29.875574,29.804789,29.739307,29.679066,29.623993,29.574026
,29.5291,29.489151,29.454117,29.423939,29.398558,29.377914,29.361954
,29.35062,29.343859,29.341618,29.343842,29.350483,29.36149,29.376812
,29.396404,29.420216,29.448202,29.480318,29.516518,29.556757,29.600994
,29.649185,29.70129,29.757267,29.817076,29.88068,29.948036,30.019108
,30.093859,30.17225,30.25425,30.339819,30.42892,30.521524,30.617598
,30.7171,30.820007,30.926281,31.035892,31.148808], dtype='float32')
y_tf2-y_tf2.min()
plot_likelihood(y-y.min())
plot_likelihood(y_tf2-y_tf2.min())
###Output
_____no_output_____
###Markdown
Nuisances - via interpolation
###Code
bkg = test.dataset.x[test.dataset.y.squeeze() == 0]
assert len(bkg) == 500000
b_shapes = get_paper_syst_shapes(bkg, df, model=model, pred_cb=InfernoPred())
df
plot_preds(df, pred_names=['pred', 'pred_-0.2_3', 'pred_0.2_3', 'pred_0_2.5', 'pred_0_3.5'], bin_edges=np.linspace(0,10,11))
fig = plt.figure(figsize=(12,8))
for r in [-1,0,1]:
for l in [-1,0,1]:
alpha = Tensor((r,l))[None,:]
s = interp_shape(alpha, **b_shapes).squeeze()
print(s)
plt.plot(s, label=f'{r} {l}')
plt.legend()
###Output
tensor([7.0712e-02, 2.6135e-01, 2.0000e-13, 5.1386e-01, 2.0000e-13, 2.0000e-13,
3.9022e-02, 7.0246e-02, 2.0000e-13, 4.4810e-02])
tensor([7.2440e-02, 2.5971e-01, 2.0000e-13, 5.1066e-01, 2.0000e-13, 2.0000e-13,
3.9134e-02, 7.5612e-02, 2.0000e-13, 4.2442e-02])
tensor([7.3838e-02, 2.5795e-01, 2.0000e-13, 5.0729e-01, 2.0000e-13, 2.0000e-13,
3.9424e-02, 8.1330e-02, 2.0000e-13, 4.0172e-02])
tensor([6.7936e-02, 2.5734e-01, 2.0000e-13, 5.3845e-01, 2.0000e-13, 2.0000e-13,
3.4268e-02, 6.3482e-02, 2.0000e-13, 3.8518e-02])
tensor([6.9664e-02, 2.5571e-01, 2.0000e-13, 5.3525e-01, 2.0000e-13, 2.0000e-13,
3.4380e-02, 6.8848e-02, 2.0000e-13, 3.6150e-02])
tensor([7.1062e-02, 2.5394e-01, 2.0000e-13, 5.3188e-01, 2.0000e-13, 2.0000e-13,
3.4670e-02, 7.4566e-02, 2.0000e-13, 3.3880e-02])
tensor([6.4564e-02, 2.5281e-01, 2.0000e-13, 5.6284e-01, 2.0000e-13, 2.0000e-13,
2.9896e-02, 5.6994e-02, 2.0000e-13, 3.2894e-02])
tensor([6.6292e-02, 2.5117e-01, 2.0000e-13, 5.5964e-01, 2.0000e-13, 2.0000e-13,
3.0008e-02, 6.2360e-02, 2.0000e-13, 3.0526e-02])
tensor([6.7690e-02, 2.4941e-01, 2.0000e-13, 5.5627e-01, 2.0000e-13, 2.0000e-13,
3.0298e-02, 6.8078e-02, 2.0000e-13, 2.8256e-02])
###Markdown
Newton
###Code
profiler = partialler(calc_profile, n=1050, mu_scan=torch.linspace(20,80,61), true_mu=50)
###Output
_____no_output_____
###Markdown
BM 1: r free, l fixed
###Code
bm1_b_shapes = OrderedDict([('f_b_nom', b_shapes['f_b_nom']),
('f_b_up', b_shapes['f_b_up'][0][None,:]),
('f_b_dw', b_shapes['f_b_dw'][0][None,:])])
bm1_b_shapes['f_b_up'].shape
nll = profiler(f_s=f_s, n_steps=100, **bm1_b_shapes)
nll = to_np(nll)
plot_likelihood(nll-nll.min())
###Output
_____no_output_____
###Markdown
BM 1l: r fixed, l free
###Code
bm1l_b_shapes = OrderedDict([('f_b_nom', b_shapes['f_b_nom']),
('f_b_up', b_shapes['f_b_up'][1][None,:]),
('f_b_dw', b_shapes['f_b_dw'][1][None,:])])
nll = profiler(f_s=f_s, n_steps=100, **bm1l_b_shapes)
nll = to_np(nll)
plot_likelihood(nll-nll.min())
###Output
_____no_output_____
###Markdown
BM 2
###Code
nll = profiler(f_s=f_s, n_steps=100, **b_shapes)
nll = to_np(nll)
plot_likelihood(nll-nll.min())
###Output
_____no_output_____
###Markdown
BM 3
###Code
alpha_aux = [Normal(0,2), Normal(0,2)]
nll = profiler(f_s=f_s, n_steps=100, alpha_aux=alpha_aux, **b_shapes)
nll = to_np(nll)
plot_likelihood(nll-nll.min())
###Output
_____no_output_____
###Markdown
BM 4
###Code
alpha_aux = [Normal(0,2), Normal(0,2)]
nll = profiler(f_s=f_s, n_steps=100, alpha_aux=alpha_aux, float_b=True, b_aux=Normal(1000,100), **b_shapes)
nll = to_np(nll)
plot_likelihood(nll-nll.min())
###Output
_____no_output_____ |
demos/01.01-Explore_API_gpytorch_celerite.ipynb | ###Markdown
Explore the GPyTorch and Celerité API

We start by exploring the APIs of the two packages `GPyTorch` and `celerite`. They are both packages for scalable Gaussian Processes, with different strategies for achieving that scalability.
###Code
import gpytorch
import celerite
gpytorch.__version__, celerite.__version__
###Output
_____no_output_____
###Markdown
We'll need some other standard and astronomy-specific imports and configurations.
###Code
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
###Output
_____no_output_____
###Markdown
Let's draw synthetic time series "data" with a Gaussian process from celerite. This approach is useful, since we know the answer: the kernel that generated the data and its parameter values. We'll pick Matérn kernels, since both frameworks offer them out-of-the-box. Technically, the celerite Matérn is an approximation, but we'll be sure to make draws with parameter values where the approximation will be near-exact.

Matérn 3/2 with celerite

This kernel is characterized by two parameters:

$k(\tau) = \sigma^2\,\left(1+ \frac{\sqrt{3}\,\tau}{\rho}\right)\, \exp\left(-\frac{\sqrt{3}\,\tau}{\rho}\right)$

Here are the inputs for `celerite`:

> Args:
>   - log_sigma (float): The log of the parameter :math:`\sigma`.
>   - log_rho (float): The log of the parameter :math:`\rho`.
>   - eps (Optional[float]): The value of the parameter :math:`\epsilon`. (default: `0.01`)
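As a sanity check on the formula, here is a small stand-alone NumPy sketch (not part of celerite or GPyTorch; the function name and example values below are ours) that evaluates the Matérn-3/2 covariance directly:

```python
import numpy as np

def matern32(tau, sigma=1.2, rho=1.5):
    # k(tau) = sigma^2 * (1 + sqrt(3)*|tau|/rho) * exp(-sqrt(3)*|tau|/rho)
    z = np.sqrt(3.0) * np.abs(tau) / rho
    return sigma**2 * (1.0 + z) * np.exp(-z)

print(matern32(0.0))   # sigma^2 = 1.44 at zero lag
print(matern32(1.5))   # covariance decays with increasing lag
```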
###Code
from celerite import terms
true_rho = 1.5
true_sigma = 1.2
true_log_sigma = np.log(true_sigma)
true_log_rho = np.log(true_rho) # Has units of time, so 1/f
kernel_matern = terms.Matern32Term(log_sigma=true_log_sigma, log_rho=true_log_rho, eps=0.00001)
t_vec = np.linspace(0, 40, 500)
gp = celerite.GP(kernel_matern, mean=0, fit_mean=True)
gp.compute(t_vec)
y_true = gp.sample()
noise = np.random.normal(0, 0.3, size=len(y_true))
y_obs = y_true + noise
plt.plot(t_vec, y_obs, label='Noisy observation')
plt.plot(t_vec, y_true, label='"Truth"')
plt.xlabel('$t$')
plt.ylabel('$y$')
plt.legend();
###Output
_____no_output_____
###Markdown
Ok, we have a dataset to work with.

Now with GPyTorch and a Matérn 3/2 kernel
###Code
import torch
t_ten = torch.from_numpy(t_vec)
y_ten = torch.from_numpy(y_obs)
train_x = t_ten.to(torch.float32)
train_y = y_ten.to(torch.float32)
# We will use the simplest form of GP model, exact inference
class ExactGPModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super(ExactGPModel, self).__init__(train_x, train_y, likelihood)
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.MaternKernel(nu=3/2))
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
# initialize likelihood and model
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = ExactGPModel(train_x, train_y, likelihood)
###Output
_____no_output_____
###Markdown
Train the model.
###Code
# Find optimal model hyperparameters
model.train()
likelihood.train()
model.state_dict()
with gpytorch.settings.max_cg_iterations(5000):
# Use the adam optimizer
optimizer = torch.optim.Adam([
{'params': model.parameters()}, # Includes GaussianLikelihood parameters
], lr=0.1)
# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
training_iter = 300
for i in range(training_iter):
# Zero gradients from previous iteration
optimizer.zero_grad()
# Output from model
output = model(train_x)
# Calc loss and backprop gradients
loss = -mll(output, train_y)
loss.backward()
if (i % 20) == 0:
print('Iter %d/%d - Loss: %.3f lengthscale: %.3f noise: %.6f' % (
i + 1, training_iter, loss.item(),
model.covar_module.base_kernel.raw_lengthscale.item(),
model.likelihood.noise.item()
))
#print(list(model.parameters()))
optimizer.step()
###Output
Iter 1/300 - Loss: 1.008 lengthscale: 0.000 noise: 0.693247
Iter 21/300 - Loss: 0.472 lengthscale: 1.580 noise: 0.131774
Iter 41/300 - Loss: 0.454 lengthscale: 1.614 noise: 0.071615
Iter 61/300 - Loss: 0.445 lengthscale: 1.663 noise: 0.095406
Iter 81/300 - Loss: 0.438 lengthscale: 1.536 noise: 0.085152
Iter 101/300 - Loss: 0.448 lengthscale: 1.470 noise: 0.088692
Iter 121/300 - Loss: 0.442 lengthscale: 1.452 noise: 0.087375
Iter 141/300 - Loss: 0.432 lengthscale: 1.442 noise: 0.087444
Iter 161/300 - Loss: 0.462 lengthscale: 1.422 noise: 0.088300
Iter 181/300 - Loss: 0.448 lengthscale: 1.442 noise: 0.087291
Iter 201/300 - Loss: 0.450 lengthscale: 1.406 noise: 0.086718
Iter 221/300 - Loss: 0.445 lengthscale: 1.398 noise: 0.087055
Iter 241/300 - Loss: 0.442 lengthscale: 1.436 noise: 0.089390
Iter 261/300 - Loss: 0.443 lengthscale: 1.518 noise: 0.088230
Iter 281/300 - Loss: 0.451 lengthscale: 1.488 noise: 0.087003
###Markdown
How did it do?
###Code
# Get into evaluation (predictive posterior) mode
model.eval()
likelihood.eval()
# Test points are regularly spaced along [0, 40]
# Make predictions by feeding model through likelihood
with torch.no_grad(), gpytorch.settings.fast_pred_var(), gpytorch.settings.max_cg_iterations(9000):
test_x = torch.linspace(0, 40, 501, dtype=torch.float32)
observed_pred = likelihood(model(test_x))
with torch.no_grad():
# Initialize plot
f, ax = plt.subplots(1, 1, figsize=(22, 9))
# Get upper and lower confidence bounds
lower, upper = observed_pred.confidence_region()
# Plot training data as black stars
ax.plot(train_x.numpy(), train_y.numpy(), 'k.', alpha=0.5)
# Plot predictive means as blue line
ax.plot(t_vec, y_true, lw=4)
ax.plot(test_x.numpy(), observed_pred.mean.numpy(), lw=4)
# Shade between the lower and upper confidence bounds
ax.fill_between(test_x.numpy(), lower.numpy(), upper.numpy(), alpha=0.5, color='#2ecc71')
ax.legend(['Observed Data', 'Truth', 'Mean', '2 $\sigma$ Confidence'])
###Output
_____no_output_____
###Markdown
Nice! What are the four parameters?
###Code
model.mean_module.constant
likelihood.raw_noise
model.covar_module.raw_outputscale
model.covar_module.base_kernel.raw_lengthscale
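# Note: the attributes above are the *raw* (unconstrained) parameters.
# A hedged sketch, assuming the default GPyTorch constraints on the model defined above:
# the constrained values are exposed via the corresponding properties without the `raw_` prefix.
model.covar_module.outputscale, model.covar_module.base_kernel.lengthscale, likelihood.noise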
###Output
_____no_output_____ |
docs/notebooks/StatisticalDebugger.ipynb | ###Markdown
Statistical Debugging

In this chapter, we introduce _statistical debugging_ – the idea that specific events during execution could be _statistically correlated_ with failures. We start with coverage of individual lines and then proceed towards further execution features.
###Code
from bookutils import YouTubeVideo
YouTubeVideo("UNuso00zYiI")
###Output
_____no_output_____
###Markdown
**Prerequisites**

* You should have read the [chapter on tracing executions](Tracer.ipynb).
###Code
import bookutils
###Output
_____no_output_____
###Markdown
Synopsis

To [use the code provided in this chapter](Importing.ipynb), write

```python
>>> from debuggingbook.StatisticalDebugger import
```

and then make use of the following features.

This chapter introduces classes and techniques for _statistical debugging_ – that is, correlating specific events, such as lines covered, with passing and failing outcomes.

To make use of the code in this chapter, use one of the provided `StatisticalDebugger` subclasses such as `TarantulaDebugger` or `OchiaiDebugger`. Both are instantiated with a `Collector` denoting the type of events you want to correlate outcomes with. The default is `CoverageCollector`, collecting line coverage.

Collecting Events from Calls

To collect events from calls that are labeled manually, use

```python
>>> debugger = TarantulaDebugger()
>>> with debugger.collect_pass():
>>>     remove_html_markup("abc")
>>> with debugger.collect_pass():
>>>     remove_html_markup('<b>abc</b>')
>>> with debugger.collect_fail():
>>>     remove_html_markup('"abc"')
```

Within each `with` block, the _first function call_ is collected and tracked for coverage. (Note that _only_ the first call is tracked.)

Collecting Events from Tests

To collect events from _tests_ that use exceptions to indicate failure, use the simpler `with` form:

```python
>>> debugger = TarantulaDebugger()
>>> with debugger:
>>>     remove_html_markup("abc")
>>> with debugger:
>>>     remove_html_markup('<b>abc</b>')
>>> with debugger:
>>>     remove_html_markup('"abc"')
>>>     assert False  # raise an exception
```

`with` blocks that raise an exception will be classified as failing, blocks that do not will be classified as passing. Note that exceptions raised are "swallowed" by the debugger.

Visualizing Events as a Table

After collecting events, you can print out the observed events – in this case, line numbers – in a table, showing in which runs they occurred (`X`), and with colors highlighting the suspiciousness of the event. A "red" event means that the event predominantly occurs in failing runs.

```python
>>> debugger.event_table(args=True, color=True)
```

| `remove_html_markup` | `s='abc'` | `s='<b>abc</b>'` | `s='"abc"'` |
| --------------------- | ---- | ---- | ---- |
| remove_html_markup:1 | X | X | X |
| remove_html_markup:2 | X | X | X |
| remove_html_markup:3 | X | X | X |
| remove_html_markup:4 | X | X | X |
| remove_html_markup:6 | X | X | X |
| remove_html_markup:7 | X | X | X |
| remove_html_markup:8 | - | X | - |
| remove_html_markup:9 | X | X | X |
| remove_html_markup:10 | - | X | - |
| remove_html_markup:11 | X | X | X |
| remove_html_markup:12 | - | - | X |
| remove_html_markup:13 | X | X | X |
| remove_html_markup:14 | X | X | X |
| remove_html_markup:16 | X | X | X |

Visualizing Suspicious Code

If you collected coverage with `CoverageCollector`, you can also visualize the code with similar colors, highlighting suspicious lines:

```python
>>> debugger
```

(The output is a color-highlighted listing of `remove_html_markup()`: Line 12, `quote = not quote`, is shown at 100% suspiciousness, Lines 8 and 10 at 0%, and all other executed lines at 50%.)

Ranking Events

The method `rank()` returns a ranked list of events, starting with the most suspicious. This is useful for automated techniques that need potential defect locations.

```python
>>> debugger.rank()
[('remove_html_markup', 12),
 ('remove_html_markup', 2),
 ('remove_html_markup', 14),
 ('remove_html_markup', 11),
 ('remove_html_markup', 3),
 ('remove_html_markup', 9),
 ('remove_html_markup', 6),
 ('remove_html_markup', 1),
 ('remove_html_markup', 7),
 ('remove_html_markup', 4),
 ('remove_html_markup', 16),
 ('remove_html_markup', 13),
 ('remove_html_markup', 8),
 ('remove_html_markup', 10)]
```

Classes and Methods

Here are all classes defined in this chapter:

Introduction

The idea behind _statistical debugging_ is fairly simple. We have a program that sometimes passes and sometimes fails. This outcome can be _correlated_ with events that precede it – properties of the input, properties of the execution, properties of the program state. If we, for instance, can find that "the program always fails when Line 123 is executed, and it always passes when Line 123 is _not_ executed", then we have a strong correlation between Line 123 being executed and failure.

Such _correlation_ does not necessarily mean _causation_. For this, we would have to prove that executing Line 123 _always_ leads to failure, and that _not_ executing it does not lead to (this) failure. Also, a correlation (or even a causation) does not mean that Line 123 contains the defect – for this, we would have to show that it actually is an error. Still, correlations make excellent hints when it comes to searching for failure causes – in all generality, if you let your search be guided by _events that correlate with failures_, you are more likely to find _important hints on how the failure comes to be_.

Collecting Events

How can we determine events that correlate with failure? We start with a general mechanism to actually _collect_ events during execution. The abstract `Collector` class provides

* a `collect()` method made for collecting events, called from the `traceit()` tracer; and
* an `events()` method made for retrieving these events.

Both of these are _abstract_ and will be defined further in subclasses.
###Code
from Tracer import Tracer
# ignore
from typing import Any, Callable, Optional, Type, Tuple
from typing import Dict, Set, List, TypeVar, Union
from types import FrameType, TracebackType
class Collector(Tracer):
"""A class to record events during execution."""
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
"""Collecting function. To be overridden in subclasses."""
pass
def events(self) -> Set:
"""Return a collection of events. To be overridden in subclasses."""
return set()
def traceit(self, frame: FrameType, event: str, arg: Any) -> None:
self.collect(frame, event, arg)
###Output
_____no_output_____
###Markdown
A `Collector` class is used like `Tracer`, using a `with` statement. Let us apply it on the buggy variant of `remove_html_markup()` from the [Introduction to Debugging](Intro_Debugging.ipynb):
###Code
def remove_html_markup(s): # type: ignore
tag = False
quote = False
out = ""
for c in s:
if c == '<' and not quote:
tag = True
elif c == '>' and not quote:
tag = False
elif c == '"' or c == "'" and tag:
quote = not quote
elif not tag:
out = out + c
return out
with Collector() as c:
out = remove_html_markup('"abc"')
out
###Output
_____no_output_____
###Markdown
There's not much we can do with our collector yet, as the `collect()` and `events()` methods are still empty. However, we can introduce an `id()` method which returns a string identifying the collector. This string is defined from the _first function call_ encountered.
###Code
Coverage = Set[Tuple[Callable, int]]
class Collector(Collector):
def __init__(self) -> None:
"""Constructor."""
self._function: Optional[Callable] = None
self._args: Optional[Dict[str, Any]] = None
self._argstring: Optional[str] = None
self._exception: Optional[Type] = None
self.items_to_ignore: List[Union[Type, Callable]] = [self.__class__]
def traceit(self, frame: FrameType, event: str, arg: Any) -> None:
"""
Tracing function.
Saves the first function and calls collect().
"""
for item in self.items_to_ignore:
if (isinstance(item, type) and 'self' in frame.f_locals and
isinstance(frame.f_locals['self'], item)):
# Ignore this class
return
if item.__name__ == frame.f_code.co_name:
# Ignore this function
return
if self._function is None and event == 'call':
# Save function
self._function = self.create_function(frame)
self._args = frame.f_locals.copy()
self._argstring = ", ".join([f"{var}={repr(self._args[var])}"
for var in self._args])
self.collect(frame, event, arg)
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
"""Collector function. To be overloaded in subclasses."""
pass
def id(self) -> str:
"""Return an identifier for the collector,
created from the first call"""
return f"{self.function().__name__}({self.argstring()})"
def function(self) -> Callable:
"""Return the function from the first call, as a function object"""
if not self._function:
raise ValueError("No call collected")
return self._function
def argstring(self) -> str:
"""
Return the list of arguments from the first call,
as a printable string
"""
if not self._argstring:
raise ValueError("No call collected")
return self._argstring
def args(self) -> Dict[str, Any]:
"""Return a dict of argument names and values from the first call"""
if not self._args:
raise ValueError("No call collected")
return self._args
def exception(self) -> Optional[Type]:
"""Return the exception class from the first call,
or None if no exception was raised."""
return self._exception
def __repr__(self) -> str:
"""Return a string representation of the collector"""
# We use the ID as default representation when printed
return self.id()
def covered_functions(self) -> Set[Callable]:
"""Set of covered functions. To be overloaded in subclasses."""
return set()
def coverage(self) -> Coverage:
"""
Return a set (function, lineno) with locations covered.
To be overloaded in subclasses.
"""
return set()
###Output
_____no_output_____
###Markdown
Here's how the collector works. We use a `with` clause to collect details on a function call:
###Code
with Collector() as c:
remove_html_markup('abc')
###Output
_____no_output_____
###Markdown
We can now retrieve details such as the function called...
###Code
c.function()
###Output
_____no_output_____
###Markdown
... or its arguments, as a name/value dictionary.
###Code
c.args()
###Output
_____no_output_____
###Markdown
The `id()` method returns a printable representation of the call:
###Code
c.id()
###Output
_____no_output_____
###Markdown
The `argstring()` method does the same for the argument string only.
###Code
c.argstring()
###Output
_____no_output_____
###Markdown
With this, we can collect the basic information to identify calls – such that we can later correlate their events with success or failure.

Error Prevention

While collecting, we'd like to avoid collecting events in the collection infrastructure. The `items_to_ignore` attribute takes care of this.
###Code
class Collector(Collector):
def add_items_to_ignore(self,
items_to_ignore: List[Union[Type, Callable]]) \
-> None:
"""
Define additional classes and functions to ignore during collection
(typically `Debugger` classes using these collectors).
"""
self.items_to_ignore += items_to_ignore
###Output
_____no_output_____
###Markdown
If we exit a block without having collected anything, that's likely an error.
###Code
class Collector(Collector):
def __exit__(self, exc_tp: Type, exc_value: BaseException,
exc_traceback: TracebackType) -> Optional[bool]:
"""Exit the `with` block."""
ret = super().__exit__(exc_tp, exc_value, exc_traceback)
if not self._function:
if exc_tp:
return False # re-raise exception
else:
raise ValueError("No call collected")
return ret
###Output
_____no_output_____
###Markdown
Collecting Coverage

So far, our `Collector` class does not collect any events. Let us extend it such that it collects _coverage_ information – that is, the set of locations executed. To this end, we introduce a `CoverageCollector` subclass which saves the coverage in a set containing functions and line numbers.
###Code
from types import FrameType
from StackInspector import StackInspector
class CoverageCollector(Collector, StackInspector):
"""A class to record covered locations during execution."""
def __init__(self) -> None:
"""Constructor."""
super().__init__()
self._coverage: Coverage = set()
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
"""
Save coverage for an observed event.
"""
name = frame.f_code.co_name
function = self.search_func(name, frame)
if function is None:
function = self.create_function(frame)
location = (function, frame.f_lineno)
self._coverage.add(location)
###Output
_____no_output_____
###Markdown
We also override `events()` such that it returns the set of covered locations.
###Code
class CoverageCollector(CoverageCollector):
def events(self) -> Set[Tuple[str, int]]:
"""
Return the set of locations covered.
Each location comes as a pair (`function_name`, `lineno`).
"""
return {(func.__name__, lineno) for func, lineno in self._coverage}
###Output
_____no_output_____
###Markdown
The methods `coverage()` and `covered_functions()` allow precise access to the coverage obtained.
###Code
class CoverageCollector(CoverageCollector):
def covered_functions(self) -> Set[Callable]:
"""Return a set with all functions covered."""
return {func for func, lineno in self._coverage}
def coverage(self) -> Coverage:
"""Return a set (function, lineno) with all locations covered."""
return self._coverage
###Output
_____no_output_____
###Markdown
Here is how we can use `CoverageCollector` to determine the lines executed during a run of `remove_html_markup()`:
###Code
with CoverageCollector() as c:
remove_html_markup('abc')
c.events()
###Output
_____no_output_____
###Markdown
Sets of line numbers alone are not too revealing. They provide more insights if we actually list the code, highlighting these numbers:
###Code
import inspect
from bookutils import getsourcelines # like inspect.getsourcelines(), but in color
def code_with_coverage(function: Callable, coverage: Coverage) -> None:
source_lines, starting_line_number = \
getsourcelines(function)
line_number = starting_line_number
for line in source_lines:
marker = '*' if (function, line_number) in coverage else ' '
print(f"{line_number:4} {marker} {line}", end='')
line_number += 1
code_with_coverage(remove_html_markup, c.coverage())
###Output
1 * [34mdef[39;49;00m [32mremove_html_markup[39;49;00m(s): [37m# type: ignore[39;49;00m
2 * tag = [34mFalse[39;49;00m
3 * quote = [34mFalse[39;49;00m
4 * out = [33m"[39;49;00m[33m"[39;49;00m
5
6 * [34mfor[39;49;00m c [35min[39;49;00m s:
7 * [34mif[39;49;00m c == [33m'[39;49;00m[33m<[39;49;00m[33m'[39;49;00m [35mand[39;49;00m [35mnot[39;49;00m quote:
8 tag = [34mTrue[39;49;00m
9 * [34melif[39;49;00m c == [33m'[39;49;00m[33m>[39;49;00m[33m'[39;49;00m [35mand[39;49;00m [35mnot[39;49;00m quote:
10 tag = [34mFalse[39;49;00m
11 * [34melif[39;49;00m c == [33m'[39;49;00m[33m"[39;49;00m[33m'[39;49;00m [35mor[39;49;00m c == [33m"[39;49;00m[33m'[39;49;00m[33m"[39;49;00m [35mand[39;49;00m tag:
12 quote = [35mnot[39;49;00m quote
13 * [34melif[39;49;00m [35mnot[39;49;00m tag:
14 * out = out + c
15
16 * [34mreturn[39;49;00m out
###Markdown
Remember that the input `s` was `"abc"`? In this listing, we can see which lines were covered and which lines were not. From the listing already, we can see that `s` has neither tags nor quotes. Such coverage computation plays a big role in _testing_, as one wants tests to cover as many different aspects of program execution (and notably code) as possible. But also during debugging, code coverage is essential: If some code was not even executed in the failing run, then any change to it will have no effect.
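As a small illustration of that point, here is a minimal sketch (using only the `CoverageCollector` defined above; the variable names are ours) that narrows attention to lines covered by a failing run but by no passing run:

```python
with CoverageCollector() as pass_cov:
    remove_html_markup('abc')      # passes
with CoverageCollector() as fail_cov:
    remove_html_markup('"abc"')    # fails: the quotes are dropped

# (function name, line number) events seen in the failing run only
suspicious = fail_cov.events() - pass_cov.events()
for name, lineno in sorted(suspicious, key=lambda event: event[1]):
    print(f'{name}:{lineno} executed only in the failing run')
```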
###Code
from bookutils import quiz
quiz('Let the input be `"<b>Don\'t do this!</b>"`. '
"Which of these lines are executed? Use the code to find out!",
[
"`tag = True`",
"`tag = False`",
"`quote = not quote`",
"`out = out + c`"
], "[ord(c) - ord('a') - 1 for c in 'cdf']")
###Output
_____no_output_____
###Markdown
To find the solution, try this out yourself:
###Code
with CoverageCollector() as c:
remove_html_markup("<b>Don't do this!</b>")
# code_with_coverage(remove_html_markup, c.coverage)
###Output
_____no_output_____
###Markdown
Computing Differences

Let us get back to the idea that we want to _correlate_ events with passing and failing outcomes. For this, we need to examine events in both _passing_ and _failing_ runs, and determine their _differences_ – since it is these differences we want to associate with their respective outcome.

A Base Class for Statistical Debugging

The `StatisticalDebugger` base class takes a collector class (such as `CoverageCollector`). Its `collect()` method creates a new collector of that very class, which will be maintained by the debugger. As argument, `collect()` takes a string characterizing the outcome (such as `'PASS'` or `'FAIL'`). This is how one would use it:

```python
debugger = StatisticalDebugger()
with debugger.collect('PASS'):
    some_passing_run()
with debugger.collect('PASS'):
    another_passing_run()
with debugger.collect('FAIL'):
    some_failing_run()
```
###Code
class StatisticalDebugger:
"""A class to collect events for multiple outcomes."""
def __init__(self, collector_class: Type = CoverageCollector, log: bool = False):
"""Constructor. Use instances of `collector_class` to collect events."""
self.collector_class = collector_class
self.collectors: Dict[str, List[Collector]] = {}
self.log = log
###Output
_____no_output_____
###Markdown
The `collect()` method creates (and stores) a collector for the given outcome, using the given outcome to characterize the run. Any additional arguments are passed to the collector.
###Code
class StatisticalDebugger(StatisticalDebugger):
def collect(self, outcome: str, *args: Any, **kwargs: Any) -> Collector:
"""Return a collector for the given outcome.
Additional args are passed to the collector."""
collector = self.collector_class(*args, **kwargs)
collector.add_items_to_ignore([self.__class__])
return self.add_collector(outcome, collector)
def add_collector(self, outcome: str, collector: Collector) -> Collector:
if outcome not in self.collectors:
self.collectors[outcome] = []
self.collectors[outcome].append(collector)
return collector
###Output
_____no_output_____
###Markdown
The `all_events()` method produces a union of all events observed. If an outcome is given, it produces a union of all events with that outcome:
###Code
class StatisticalDebugger(StatisticalDebugger):
def all_events(self, outcome: Optional[str] = None) -> Set[Any]:
"""Return a set of all events observed."""
all_events = set()
if outcome:
if outcome in self.collectors:
for collector in self.collectors[outcome]:
all_events.update(collector.events())
else:
for outcome in self.collectors:
for collector in self.collectors[outcome]:
all_events.update(collector.events())
return all_events
###Output
_____no_output_____
###Markdown
Here's a simple example of `StatisticalDebugger` in action:
###Code
s = StatisticalDebugger()
with s.collect('PASS'):
remove_html_markup("abc")
with s.collect('PASS'):
remove_html_markup('<b>abc</b>')
with s.collect('FAIL'):
remove_html_markup('"abc"')
###Output
_____no_output_____
###Markdown
The method `all_events()` returns all events collected:
###Code
s.all_events()
###Output
_____no_output_____
###Markdown
If given an outcome as argument, we obtain all events with the given outcome.
###Code
s.all_events('FAIL')
###Output
_____no_output_____
###Markdown
The attribute `collectors` maps outcomes to lists of collectors:
###Code
s.collectors
###Output
_____no_output_____
###Markdown
Here's the collector of the one (and first) passing run:
###Code
s.collectors['PASS'][0].id()
s.collectors['PASS'][0].events()
###Output
_____no_output_____
###Markdown
To better highlight the differences between the collected events, we introduce a method `event_table()` that prints out whether an event took place in a run.

Excursion: Printing an Event Table
###Code
from IPython.display import Markdown
import html
class StatisticalDebugger(StatisticalDebugger):
def function(self) -> Optional[Callable]:
"""
Return the entry function from the events observed,
or None if ambiguous.
"""
names_seen = set()
functions = []
for outcome in self.collectors:
for collector in self.collectors[outcome]:
# We may have multiple copies of the function,
# but sharing the same name
func = collector.function()
if func.__name__ not in names_seen:
functions.append(func)
names_seen.add(func.__name__)
if len(functions) != 1:
return None # ambiguous
return functions[0]
def covered_functions(self) -> Set[Callable]:
"""Return a set of all functions observed."""
functions = set()
for outcome in self.collectors:
for collector in self.collectors[outcome]:
functions |= collector.covered_functions()
return functions
def coverage(self) -> Coverage:
"""Return a set of all (functions, line_numbers) observed"""
coverage = set()
for outcome in self.collectors:
for collector in self.collectors[outcome]:
coverage |= collector.coverage()
return coverage
def color(self, event: Any) -> Optional[str]:
"""
Return a color for the given event, or None.
To be overloaded in subclasses.
"""
return None
def tooltip(self, event: Any) -> Optional[str]:
"""
Return a tooltip string for the given event, or None.
To be overloaded in subclasses.
"""
return None
def event_str(self, event: Any) -> str:
"""Format the given event. To be overloaded in subclasses."""
if isinstance(event, str):
return event
if isinstance(event, tuple):
return ":".join(self.event_str(elem) for elem in event)
return str(event)
def event_table_text(self, *, args: bool = False, color: bool = False) -> str:
"""
Print out a table of events observed.
If `args` is True, use arguments as headers.
If `color` is True, use colors.
"""
sep = ' | '
all_events = self.all_events()
longest_event = max(len(f"{self.event_str(event)}")
for event in all_events)
out = ""
# Header
if args:
out += '| '
func = self.function()
if func:
out += '`' + func.__name__ + '`'
out += sep
for name in self.collectors:
for collector in self.collectors[name]:
out += '`' + collector.argstring() + '`' + sep
out += '\n'
else:
out += '| ' + ' ' * longest_event + sep
for name in self.collectors:
for i in range(len(self.collectors[name])):
out += name + sep
out += '\n'
out += '| ' + '-' * longest_event + sep
for name in self.collectors:
for i in range(len(self.collectors[name])):
out += '-' * len(name) + sep
out += '\n'
# Data
for event in sorted(all_events):
event_name = self.event_str(event).rjust(longest_event)
tooltip = self.tooltip(event)
if tooltip:
title = f' title="{tooltip}"'
else:
title = ''
if color:
color_name = self.color(event)
if color_name:
event_name = \
f'<samp style="background-color: {color_name}"{title}>' \
f'{html.escape(event_name)}' \
f'</samp>'
out += f"| {event_name}" + sep
for name in self.collectors:
for collector in self.collectors[name]:
out += ' ' * (len(name) - 1)
if event in collector.events():
out += "X"
else:
out += "-"
out += sep
out += '\n'
return out
def event_table(self, **_args: Any) -> Any:
"""Print out event table in Markdown format."""
return Markdown(self.event_table_text(**_args))
def __repr__(self) -> str:
return self.event_table_text()
def _repr_markdown_(self) -> str:
return self.event_table_text(args=True, color=True)
###Output
_____no_output_____
###Markdown
End of Excursion
###Code
s = StatisticalDebugger()
with s.collect('PASS'):
remove_html_markup("abc")
with s.collect('PASS'):
remove_html_markup('<b>abc</b>')
with s.collect('FAIL'):
remove_html_markup('"abc"')
s.event_table(args=True)
quiz("How many lines are executed in the failing run only?",
[
"One",
"Two",
"Three"
], 'len([12])')
###Output
_____no_output_____
###Markdown
Indeed, Line 12, which is executed only in the failing run, is exactly the kind of correlation to look for.

Collecting Passing and Failing Runs

While our `StatisticalDebugger` class allows arbitrary outcomes, we are typically only interested in two outcomes, namely _passing_ vs. _failing_ runs. We therefore introduce a specialized `DifferenceDebugger` class that provides customized methods to collect and access passing and failing runs.
###Code
class DifferenceDebugger(StatisticalDebugger):
"""A class to collect events for passing and failing outcomes."""
PASS = 'PASS'
FAIL = 'FAIL'
def collect_pass(self, *args: Any, **kwargs: Any) -> Collector:
"""Return a collector for passing runs."""
return self.collect(self.PASS, *args, **kwargs)
def collect_fail(self, *args: Any, **kwargs: Any) -> Collector:
"""Return a collector for failing runs."""
return self.collect(self.FAIL, *args, **kwargs)
def pass_collectors(self) -> List[Collector]:
return self.collectors[self.PASS]
def fail_collectors(self) -> List[Collector]:
return self.collectors[self.FAIL]
def all_fail_events(self) -> Set[Any]:
"""Return all events observed in failing runs."""
return self.all_events(self.FAIL)
def all_pass_events(self) -> Set[Any]:
"""Return all events observed in passing runs."""
return self.all_events(self.PASS)
def only_fail_events(self) -> Set[Any]:
"""Return all events observed only in failing runs."""
return self.all_fail_events() - self.all_pass_events()
def only_pass_events(self) -> Set[Any]:
"""Return all events observed only in passing runs."""
return self.all_pass_events() - self.all_fail_events()
###Output
_____no_output_____
###Markdown
We can use `DifferenceDebugger` just as a `StatisticalDebugger`:
###Code
# ignore
T1 = TypeVar('T1', bound='DifferenceDebugger')
def test_debugger_html_simple(debugger: T1) -> T1:
with debugger.collect_pass():
remove_html_markup('abc')
with debugger.collect_pass():
remove_html_markup('<b>abc</b>')
with debugger.collect_fail():
remove_html_markup('"abc"')
return debugger
###Output
_____no_output_____
###Markdown
However, since the outcome of tests may not always be predetermined, we provide a simpler interface for tests that can fail (= raise an exception) or pass (not raise an exception).
###Code
class DifferenceDebugger(DifferenceDebugger):
def __enter__(self) -> Any:
"""Enter a `with` block. Collect coverage and outcome;
classify as FAIL if the block raises an exception,
and PASS if it does not.
"""
self.collector = self.collector_class()
self.collector.add_items_to_ignore([self.__class__])
self.collector.__enter__()
return self
def __exit__(self, exc_tp: Type, exc_value: BaseException,
exc_traceback: TracebackType) -> Optional[bool]:
"""Exit the `with` block."""
status = self.collector.__exit__(exc_tp, exc_value, exc_traceback)
if status is None:
pass
else:
return False # Internal error; re-raise exception
if exc_tp is None:
outcome = self.PASS
else:
outcome = self.FAIL
self.add_collector(outcome, self.collector)
return True # Ignore exception, if any
###Output
_____no_output_____
###Markdown
Using this interface, we can rewrite `test_debugger_html()`:
###Code
# ignore
T2 = TypeVar('T2', bound='DifferenceDebugger')
def test_debugger_html(debugger: T2) -> T2:
with debugger:
remove_html_markup('abc')
with debugger:
remove_html_markup('<b>abc</b>')
with debugger:
remove_html_markup('"abc"')
assert False # Mark test as failing
return debugger
test_debugger_html(DifferenceDebugger())
###Output
_____no_output_____
###Markdown
Analyzing Events

Let us now focus on _analyzing_ events collected. Since events come back as _sets_, we can compute _unions_ and _differences_ between these sets. For instance, we can compute which lines were executed in _any_ of the passing runs of `test_debugger_html()`, above:
###Code
debugger = test_debugger_html(DifferenceDebugger())
pass_1_events = debugger.pass_collectors()[0].events()
pass_2_events = debugger.pass_collectors()[1].events()
in_any_pass = pass_1_events | pass_2_events
in_any_pass
###Output
_____no_output_____
###Markdown
Likewise, we can determine which lines were _only_ executed in the failing run:
###Code
fail_events = debugger.fail_collectors()[0].events()
only_in_fail = fail_events - in_any_pass
only_in_fail
###Output
_____no_output_____
###Markdown
And we see that the "failing" run is characterized by processing quotes:
###Code
code_with_coverage(remove_html_markup, only_in_fail)
debugger = test_debugger_html(DifferenceDebugger())
debugger.all_events()
###Output
_____no_output_____
###Markdown
These are the lines executed only in the failing run:
###Code
debugger.only_fail_events()
###Output
_____no_output_____
###Markdown
These are the lines executed only in the passing runs:
###Code
debugger.only_pass_events()
###Output
_____no_output_____
###Markdown
Again, having these lines individually is neat, but things become much more interesting if we can see the associated code lines just as well. That's what we will do in the next section. Visualizing DifferencesTo show correlations of line coverage in context, we introduce a number of _visualization_ techniques that _highlight_ code with different colors. Discrete SpectrumThe first idea is to use a _discrete_ spectrum of three colors:* _red_ for code executed in failing runs only* _green_ for code executed in passing runs only* _yellow_ for code executed in both passing and failing runs.Code that is not executed stays unhighlighted. We first introduce an abstract class `SpectrumDebugger` that provides the essential functions. `suspiciousness()` returns a value between 0 and 1 indicating the suspiciousness of the given event - or `None` if unknown.
###Code
class SpectrumDebugger(DifferenceDebugger):
def suspiciousness(self, event: Any) -> Optional[float]:
"""
Return a suspiciousness value in the range [0, 1.0]
for the given event, or `None` if unknown.
To be overloaded in subclasses.
"""
return None
###Output
_____no_output_____
###Markdown
The `tooltip()` and `percentage()` methods convert the suspiciousness into a human-readable form.
###Code
class SpectrumDebugger(SpectrumDebugger):
def tooltip(self, event: Any) -> str:
"""
Return a tooltip for the given event (default: percentage).
To be overloaded in subclasses.
"""
return self.percentage(event)
def percentage(self, event: Any) -> str:
"""
Return the suspiciousness for the given event as percentage string.
"""
suspiciousness = self.suspiciousness(event)
if suspiciousness is not None:
return str(int(suspiciousness * 100)).rjust(3) + '%'
else:
return ' ' * len('100%')
###Output
_____no_output_____
###Markdown
The `code()` method takes a function and shows each of its source code lines using the given spectrum, using HTML markup:
###Code
class SpectrumDebugger(SpectrumDebugger):
def code(self, functions: Optional[Set[Callable]] = None, *,
color: bool = False, suspiciousness: bool = False,
line_numbers: bool = True) -> str:
"""
Return a listing of `functions` (default: covered functions).
If `color` is True, render as HTML, using suspiciousness colors.
If `suspiciousness` is True, include suspiciousness values.
If `line_numbers` is True (default), include line numbers.
"""
if not functions:
functions = self.covered_functions()
out = ""
seen = set()
for function in functions:
source_lines, starting_line_number = \
inspect.getsourcelines(function)
if (function.__name__, starting_line_number) in seen:
continue
seen.add((function.__name__, starting_line_number))
if out:
out += '\n'
if color:
out += '<p/>'
line_number = starting_line_number
for line in source_lines:
if color:
line = html.escape(line)
if line.strip() == '':
line = ' '
location = (function.__name__, line_number)
location_suspiciousness = self.suspiciousness(location)
if location_suspiciousness is not None:
tooltip = f"Line {line_number}: {self.tooltip(location)}"
else:
tooltip = f"Line {line_number}: not executed"
if suspiciousness:
line = self.percentage(location) + ' ' + line
if line_numbers:
line = str(line_number).rjust(4) + ' ' + line
line_color = self.color(location)
if color and line_color:
line = f'''<pre style="background-color:{line_color}"
title="{tooltip}">{line.rstrip()}</pre>'''
elif color:
line = f'<pre title="{tooltip}">{line}</pre>'
else:
line = line.rstrip()
out += line + '\n'
line_number += 1
return out
###Output
_____no_output_____
###Markdown
We introduce a few helper methods to visualize the code with colors in various forms.
###Code
class SpectrumDebugger(SpectrumDebugger):
def _repr_html_(self) -> str:
"""When output in Jupyter, visualize as HTML"""
return self.code(color=True)
def __str__(self) -> str:
"""Show code as string"""
return self.code(color=False, suspiciousness=True)
def __repr__(self) -> str:
"""Show code as string"""
return self.code(color=False, suspiciousness=True)
###Output
_____no_output_____
###Markdown
So far, however, central methods like `suspiciousness()` or `color()` were abstract – that is, to be defined in subclasses. Our `DiscreteSpectrumDebugger` subclass provides concrete implementations for these, with `color()` returning one of the three colors depending on the suspiciousness of the given event:
###Code
class DiscreteSpectrumDebugger(SpectrumDebugger):
"""Visualize differences between executions using three discrete colors"""
def suspiciousness(self, event: Any) -> Optional[float]:
"""
Return a suspiciousness value [0, 1.0]
for the given event, or `None` if unknown.
"""
passing = self.all_pass_events()
failing = self.all_fail_events()
if event in passing and event in failing:
return 0.5
elif event in failing:
return 1.0
elif event in passing:
return 0.0
else:
return None
def color(self, event: Any) -> Optional[str]:
"""
Return a HTML color for the given event.
"""
suspiciousness = self.suspiciousness(event)
if suspiciousness is None:
return None
if suspiciousness > 0.8:
return 'mistyrose'
if suspiciousness >= 0.5:
return 'lightyellow'
return 'honeydew'
def tooltip(self, event: Any) -> str:
"""Return a tooltip for the given event."""
passing = self.all_pass_events()
failing = self.all_fail_events()
if event in passing and event in failing:
return "in passing and failing runs"
elif event in failing:
return "only in failing runs"
elif event in passing:
return "only in passing runs"
else:
return "never"
###Output
_____no_output_____
###Markdown
This is how the `only_pass_events()` and `only_fail_events()` sets look when visualized with code. The "culprit" line is clearly highlighted:
###Code
debugger = test_debugger_html(DiscreteSpectrumDebugger())
debugger
###Output
_____no_output_____
###Markdown
We can clearly see that the failure is correlated with the presence of quotes in the input string (which is an important hint!). But does this also show us _immediately_ where the defect to be fixed is?
###Code
quiz("Does the line `quote = not quote` actually contain the defect?",
[
"Yes, it should be fixed",
"No, the defect is elsewhere"
], '164 * 2 % 326')
###Output
_____no_output_____
###Markdown
Indeed, it is the _governing condition_ that is wrong – that is, the condition that caused Line 12 to be executed in the first place. In order to fix a program, we have to find a location that

1. _causes_ the failure (i.e., it can be changed to make the failure go away); and
2. is a _defect_ (i.e., contains an error).

In our example above, the highlighted code line is a _symptom_ for the error. To some extent, it is also a _cause_, since, say, commenting it out would also resolve the given failure, at the cost of causing other failures. However, the preceding condition also is a cause, as is the presence of quotes in the input.

Only one of these also is a _defect_, though, and that is the preceding condition. Hence, while correlations can provide important hints, they do not necessarily locate defects.

For those of us who may not have color HTML output ready, simply printing the debugger lists suspiciousness values as percentages.
###Code
print(debugger)
###Output
1 50% def remove_html_markup(s): # type: ignore
2 50% tag = False
3 50% quote = False
4 50% out = ""
5
6 50% for c in s:
7 50% if c == '<' and not quote:
8 0% tag = True
9 50% elif c == '>' and not quote:
10 0% tag = False
11 50% elif c == '"' or c == "'" and tag:
12 100% quote = not quote
13 50% elif not tag:
14 50% out = out + c
15
16 50% return out
###Markdown
Continuous Spectrum
The criterion that an event should _only_ occur in failing runs (and not in passing runs) can be too aggressive. In particular, if we have another run that executes the "culprit" lines, but does _not_ fail, our "only in fail" criterion will no longer be helpful. Here is an example. The input
```html
<b color="blue">text</b>
```
will trigger the "culprit" line
```python
quote = not quote
```
but actually produce an output where the tags are properly stripped:
###Code
remove_html_markup('<b color="blue">text</b>')
###Output
_____no_output_____
###Markdown
As a consequence, we no longer have lines that are being executed only in failing runs:
###Code
debugger = test_debugger_html(DiscreteSpectrumDebugger())
with debugger.collect_pass():
remove_html_markup('<b link="blue"></b>')
debugger.only_fail_events()
###Output
_____no_output_____
###Markdown
In our spectrum output, the effect now is that the "culprit" line is as yellow as all others.
###Code
debugger
###Output
_____no_output_____
###Markdown
We therefore introduce a different method for highlighting lines, based on their _relative_ occurrence with respect to all runs: If a line has been _mostly_ executed in failing runs, its color should shift towards red; if a line has been _mostly_ executed in passing runs, its color should shift towards green. This _continuous spectrum_ has been introduced by the seminal _Tarantula_ tool \cite{Jones2002}. In Tarantula, the color _hue_ for each line is defined as follows: $$\textit{color hue}(\textit{line}) = \textit{low color(red)} + \frac{\%\textit{passed}(\textit{line})}{\%\textit{passed}(\textit{line}) + \%\textit{failed}(\textit{line})} \times \textit{color range}$$ Here, `%passed` and `%failed` denote the percentage at which a line has been executed in passing and failing runs, respectively. A hue of 0.0 stands for red, a hue of 1.0 stands for green, and a hue of 0.5 stands for equal fractions of red and green, yielding yellow. We can implement these measures right away as methods in a new `ContinuousSpectrumDebugger` class:
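But first, a quick numeric sanity check of the hue formula (a sketch; the helper name `tarantula_hue` is ours). With the two passing runs and one failing run from above, the "culprit" line has %passed = 0 and %failed = 1; a line executed in all runs has both at 1; and Line 8 (`tag = True`) has %passed = 0.5 and %failed = 0:
```python
# A quick check of the hue formula (sketch): hue 0.0 = red, 1.0 = green
def tarantula_hue(passed_fraction, failed_fraction):  # hypothetical helper
    total = passed_fraction + failed_fraction
    return passed_fraction / total if total > 0 else None

assert tarantula_hue(0.0, 1.0) == 0.0   # "culprit" line: only in the failing run -> red
assert tarantula_hue(1.0, 1.0) == 0.5   # line executed in all runs -> yellow
assert tarantula_hue(0.5, 0.0) == 1.0   # line executed only in passing runs -> green
```
Now for the actual class: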
###Code
class ContinuousSpectrumDebugger(DiscreteSpectrumDebugger):
"""Visualize differences between executions using a color spectrum"""
def collectors_with_event(self, event: Any, category: str) -> Set[Collector]:
"""
Return all collectors in a category
that observed the given event.
"""
all_runs = self.collectors[category]
collectors_with_event = set(collector for collector in all_runs
if event in collector.events())
return collectors_with_event
def collectors_without_event(self, event: Any, category: str) -> Set[Collector]:
"""
Return all collectors in a category
that did not observe the given event.
"""
all_runs = self.collectors[category]
collectors_without_event = set(collector for collector in all_runs
if event not in collector.events())
return collectors_without_event
def event_fraction(self, event: Any, category: str) -> float:
if category not in self.collectors:
return 0.0
all_collectors = self.collectors[category]
collectors_with_event = self.collectors_with_event(event, category)
fraction = len(collectors_with_event) / len(all_collectors)
# print(f"%{category}({event}) = {fraction}")
return fraction
def passed_fraction(self, event: Any) -> float:
return self.event_fraction(event, self.PASS)
def failed_fraction(self, event: Any) -> float:
return self.event_fraction(event, self.FAIL)
def hue(self, event: Any) -> Optional[float]:
"""Return a color hue from 0.0 (red) to 1.0 (green)."""
passed = self.passed_fraction(event)
failed = self.failed_fraction(event)
if passed + failed > 0:
return passed / (passed + failed)
else:
return None
###Output
_____no_output_____
###Markdown
Having a continuous hue also implies a continuous suspiciousness and associated tooltips:
###Code
class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger):
def suspiciousness(self, event: Any) -> Optional[float]:
hue = self.hue(event)
if hue is None:
return None
return 1 - hue
def tooltip(self, event: Any) -> str:
return self.percentage(event)
###Output
_____no_output_____
###Markdown
The hue for lines executed only in failing runs is (deep) red, as expected:
###Code
debugger = test_debugger_html(ContinuousSpectrumDebugger())
for location in debugger.only_fail_events():
print(location, debugger.hue(location))
###Output
('remove_html_markup', 12) 0.0
###Markdown
Likewise, the hue for lines executed only in passing runs is (deep) green:
###Code
for location in debugger.only_pass_events():
print(location, debugger.hue(location))
###Output
('remove_html_markup', 8) 1.0
('remove_html_markup', 10) 1.0
###Markdown
The Tarantula tool not only sets the hue for a line, but also uses _brightness_ as a measure of support – that is, how often the line was executed at all. The brighter a line, the stronger the correlation with a passing or failing outcome. The brightness is defined as follows: $$\textit{brightness}(line) = \max(\%\textit{passed}(\textit{line}), \%\textit{failed}(\textit{line}))$$ and it is easily implemented, too:
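(As a quick worked example of ours, using the three runs from above: the "culprit" line gets $\max(0/2, 1/1) = 1.0$ – full support – while Line 8 (`tag = True`), executed in only one of the two passing runs, gets $\max(1/2, 0/1) = 0.5$.)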
###Code
class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger):
def brightness(self, event: Any) -> float:
return max(self.passed_fraction(event), self.failed_fraction(event))
###Output
_____no_output_____
###Markdown
Our single "only in fail" line has a brightness of 1.0 (the maximum).
###Code
debugger = test_debugger_html(ContinuousSpectrumDebugger())
for location in debugger.only_fail_events():
print(location, debugger.brightness(location))
###Output
('remove_html_markup', 12) 1.0
###Markdown
With this, we can now define a color for each line. To this end, we override the (previously discrete) `color()` method such that it returns a color specification giving hue and brightness. We use the HTML format `hsl(hue, saturation, lightness)` where the hue is given as a value between 0 and 360 (0 is red, 120 is green) and saturation and lightness are provided as percentages.
###Code
class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger):
def color(self, event: Any) -> Optional[str]:
hue = self.hue(event)
if hue is None:
return None
saturation = self.brightness(event)
# HSL color values are specified with:
# hsl(hue, saturation, lightness).
return f"hsl({hue * 120}, {saturation * 100}%, 80%)"
debugger = test_debugger_html(ContinuousSpectrumDebugger())
###Output
_____no_output_____
###Markdown
Lines executed only in failing runs are still shown in red:
###Code
for location in debugger.only_fail_events():
print(location, debugger.color(location))
###Output
('remove_html_markup', 12) hsl(0.0, 100.0%, 80%)
###Markdown
... whereas lines executed only in passing runs are still shown in green:
###Code
for location in debugger.only_pass_events():
print(location, debugger.color(location))
debugger
###Output
_____no_output_____
###Markdown
What happens with our `quote = not quote` "culprit" line if it is executed in passing runs, too?
###Code
with debugger.collect_pass():
out = remove_html_markup('<b link="blue"></b>')
quiz('In which color will the `quote = not quote` "culprit" line '
'be shown after executing the above code?',
[
'<span style="background-color: hsl(120.0, 50.0%, 80%)">Green</span>',
'<span style="background-color: hsl(60.0, 100.0%, 80%)">Yellow</span>',
'<span style="background-color: hsl(30.0, 100.0%, 80%)">Orange</span>',
'<span style="background-color: hsl(0.0, 100.0%, 80%)">Red</span>'
], '999 // 333')
###Output
_____no_output_____
###Markdown
We see that it still is shown with an orange-red tint.
###Code
debugger
###Output
_____no_output_____
###Markdown
Here's another example, coming right from the Tarantula paper. The `middle()` function takes three numbers `x`, `y`, and `z`, and returns the one that is neither the minimum nor the maximum of the three:
###Code
def middle(x, y, z): # type: ignore
if y < z:
if x < y:
return y
elif x < z:
return y
else:
if x > y:
return y
elif x > z:
return x
return z
middle(1, 2, 3)
###Output
_____no_output_____
###Markdown
Unfortunately, `middle()` can fail:
###Code
middle(2, 1, 3)
###Output
_____no_output_____
###Markdown
Let us see whether we can find the bug with a few additional test cases:
###Code
# ignore
T3 = TypeVar('T3', bound='DifferenceDebugger')
def test_debugger_middle(debugger: T3) -> T3:
with debugger.collect_pass():
middle(3, 3, 5)
with debugger.collect_pass():
middle(1, 2, 3)
with debugger.collect_pass():
middle(3, 2, 1)
with debugger.collect_pass():
middle(5, 5, 5)
with debugger.collect_pass():
middle(5, 3, 4)
with debugger.collect_fail():
middle(2, 1, 3)
return debugger
###Output
_____no_output_____
###Markdown
Note that in order to collect data from multiple function invocations, you need to have a separate `with` clause for every invocation. The following will _not_ work correctly:
```python
with debugger.collect_pass():
    middle(3, 3, 5)
    middle(1, 2, 3)
    ...
```
###Code
debugger = test_debugger_middle(ContinuousSpectrumDebugger())
debugger.event_table(args=True)
###Output
_____no_output_____
###Markdown
Here comes the visualization. We see that the `return y` line is the culprit here – and actually also the one to be fixed.
###Code
debugger
quiz("Which of the above lines should be fixed?",
[
'<span style="background-color: hsl(45.0, 100%, 80%)">Line 3: `if x < y`</span>',
'<span style="background-color: hsl(34.28571428571429, 100.0%, 80%)">Line 5: `elif x < z`</span>',
'<span style="background-color: hsl(20.000000000000004, 100.0%, 80%)">Line 6: `return y`</span>',
'<span style="background-color: hsl(120.0, 20.0%, 80%)">Line 9: `return y`</span>',
], r'len(" middle ".strip()[:3])')
###Output
_____no_output_____
###Markdown
Indeed, in the `middle()` example, the "reddest" line is also the one to be fixed. Here is the fixed version:
###Code
def middle_fixed(x, y, z): # type: ignore
if y < z:
if x < y:
return y
elif x < z:
return x
else:
if x > y:
return y
elif x > z:
return x
return z
middle_fixed(2, 1, 3)
###Output
_____no_output_____
###Markdown
Ranking Lines by Suspiciousness
In a large program, there can be several locations (and events) that could be flagged as suspicious. It suffices that some large code block of, say, 1,000 lines is mostly executed in failing runs, and all of this code block will then be visualized in some shade of red. To further highlight the "most suspicious" events, one idea is to use a _ranking_ – that is, to come up with a list of events in which those most correlated with failures are shown at the top. The programmer would then examine these events one by one and proceed down the list. We will show how this works for two "correlation" metrics – first the _Tarantula_ metric, as introduced above, and then the _Ochiai_ metric, which has been shown to be one of the best "ranking" metrics. We introduce a base class `RankingDebugger` with an abstract method `suspiciousness()` to be overloaded in subclasses. The method `rank()` returns a list of all events observed, sorted by suspiciousness, highest first.
###Code
class RankingDebugger(DiscreteSpectrumDebugger):
"""Rank events by their suspiciousness"""
def rank(self) -> List[Any]:
"""Return a list of events, sorted by suspiciousness, highest first."""
def susp(event: Any) -> float:
suspiciousness = self.suspiciousness(event)
assert suspiciousness is not None
return suspiciousness
events = list(self.all_events())
events.sort(key=susp, reverse=True)
return events
def __repr__(self) -> str:
return repr(self.rank())
###Output
_____no_output_____
###Markdown
The Tarantula Metric
We can use the Tarantula metric to sort lines according to their suspiciousness. The "redder" a line (a hue of 0.0), the more suspicious it is. We can simply define $$\textit{suspiciousness}_\textit{tarantula}(\textit{event}) = 1 - \textit{color hue}(\textit{event})$$ where $\textit{color hue}$ is as defined above. This is exactly the `suspiciousness()` function as already implemented in our `ContinuousSpectrumDebugger`. We introduce the `TarantulaDebugger` class, inheriting visualization capabilities from the `ContinuousSpectrumDebugger` class as well as the suspiciousness features from the `RankingDebugger` class.
###Code
class TarantulaDebugger(ContinuousSpectrumDebugger, RankingDebugger):
"""Spectrum-based Debugger using the Tarantula metric for suspiciousness"""
pass
###Output
_____no_output_____
###Markdown
Let us list `remove_html_markup()` with highlighted lines again:
###Code
tarantula_html = test_debugger_html(TarantulaDebugger())
tarantula_html
###Output
_____no_output_____
###Markdown
Here's our ranking of lines, from most suspicious to least suspicious:
###Code
tarantula_html.rank()
tarantula_html.suspiciousness(tarantula_html.rank()[0])
###Output
_____no_output_____
###Markdown
We see that the first line in the list is indeed the most suspicious; the two "green" lines come at the very end. For the `middle()` function, we also obtain a ranking from "reddest" to "greenest".
###Code
tarantula_middle = test_debugger_middle(TarantulaDebugger())
tarantula_middle
tarantula_middle.rank()
tarantula_middle.suspiciousness(tarantula_middle.rank()[0])
###Output
_____no_output_____
###Markdown
The Ochiai Metric
The _Ochiai_ metric \cite{Ochiai1957}, first introduced in the biology domain \cite{daSilvaMeyer2004} and later applied to fault localization by Abreu et al. \cite{Abreu2009}, is defined as follows: $$\textit{suspiciousness}_\textit{ochiai} = \frac{\textit{failed}(\textit{event})}{\sqrt{\bigl(\textit{failed}(\textit{event}) + \textit{not-in-failed}(\textit{event})\bigr)\times\bigl(\textit{failed}(\textit{event}) + \textit{passed}(\textit{event})\bigr)}}$$ where
* $\textit{failed}(\textit{event})$ is the number of times the event occurred in _failing_ runs,
* $\textit{not-in-failed}(\textit{event})$ is the number of times the event did _not_ occur in failing runs, and
* $\textit{passed}(\textit{event})$ is the number of times the event occurred in _passing_ runs.
We can easily implement this formula:
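But first, a quick numeric check of the formula (a sketch; the helper name `ochiai` is ours), using the three `remove_html_markup()` runs from above – two passing, one failing:
```python
from math import sqrt

def ochiai(failed, not_in_failed, passed):  # hypothetical helper for illustration
    return failed / sqrt((failed + not_in_failed) * (failed + passed))

# Line 12 (the "culprit") occurs in the one failing run only:
assert ochiai(failed=1, not_in_failed=0, passed=0) == 1.0
# A line occurring in all three runs:
assert round(ochiai(failed=1, not_in_failed=0, passed=2), 3) == 0.577
# Lines occurring only in passing runs score 0:
assert ochiai(failed=0, not_in_failed=1, passed=1) == 0.0
```
Now for the implementation: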
###Code
import math
class OchiaiDebugger(ContinuousSpectrumDebugger, RankingDebugger):
"""Spectrum-based Debugger using the Ochiai metric for suspiciousness"""
def suspiciousness(self, event: Any) -> Optional[float]:
failed = len(self.collectors_with_event(event, self.FAIL))
not_in_failed = len(self.collectors_without_event(event, self.FAIL))
passed = len(self.collectors_with_event(event, self.PASS))
try:
return failed / math.sqrt((failed + not_in_failed) * (failed + passed))
except ZeroDivisionError:
return None
def hue(self, event: Any) -> Optional[float]:
suspiciousness = self.suspiciousness(event)
if suspiciousness is None:
return None
return 1 - suspiciousness
###Output
_____no_output_____
###Markdown
Applied to the `remove_html_markup()` function, the Ochiai metric yields individual suspiciousness scores that differ from Tarantula's. However, we obtain a very similar visualization and the same ranking.
###Code
ochiai_html = test_debugger_html(OchiaiDebugger())
ochiai_html
ochiai_html.rank()
ochiai_html.suspiciousness(ochiai_html.rank()[0])
###Output
_____no_output_____
###Markdown
The same observations also apply for the `middle()` function.
###Code
ochiai_middle = test_debugger_middle(OchiaiDebugger())
ochiai_middle
ochiai_middle.rank()
ochiai_middle.suspiciousness(ochiai_middle.rank()[0])
###Output
_____no_output_____
###Markdown
How Useful is Ranking?
So, which metric is better? The standard method to evaluate such rankings is to determine a _ground truth_ – that is, the set of locations that eventually are fixed – and to check at which point in the ranking any such location occurs – the earlier, the better. In our `remove_html_markup()` and `middle()` examples, both the Tarantula and the Ochiai metric perform flawlessly, as the "culprit" line is always ranked at the top. However, this need not always be the case; the exact performance depends on the nature of the code and the observed runs. (Also, the question of whether there always is exactly one possible location where the program can be fixed is open for discussion.)
You may be surprised that over time, _several dozen_ metrics have been proposed \cite{Wong2016}, each performing somewhat better or somewhat worse depending on which benchmark they were applied to. The two metrics discussed above each have their merits – the Tarantula metric was among the first such metrics, and the Ochiai metric is generally shown to be among the most effective ones \cite{Abreu2009}.
While rankings can be easily _evaluated_, it is not necessarily clear whether and how much they serve programmers. As stated above, the assumption behind rankings is that developers examine one potentially defective statement after another until they find the actually defective one. However, in a series of human studies with developers, Parnin and Orso \cite{Parnin2011} found that this assumption may not hold:
> It is unclear whether developers can actually determine the faulty nature of a statement by simply looking at it, without any additional information (e.g., the state of the program when the statement was executed or the statements that were executed before or after that one).
In their study, they found that rankings could help complete a task faster, but this effect was limited to experienced developers and simpler code. Artificially changing the rank of faulty statements had little to no effect, implying that developers would not strictly follow the ranked list of statements, but rather search through the code to understand it. At this point, a _visualization_ as in the Tarantula tool can be helpful to programmers as it _guides_ the search, but a _ranking_ that _defines_ where to search may be less useful.
Having said that, ranking has its merits – notably when it comes to informing _automated_ debugging techniques. In the [chapter on program repair](Repairer.ipynb), we will see how ranked lists of potentially faulty statements tell automated repair techniques where to try to repair the program first. And once such a repair is successful, we have a very strong indication of where and how the program could be fixed!
Using Large Test Suites
In fault localization, the larger and the more thorough the test suite, the higher the precision. Let us try out what happens if we extend the `middle()` test suite with additional test cases. The function `middle_testcase()` returns a random input for `middle()`:
###Code
import random
def middle_testcase() -> Tuple[int, int, int]:
x = random.randrange(10)
y = random.randrange(10)
z = random.randrange(10)
return x, y, z
[middle_testcase() for i in range(5)]
###Output
_____no_output_____
###Markdown
The function `middle_test()` simply checks if `middle()` operates correctly – by placing `x`, `y`, and `z` in a list, sorting it, and checking the middle argument. If `middle()` fails, `middle_test()` raises an exception.
###Code
def middle_test(x: int, y: int, z: int) -> None:
m = middle(x, y, z)
assert m == sorted([x, y, z])[1]
middle_test(4, 5, 6)
from ExpectError import ExpectError
with ExpectError():
middle_test(2, 1, 3)
###Output
Traceback (most recent call last):
File "/var/folders/n2/xd9445p97rb3xh7m1dfx8_4h0006ts/T/ipykernel_61910/3661663124.py", line 2, in <module>
middle_test(2, 1, 3)
File "/var/folders/n2/xd9445p97rb3xh7m1dfx8_4h0006ts/T/ipykernel_61910/40742806.py", line 3, in middle_test
assert m == sorted([x, y, z])[1]
AssertionError (expected)
###Markdown
The function `middle_passing_testcase()` searches and returns a triple `x`, `y`, `z` that causes `middle_test()` to pass.
###Code
def middle_passing_testcase() -> Tuple[int, int, int]:
while True:
try:
x, y, z = middle_testcase()
middle_test(x, y, z)
return x, y, z
except AssertionError:
pass
(x, y, z) = middle_passing_testcase()
m = middle(x, y, z)
print(f"middle({x}, {y}, {z}) = {m}")
###Output
middle(1, 6, 1) = 1
###Markdown
The function `middle_failing_testcase()` does the same; but its triple `x`, `y`, `z` causes `middle_test()` to fail.
###Code
def middle_failing_testcase() -> Tuple[int, int, int]:
while True:
try:
x, y, z = middle_testcase()
middle_test(x, y, z)
except AssertionError:
return x, y, z
(x, y, z) = middle_failing_testcase()
m = middle(x, y, z)
print(f"middle({x}, {y}, {z}) = {m}")
###Output
middle(5, 2, 6) = 2
###Markdown
With these, we can define two sets of test cases, each with 100 inputs.
###Code
MIDDLE_TESTS = 100
MIDDLE_PASSING_TESTCASES = [middle_passing_testcase()
for i in range(MIDDLE_TESTS)]
MIDDLE_FAILING_TESTCASES = [middle_failing_testcase()
for i in range(MIDDLE_TESTS)]
###Output
_____no_output_____
###Markdown
Let us run the `OchiaiDebugger` with these two test sets.
###Code
ochiai_middle = OchiaiDebugger()
for x, y, z in MIDDLE_PASSING_TESTCASES:
with ochiai_middle.collect_pass():
middle(x, y, z)
for x, y, z in MIDDLE_FAILING_TESTCASES:
with ochiai_middle.collect_fail():
middle(x, y, z)
ochiai_middle
###Output
_____no_output_____
###Markdown
We see that the "culprit" line is still the one most likely to be fixed, but the two conditions leading to the error (`x < y` and `x < z`) are also listed as potentially faulty. That is because the error might also be fixed by changing these conditions – although this would result in a more complex fix.
Other Events besides Coverage
We close this chapter with two directions for further thought. If you wondered why, in the above code, we were mostly talking about `events` rather than lines covered, that is because our framework allows for tracking arbitrary events, not just coverage. In fact, any data item a collector can extract from the execution can be used for correlation analysis. (It may not be so easily visualized, though.) Here's an example. We define a `ValueCollector` class that collects pairs of (local) variables and their values during execution. Its `events()` method then returns the set of all these pairs.
###Code
class ValueCollector(Collector):
    """A class to collect local variables and their values."""
def __init__(self) -> None:
"""Constructor."""
super().__init__()
self.vars: Set[str] = set()
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
local_vars = frame.f_locals
for var in local_vars:
value = local_vars[var]
self.vars.add(f"{var} = {repr(value)}")
def events(self) -> Set[str]:
"""A set of (variable, value) pairs observed"""
return self.vars
###Output
_____no_output_____
###Markdown
If we apply this collector on our set of HTML test cases, these are all the events that we obtain – essentially all variables and all values ever seen:
###Code
debugger = test_debugger_html(ContinuousSpectrumDebugger(ValueCollector))
for event in debugger.all_events():
print(event)
###Output
s = '<b>abc</b>'
s = 'abc'
c = '<'
quote = False
c = '>'
out = 'a'
out = ''
out = 'abc'
c = 'b'
c = 'c'
tag = False
quote = True
c = '"'
tag = True
out = 'ab'
c = 'a'
s = '"abc"'
c = '/'
###Markdown
However, some of these events only occur in the failing run:
###Code
for event in debugger.only_fail_events():
print(event)
###Output
s = '"abc"'
quote = True
c = '"'
###Markdown
Some of these differences are spurious – the string `"abc"` (with quotes) only occurs in the failing run – but others, such as `quote` being True and `c` holding a quote character, are actually relevant for explaining when the failure comes to be. We can even visualize the suspiciousness of the individual events by setting the (so far undiscussed) `color` flag when producing an event table:
###Code
debugger.event_table(color=True, args=True)
###Output
_____no_output_____
###Markdown
There are many ways one can continue from here.
* Rather than checking for concrete values, one could check for more _abstract properties_ – for instance, what is the sign of the value? What is the length of the string?
* One could check for specifics of the _control flow_ – is the loop taken? How many times?
* One could check for specifics of the _information flow_ – which values flow from one variable to another?
There are lots of properties that all could be related to failures – and if we happen to check for the right one, we may obtain a much crisper definition of what causes the failure. We will come up with more ideas on properties to check when it comes to [mining specifications](SpecificationMining.ipynb).
Training Classifiers
The metrics we have discussed so far are pretty _generic_ – that is, they are fixed no matter how the actual event space is structured. The field of _machine learning_ has come up with techniques that learn _classifiers_ from a given set of data – classifiers that are trained from labeled data and then can predict labels for new data sets. In our case, the labels are test outcomes (PASS and FAIL), whereas the data would be features of the events observed.
A classifier by itself is not immediately useful for debugging (although it could predict whether future inputs will fail or not). Some classifiers, however, have great _diagnostic_ quality; that is, they can _explain_ how their classification comes to be. [Decision trees](https://scikit-learn.org/stable/modules/tree.html) fall into this very category. A decision tree contains a number of _nodes_, each one associated with a predicate. Depending on whether the predicate is true or false, we follow the given "true" or "false" branch to end up in the next node, which again contains a predicate. Eventually, we end up in the outcome predicted by the tree. The neat thing is that the node predicates actually give important hints on the circumstances that are _most relevant_ for deciding the outcome.
Let us illustrate this with an example. We build a class `ClassifyingDebugger` that trains a decision tree from the events collected. To this end, we need to set up our input data such that it can be fed into a classifier. We start with identifying our _samples_ (runs) and the respective _labels_ (outcomes). All values have to be encoded into numerical values.
###Code
class ClassifyingDebugger(DifferenceDebugger):
"""A debugger implementing a decision tree for events"""
PASS_VALUE = +1.0
FAIL_VALUE = -1.0
def samples(self) -> Dict[str, float]:
samples = {}
for collector in self.pass_collectors():
samples[collector.id()] = self.PASS_VALUE
        for collector in self.fail_collectors():
samples[collector.id()] = self.FAIL_VALUE
return samples
debugger = test_debugger_html(ClassifyingDebugger())
debugger.samples()
###Output
_____no_output_____
###Markdown
Next, we identify the _features_, which in our case is the set of lines executed in each sample:
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def features(self) -> Dict[str, Any]:
features = {}
        for collector in self.pass_collectors():
features[collector.id()] = collector.events()
        for collector in self.fail_collectors():
features[collector.id()] = collector.events()
return features
debugger = test_debugger_html(ClassifyingDebugger())
debugger.features()
###Output
_____no_output_____
###Markdown
All our features have names, which must be strings.
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def feature_names(self) -> List[str]:
return [repr(feature) for feature in self.all_events()]
debugger = test_debugger_html(ClassifyingDebugger())
debugger.feature_names()
###Output
_____no_output_____
###Markdown
Next, we define the _shape_ for an individual sample, which is a value of +1 or -1 for each feature seen (i.e., +1 if the line was covered, -1 if not).
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def shape(self, sample: str) -> List[float]:
x = []
features = self.features()
for f in self.all_events():
if f in features[sample]:
x += [+1.0]
else:
x += [-1.0]
return x
debugger = test_debugger_html(ClassifyingDebugger())
debugger.shape("remove_html_markup(s='abc')")
###Output
_____no_output_____
###Markdown
Our input X for the classifier now is a list of such shapes, one for each sample.
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def X(self) -> List[List[float]]:
X = []
samples = self.samples()
for key in samples:
X += [self.shape(key)]
return X
debugger = test_debugger_html(ClassifyingDebugger())
debugger.X()
###Output
_____no_output_____
###Markdown
Our input Y for the classifier, in contrast, is the list of labels, again indexed by sample.
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def Y(self) -> List[float]:
Y = []
samples = self.samples()
for key in samples:
Y += [samples[key]]
return Y
debugger = test_debugger_html(ClassifyingDebugger())
debugger.Y()
###Output
_____no_output_____
###Markdown
We now have all our data ready to be fit into a tree classifier. The method `classifier()` creates and returns the (tree) classifier for the observed runs.
###Code
from sklearn.tree import DecisionTreeClassifier, export_text, export_graphviz
class ClassifyingDebugger(ClassifyingDebugger):
def classifier(self) -> DecisionTreeClassifier:
classifier = DecisionTreeClassifier()
classifier = classifier.fit(self.X(), self.Y())
return classifier
###Output
_____no_output_____
###Markdown
We define a special method to show classifiers:
###Code
import graphviz
class ClassifyingDebugger(ClassifyingDebugger):
def show_classifier(self, classifier: DecisionTreeClassifier) -> Any:
dot_data = export_graphviz(classifier, out_file=None,
filled=False, rounded=True,
feature_names=self.feature_names(),
class_names=["FAIL", "PASS"],
label='none',
node_ids=False,
impurity=False,
proportion=True,
special_characters=True)
return graphviz.Source(dot_data)
###Output
_____no_output_____
###Markdown
This is the tree we get for our `remove_html_markup()` tests. The top predicate is whether the "culprit" line was executed (-1 means no, +1 means yes). If not (-1), the outcome is PASS. Otherwise, the outcome is FAIL.
###Code
debugger = test_debugger_html(ClassifyingDebugger())
classifier = debugger.classifier()
debugger.show_classifier(classifier)
###Output
_____no_output_____
###Markdown
We can even use our classifier to predict the outcome of additional runs. If, for instance, we execute all lines except for, say, Lines 7, 9, and 11, our tree classifier would predict failure – because the "culprit" line 12 is executed.
###Code
classifier.predict([[1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1]])
###Output
_____no_output_____
###Markdown
Again, there are many ways to continue from here. Which events should we train the classifier from? How do classifiers compare in their performance and diagnostic quality? There are lots of possibilities left to explore, and we are only beginning to realize the potential for automated debugging.
Synopsis
This chapter introduces classes and techniques for _statistical debugging_ – that is, correlating specific events, such as lines covered, with passing and failing outcomes.
To make use of the code in this chapter, use one of the provided `StatisticalDebugger` subclasses such as `TarantulaDebugger` or `OchiaiDebugger`. Both are instantiated with a `Collector` denoting the type of events you want to correlate outcomes with. The default is `CoverageCollector`, collecting line coverage.
Collecting Events from Calls
To collect events from calls that are labeled manually, use
###Code
debugger = TarantulaDebugger()
with debugger.collect_pass():
remove_html_markup("abc")
with debugger.collect_pass():
remove_html_markup('<b>abc</b>')
with debugger.collect_fail():
remove_html_markup('"abc"')
###Output
_____no_output_____
###Markdown
Within each `with` block, the _first function call_ is collected and tracked for coverage. (Note that _only_ the first call is tracked.)
Collecting Events from Tests
To collect events from _tests_ that use exceptions to indicate failure, use the simpler `with` form:
###Code
debugger = TarantulaDebugger()
with debugger:
remove_html_markup("abc")
with debugger:
remove_html_markup('<b>abc</b>')
with debugger:
remove_html_markup('"abc"')
assert False # raise an exception
###Output
_____no_output_____
###Markdown
`with` blocks that raise an exception will be classified as failing; blocks that do not will be classified as passing. Note that exceptions raised are "swallowed" by the debugger.
Visualizing Events as a Table
After collecting events, you can print out the observed events – in this case, line numbers – in a table, showing in which runs they occurred (`X`), and with colors highlighting the suspiciousness of the event. A "red" event means that the event predominantly occurs in failing runs.
###Code
debugger.event_table(args=True, color=True)
###Output
_____no_output_____
###Markdown
Visualizing Suspicious Code
If you collected coverage with `CoverageCollector`, you can also visualize the code with similar colors, highlighting suspicious lines:
###Code
debugger
###Output
_____no_output_____
###Markdown
Ranking Events
The method `rank()` returns a ranked list of events, starting with the most suspicious. This is useful for automated techniques that need potential defect locations.
###Code
debugger.rank()
###Output
_____no_output_____
###Markdown
Classes and Methods
Here are all classes defined in this chapter:
###Code
# ignore
from ClassDiagram import display_class_hierarchy
# ignore
display_class_hierarchy([TarantulaDebugger, OchiaiDebugger],
abstract_classes=[
StatisticalDebugger,
DifferenceDebugger,
RankingDebugger
],
public_methods=[
StatisticalDebugger.__init__,
StatisticalDebugger.all_events,
StatisticalDebugger.event_table,
StatisticalDebugger.function,
StatisticalDebugger.coverage,
StatisticalDebugger.covered_functions,
DifferenceDebugger.__enter__,
DifferenceDebugger.__exit__,
DifferenceDebugger.all_pass_events,
DifferenceDebugger.all_fail_events,
DifferenceDebugger.collect_pass,
DifferenceDebugger.collect_fail,
DifferenceDebugger.only_pass_events,
DifferenceDebugger.only_fail_events,
SpectrumDebugger.code,
SpectrumDebugger.__repr__,
SpectrumDebugger.__str__,
SpectrumDebugger._repr_html_,
ContinuousSpectrumDebugger.code,
ContinuousSpectrumDebugger.__repr__,
RankingDebugger.rank
],
project='debuggingbook')
# ignore
display_class_hierarchy([CoverageCollector, ValueCollector],
public_methods=[
Tracer.__init__,
Tracer.__enter__,
Tracer.__exit__,
Tracer.changed_vars, # type: ignore
Collector.__init__,
Collector.__repr__,
Collector.function,
Collector.args,
Collector.argstring,
Collector.exception,
Collector.id,
Collector.collect,
CoverageCollector.coverage,
CoverageCollector.covered_functions,
CoverageCollector.events,
ValueCollector.__init__,
ValueCollector.events
],
project='debuggingbook')
###Output
_____no_output_____
###Markdown
Lessons Learned
* _Correlations_ between execution events and outcomes (pass/fail) can provide important hints for debugging
* Events occurring only (or mostly) during failing runs can be _highlighted_ and _ranked_ to guide the search
* Important hints include whether the _execution of specific code locations_ correlates with failure
Next Steps
Chapters that build on this one include
* [how to determine invariants that correlate with failures](DynamicInvariants.ipynb)
* [how to automatically repair programs](Repairer.ipynb)
Background
The seminal works on statistical debugging are two papers:
* "Visualization of Test Information to Assist Fault Localization" \cite{Jones2002} by James Jones, Mary Jean Harrold, and John Stasko, introducing Tarantula and its visualization. The paper won an ACM SIGSOFT 10-year impact award.
* "Bug Isolation via Remote Program Sampling" \cite{Liblit2003} by Ben Liblit, Alex Aiken, Alice X. Zheng, and Michael I. Jordan, introducing the term "statistical debugging". Liblit won the ACM Doctoral Dissertation Award for this work.
The Ochiai metric for fault localization was introduced by \cite{Abreu2009}. The survey by Wong et al. \cite{Wong2016} gives a comprehensive overview of the field of statistical fault localization. The study by Parnin and Orso \cite{Parnin2011} is a must-read to understand the limitations of the technique.
Exercises
Exercise 1: A Postcondition for Middle
What would be a postcondition for `middle()`? How can you check it?
**Solution.** A simple postcondition for `middle()` would be
```python
assert m == sorted([x, y, z])[1]
```
where `m` is the value returned by `middle()`. `sorted()` sorts the given list, and the index `[1]` returns, well, the middle element. (This might also be a much shorter, but possibly slightly more expensive, implementation of `middle()`.) Since `middle()` has several `return` statements, the easiest way to check the result is to create a wrapper around `middle()`:
###Code
def middle_checked(x, y, z): # type: ignore
m = middle(x, y, z)
assert m == sorted([x, y, z])[1]
return m
###Output
_____no_output_____
###Markdown
`middle_checked()` catches the error:
###Code
from ExpectError import ExpectError
with ExpectError():
m = middle_checked(2, 1, 3)
###Output
Traceback (most recent call last):
File "/var/folders/n2/xd9445p97rb3xh7m1dfx8_4h0006ts/T/ipykernel_61910/3016629944.py", line 2, in <module>
m = middle_checked(2, 1, 3)
File "/var/folders/n2/xd9445p97rb3xh7m1dfx8_4h0006ts/T/ipykernel_61910/1374660292.py", line 3, in middle_checked
assert m == sorted([x, y, z])[1]
AssertionError (expected)
###Markdown
Statistical Debugging
In this chapter, we introduce _statistical debugging_ – the idea that specific events during execution could be _statistically correlated_ with failures. We start with coverage of individual lines and then proceed towards further execution features.
###Code
from bookutils import YouTubeVideo
YouTubeVideo("UNuso00zYiI")
###Output
_____no_output_____
###Markdown
**Prerequisites**
* You should have read the [chapter on tracing executions](Tracer.ipynb).
###Code
import bookutils
###Output
_____no_output_____
###Markdown
Synopsis
To [use the code provided in this chapter](Importing.ipynb), write
```python
>>> from debuggingbook.StatisticalDebugger import <identifier>
```
and then make use of the following features.
This chapter introduces classes and techniques for _statistical debugging_ – that is, correlating specific events, such as lines covered, with passing and failing outcomes.
To make use of the code in this chapter, use one of the provided `StatisticalDebugger` subclasses such as `TarantulaDebugger` or `OchiaiDebugger`. Both are instantiated with a `Collector` denoting the type of events you want to correlate outcomes with. The default is `CoverageCollector`, collecting line coverage.
Collecting Events from Calls
To collect events from calls that are labeled manually, use
```python
>>> debugger = TarantulaDebugger()
>>> with debugger.collect_pass():
>>>     remove_html_markup("abc")
>>> with debugger.collect_pass():
>>>     remove_html_markup('<b>abc</b>')
>>> with debugger.collect_fail():
>>>     remove_html_markup('"abc"')
```
Within each `with` block, the _first function call_ is collected and tracked for coverage. (Note that _only_ the first call is tracked.)
Collecting Events from Tests
To collect events from _tests_ that use exceptions to indicate failure, use the simpler `with` form:
```python
>>> debugger = TarantulaDebugger()
>>> with debugger:
>>>     remove_html_markup("abc")
>>> with debugger:
>>>     remove_html_markup('<b>abc</b>')
>>> with debugger:
>>>     remove_html_markup('"abc"')
>>>     assert False  # raise an exception
```
`with` blocks that raise an exception will be classified as failing; blocks that do not will be classified as passing. Note that exceptions raised are "swallowed" by the debugger.
Visualizing Events as a Table
After collecting events, you can print out the observed events – in this case, line numbers – in a table, showing in which runs they occurred (`X`), and with colors highlighting the suspiciousness of the event. A "red" event means that the event predominantly occurs in failing runs.
```python
>>> debugger.event_table(args=True, color=True)
```
| `remove_html_markup` | `s='abc'` | `s='<b>abc</b>'` | `s='"abc"'` |
| --------------------- | ---- | ---- | ---- |
| remove_html_markup:1 | X | X | X |
| remove_html_markup:2 | X | X | X |
| remove_html_markup:3 | X | X | X |
| remove_html_markup:4 | X | X | X |
| remove_html_markup:6 | X | X | X |
| remove_html_markup:7 | X | X | X |
| remove_html_markup:8 | - | X | - |
| remove_html_markup:9 | X | X | X |
| remove_html_markup:10 | - | X | - |
| remove_html_markup:11 | X | X | X |
| remove_html_markup:12 | - | - | X |
| remove_html_markup:13 | X | X | X |
| remove_html_markup:14 | X | X | X |
| remove_html_markup:16 | X | X | X |
Visualizing Suspicious Code
If you collected coverage with `CoverageCollector`, you can also visualize the code with similar colors, highlighting suspicious lines:
```python
>>> debugger
```
(The rendered listing shows `remove_html_markup()` with each line colored by suspiciousness: Line 12, `quote = not quote`, at 100%; Lines 8 and 10 at 0%; all other executed lines at 50%.)
Ranking Events
The method `rank()` returns a ranked list of events, starting with the most suspicious. This is useful for automated techniques that need potential defect locations.
```python
>>> debugger.rank()
[('remove_html_markup', 12),
 ('remove_html_markup', 3),
 ('remove_html_markup', 1),
 ('remove_html_markup', 13),
 ('remove_html_markup', 6),
 ('remove_html_markup', 11),
 ('remove_html_markup', 16),
 ('remove_html_markup', 4),
 ('remove_html_markup', 9),
 ('remove_html_markup', 2),
 ('remove_html_markup', 14),
 ('remove_html_markup', 7),
 ('remove_html_markup', 10),
 ('remove_html_markup', 8)]
```
Classes and Methods
Here are all classes defined in this chapter:
Introduction
The idea behind _statistical debugging_ is fairly simple. We have a program that sometimes passes and sometimes fails. This outcome can be _correlated_ with events that precede it – properties of the input, properties of the execution, properties of the program state. If we, for instance, can find that "the program always fails when Line 123 is executed, and it always passes when Line 123 is _not_ executed", then we have a strong correlation between Line 123 being executed and failure.
Such _correlation_ does not necessarily mean _causation_. For this, we would have to prove that executing Line 123 _always_ leads to failure, and that _not_ executing it does not lead to (this) failure. Also, a correlation (or even a causation) does not mean that Line 123 contains the defect – for this, we would have to show that it actually is an error. Still, correlations make excellent hints when it comes to searching for failure causes – in all generality, if you let your search be guided by _events that correlate with failures_, you are more likely to find _important hints on how the failure comes to be_.
Collecting Events
How can we determine events that correlate with failure? We start with a general mechanism to actually _collect_ events during execution. The abstract `Collector` class provides
* a `collect()` method made for collecting events, called from the `traceit()` tracer; and
* an `events()` method made for retrieving these events.
Both of these are _abstract_ and will be defined further in subclasses.
###Code
from Tracer import Tracer
# ignore
from typing import Any, Callable, Optional, Type, Tuple
from typing import Dict, Set, List, TypeVar, Union
from types import FrameType, TracebackType
class Collector(Tracer):
"""A class to record events during execution."""
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
"""Collecting function. To be overridden in subclasses."""
pass
def events(self) -> Set:
"""Return a collection of events. To be overridden in subclasses."""
return set()
def traceit(self, frame: FrameType, event: str, arg: Any) -> None:
self.collect(frame, event, arg)
###Output
_____no_output_____
###Markdown
A `Collector` class is used like `Tracer`, using a `with` statement. Let us apply it on the buggy variant of `remove_html_markup()` from the [Introduction to Debugging](Intro_Debugging.ipynb):
###Code
def remove_html_markup(s): # type: ignore
tag = False
quote = False
out = ""
for c in s:
if c == '<' and not quote:
tag = True
elif c == '>' and not quote:
tag = False
elif c == '"' or c == "'" and tag:
quote = not quote
elif not tag:
out = out + c
return out
with Collector() as c:
out = remove_html_markup('"abc"')
out
###Output
_____no_output_____
###Markdown
There's not much we can do with our collector, as the `collect()` and `events()` methods are still empty. However, we can introduce an `id()` method that returns a string identifying the collector. This string is derived from the _first function call_ encountered.
###Code
from types import FunctionType
Coverage = Set[Tuple[Callable, int]]
class Collector(Collector):
def __init__(self) -> None:
"""Constructor."""
self._function: Optional[Callable] = None
self._args: Optional[Dict[str, Any]] = None
self._argstring: Optional[str] = None
self._exception: Optional[Type] = None
self.items_to_ignore: List[Union[Type, Callable]] = [self.__class__]
def traceit(self, frame: FrameType, event: str, arg: Any) -> None:
"""
Tracing function.
Saves the first function and calls collect().
"""
for item in self.items_to_ignore:
if (isinstance(item, type) and 'self' in frame.f_locals and
isinstance(frame.f_locals['self'], item)):
# Ignore this class
return
if item.__name__ == frame.f_code.co_name:
# Ignore this function
return
if self._function is None and event == 'call':
# Save function
self._function = self.create_function(frame)
self._args = frame.f_locals.copy()
self._argstring = ", ".join([f"{var}={repr(self._args[var])}"
for var in self._args])
self.collect(frame, event, arg)
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
"""Collector function. To be overloaded in subclasses."""
pass
def id(self) -> str:
"""Return an identifier for the collector,
created from the first call"""
return f"{self.function().__name__}({self.argstring()})"
def function(self) -> Callable:
"""Return the function from the first call, as a function object"""
if not self._function:
raise ValueError("No call collected")
return self._function
def argstring(self) -> str:
"""
Return the list of arguments from the first call,
as a printable string
"""
if not self._argstring:
raise ValueError("No call collected")
return self._argstring
def args(self) -> Dict[str, Any]:
"""Return a dict of argument names and values from the first call"""
if not self._args:
raise ValueError("No call collected")
return self._args
def exception(self) -> Optional[Type]:
"""Return the exception class from the first call,
or None if no exception was raised."""
return self._exception
def __repr__(self) -> str:
"""Return a string representation of the collector"""
# We use the ID as default representation when printed
return self.id()
def covered_functions(self) -> Set[Callable]:
"""Set of covered functions. To be overloaded in subclasses."""
return set()
def coverage(self) -> Coverage:
"""
Return a set (function, lineno) with locations covered.
To be overloaded in subclasses.
"""
return set()
###Output
_____no_output_____
###Markdown
Here's how the collector works. We use a `with` clause to collect details on a function call:
###Code
with Collector() as c:
remove_html_markup('abc')
###Output
_____no_output_____
###Markdown
We can now retrieve details such as the function called...
###Code
c.function()
###Output
_____no_output_____
###Markdown
... or its arguments, as a name/value dictionary.
###Code
c.args()
###Output
_____no_output_____
###Markdown
The `id()` method returns a printable representation of the call:
###Code
c.id()
###Output
_____no_output_____
###Markdown
The `argstring()` method does the same for the argument string only.
###Code
c.argstring()
###Output
_____no_output_____
###Markdown
With this, we can collect the basic information to identify calls – such that we can later correlate their events with success or failure.
Error Prevention
While collecting, we'd like to avoid collecting events in the collection infrastructure. The `items_to_ignore` attribute takes care of this.
###Code
class Collector(Collector):
def add_items_to_ignore(self,
items_to_ignore: List[Union[Type, Callable]]) \
-> None:
"""
Define additional classes and functions to ignore during collection
(typically `Debugger` classes using these collectors).
"""
self.items_to_ignore += items_to_ignore
###Output
_____no_output_____
###Markdown
If we exit a block without having collected anything, that's likely an error.
###Code
class Collector(Collector):
def __exit__(self, exc_tp: Type, exc_value: BaseException,
exc_traceback: TracebackType) -> Optional[bool]:
"""Exit the `with` block."""
ret = super().__exit__(exc_tp, exc_value, exc_traceback)
if not self._function:
if exc_tp:
return False # re-raise exception
else:
raise ValueError("No call collected")
return ret
###Output
_____no_output_____
###Markdown
Collecting Coverage
So far, our `Collector` class does not collect any events. Let us extend it such that it collects _coverage_ information – that is, the set of locations executed. To this end, we introduce a `CoverageCollector` subclass which saves the coverage in a set containing functions and line numbers.
###Code
from types import FrameType
from StackInspector import StackInspector
class CoverageCollector(Collector, StackInspector):
"""A class to record covered locations during execution."""
def __init__(self) -> None:
"""Constructor."""
super().__init__()
self._coverage: Coverage = set()
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
"""
Save coverage for an observed event.
"""
name = frame.f_code.co_name
function = self.search_func(name, frame)
if function is None:
function = self.create_function(frame)
location = (function, frame.f_lineno)
self._coverage.add(location)
###Output
_____no_output_____
###Markdown
We also override `events()` such that it returns the set of covered locations.
###Code
class CoverageCollector(CoverageCollector):
def events(self) -> Set[Tuple[str, int]]:
"""
Return the set of locations covered.
Each location comes as a pair (`function_name`, `lineno`).
"""
return {(func.__name__, lineno) for func, lineno in self._coverage}
###Output
_____no_output_____
###Markdown
The methods `coverage()` and `covered_functions()` allow precise access to the coverage obtained.
###Code
class CoverageCollector(CoverageCollector):
def covered_functions(self) -> Set[Callable]:
"""Return a set with all functions covered."""
return {func for func, lineno in self._coverage}
def coverage(self) -> Coverage:
"""Return a set (function, lineno) with all locations covered."""
return self._coverage
###Output
_____no_output_____
###Markdown
Here is how we can use `CoverageCollector` to determine the lines executed during a run of `remove_html_markup()`:
###Code
with CoverageCollector() as c:
remove_html_markup('abc')
c.events()
###Output
_____no_output_____
###Markdown
Sets of line numbers alone are not too revealing. They provide more insights if we actually list the code, highlighting these numbers:
###Code
import inspect
from bookutils import getsourcelines # like inspect.getsourcelines(), but in color
def code_with_coverage(function: Callable, coverage: Coverage) -> None:
source_lines, starting_line_number = \
getsourcelines(function)
line_number = starting_line_number
for line in source_lines:
marker = '*' if (function, line_number) in coverage else ' '
print(f"{line_number:4} {marker} {line}", end='')
line_number += 1
code_with_coverage(remove_html_markup, c.coverage())
###Output
1 * [34mdef[39;49;00m [32mremove_html_markup[39;49;00m(s): [37m# type: ignore[39;49;00m
2 * tag = [34mFalse[39;49;00m
3 * quote = [34mFalse[39;49;00m
4 * out = [33m"[39;49;00m[33m"[39;49;00m
5
6 * [34mfor[39;49;00m c [35min[39;49;00m s:
7 * [34mif[39;49;00m c == [33m'[39;49;00m[33m<[39;49;00m[33m'[39;49;00m [35mand[39;49;00m [35mnot[39;49;00m quote:
8 tag = [34mTrue[39;49;00m
9 * [34melif[39;49;00m c == [33m'[39;49;00m[33m>[39;49;00m[33m'[39;49;00m [35mand[39;49;00m [35mnot[39;49;00m quote:
10 tag = [34mFalse[39;49;00m
11 * [34melif[39;49;00m c == [33m'[39;49;00m[33m"[39;49;00m[33m'[39;49;00m [35mor[39;49;00m c == [33m"[39;49;00m[33m'[39;49;00m[33m"[39;49;00m [35mand[39;49;00m tag:
12 quote = [35mnot[39;49;00m quote
13 * [34melif[39;49;00m [35mnot[39;49;00m tag:
14 * out = out + c
15
16 * [34mreturn[39;49;00m out
###Markdown
Remember that the input `s` was `"abc"`? In this listing, we can see which lines were covered and which lines were not. From the listing already, we can see that `s` has neither tags nor quotes. Such coverage computation plays a big role in _testing_, as one wants tests to cover as many different aspects of program execution (and notably code) as possible. But also during debugging, code coverage is essential: If some code was not even executed in the failing run, then any change to it will have no effect.
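Since `events()` returns a plain set, such checks are easy to express directly. For instance, here is a small sketch testing whether a given location was executed in a failing run at all:
```python
# Sketch: was a given location executed in the failing run at all?
# If not, changing that line cannot affect this particular failure.
with CoverageCollector() as cov_fail:
    remove_html_markup('"abc"')

('remove_html_markup', 8) in cov_fail.events()   # Line 8 (`tag = True`) -> False here
```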
###Code
from bookutils import quiz
quiz('Let the input be `"<b>Don\'t do this!</b>"`. '
"Which of these lines are executed? Use the code to find out!",
[
"`tag = True`",
"`tag = False`",
"`quote = not quote`",
"`out = out + c`"
], "[ord(c) - ord('a') - 1 for c in 'cdf']")
###Output
_____no_output_____
###Markdown
To find the solution, try this out yourself:
###Code
with CoverageCollector() as c:
remove_html_markup("<b>Don't do this!</b>")
# code_with_coverage(remove_html_markup, c.coverage())
###Output
_____no_output_____
###Markdown
Computing Differences
Let us get back to the idea that we want to _correlate_ events with passing and failing outcomes. For this, we need to examine events in both _passing_ and _failing_ runs, and determine their _differences_ – since it is these differences we want to associate with their respective outcome.
A Base Class for Statistical Debugging
The `StatisticalDebugger` base class takes a collector class (such as `CoverageCollector`). Its `collect()` method creates a new collector of that very class, which will be maintained by the debugger. As argument, `collect()` takes a string characterizing the outcome (such as `'PASS'` or `'FAIL'`). This is how one would use it:
```python
debugger = StatisticalDebugger()
with debugger.collect('PASS'):
    some_passing_run()
with debugger.collect('PASS'):
    another_passing_run()
with debugger.collect('FAIL'):
    some_failing_run()
```
Let us implement `StatisticalDebugger`. The base class gets a collector class as argument:
###Code
class StatisticalDebugger:
"""A class to collect events for multiple outcomes."""
def __init__(self, collector_class: Type = CoverageCollector, log: bool = False):
"""Constructor. Use instances of `collector_class` to collect events."""
self.collector_class = collector_class
self.collectors: Dict[str, List[Collector]] = {}
self.log = log
###Output
_____no_output_____
###Markdown
The `collect()` method creates (and stores) a collector for the given outcome, using the given outcome to characterize the run. Any additional arguments are passed to the collector.
###Code
class StatisticalDebugger(StatisticalDebugger):
def collect(self, outcome: str, *args: Any, **kwargs: Any) -> Collector:
"""Return a collector for the given outcome.
Additional args are passed to the collector."""
collector = self.collector_class(*args, **kwargs)
collector.add_items_to_ignore([self.__class__])
return self.add_collector(outcome, collector)
def add_collector(self, outcome: str, collector: Collector) -> Collector:
if outcome not in self.collectors:
self.collectors[outcome] = []
self.collectors[outcome].append(collector)
return collector
###Output
_____no_output_____
###Markdown
The `all_events()` method produces a union of all events observed. If an outcome is given, it produces a union of all events with that outcome:
###Code
class StatisticalDebugger(StatisticalDebugger):
def all_events(self, outcome: Optional[str] = None) -> Set[Any]:
"""Return a set of all events observed."""
all_events = set()
if outcome:
if outcome in self.collectors:
for collector in self.collectors[outcome]:
all_events.update(collector.events())
else:
for outcome in self.collectors:
for collector in self.collectors[outcome]:
all_events.update(collector.events())
return all_events
###Output
_____no_output_____
###Markdown
Here's a simple example of `StatisticalDebugger` in action:
###Code
s = StatisticalDebugger()
with s.collect('PASS'):
remove_html_markup("abc")
with s.collect('PASS'):
remove_html_markup('<b>abc</b>')
with s.collect('FAIL'):
remove_html_markup('"abc"')
###Output
_____no_output_____
###Markdown
The method `all_events()` returns all events collected:
###Code
s.all_events()
###Output
_____no_output_____
###Markdown
If given an outcome as argument, we obtain all events with the given outcome.
###Code
s.all_events('FAIL')
###Output
_____no_output_____
###Markdown
The attribute `collectors` maps outcomes to lists of collectors:
###Code
s.collectors
###Output
_____no_output_____
###Markdown
Here's the collector of the first passing run:
###Code
s.collectors['PASS'][0].id()
s.collectors['PASS'][0].events()
###Output
_____no_output_____
###Markdown
To better highlight the differences between the collected events, we introduce a method `event_table()` that prints out whether an event took place in a run.

Excursion: Printing an Event Table
###Code
from IPython.display import Markdown
import html
class StatisticalDebugger(StatisticalDebugger):
def function(self) -> Optional[Callable]:
"""
Return the entry function from the events observed,
or None if ambiguous.
"""
names_seen = set()
functions = []
for outcome in self.collectors:
for collector in self.collectors[outcome]:
# We may have multiple copies of the function,
# but sharing the same name
func = collector.function()
if func.__name__ not in names_seen:
functions.append(func)
names_seen.add(func.__name__)
if len(functions) != 1:
return None # ambiguous
return functions[0]
def covered_functions(self) -> Set[Callable]:
"""Return a set of all functions observed."""
functions = set()
for outcome in self.collectors:
for collector in self.collectors[outcome]:
functions |= collector.covered_functions()
return functions
def coverage(self) -> Coverage:
"""Return a set of all (functions, line_numbers) observed"""
coverage = set()
for outcome in self.collectors:
for collector in self.collectors[outcome]:
coverage |= collector.coverage()
return coverage
def color(self, event: Any) -> Optional[str]:
"""
Return a color for the given event, or None.
To be overloaded in subclasses.
"""
return None
def tooltip(self, event: Any) -> Optional[str]:
"""
Return a tooltip string for the given event, or None.
To be overloaded in subclasses.
"""
return None
def event_str(self, event: Any) -> str:
"""Format the given event. To be overloaded in subclasses."""
if isinstance(event, str):
return event
if isinstance(event, tuple):
return ":".join(self.event_str(elem) for elem in event)
return str(event)
def event_table_text(self, *, args: bool = False, color: bool = False) -> str:
"""
Print out a table of events observed.
If `args` is True, use arguments as headers.
If `color` is True, use colors.
"""
sep = ' | '
all_events = self.all_events()
longest_event = max(len(f"{self.event_str(event)}")
for event in all_events)
out = ""
# Header
if args:
out += '| '
func = self.function()
if func:
out += '`' + func.__name__ + '`'
out += sep
for name in self.collectors:
for collector in self.collectors[name]:
out += '`' + collector.argstring() + '`' + sep
out += '\n'
else:
out += '| ' + ' ' * longest_event + sep
for name in self.collectors:
for i in range(len(self.collectors[name])):
out += name + sep
out += '\n'
out += '| ' + '-' * longest_event + sep
for name in self.collectors:
for i in range(len(self.collectors[name])):
out += '-' * len(name) + sep
out += '\n'
# Data
for event in sorted(all_events):
event_name = self.event_str(event).rjust(longest_event)
tooltip = self.tooltip(event)
if tooltip:
title = f' title="{tooltip}"'
else:
title = ''
if color:
color_name = self.color(event)
if color_name:
event_name = \
f'<samp style="background-color: {color_name}"{title}>' \
f'{html.escape(event_name)}' \
f'</samp>'
out += f"| {event_name}" + sep
for name in self.collectors:
for collector in self.collectors[name]:
out += ' ' * (len(name) - 1)
if event in collector.events():
out += "X"
else:
out += "-"
out += sep
out += '\n'
return out
def event_table(self, **_args: Any) -> Any:
"""Print out event table in Markdown format."""
return Markdown(self.event_table_text(**_args))
def __repr__(self) -> str:
return self.event_table_text()
def _repr_markdown_(self) -> str:
return self.event_table_text(args=True, color=True)
###Output
_____no_output_____
###Markdown
End of Excursion
###Code
s = StatisticalDebugger()
with s.collect('PASS'):
remove_html_markup("abc")
with s.collect('PASS'):
remove_html_markup('<b>abc</b>')
with s.collect('FAIL'):
remove_html_markup('"abc"')
s.event_table(args=True)
quiz("How many lines are executed in the failing run only?",
[
"One",
"Two",
"Three"
], 'len([12])')
###Output
_____no_output_____
###Markdown
Indeed, the fact that Line 12 is executed only in the failing run is exactly the kind of correlation we are looking for.

Collecting Passing and Failing Runs

While our `StatisticalDebugger` class allows arbitrary outcomes, we are typically only interested in two outcomes, namely _passing_ vs. _failing_ runs. We therefore introduce a specialized `DifferenceDebugger` class that provides customized methods to collect and access passing and failing runs.
###Code
class DifferenceDebugger(StatisticalDebugger):
"""A class to collect events for passing and failing outcomes."""
PASS = 'PASS'
FAIL = 'FAIL'
def collect_pass(self, *args: Any, **kwargs: Any) -> Collector:
"""Return a collector for passing runs."""
return self.collect(self.PASS, *args, **kwargs)
def collect_fail(self, *args: Any, **kwargs: Any) -> Collector:
"""Return a collector for failing runs."""
return self.collect(self.FAIL, *args, **kwargs)
def pass_collectors(self) -> List[Collector]:
return self.collectors[self.PASS]
def fail_collectors(self) -> List[Collector]:
return self.collectors[self.FAIL]
def all_fail_events(self) -> Set[Any]:
"""Return all events observed in failing runs."""
return self.all_events(self.FAIL)
def all_pass_events(self) -> Set[Any]:
"""Return all events observed in passing runs."""
return self.all_events(self.PASS)
def only_fail_events(self) -> Set[Any]:
"""Return all events observed only in failing runs."""
return self.all_fail_events() - self.all_pass_events()
def only_pass_events(self) -> Set[Any]:
"""Return all events observed only in passing runs."""
return self.all_pass_events() - self.all_fail_events()
###Output
_____no_output_____
###Markdown
We can use `DifferenceDebugger` just as a `StatisticalDebugger`:
###Code
# ignore
T1 = TypeVar('T1', bound='DifferenceDebugger')
def test_debugger_html_simple(debugger: T1) -> T1:
with debugger.collect_pass():
remove_html_markup('abc')
with debugger.collect_pass():
remove_html_markup('<b>abc</b>')
with debugger.collect_fail():
remove_html_markup('"abc"')
return debugger
###Output
_____no_output_____
###Markdown
However, since the outcome of tests may not always be predetermined, we provide a simpler interface for tests that can fail (= raise an exception) or pass (not raise an exception).
###Code
class DifferenceDebugger(DifferenceDebugger):
def __enter__(self) -> Any:
"""Enter a `with` block. Collect coverage and outcome;
classify as FAIL if the block raises an exception,
and PASS if it does not.
"""
self.collector = self.collector_class()
self.collector.add_items_to_ignore([self.__class__])
self.collector.__enter__()
return self
def __exit__(self, exc_tp: Type, exc_value: BaseException,
exc_traceback: TracebackType) -> Optional[bool]:
"""Exit the `with` block."""
status = self.collector.__exit__(exc_tp, exc_value, exc_traceback)
if status is None:
pass
else:
return False # Internal error; re-raise exception
if exc_tp is None:
outcome = self.PASS
else:
outcome = self.FAIL
self.add_collector(outcome, self.collector)
return True # Ignore exception, if any
###Output
_____no_output_____
###Markdown
Using this interface, we can rewrite `test_debugger_html()`:
###Code
# ignore
T2 = TypeVar('T2', bound='DifferenceDebugger')
def test_debugger_html(debugger: T2) -> T2:
with debugger:
remove_html_markup('abc')
with debugger:
remove_html_markup('<b>abc</b>')
with debugger:
remove_html_markup('"abc"')
assert False # Mark test as failing
return debugger
test_debugger_html(DifferenceDebugger())
###Output
_____no_output_____
###Markdown
Analyzing Events

Let us now focus on _analyzing_ the events collected. Since events come back as _sets_, we can compute _unions_ and _differences_ between these sets. For instance, we can compute which lines were executed in _any_ of the passing runs of `test_debugger_html()`, above:
###Code
debugger = test_debugger_html(DifferenceDebugger())
pass_1_events = debugger.pass_collectors()[0].events()
pass_2_events = debugger.pass_collectors()[1].events()
in_any_pass = pass_1_events | pass_2_events
in_any_pass
###Output
_____no_output_____
###Markdown
Likewise, we can determine which lines were _only_ executed in the failing run:
###Code
fail_events = debugger.fail_collectors()[0].events()
only_in_fail = fail_events - in_any_pass
only_in_fail
###Output
_____no_output_____
###Markdown
And we see that the "failing" run is characterized by processing quotes:
###Code
code_with_coverage(remove_html_markup, only_in_fail)
debugger = test_debugger_html(DifferenceDebugger())
debugger.all_events()
###Output
_____no_output_____
###Markdown
These are the lines executed only in the failing run:
###Code
debugger.only_fail_events()
###Output
_____no_output_____
###Markdown
These are the lines executed only in the passing runs:
###Code
debugger.only_pass_events()
###Output
_____no_output_____
###Markdown
Again, having these lines individually is neat, but things become much more interesting if we can see the associated code lines just as well. That's what we will do in the next section.

Visualizing Differences

To show correlations of line coverage in context, we introduce a number of _visualization_ techniques that _highlight_ code with different colors.

Discrete Spectrum

The first idea is to use a _discrete_ spectrum of three colors:

* _red_ for code executed in failing runs only
* _green_ for code executed in passing runs only
* _yellow_ for code executed in both passing and failing runs.

Code that is not executed stays unhighlighted. We first introduce an abstract class `SpectrumDebugger` that provides the essential functions. `suspiciousness()` returns a value between 0 and 1 indicating the suspiciousness of the given event - or `None` if unknown.
###Code
class SpectrumDebugger(DifferenceDebugger):
def suspiciousness(self, event: Any) -> Optional[float]:
"""
Return a suspiciousness value in the range [0, 1.0]
for the given event, or `None` if unknown.
To be overloaded in subclasses.
"""
return None
###Output
_____no_output_____
###Markdown
The `tooltip()` and `percentage()` methods convert the suspiciousness into a human-readable form.
###Code
class SpectrumDebugger(SpectrumDebugger):
def tooltip(self, event: Any) -> str:
"""
Return a tooltip for the given event (default: percentage).
To be overloaded in subclasses.
"""
return self.percentage(event)
def percentage(self, event: Any) -> str:
"""
Return the suspiciousness for the given event as percentage string.
"""
suspiciousness = self.suspiciousness(event)
if suspiciousness is not None:
return str(int(suspiciousness * 100)).rjust(3) + '%'
else:
return ' ' * len('100%')
###Output
_____no_output_____
###Markdown
The `code()` method takes a function and shows each of its source code lines using the given spectrum, using HTML markup:
###Code
class SpectrumDebugger(SpectrumDebugger):
def code(self, functions: Optional[Set[Callable]] = None, *,
color: bool = False, suspiciousness: bool = False,
line_numbers: bool = True) -> str:
"""
Return a listing of `functions` (default: covered functions).
If `color` is True, render as HTML, using suspiciousness colors.
If `suspiciousness` is True, include suspiciousness values.
If `line_numbers` is True (default), include line numbers.
"""
if not functions:
functions = self.covered_functions()
out = ""
seen = set()
for function in functions:
source_lines, starting_line_number = \
inspect.getsourcelines(function)
if (function.__name__, starting_line_number) in seen:
continue
seen.add((function.__name__, starting_line_number))
if out:
out += '\n'
if color:
out += '<p/>'
line_number = starting_line_number
for line in source_lines:
if color:
line = html.escape(line)
if line.strip() == '':
line = ' '
location = (function.__name__, line_number)
location_suspiciousness = self.suspiciousness(location)
if location_suspiciousness is not None:
tooltip = f"Line {line_number}: {self.tooltip(location)}"
else:
tooltip = f"Line {line_number}: not executed"
if suspiciousness:
line = self.percentage(location) + ' ' + line
if line_numbers:
line = str(line_number).rjust(4) + ' ' + line
line_color = self.color(location)
if color and line_color:
line = f'''<pre style="background-color:{line_color}"
title="{tooltip}">{line.rstrip()}</pre>'''
elif color:
line = f'<pre title="{tooltip}">{line}</pre>'
else:
line = line.rstrip()
out += line + '\n'
line_number += 1
return out
###Output
_____no_output_____
###Markdown
We introduce a few helper methods to visualize the code with colors in various forms.
###Code
class SpectrumDebugger(SpectrumDebugger):
def _repr_html_(self) -> str:
"""When output in Jupyter, visualize as HTML"""
return self.code(color=True)
def __str__(self) -> str:
"""Show code as string"""
return self.code(color=False, suspiciousness=True)
def __repr__(self) -> str:
"""Show code as string"""
return self.code(color=False, suspiciousness=True)
###Output
_____no_output_____
###Markdown
So far, however, central methods like `suspiciousness()` or `color()` were abstract – that is, to be defined in subclasses. Our `DiscreteSpectrumDebugger` subclass provides concrete implementations for these, with `color()` returning one of the three colors depending on whether the given line was executed in failing runs only, in passing runs only, or in both:
###Code
class DiscreteSpectrumDebugger(SpectrumDebugger):
"""Visualize differences between executions using three discrete colors"""
def suspiciousness(self, event: Any) -> Optional[float]:
"""
Return a suspiciousness value [0, 1.0]
for the given event, or `None` if unknown.
"""
passing = self.all_pass_events()
failing = self.all_fail_events()
if event in passing and event in failing:
return 0.5
elif event in failing:
return 1.0
elif event in passing:
return 0.0
else:
return None
def color(self, event: Any) -> Optional[str]:
"""
Return a HTML color for the given event.
"""
suspiciousness = self.suspiciousness(event)
if suspiciousness is None:
return None
if suspiciousness > 0.8:
return 'mistyrose'
if suspiciousness >= 0.5:
return 'lightyellow'
return 'honeydew'
def tooltip(self, event: Any) -> str:
"""Return a tooltip for the given event."""
passing = self.all_pass_events()
failing = self.all_fail_events()
if event in passing and event in failing:
return "in passing and failing runs"
elif event in failing:
return "only in failing runs"
elif event in passing:
return "only in passing runs"
else:
return "never"
###Output
_____no_output_____
###Markdown
This is what the `only_pass_events()` and `only_fail_events()` sets look like when visualized with code. The "culprit" line is well highlighted:
###Code
debugger = test_debugger_html(DiscreteSpectrumDebugger())
debugger
###Output
_____no_output_____
###Markdown
We can clearly see that the failure is correlated with the presence of quotes in the input string (which is an important hint!). But does this also show us _immediately_ where the defect to be fixed is?
###Code
quiz("Does the line `quote = not quote` actually contain the defect?",
[
"Yes, it should be fixed",
"No, the defect is elsewhere"
], '164 * 2 % 326')
###Output
_____no_output_____
###Markdown
Indeed, it is the _governing condition_ that is wrong – that is, the condition that caused Line 12 to be executed in the first place. In order to fix a program, we have to find a location that

1. _causes_ the failure (i.e., it can be changed to make the failure go away); and
2. is a _defect_ (i.e., contains an error).

In our example above, the highlighted code line is a _symptom_ of the error. To some extent, it is also a _cause_, since, say, commenting it out would also resolve the given failure, at the cost of causing other failures. However, the preceding condition also is a cause, as is the presence of quotes in the input.

Only one of these also is a _defect_, though, and that is the preceding condition. Hence, while correlations can provide important hints, they do not necessarily locate defects.

For those of us who may not have color HTML output ready, simply printing the debugger lists suspiciousness values as percentages.
###Code
print(debugger)
###Output
1 50% def remove_html_markup(s): # type: ignore
2 50% tag = False
3 50% quote = False
4 50% out = ""
5
6 50% for c in s:
7 50% if c == '<' and not quote:
8 0% tag = True
9 50% elif c == '>' and not quote:
10 0% tag = False
11 50% elif c == '"' or c == "'" and tag:
12 100% quote = not quote
13 50% elif not tag:
14 50% out = out + c
15
16 50% return out
###Markdown
Continuous Spectrum

The criterion that an event should _only_ occur in failing runs (and not in passing runs) can be too aggressive. In particular, if we have another run that executes the "culprit" lines, but does _not_ fail, our "only in fail" criterion will no longer be helpful. Here is an example. The input

```html
<b color="blue">text</b>
```

will trigger the "culprit" line

```python
quote = not quote
```

but actually produce an output where the tags are properly stripped:
###Code
remove_html_markup('<b color="blue">text</b>')
###Output
_____no_output_____
###Markdown
As a consequence, we no longer have lines that are being executed only in failing runs:
###Code
debugger = test_debugger_html(DiscreteSpectrumDebugger())
with debugger.collect_pass():
remove_html_markup('<b link="blue"></b>')
debugger.only_fail_events()
###Output
_____no_output_____
###Markdown
In our spectrum output, the effect now is that the "culprit" line is as yellow as all others.
###Code
debugger
###Output
_____no_output_____
###Markdown
We therefore introduce a different method for highlighting lines, based on their _relative_ occurrence with respect to all runs: If a line has been _mostly_ executed in failing runs, its color should shift towards red; if a line has been _mostly_ executed in passing runs, its color should shift towards green. This _continuous spectrum_ has been introduced by the seminal _Tarantula_ tool \cite{Jones2002}. In Tarantula, the color _hue_ for each line is defined as follows: $$\textit{color hue}(\textit{line}) = \textit{low color(red)} + \frac{\%\textit{passed}(\textit{line})}{\%\textit{passed}(\textit{line}) + \%\textit{failed}(\textit{line})} \times \textit{color range}$$ Here, `%passed` and `%failed` denote the percentage at which a line has been executed in passing and failing runs, respectively. A hue of 0.0 stands for red, a hue of 1.0 stands for green, and a hue of 0.5 stands for equal fractions of red and green, yielding yellow. We can implement these measures right away as methods in a new `ContinuousSpectrumDebugger` class:
###Code
class ContinuousSpectrumDebugger(DiscreteSpectrumDebugger):
"""Visualize differences between executions using a color spectrum"""
def collectors_with_event(self, event: Any, category: str) -> Set[Collector]:
"""
Return all collectors in a category
that observed the given event.
"""
all_runs = self.collectors[category]
collectors_with_event = set(collector for collector in all_runs
if event in collector.events())
return collectors_with_event
def collectors_without_event(self, event: Any, category: str) -> Set[Collector]:
"""
Return all collectors in a category
that did not observe the given event.
"""
all_runs = self.collectors[category]
collectors_without_event = set(collector for collector in all_runs
if event not in collector.events())
return collectors_without_event
def event_fraction(self, event: Any, category: str) -> float:
if category not in self.collectors:
return 0.0
all_collectors = self.collectors[category]
collectors_with_event = self.collectors_with_event(event, category)
fraction = len(collectors_with_event) / len(all_collectors)
# print(f"%{category}({event}) = {fraction}")
return fraction
def passed_fraction(self, event: Any) -> float:
return self.event_fraction(event, self.PASS)
def failed_fraction(self, event: Any) -> float:
return self.event_fraction(event, self.FAIL)
def hue(self, event: Any) -> Optional[float]:
"""Return a color hue from 0.0 (red) to 1.0 (green)."""
passed = self.passed_fraction(event)
failed = self.failed_fraction(event)
if passed + failed > 0:
return passed / (passed + failed)
else:
return None
###Output
_____no_output_____
###Markdown
Having a continuous hue also implies a continuous suspiciousness and associated tooltips:
###Code
class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger):
def suspiciousness(self, event: Any) -> Optional[float]:
hue = self.hue(event)
if hue is None:
return None
return 1 - hue
def tooltip(self, event: Any) -> str:
return self.percentage(event)
###Output
_____no_output_____
###Markdown
The hue for lines executed only in failing runs is (deep) red, as expected:
###Code
debugger = test_debugger_html(ContinuousSpectrumDebugger())
for location in debugger.only_fail_events():
print(location, debugger.hue(location))
###Output
('remove_html_markup', 12) 0.0
###Markdown
Likewise, the hue for lines executed in passing runs is (deep) green:
###Code
for location in debugger.only_pass_events():
print(location, debugger.hue(location))
###Output
('remove_html_markup', 8) 1.0
('remove_html_markup', 10) 1.0
###Markdown
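These values follow directly from the definition: Line 12 is executed in the single failing run, but in none of the two passing runs, so $\%\textit{passed} = 0$ and $\%\textit{failed} = 1$, yielding a hue of $0/(0 + 1) = 0$ (red). Lines 8 and 10, conversely, are executed only in passing runs, yielding a hue of $1$ (green); a line executed in all runs gets $1/(1 + 1) = 0.5$ (yellow).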
The Tarantula tool not only sets the hue for a line, but also uses _brightness_ as a measure of support – that is, how often the line was executed at all. The brighter a line, the stronger the correlation with a passing or failing outcome. The brightness is defined as follows: $$\textit{brightness}(\textit{line}) = \max(\%\textit{passed}(\textit{line}), \%\textit{failed}(\textit{line}))$$ and it is easily implemented, too:
###Code
class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger):
def brightness(self, event: Any) -> float:
return max(self.passed_fraction(event), self.failed_fraction(event))
###Output
_____no_output_____
###Markdown
Our single "only in fail" line has a brightness of 1.0 (the maximum).
###Code
debugger = test_debugger_html(ContinuousSpectrumDebugger())
for location in debugger.only_fail_events():
print(location, debugger.brightness(location))
###Output
('remove_html_markup', 12) 1.0
###Markdown
With this, we can now define a color for each line. To this end, we override the (previously discrete) `color()` method such that it returns a color specification giving hue and brightness. We use the HTML format `hsl(hue, saturation, lightness)` where the hue is given as a value between 0 and 360 (0 is red, 120 is green) and saturation and lightness are provided as percentages.
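For example, the "culprit" Line 12, with hue 0.0 and brightness 1.0, will be rendered as `hsl(0.0, 100.0%, 80%)` – a light red – whereas a line with hue 0.5 and full brightness yields `hsl(60.0, 100.0%, 80%)`, a light yellow.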
###Code
class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger):
def color(self, event: Any) -> Optional[str]:
hue = self.hue(event)
if hue is None:
return None
saturation = self.brightness(event)
# HSL color values are specified with:
# hsl(hue, saturation, lightness).
return f"hsl({hue * 120}, {saturation * 100}%, 80%)"
debugger = test_debugger_html(ContinuousSpectrumDebugger())
###Output
_____no_output_____
###Markdown
Lines executed only in failing runs are still shown in red:
###Code
for location in debugger.only_fail_events():
print(location, debugger.color(location))
###Output
('remove_html_markup', 12) hsl(0.0, 100.0%, 80%)
###Markdown
... whereas lines executed only in passing runs are still shown in green:
###Code
for location in debugger.only_pass_events():
print(location, debugger.color(location))
debugger
###Output
_____no_output_____
###Markdown
What happens with our `quote = not quote` "culprit" line if it is executed in passing runs, too?
###Code
with debugger.collect_pass():
out = remove_html_markup('<b link="blue"></b>')
quiz('In which color will the `quote = not quote` "culprit" line '
'be shown after executing the above code?',
[
'<span style="background-color: hsl(120.0, 50.0%, 80%)">Green</span>',
'<span style="background-color: hsl(60.0, 100.0%, 80%)">Yellow</span>',
'<span style="background-color: hsl(30.0, 100.0%, 80%)">Orange</span>',
'<span style="background-color: hsl(0.0, 100.0%, 80%)">Red</span>'
], '999 // 333')
###Output
_____no_output_____
###Markdown
We see that it still is shown with an orange-red tint.
###Code
debugger
###Output
_____no_output_____
###Markdown
Here's another example, coming right from the Tarantula paper. The `middle()` function takes three numbers `x`, `y`, and `z`, and returns the one that is neither the minimum nor the maximum of the three:
###Code
def middle(x, y, z): # type: ignore
if y < z:
if x < y:
return y
elif x < z:
return y
else:
if x > y:
return y
elif x > z:
return x
return z
middle(1, 2, 3)
###Output
_____no_output_____
###Markdown
Unfortunately, `middle()` can fail:
###Code
middle(2, 1, 3)
###Output
_____no_output_____
###Markdown
Let us see whether we can find the bug with a few additional test cases:
###Code
# ignore
T3 = TypeVar('T3', bound='DifferenceDebugger')
def test_debugger_middle(debugger: T3) -> T3:
with debugger.collect_pass():
middle(3, 3, 5)
with debugger.collect_pass():
middle(1, 2, 3)
with debugger.collect_pass():
middle(3, 2, 1)
with debugger.collect_pass():
middle(5, 5, 5)
with debugger.collect_pass():
middle(5, 3, 4)
with debugger.collect_fail():
middle(2, 1, 3)
return debugger
###Output
_____no_output_____
###Markdown
Note that in order to collect data from multiple function invocations, you need to have a separate `with` clause for every invocation. The following will _not_ work correctly:

```python
with debugger.collect_pass():
    middle(3, 3, 5)
    middle(1, 2, 3)
    ...
```
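If you have many inputs, simply open a fresh `with` block inside a loop, one block per invocation – this is also how the larger test suites later in this chapter are collected. A minimal sketch, assuming a `DifferenceDebugger` instance named `debugger`:

```python
# One `with` block per invocation, so each run gets its own collector:
for x, y, z in [(3, 3, 5), (1, 2, 3), (3, 2, 1)]:
    with debugger.collect_pass():
        middle(x, y, z)
```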
###Code
debugger = test_debugger_middle(ContinuousSpectrumDebugger())
debugger.event_table(args=True)
###Output
_____no_output_____
###Markdown
Here comes the visualization. We see that the `return y` line is the culprit here – and actually also the one to be fixed.
###Code
debugger
quiz("Which of the above lines should be fixed?",
[
'<span style="background-color: hsl(45.0, 100%, 80%)">Line 3: `elif x < y`</span>',
'<span style="background-color: hsl(34.28571428571429, 100.0%, 80%)">Line 5: `elif x < z`</span>',
'<span style="background-color: hsl(20.000000000000004, 100.0%, 80%)">Line 6: `return y`</span>',
'<span style="background-color: hsl(120.0, 20.0%, 80%)">Line 9: `return y`</span>',
], r'len(" middle ".strip()[:3])')
###Output
_____no_output_____
###Markdown
Indeed, in the `middle()` example, the "reddest" line is also the one to be fixed. Here is the fixed version:
###Code
def middle_fixed(x, y, z): # type: ignore
if y < z:
if x < y:
return y
elif x < z:
return x
else:
if x > y:
return y
elif x > z:
return x
return z
middle_fixed(2, 1, 3)
###Output
_____no_output_____
###Markdown
Ranking Lines by Suspiciousness

In a large program, there can be several locations (and events) that could be flagged as suspicious. It suffices that some large code block of, say, 1,000 lines is mostly executed in failing runs, and then all of this code block will be visualized in some shade of red. To further highlight the "most suspicious" events, one idea is to use a _ranking_ – that is, coming up with a list of events where those events most correlated with failures would be shown at the top. The programmer would then examine these events one by one and proceed down the list.

We will show how this works for two "correlation" metrics – first the _Tarantula_ metric, as introduced above, and then the _Ochiai_ metric, which has been shown to be one of the best "ranking" metrics.

We introduce a base class `RankingDebugger` with an abstract method `suspiciousness()` to be overloaded in subclasses. The method `rank()` returns a list of all events observed, sorted by suspiciousness, highest first.
###Code
class RankingDebugger(DiscreteSpectrumDebugger):
"""Rank events by their suspiciousness"""
def rank(self) -> List[Any]:
"""Return a list of events, sorted by suspiciousness, highest first."""
def susp(event: Any) -> float:
suspiciousness = self.suspiciousness(event)
assert suspiciousness is not None
return suspiciousness
events = list(self.all_events())
events.sort(key=susp, reverse=True)
return events
def __repr__(self) -> str:
return repr(self.rank())
###Output
_____no_output_____
###Markdown
The Tarantula Metric

We can use the Tarantula metric to sort lines according to their suspiciousness. The "redder" a line (a hue of 0.0), the more suspicious it is. We can simply define

$$\textit{suspiciousness}_\textit{tarantula}(\textit{event}) = 1 - \textit{color hue}(\textit{event})$$

where $\textit{color hue}$ is as defined above. This is exactly the `suspiciousness()` function as already implemented in our `ContinuousSpectrumDebugger`. We introduce the `TarantulaDebugger` class, inheriting visualization capabilities from the `ContinuousSpectrumDebugger` class as well as the suspiciousness features from the `RankingDebugger` class.
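As a quick check of the metric on the HTML runs above: Line 12 has hue 0 and thus suspiciousness $1 - 0 = 1.0$, Lines 8 and 10 have hue 1 and thus suspiciousness $0.0$, and every other executed line gets $0.5$ – matching the percentages shown by `print(debugger)` earlier.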
###Code
class TarantulaDebugger(ContinuousSpectrumDebugger, RankingDebugger):
"""Spectrum-based Debugger using the Tarantula metric for suspiciousness"""
pass
###Output
_____no_output_____
###Markdown
Let us list `remove_html_markup()` with highlighted lines again:
###Code
tarantula_html = test_debugger_html(TarantulaDebugger())
tarantula_html
###Output
_____no_output_____
###Markdown
Here's our ranking of lines, from most suspicious to least suspicious:
###Code
tarantula_html.rank()
tarantula_html.suspiciousness(tarantula_html.rank()[0])
###Output
_____no_output_____
###Markdown
We see that the first line in the list is indeed the most suspicious; the two "green" lines come at the very end. For the `middle()` function, we also obtain a ranking from "reddest" to "greenest".
###Code
tarantula_middle = test_debugger_middle(TarantulaDebugger())
tarantula_middle
tarantula_middle.rank()
tarantula_middle.suspiciousness(tarantula_middle.rank()[0])
###Output
_____no_output_____
###Markdown
The Ochiai Metric

The _Ochiai_ Metric \cite{Ochiai1957}, first introduced in the biology domain \cite{daSilvaMeyer2004} and later applied for fault localization by Abreu et al. \cite{Abreu2009}, is defined as follows:

$$\textit{suspiciousness}_\textit{ochiai} = \frac{\textit{failed}(\textit{event})}{\sqrt{\bigl(\textit{failed}(\textit{event}) + \textit{not-in-failed}(\textit{event})\bigr)\times\bigl(\textit{failed}(\textit{event}) + \textit{passed}(\textit{event})\bigr)}}$$

where

* $\textit{failed}(\textit{event})$ is the number of times the event occurred in _failing_ runs
* $\textit{not-in-failed}(\textit{event})$ is the number of times the event did _not_ occur in failing runs
* $\textit{passed}(\textit{event})$ is the number of times the event occurred in _passing_ runs.

We can easily implement this formula:
###Code
import math
class OchiaiDebugger(ContinuousSpectrumDebugger, RankingDebugger):
"""Spectrum-based Debugger using the Ochiai metric for suspiciousness"""
def suspiciousness(self, event: Any) -> Optional[float]:
failed = len(self.collectors_with_event(event, self.FAIL))
not_in_failed = len(self.collectors_without_event(event, self.FAIL))
passed = len(self.collectors_with_event(event, self.PASS))
try:
return failed / math.sqrt((failed + not_in_failed) * (failed + passed))
except ZeroDivisionError:
return None
def hue(self, event: Any) -> Optional[float]:
suspiciousness = self.suspiciousness(event)
if suspiciousness is None:
return None
return 1 - suspiciousness
###Output
_____no_output_____
###Markdown
Applied to the `remove_html_markup()` function, the individual suspiciousness scores differ from Tarantula. However, we obtain a very similar visualization, and the same ranking.
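For instance, Line 12 occurs in the single failing run and in no passing run, so $\textit{failed} = 1$, $\textit{not-in-failed} = 0$, and $\textit{passed} = 0$, which gives a suspiciousness of $1/\sqrt{(1 + 0) \times (1 + 0)} = 1.0$ – just as with Tarantula. A line executed in the failing run and in both passing runs, however, now gets $1/\sqrt{(1 + 0) \times (1 + 2)} \approx 0.58$ rather than 0.5, and a line executed only in passing runs gets a suspiciousness of 0.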
###Code
ochiai_html = test_debugger_html(OchiaiDebugger())
ochiai_html
ochiai_html.rank()
ochiai_html.suspiciousness(ochiai_html.rank()[0])
###Output
_____no_output_____
###Markdown
The same observations also apply for the `middle()` function.
###Code
ochiai_middle = test_debugger_middle(OchiaiDebugger())
ochiai_middle
ochiai_middle.rank()
ochiai_middle.suspiciousness(ochiai_middle.rank()[0])
###Output
_____no_output_____
###Markdown
How Useful is Ranking?

So, which metric is better? The standard method to evaluate such rankings is to determine a _ground truth_ – that is, the set of locations that eventually are fixed – and to check at which point in the ranking any such location occurs – the earlier, the better. In our `remove_html_markup()` and `middle()` examples, both the Tarantula and the Ochiai metric perform flawlessly, as the "culprit" line is always ranked at the top. However, this need not always be the case; the exact performance depends on the nature of the code and the observed runs. (Also, the question of whether there always is exactly one possible location where the program can be fixed is open for discussion.)

You will be surprised that over time, _several dozen_ metrics have been proposed \cite{Wong2016}, each performing somewhat better or somewhat worse depending on which benchmark they were applied on. The two metrics discussed above each have their merits – the Tarantula metric was among the first such metrics, and the Ochiai metric is generally shown to be among the most effective ones \cite{Abreu2009}.

While rankings can be easily _evaluated_, it is not necessarily clear whether and how much they serve programmers. As stated above, the assumption of rankings is that developers examine one potentially defective statement after another until they find the actually defective one. However, in a series of human studies with developers, Parnin and Orso \cite{Parnin2011} found that this assumption may not hold:

> It is unclear whether developers can actually determine the faulty nature of a statement by simply looking at it, without any additional information (e.g., the state of the program when the statement was executed or the statements that were executed before or after that one).

In their study, they found that rankings could help complete a task faster, but this effect was limited to experienced developers and simpler code. Artificially changing the rank of faulty statements had little to no effect, implying that developers would not strictly follow the ranked list of statements, but rather search through the code to understand it. At this point, a _visualization_ as in the Tarantula tool can be helpful to programmers as it _guides_ the search, but a _ranking_ that _defines_ where to search may be less useful.

Having said that, ranking has its merits – notably when it comes to informing _automated_ debugging techniques. In the [chapter on program repair](Repairer.ipynb), we will see how ranked lists of potentially faulty statements tell automated repair techniques where to try to repair the program first. And once such a repair is successful, we have a very strong indication of where and how the program could be fixed!

Using Large Test Suites

In fault localization, the larger and the more thorough the test suite, the higher the precision. Let us try out what happens if we extend the `middle()` test suite with additional test cases. The function `middle_testcase()` returns a random input for `middle()`:
###Code
import random
def middle_testcase() -> Tuple[int, int, int]:
x = random.randrange(10)
y = random.randrange(10)
z = random.randrange(10)
return x, y, z
[middle_testcase() for i in range(5)]
###Output
_____no_output_____
###Markdown
The function `middle_test()` simply checks if `middle()` operates correctly – by placing `x`, `y`, and `z` in a list, sorting it, and checking the middle argument. If `middle()` fails, `middle_test()` raises an exception.
###Code
def middle_test(x: int, y: int, z: int) -> None:
m = middle(x, y, z)
assert m == sorted([x, y, z])[1]
middle_test(4, 5, 6)
from ExpectError import ExpectError
with ExpectError():
middle_test(2, 1, 3)
###Output
Traceback (most recent call last):
File "<ipython-input-1-ae2957225406>", line 2, in <module>
middle_test(2, 1, 3)
File "<ipython-input-1-e1407680b9f2>", line 3, in middle_test
assert m == sorted([x, y, z])[1]
AssertionError (expected)
###Markdown
The function `middle_passing_testcase()` searches and returns a triple `x`, `y`, `z` that causes `middle_test()` to pass.
###Code
def middle_passing_testcase() -> Tuple[int, int, int]:
while True:
try:
x, y, z = middle_testcase()
middle_test(x, y, z)
return x, y, z
except AssertionError:
pass
(x, y, z) = middle_passing_testcase()
m = middle(x, y, z)
print(f"middle({x}, {y}, {z}) = {m}")
###Output
middle(2, 6, 7) = 6
###Markdown
The function `middle_failing_testcase()` does the same; but its triple `x`, `y`, `z` causes `middle_test()` to fail.
###Code
def middle_failing_testcase() -> Tuple[int, int, int]:
while True:
try:
x, y, z = middle_testcase()
middle_test(x, y, z)
except AssertionError:
return x, y, z
(x, y, z) = middle_failing_testcase()
m = middle(x, y, z)
print(f"middle({x}, {y}, {z}) = {m}")
###Output
middle(5, 4, 6) = 4
###Markdown
With these, we can define two sets of test cases, each with 100 inputs.
###Code
MIDDLE_TESTS = 100
MIDDLE_PASSING_TESTCASES = [middle_passing_testcase()
for i in range(MIDDLE_TESTS)]
MIDDLE_FAILING_TESTCASES = [middle_failing_testcase()
for i in range(MIDDLE_TESTS)]
###Output
_____no_output_____
###Markdown
Let us run the `OchiaiDebugger` with these two test sets.
###Code
ochiai_middle = OchiaiDebugger()
for x, y, z in MIDDLE_PASSING_TESTCASES:
with ochiai_middle.collect_pass():
middle(x, y, z)
for x, y, z in MIDDLE_FAILING_TESTCASES:
with ochiai_middle.collect_fail():
middle(x, y, z)
ochiai_middle
###Output
_____no_output_____
###Markdown
We see that the "culprit" line is still the most likely to be fixed, but the two conditions leading to the error (`x < y` and `x < z`) are also listed as potentially faulty. That is because the error might also be fixed by changing these conditions – although this would result in a more complex fix.

Other Events besides Coverage

We close this chapter with two directions for further thought. If you wondered why, in the code above, we were mostly talking about `events` rather than lines covered, that is because our framework allows for tracking arbitrary events, not just coverage. In fact, any data item a collector can extract from the execution can be used for correlation analysis. (It may not be so easily visualized, though.) Here's an example. We define a `ValueCollector` class that collects pairs of (local) variables and their values during execution. Its `events()` method then returns the set of all these pairs.
###Code
class ValueCollector(Collector):
""""A class to collect local variables and their values."""
def __init__(self) -> None:
"""Constructor."""
super().__init__()
self.vars: Set[str] = set()
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
local_vars = frame.f_locals
for var in local_vars:
value = local_vars[var]
self.vars.add(f"{var} = {repr(value)}")
def events(self) -> Set[str]:
"""A set of (variable, value) pairs observed"""
return self.vars
###Output
_____no_output_____
###Markdown
If we apply this collector on our set of HTML test cases, these are all the events that we obtain – essentially all variables and all values ever seen:
###Code
debugger = test_debugger_html(ContinuousSpectrumDebugger(ValueCollector))
for event in debugger.all_events():
print(event)
###Output
s = 'abc'
c = '"'
c = 'a'
s = '"abc"'
c = 'c'
c = '>'
tag = False
c = 'b'
out = 'abc'
quote = True
out = ''
s = '<b>abc</b>'
tag = True
quote = False
c = '<'
out = 'a'
out = 'ab'
c = '/'
###Markdown
However, some of these events only occur in the failing run:
###Code
for event in debugger.only_fail_events():
print(event)
###Output
c = '"'
s = '"abc"'
quote = True
###Markdown
Some of these differences are spurious – the string `"abc"` (with quotes) only occurs in the failing run – but others, such as `quote` being True and `c` containing a quote character, are actually relevant for explaining when the failure comes to be. We can even visualize the suspiciousness of the individual events, setting the (so far undiscussed) `color` flag for producing an event table:
###Code
debugger.event_table(color=True, args=True)
###Output
_____no_output_____
###Markdown
There are many ways one can continue from here.

* Rather than checking for concrete values, one could check for more _abstract properties_, for instance – what is the sign of the value? What is the length of the string?
* One could check for specifics of the _control flow_ – is the loop taken? How many times?
* One could check for specifics of the _information flow_ – which values flow from one variable to another?

There are lots of properties that all could be related to failures – and if we happen to check for the right one, we may obtain a much crisper definition of what causes the failure. We will come up with more ideas on properties to check when it comes to [mining specifications](SpecificationMining.ipynb).

Training Classifiers

The metrics we have discussed so far are pretty _generic_ – that is, they are fixed no matter how the actual event space is structured. The field of _machine learning_ has come up with techniques that learn _classifiers_ from a given set of data – classifiers that are trained from labeled data and then can predict labels for new data sets. In our case, the labels are test outcomes (PASS and FAIL), whereas the data would be features of the events observed.

A classifier by itself is not immediately useful for debugging (although it could predict whether future inputs will fail or not). Some classifiers, however, have great _diagnostic_ quality; that is, they can _explain_ how their classification comes to be. [Decision trees](https://scikit-learn.org/stable/modules/tree.html) fall into this very category. A decision tree contains a number of _nodes_, each one associated with a predicate. Depending on whether the predicate is true or false, we follow the given "true" or "false" branch to end up in the next node, which again contains a predicate. Eventually, we end up in the outcome predicted by the tree. The neat thing is that the node predicates actually give important hints on the circumstances that are _most relevant_ for deciding the outcome.

Let us illustrate this with an example. We build a class `ClassifyingDebugger` that trains a decision tree from the events collected. To this end, we need to set up our input data such that it can be fed into a classifier. We start with identifying our _samples_ (runs) and the respective _labels_ (outcomes). All values have to be encoded into numerical values.
###Code
class ClassifyingDebugger(DifferenceDebugger):
"""A debugger implementing a decision tree for events"""
PASS_VALUE = +1.0
FAIL_VALUE = -1.0
def samples(self) -> Dict[str, float]:
samples = {}
for collector in self.pass_collectors():
samples[collector.id()] = self.PASS_VALUE
for collector in self.fail_collectors():
samples[collector.id()] = self.FAIL_VALUE
return samples
debugger = test_debugger_html(ClassifyingDebugger())
debugger.samples()
###Output
_____no_output_____
###Markdown
Next, we identify the _features_, which in our case is the set of lines executed in each sample:
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def features(self) -> Dict[str, Any]:
features = {}
for collector in self.pass_collectors():
features[collector.id()] = collector.events()
for collector in self.fail_collectors():
features[collector.id()] = collector.events()
return features
debugger = test_debugger_html(ClassifyingDebugger())
debugger.features()
###Output
_____no_output_____
###Markdown
All our features have names, which must be strings.
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def feature_names(self) -> List[str]:
return [repr(feature) for feature in self.all_events()]
debugger = test_debugger_html(ClassifyingDebugger())
debugger.feature_names()
###Output
_____no_output_____
###Markdown
Next, we define the _shape_ for an individual sample, which is a value of +1 or -1 for each feature seen (i.e., +1 if the line was covered, -1 if not).
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def shape(self, sample: str) -> List[float]:
x = []
features = self.features()
for f in self.all_events():
if f in features[sample]:
x += [+1.0]
else:
x += [-1.0]
return x
debugger = test_debugger_html(ClassifyingDebugger())
debugger.shape("remove_html_markup(s='abc')")
###Output
_____no_output_____
###Markdown
Our input X for the classifier now is a list of such shapes, one for each sample.
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def X(self) -> List[List[float]]:
X = []
samples = self.samples()
for key in samples:
X += [self.shape(key)]
return X
debugger = test_debugger_html(ClassifyingDebugger())
debugger.X()
###Output
_____no_output_____
###Markdown
Our input Y for the classifier, in contrast, is the list of labels, again indexed by sample.
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def Y(self) -> List[float]:
Y = []
samples = self.samples()
for key in samples:
Y += [samples[key]]
return Y
debugger = test_debugger_html(ClassifyingDebugger())
debugger.Y()
###Output
_____no_output_____
###Markdown
We now have all our data ready to be fit into a tree classifier. The method `classifier()` creates and returns the (tree) classifier for the observed runs.
###Code
from sklearn.tree import DecisionTreeClassifier, export_text, export_graphviz
class ClassifyingDebugger(ClassifyingDebugger):
def classifier(self) -> DecisionTreeClassifier:
classifier = DecisionTreeClassifier()
classifier = classifier.fit(self.X(), self.Y())
return classifier
###Output
_____no_output_____
###Markdown
We define a special method to show classifiers:
###Code
import graphviz
class ClassifyingDebugger(ClassifyingDebugger):
def show_classifier(self, classifier: DecisionTreeClassifier) -> Any:
dot_data = export_graphviz(classifier, out_file=None,
filled=False, rounded=True,
feature_names=self.feature_names(),
class_names=["FAIL", "PASS"],
label='none',
node_ids=False,
impurity=False,
proportion=True,
special_characters=True)
return graphviz.Source(dot_data)
###Output
_____no_output_____
###Markdown
This is the tree we get for our `remove_html_markup()` tests. The top predicate is whether the "culprit" line was executed (-1 means no, +1 means yes). If not (-1), the outcome is PASS. Otherwise, the outcome is FAIL.
###Code
debugger = test_debugger_html(ClassifyingDebugger())
classifier = debugger.classifier()
debugger.show_classifier(classifier)
###Output
_____no_output_____
###Markdown
We can even use our classifier to predict the outcome of additional runs. If, for instance, we execute all lines except for, say, Lines 7, 9, and 11, our tree classifier would predict failure – because the "culprit" Line 12 is executed.
###Code
classifier.predict([[1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1]])
###Output
_____no_output_____
###Markdown
Again, there are many ways to continue from here. Which events should we train the classifier from? How do classifiers compare in their performance and diagnostic quality? There are lots of possibilities left to explore, and we are only beginning to realize the potential for automated debugging.

Synopsis

This chapter introduces classes and techniques for _statistical debugging_ – that is, correlating specific events, such as lines covered, with passing and failing outcomes.

To make use of the code in this chapter, use one of the provided `StatisticalDebugger` subclasses such as `TarantulaDebugger` or `OchiaiDebugger`. Both are instantiated with a `Collector` denoting the type of events you want to correlate outcomes with. The default is `CoverageCollector`, collecting line coverage.

Collecting Events from Calls

To collect events from calls that are labeled manually, use
###Code
debugger = TarantulaDebugger()
with debugger.collect_pass():
remove_html_markup("abc")
with debugger.collect_pass():
remove_html_markup('<b>abc</b>')
with debugger.collect_fail():
remove_html_markup('"abc"')
###Output
_____no_output_____
###Markdown
Within each `with` block, the _first function call_ is collected and tracked for coverage. (Note that _only_ the first call is tracked.)

Collecting Events from Tests

To collect events from _tests_ that use exceptions to indicate failure, use the simpler `with` form:
###Code
debugger = TarantulaDebugger()
with debugger:
remove_html_markup("abc")
with debugger:
remove_html_markup('<b>abc</b>')
with debugger:
remove_html_markup('"abc"')
assert False # raise an exception
###Output
_____no_output_____
###Markdown
`with` blocks that raise an exception will be classified as failing, blocks that do not will be classified as passing. Note that exceptions raised are "swallowed" by the debugger.

Visualizing Events as a Table

After collecting events, you can print out the observed events – in this case, line numbers – in a table, showing in which runs they occurred (`X`), and with colors highlighting the suspiciousness of the event. A "red" event means that the event predominantly occurs in failing runs.
###Code
debugger.event_table(args=True, color=True)
###Output
_____no_output_____
###Markdown
Visualizing Suspicious Code

If you collected coverage with `CoverageCollector`, you can also visualize the code with similar colors, highlighting suspicious lines:
###Code
debugger
###Output
_____no_output_____
###Markdown
Ranking Events

The method `rank()` returns a ranked list of events, starting with the most suspicious. This is useful for automated techniques that need potential defect locations.
###Code
debugger.rank()
###Output
_____no_output_____
###Markdown
Classes and Methods

Here are all classes defined in this chapter:
###Code
# ignore
from ClassDiagram import display_class_hierarchy
# ignore
display_class_hierarchy([TarantulaDebugger, OchiaiDebugger],
abstract_classes=[
StatisticalDebugger,
DifferenceDebugger,
RankingDebugger
],
public_methods=[
StatisticalDebugger.__init__,
StatisticalDebugger.all_events,
StatisticalDebugger.event_table,
StatisticalDebugger.function,
StatisticalDebugger.coverage,
StatisticalDebugger.covered_functions,
DifferenceDebugger.__enter__,
DifferenceDebugger.__exit__,
DifferenceDebugger.all_pass_events,
DifferenceDebugger.all_fail_events,
DifferenceDebugger.collect_pass,
DifferenceDebugger.collect_fail,
DifferenceDebugger.only_pass_events,
DifferenceDebugger.only_fail_events,
SpectrumDebugger.code,
SpectrumDebugger.__repr__,
SpectrumDebugger.__str__,
SpectrumDebugger._repr_html_,
ContinuousSpectrumDebugger.code,
ContinuousSpectrumDebugger.__repr__,
RankingDebugger.rank
],
project='debuggingbook')
# ignore
display_class_hierarchy([CoverageCollector, ValueCollector],
public_methods=[
Tracer.__init__,
Tracer.__enter__,
Tracer.__exit__,
Tracer.changed_vars, # type: ignore
Collector.__init__,
Collector.__repr__,
Collector.function,
Collector.args,
Collector.argstring,
Collector.exception,
Collector.id,
Collector.collect,
CoverageCollector.coverage,
CoverageCollector.covered_functions,
CoverageCollector.events,
ValueCollector.__init__,
ValueCollector.events
],
project='debuggingbook')
###Output
_____no_output_____
###Markdown
Lessons Learned

* _Correlations_ between execution events and outcomes (pass/fail) can provide important hints for debugging
* Events occurring only (or mostly) during failing runs can be _highlighted_ and _ranked_ to guide the search
* Important hints include whether the _execution of specific code locations_ correlates with failure

Next Steps

Chapters that build on this one include

* [how to determine invariants that correlate with failures](DynamicInvariants.ipynb)
* [how to automatically repair programs](Repairer.ipynb)

Background

The seminal works on statistical debugging are two papers:

* "Visualization of Test Information to Assist Fault Localization" \cite{Jones2002} by James Jones, Mary Jean Harrold, and John Stasko, introducing Tarantula and its visualization. The paper won an ACM SIGSOFT 10-year impact award.
* "Bug Isolation via Remote Program Sampling" \cite{Liblit2003} by Ben Liblit, Alex Aiken, Alice X. Zheng, and Michael I. Jordan, introducing the term "Statistical debugging". Liblit won the ACM Doctoral Dissertation Award for this work.

The Ochiai metric for fault localization was introduced by \cite{Abreu2009}. The overview by Wong et al. \cite{Wong2016} gives a comprehensive overview of the field of statistical fault localization.

The study by Parnin and Orso \cite{Parnin2011} is a must-read to understand the limitations of the technique.

Exercises

Exercise 1: A Postcondition for Middle

What would be a postcondition for `middle()`? How can you check it?

**Solution.** A simple postcondition for `middle()` would be

```python
assert m == sorted([x, y, z])[1]
```

where `m` is the value returned by `middle()`. `sorted()` sorts the given list, and the index `[1]` returns, well, the middle element. (This might also be a much shorter, but possibly slightly more expensive implementation for `middle()`.) Since `middle()` has several `return` statements, the easiest way to check the result is to create a wrapper around `middle()`:
###Code
def middle_checked(x, y, z): # type: ignore
m = middle(x, y, z)
assert m == sorted([x, y, z])[1]
return m
###Output
_____no_output_____
###Markdown
`middle_checked()` catches the error:
###Code
from ExpectError import ExpectError
with ExpectError():
m = middle_checked(2, 1, 3)
###Output
Traceback (most recent call last):
File "<ipython-input-1-3c03371d2614>", line 2, in <module>
m = middle_checked(2, 1, 3)
File "<ipython-input-1-7a70e9d5c211>", line 3, in middle_checked
assert m == sorted([x, y, z])[1]
AssertionError (expected)
###Markdown
Statistical Debugging

In this chapter, we introduce _statistical debugging_ – the idea that specific events during execution could be _statistically correlated_ with failures. We start with coverage of individual lines and then proceed towards further execution features.
###Code
from bookutils import YouTubeVideo
YouTubeVideo("UNuso00zYiI")
###Output
_____no_output_____
###Markdown
**Prerequisites**

* You should have read the [chapter on tracing executions](Tracer.ipynb).
###Code
import bookutils
###Output
_____no_output_____
###Markdown
Synopsis

To [use the code provided in this chapter](Importing.ipynb), write

```python
>>> from debuggingbook.StatisticalDebugger import 
```

and then make use of the following features.

This chapter introduces classes and techniques for _statistical debugging_ – that is, correlating specific events, such as lines covered, with passing and failing outcomes.

To make use of the code in this chapter, use one of the provided `StatisticalDebugger` subclasses such as `TarantulaDebugger` or `OchiaiDebugger`. Both are instantiated with a `Collector` denoting the type of events you want to correlate outcomes with. The default is `CoverageCollector`, collecting line coverage.

Collecting Events from Calls

To collect events from calls that are labeled manually, use

```python
>>> debugger = TarantulaDebugger()
>>> with debugger.collect_pass():
>>>     remove_html_markup("abc")
>>> with debugger.collect_pass():
>>>     remove_html_markup('<b>abc</b>')
>>> with debugger.collect_fail():
>>>     remove_html_markup('"abc"')
```

Within each `with` block, the _first function call_ is collected and tracked for coverage. (Note that _only_ the first call is tracked.)

Collecting Events from Tests

To collect events from _tests_ that use exceptions to indicate failure, use the simpler `with` form:

```python
>>> debugger = TarantulaDebugger()
>>> with debugger:
>>>     remove_html_markup("abc")
>>> with debugger:
>>>     remove_html_markup('<b>abc</b>')
>>> with debugger:
>>>     remove_html_markup('"abc"')
>>>     assert False  # raise an exception
```

`with` blocks that raise an exception will be classified as failing, blocks that do not will be classified as passing. Note that exceptions raised are "swallowed" by the debugger.

Visualizing Events as a Table

After collecting events, you can print out the observed events – in this case, line numbers – in a table, showing in which runs they occurred (`X`), and with colors highlighting the suspiciousness of the event. 
A "red" event means that the event predominantly occurs in failing runs.```python>>> debugger.event_table(args=True, color=True)```| `remove_html_markup` | `s='abc'` | `s='abc'` | `s='"abc"'` | | --------------------- | ---- | ---- | ---- | | remove_html_markup:1 | X | X | X | | remove_html_markup:2 | X | X | X | | remove_html_markup:3 | X | X | X | | remove_html_markup:4 | X | X | X | | remove_html_markup:6 | X | X | X | | remove_html_markup:7 | X | X | X | | remove_html_markup:8 | - | X | - | | remove_html_markup:9 | X | X | X | | remove_html_markup:10 | - | X | - | | remove_html_markup:11 | X | X | X | | remove_html_markup:12 | - | - | X | | remove_html_markup:13 | X | X | X | | remove_html_markup:14 | X | X | X | | remove_html_markup:16 | X | X | X | Visualizing Suspicious CodeIf you collected coverage with `CoverageCollector`, you can also visualize the code with similar colors, highlighting suspicious lines:```python>>> debugger```<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 1: 50%"> 1 def remove_html_markup(s): type: ignore<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 2: 50%"> 2 tag = False<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 3: 50%"> 3 quote = False<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 4: 50%"> 4 out = "" 5 <pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 6: 50%"> 6 for c in s:<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 7: 50%"> 7 if c == &x27;<&x27; and not quote:<pre style="background-color:hsl(120.0, 50.0%, 80%)" title="Line 8: 0%"> 8 tag = True<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 9: 50%"> 9 elif c == &x27;>&x27; and not quote:<pre style="background-color:hsl(120.0, 50.0%, 80%)" title="Line 10: 0%"> 10 tag = False<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 11: 50%"> 11 elif c == &x27;"&x27; or c == "&x27;" and tag:<pre style="background-color:hsl(0.0, 100.0%, 80%)" title="Line 12: 100%"> 12 quote = not quote<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 13: 50%"> 13 elif not tag:<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 14: 50%"> 14 out = out + c 15 <pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 16: 50%"> 16 return out Ranking EventsThe method `rank()` returns a ranked list of events, starting with the most suspicious. This is useful for automated techniques that need potential defect locations.```python>>> debugger.rank()[('remove_html_markup', 12), ('remove_html_markup', 4), ('remove_html_markup', 6), ('remove_html_markup', 13), ('remove_html_markup', 2), ('remove_html_markup', 9), ('remove_html_markup', 16), ('remove_html_markup', 11), ('remove_html_markup', 7), ('remove_html_markup', 14), ('remove_html_markup', 1), ('remove_html_markup', 3), ('remove_html_markup', 10), ('remove_html_markup', 8)]``` Classes and MethodsHere are all classes defined in this chapter: IntroductionThe idea behind _statistical debugging_ is fairly simple. We have a program that sometimes passes and sometimes fails. This outcome can be _correlated_ with events that precede it – properties of the input, properties of the execution, properties of the program state. If we, for instance, can find that "the program always fails when Line 123 is executed, and it always passes when Line 123 is _not_ executed", then we have a strong correlation between Line 123 being executed and failure.Such _correlation_ does not necessarily mean _causation_. 
For this, we would have to prove that executing Line 123 _always_ leads to failure, and that _not_ executing it does not lead to (this) failure. Also, a correlation (or even a causation) does not mean that Line 123 contains the defect – for this, we would have to show that it actually is in error. Still, correlations make excellent hints when it comes to searching for failure causes – in all generality, if you let your search be guided by _events that correlate with failures_, you are more likely to find _important hints on how the failure comes to be_.

Collecting Events

How can we determine events that correlate with failure? We start with a general mechanism to actually _collect_ events during execution. The abstract `Collector` class provides

* a `collect()` method for collecting events, called from the `traceit()` tracer; and
* an `events()` method for retrieving these events.

Both of these are _abstract_ and will be defined further in subclasses.
###Code
from Tracer import Tracer
# ignore
from typing import Any, Callable, Optional, Type, Tuple
from typing import Dict, Set, List, TypeVar, Union
from types import FrameType, TracebackType
class Collector(Tracer):
"""A class to record events during execution."""
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
"""Collecting function. To be overridden in subclasses."""
pass
def events(self) -> Set:
"""Return a collection of events. To be overridden in subclasses."""
return set()
def traceit(self, frame: FrameType, event: str, arg: Any) -> None:
self.collect(frame, event, arg)
###Output
_____no_output_____
###Markdown
A `Collector` class is used like `Tracer`, using a `with` statement. Let us apply it on the buggy variant of `remove_html_markup()` from the [Introduction to Debugging](Intro_Debugging.ipynb):
###Code
def remove_html_markup(s): # type: ignore
tag = False
quote = False
out = ""
for c in s:
if c == '<' and not quote:
tag = True
elif c == '>' and not quote:
tag = False
elif c == '"' or c == "'" and tag:
quote = not quote
elif not tag:
out = out + c
return out
with Collector() as c:
out = remove_html_markup('"abc"')
out
###Output
_____no_output_____
###Markdown
There's not much we can do with our collector yet, as the `collect()` and `events()` methods are still empty. However, we can introduce an `id()` method which returns a string identifying the collector. This string is defined from the _first function call_ encountered.
###Code
from types import FunctionType
Coverage = Set[Tuple[Callable, int]]
class Collector(Collector):
def __init__(self) -> None:
"""Constructor."""
self._function: Optional[Callable] = None
self._args: Optional[Dict[str, Any]] = None
self._argstring: Optional[str] = None
self._exception: Optional[Type] = None
self.items_to_ignore: List[Union[Type, Callable]] = [self.__class__]
def traceit(self, frame: FrameType, event: str, arg: Any) -> None:
"""
Tracing function.
Saves the first function and calls collect().
"""
for item in self.items_to_ignore:
if (isinstance(item, type) and 'self' in frame.f_locals and
isinstance(frame.f_locals['self'], item)):
# Ignore this class
return
if item.__name__ == frame.f_code.co_name:
# Ignore this function
return
if self._function is None and event == 'call':
# Save function
self._function = self.create_function(frame)
self._args = frame.f_locals.copy()
self._argstring = ", ".join([f"{var}={repr(self._args[var])}"
for var in self._args])
self.collect(frame, event, arg)
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
"""Collector function. To be overloaded in subclasses."""
pass
def id(self) -> str:
"""Return an identifier for the collector,
created from the first call"""
return f"{self.function().__name__}({self.argstring()})"
def function(self) -> Callable:
"""Return the function from the first call, as a function object"""
if not self._function:
raise ValueError("No call collected")
return self._function
def argstring(self) -> str:
"""
Return the list of arguments from the first call,
as a printable string
"""
if not self._argstring:
raise ValueError("No call collected")
return self._argstring
def args(self) -> Dict[str, Any]:
"""Return a dict of argument names and values from the first call"""
if not self._args:
raise ValueError("No call collected")
return self._args
def exception(self) -> Optional[Type]:
"""Return the exception class from the first call,
or None if no exception was raised."""
return self._exception
def __repr__(self) -> str:
"""Return a string representation of the collector"""
# We use the ID as default representation when printed
return self.id()
def covered_functions(self) -> Set[Callable]:
"""Set of covered functions. To be overloaded in subclasses."""
return set()
def coverage(self) -> Coverage:
"""
Return a set (function, lineno) with locations covered.
To be overloaded in subclasses.
"""
return set()
###Output
_____no_output_____
###Markdown
Here's how the collector works. We use a `with` clause to collect details on a function call:
###Code
with Collector() as c:
remove_html_markup('abc')
###Output
_____no_output_____
###Markdown
We can now retrieve details such as the function called...
###Code
c.function()
###Output
_____no_output_____
###Markdown
... or its arguments, as a name/value dictionary.
###Code
c.args()
###Output
_____no_output_____
###Markdown
The `id()` method returns a printable representation of the call:
###Code
c.id()
###Output
_____no_output_____
###Markdown
The `argstring()` method does the same for the argument string only.
###Code
c.argstring()
###Output
_____no_output_____
###Markdown
With this, we can collect the basic information to identify calls – such that we can later correlate their events with success or failure.

Error Prevention

While collecting, we'd like to avoid collecting events in the collection infrastructure. The `items_to_ignore` attribute takes care of this.
###Code
class Collector(Collector):
def add_items_to_ignore(self,
items_to_ignore: List[Union[Type, Callable]]) \
-> None:
"""
Define additional classes and functions to ignore during collection
(typically `Debugger` classes using these collectors).
"""
self.items_to_ignore += items_to_ignore
###Output
_____no_output_____
###Markdown
If we exit a block without having collected anything, that's likely an error.
###Code
class Collector(Collector):
def __exit__(self, exc_tp: Type, exc_value: BaseException,
exc_traceback: TracebackType) -> Optional[bool]:
"""Exit the `with` block."""
ret = super().__exit__(exc_tp, exc_value, exc_traceback)
if not self._function:
if exc_tp:
return False # re-raise exception
else:
raise ValueError("No call collected")
return ret
###Output
_____no_output_____
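As a quick illustration of this error prevention (a sketch added here, not from the original notebook, and assuming the `Tracer` base class simply restores tracing in its own `__exit__()`): a `with Collector()` block that contains no function call raises the `ValueError` defined above.

```python
# Sketch: a Collector block without any traced function call is an error.
try:
    with Collector():
        pass  # deliberately no function call here
except ValueError as err:
    print(err)  # expected: No call collected
```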
###Markdown
Collecting Coverage

So far, our `Collector` class does not collect any events. Let us extend it such that it collects _coverage_ information – that is, the set of locations executed. To this end, we introduce a `CoverageCollector` subclass which saves the coverage in a set containing functions and line numbers.
###Code
from types import FrameType
from StackInspector import StackInspector
class CoverageCollector(Collector, StackInspector):
"""A class to record covered locations during execution."""
def __init__(self) -> None:
"""Constructor."""
super().__init__()
self._coverage: Coverage = set()
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
"""
Save coverage for an observed event.
"""
name = frame.f_code.co_name
function = self.search_func(name, frame)
if function is None:
function = self.create_function(frame)
location = (function, frame.f_lineno)
self._coverage.add(location)
###Output
_____no_output_____
###Markdown
We also override `events()` such that it returns the set of covered locations.
###Code
class CoverageCollector(CoverageCollector):
def events(self) -> Set[Tuple[str, int]]:
"""
Return the set of locations covered.
Each location comes as a pair (`function_name`, `lineno`).
"""
return {(func.__name__, lineno) for func, lineno in self._coverage}
###Output
_____no_output_____
###Markdown
The methods `coverage()` and `covered_functions()` allow precise access to the coverage obtained.
###Code
class CoverageCollector(CoverageCollector):
def covered_functions(self) -> Set[Callable]:
"""Return a set with all functions covered."""
return {func for func, lineno in self._coverage}
def coverage(self) -> Coverage:
"""Return a set (function, lineno) with all locations covered."""
return self._coverage
###Output
_____no_output_____
###Markdown
Here is how we can use `CoverageCollector` to determine the lines executed during a run of `remove_html_markup()`:
###Code
with CoverageCollector() as c:
remove_html_markup('abc')
c.events()
###Output
_____no_output_____
###Markdown
Sets of line numbers alone are not too revealing. They provide more insights if we actually list the code, highlighting these numbers:
###Code
import inspect
from bookutils import getsourcelines # like inspect.getsourcelines(), but in color
def code_with_coverage(function: Callable, coverage: Coverage) -> None:
source_lines, starting_line_number = \
getsourcelines(function)
line_number = starting_line_number
for line in source_lines:
marker = '*' if (function, line_number) in coverage else ' '
print(f"{line_number:4} {marker} {line}", end='')
line_number += 1
code_with_coverage(remove_html_markup, c.coverage())
###Output
   1 * def remove_html_markup(s): # type: ignore
   2 *     tag = False
   3 *     quote = False
   4 *     out = ""
   5
   6 *     for c in s:
   7 *         if c == '<' and not quote:
   8               tag = True
   9 *         elif c == '>' and not quote:
  10               tag = False
  11 *         elif c == '"' or c == "'" and tag:
  12               quote = not quote
  13 *         elif not tag:
  14 *             out = out + c
  15
  16 *     return out
###Markdown
Remember that the input `s` was `'abc'`? In this listing, we can see which lines were covered and which were not. From the listing alone, we can already see that `s` has neither tags nor quotes. Such coverage computation plays a big role in _testing_, as one wants tests to cover as many different aspects of program execution (and notably code) as possible. But code coverage is also essential during debugging: if some code was not even executed in the failing run, then any change to it will have no effect.
###Code
from bookutils import quiz
quiz('Let the input be `"<b>Don\'t do this!</b>"`. '
"Which of these lines are executed? Use the code to find out!",
[
"`tag = True`",
"`tag = False`",
"`quote = not quote`",
"`out = out + c`"
], "[ord(c) - ord('a') - 1 for c in 'cdf']")
###Output
_____no_output_____
###Markdown
To find the solution, try this out yourself:
###Code
with CoverageCollector() as c:
remove_html_markup("<b>Don't do this!</b>")
# code_with_coverage(remove_html_markup, c.coverage())
###Output
_____no_output_____
###Markdown
Computing Differences

Let us get back to the idea that we want to _correlate_ events with passing and failing outcomes. For this, we need to examine events in both _passing_ and _failing_ runs, and determine their _differences_ – since it is these differences we want to associate with their respective outcome.

A Base Class for Statistical Debugging

The `StatisticalDebugger` base class takes a collector class (such as `CoverageCollector`). Its `collect()` method creates a new collector of that very class, which will be maintained by the debugger. As argument, `collect()` takes a string characterizing the outcome (such as `'PASS'` or `'FAIL'`). This is how one would use it:

```python
debugger = StatisticalDebugger()
with debugger.collect('PASS'):
    some_passing_run()
with debugger.collect('PASS'):
    another_passing_run()
with debugger.collect('FAIL'):
    some_failing_run()
```

Let us implement `StatisticalDebugger`. The base class gets a collector class as argument:
###Code
class StatisticalDebugger:
"""A class to collect events for multiple outcomes."""
def __init__(self, collector_class: Type = CoverageCollector, log: bool = False):
"""Constructor. Use instances of `collector_class` to collect events."""
self.collector_class = collector_class
self.collectors: Dict[str, List[Collector]] = {}
self.log = log
###Output
_____no_output_____
###Markdown
The `collect()` method creates (and stores) a collector for the given outcome, using the given outcome to characterize the run. Any additional arguments are passed to the collector.
###Code
class StatisticalDebugger(StatisticalDebugger):
def collect(self, outcome: str, *args: Any, **kwargs: Any) -> Collector:
"""Return a collector for the given outcome.
Additional args are passed to the collector."""
collector = self.collector_class(*args, **kwargs)
collector.add_items_to_ignore([self.__class__])
return self.add_collector(outcome, collector)
def add_collector(self, outcome: str, collector: Collector) -> Collector:
if outcome not in self.collectors:
self.collectors[outcome] = []
self.collectors[outcome].append(collector)
return collector
###Output
_____no_output_____
###Markdown
The `all_events()` method produces a union of all events observed. If an outcome is given, it produces a union of all events with that outcome:
###Code
class StatisticalDebugger(StatisticalDebugger):
def all_events(self, outcome: Optional[str] = None) -> Set[Any]:
"""Return a set of all events observed."""
all_events = set()
if outcome:
if outcome in self.collectors:
for collector in self.collectors[outcome]:
all_events.update(collector.events())
else:
for outcome in self.collectors:
for collector in self.collectors[outcome]:
all_events.update(collector.events())
return all_events
###Output
_____no_output_____
###Markdown
Here's a simple example of `StatisticalDebugger` in action:
###Code
s = StatisticalDebugger()
with s.collect('PASS'):
remove_html_markup("abc")
with s.collect('PASS'):
remove_html_markup('<b>abc</b>')
with s.collect('FAIL'):
remove_html_markup('"abc"')
###Output
_____no_output_____
###Markdown
The method `all_events()` returns all events collected:
###Code
s.all_events()
###Output
_____no_output_____
###Markdown
If given an outcome as argument, we obtain all events with the given outcome.
###Code
s.all_events('FAIL')
###Output
_____no_output_____
###Markdown
The attribute `collectors` maps outcomes to lists of collectors:
###Code
s.collectors
###Output
_____no_output_____
###Markdown
Here's the collector of the first passing run:
###Code
s.collectors['PASS'][0].id()
s.collectors['PASS'][0].events()
###Output
_____no_output_____
###Markdown
To better highlight the differences between the collected events, we introduce a method `event_table()` that prints out whether an event took place in a run.

Excursion: Printing an Event Table
###Code
from IPython.display import Markdown
import html
class StatisticalDebugger(StatisticalDebugger):
def function(self) -> Optional[Callable]:
"""
Return the entry function from the events observed,
or None if ambiguous.
"""
names_seen = set()
functions = []
for outcome in self.collectors:
for collector in self.collectors[outcome]:
# We may have multiple copies of the function,
# but sharing the same name
func = collector.function()
if func.__name__ not in names_seen:
functions.append(func)
names_seen.add(func.__name__)
if len(functions) != 1:
return None # ambiguous
return functions[0]
def covered_functions(self) -> Set[Callable]:
"""Return a set of all functions observed."""
functions = set()
for outcome in self.collectors:
for collector in self.collectors[outcome]:
functions |= collector.covered_functions()
return functions
def coverage(self) -> Coverage:
"""Return a set of all (functions, line_numbers) observed"""
coverage = set()
for outcome in self.collectors:
for collector in self.collectors[outcome]:
coverage |= collector.coverage()
return coverage
def color(self, event: Any) -> Optional[str]:
"""
Return a color for the given event, or None.
To be overloaded in subclasses.
"""
return None
def tooltip(self, event: Any) -> Optional[str]:
"""
Return a tooltip string for the given event, or None.
To be overloaded in subclasses.
"""
return None
def event_str(self, event: Any) -> str:
"""Format the given event. To be overloaded in subclasses."""
if isinstance(event, str):
return event
if isinstance(event, tuple):
return ":".join(self.event_str(elem) for elem in event)
return str(event)
def event_table_text(self, *, args: bool = False, color: bool = False) -> str:
"""
Print out a table of events observed.
If `args` is True, use arguments as headers.
If `color` is True, use colors.
"""
sep = ' | '
all_events = self.all_events()
longest_event = max(len(f"{self.event_str(event)}")
for event in all_events)
out = ""
# Header
if args:
out += '| '
func = self.function()
if func:
out += '`' + func.__name__ + '`'
out += sep
for name in self.collectors:
for collector in self.collectors[name]:
out += '`' + collector.argstring() + '`' + sep
out += '\n'
else:
out += '| ' + ' ' * longest_event + sep
for name in self.collectors:
for i in range(len(self.collectors[name])):
out += name + sep
out += '\n'
out += '| ' + '-' * longest_event + sep
for name in self.collectors:
for i in range(len(self.collectors[name])):
out += '-' * len(name) + sep
out += '\n'
# Data
for event in sorted(all_events):
event_name = self.event_str(event).rjust(longest_event)
tooltip = self.tooltip(event)
if tooltip:
title = f' title="{tooltip}"'
else:
title = ''
if color:
color_name = self.color(event)
if color_name:
event_name = \
f'<samp style="background-color: {color_name}"{title}>' \
f'{html.escape(event_name)}' \
f'</samp>'
out += f"| {event_name}" + sep
for name in self.collectors:
for collector in self.collectors[name]:
out += ' ' * (len(name) - 1)
if event in collector.events():
out += "X"
else:
out += "-"
out += sep
out += '\n'
return out
def event_table(self, **_args: Any) -> Any:
"""Print out event table in Markdown format."""
return Markdown(self.event_table_text(**_args))
def __repr__(self) -> str:
return self.event_table_text()
def _repr_markdown_(self) -> str:
return self.event_table_text(args=True, color=True)
###Output
_____no_output_____
###Markdown
End of Excursion
###Code
s = StatisticalDebugger()
with s.collect('PASS'):
remove_html_markup("abc")
with s.collect('PASS'):
remove_html_markup('<b>abc</b>')
with s.collect('FAIL'):
remove_html_markup('"abc"')
s.event_table(args=True)
quiz("How many lines are executed in the failing run only?",
[
"One",
"Two",
"Three"
], 'len([12])')
###Output
_____no_output_____
###Markdown
Indeed, Line 12, which is executed in the failing run only, would be a correlation to look for.

Collecting Passing and Failing Runs

While our `StatisticalDebugger` class allows arbitrary outcomes, we are typically only interested in two outcomes, namely _passing_ vs. _failing_ runs. We therefore introduce a specialized `DifferenceDebugger` class that provides customized methods to collect and access passing and failing runs.
###Code
class DifferenceDebugger(StatisticalDebugger):
"""A class to collect events for passing and failing outcomes."""
PASS = 'PASS'
FAIL = 'FAIL'
def collect_pass(self, *args: Any, **kwargs: Any) -> Collector:
"""Return a collector for passing runs."""
return self.collect(self.PASS, *args, **kwargs)
def collect_fail(self, *args: Any, **kwargs: Any) -> Collector:
"""Return a collector for failing runs."""
return self.collect(self.FAIL, *args, **kwargs)
def pass_collectors(self) -> List[Collector]:
return self.collectors[self.PASS]
def fail_collectors(self) -> List[Collector]:
return self.collectors[self.FAIL]
def all_fail_events(self) -> Set[Any]:
"""Return all events observed in failing runs."""
return self.all_events(self.FAIL)
def all_pass_events(self) -> Set[Any]:
"""Return all events observed in passing runs."""
return self.all_events(self.PASS)
def only_fail_events(self) -> Set[Any]:
"""Return all events observed only in failing runs."""
return self.all_fail_events() - self.all_pass_events()
def only_pass_events(self) -> Set[Any]:
"""Return all events observed only in passing runs."""
return self.all_pass_events() - self.all_fail_events()
###Output
_____no_output_____
###Markdown
We can use `DifferenceDebugger` just as a `StatisticalDebugger`:
###Code
# ignore
T1 = TypeVar('T1', bound='DifferenceDebugger')
def test_debugger_html_simple(debugger: T1) -> T1:
with debugger.collect_pass():
remove_html_markup('abc')
with debugger.collect_pass():
remove_html_markup('<b>abc</b>')
with debugger.collect_fail():
remove_html_markup('"abc"')
return debugger
###Output
_____no_output_____
###Markdown
However, since the outcome of tests may not always be predetermined, we provide a simpler interface for tests that can fail (= raise an exception) or pass (not raise an exception).
###Code
class DifferenceDebugger(DifferenceDebugger):
def __enter__(self) -> Any:
"""Enter a `with` block. Collect coverage and outcome;
classify as FAIL if the block raises an exception,
and PASS if it does not.
"""
self.collector = self.collector_class()
self.collector.add_items_to_ignore([self.__class__])
self.collector.__enter__()
return self
def __exit__(self, exc_tp: Type, exc_value: BaseException,
exc_traceback: TracebackType) -> Optional[bool]:
"""Exit the `with` block."""
status = self.collector.__exit__(exc_tp, exc_value, exc_traceback)
if status is None:
pass
else:
return False # Internal error; re-raise exception
if exc_tp is None:
outcome = self.PASS
else:
outcome = self.FAIL
self.add_collector(outcome, self.collector)
return True # Ignore exception, if any
###Output
_____no_output_____
###Markdown
Using this interface, we can rewrite `test_debugger_html()`:
###Code
# ignore
T2 = TypeVar('T2', bound='DifferenceDebugger')
def test_debugger_html(debugger: T2) -> T2:
with debugger:
remove_html_markup('abc')
with debugger:
remove_html_markup('<b>abc</b>')
with debugger:
remove_html_markup('"abc"')
assert False # Mark test as failing
return debugger
test_debugger_html(DifferenceDebugger())
###Output
_____no_output_____
###Markdown
Analyzing Events

Let us now focus on _analyzing_ the events collected. Since events come back as _sets_, we can compute _unions_ and _differences_ between these sets. For instance, we can compute which lines were executed in _any_ of the passing runs of `test_debugger_html()`, above:
###Code
debugger = test_debugger_html(DifferenceDebugger())
pass_1_events = debugger.pass_collectors()[0].events()
pass_2_events = debugger.pass_collectors()[1].events()
in_any_pass = pass_1_events | pass_2_events
in_any_pass
###Output
_____no_output_____
###Markdown
Likewise, we can determine which lines were _only_ executed in the failing run:
###Code
fail_events = debugger.fail_collectors()[0].events()
only_in_fail = fail_events - in_any_pass
only_in_fail
###Output
_____no_output_____
###Markdown
And we see that the "failing" run is characterized by processing quotes:
###Code
code_with_coverage(remove_html_markup, only_in_fail)
debugger = test_debugger_html(DifferenceDebugger())
debugger.all_events()
###Output
_____no_output_____
###Markdown
These are the lines executed only in the failing run:
###Code
debugger.only_fail_events()
###Output
_____no_output_____
###Markdown
These are the lines executed only in the passing runs:
###Code
debugger.only_pass_events()
###Output
_____no_output_____
###Markdown
Again, having these lines individually is neat, but things become much more interesting if we can see the associated code lines just as well. That's what we will do in the next section.

Visualizing Differences

To show correlations of line coverage in context, we introduce a number of _visualization_ techniques that _highlight_ code with different colors.

Discrete Spectrum

The first idea is to use a _discrete_ spectrum of three colors:

* _red_ for code executed in failing runs only
* _green_ for code executed in passing runs only
* _yellow_ for code executed in both passing and failing runs.

Code that is not executed stays unhighlighted. We first introduce an abstract class `SpectrumDebugger` that provides the essential functions. `suspiciousness()` returns a value between 0 and 1 indicating the suspiciousness of the given event, or `None` if unknown.
###Code
class SpectrumDebugger(DifferenceDebugger):
def suspiciousness(self, event: Any) -> Optional[float]:
"""
Return a suspiciousness value in the range [0, 1.0]
for the given event, or `None` if unknown.
To be overloaded in subclasses.
"""
return None
###Output
_____no_output_____
###Markdown
The `tooltip()` and `percentage()` methods convert the suspiciousness into a human-readable form.
###Code
class SpectrumDebugger(SpectrumDebugger):
def tooltip(self, event: Any) -> str:
"""
Return a tooltip for the given event (default: percentage).
To be overloaded in subclasses.
"""
return self.percentage(event)
def percentage(self, event: Any) -> str:
"""
Return the suspiciousness for the given event as percentage string.
"""
suspiciousness = self.suspiciousness(event)
if suspiciousness is not None:
return str(int(suspiciousness * 100)).rjust(3) + '%'
else:
return ' ' * len('100%')
###Output
_____no_output_____
###Markdown
The `code()` method lists the given functions (by default, all covered functions), rendering each source code line in the given spectrum, with HTML markup when `color` is set:
###Code
class SpectrumDebugger(SpectrumDebugger):
def code(self, functions: Optional[Set[Callable]] = None, *,
color: bool = False, suspiciousness: bool = False,
line_numbers: bool = True) -> str:
"""
Return a listing of `functions` (default: covered functions).
If `color` is True, render as HTML, using suspiciousness colors.
If `suspiciousness` is True, include suspiciousness values.
If `line_numbers` is True (default), include line numbers.
"""
if not functions:
functions = self.covered_functions()
out = ""
seen = set()
for function in functions:
source_lines, starting_line_number = \
inspect.getsourcelines(function)
if (function.__name__, starting_line_number) in seen:
continue
seen.add((function.__name__, starting_line_number))
if out:
out += '\n'
if color:
out += '<p/>'
line_number = starting_line_number
for line in source_lines:
if color:
line = html.escape(line)
if line.strip() == '':
line = ' '
location = (function.__name__, line_number)
location_suspiciousness = self.suspiciousness(location)
if location_suspiciousness is not None:
tooltip = f"Line {line_number}: {self.tooltip(location)}"
else:
tooltip = f"Line {line_number}: not executed"
if suspiciousness:
line = self.percentage(location) + ' ' + line
if line_numbers:
line = str(line_number).rjust(4) + ' ' + line
line_color = self.color(location)
if color and line_color:
line = f'''<pre style="background-color:{line_color}"
title="{tooltip}">{line.rstrip()}</pre>'''
elif color:
line = f'<pre title="{tooltip}">{line}</pre>'
else:
line = line.rstrip()
out += line + '\n'
line_number += 1
return out
###Output
_____no_output_____
###Markdown
We introduce a few helper methods to visualize the code with colors in various forms.
###Code
class SpectrumDebugger(SpectrumDebugger):
def _repr_html_(self) -> str:
"""When output in Jupyter, visualize as HTML"""
return self.code(color=True)
def __str__(self) -> str:
"""Show code as string"""
return self.code(color=False, suspiciousness=True)
def __repr__(self) -> str:
"""Show code as string"""
return self.code(color=False, suspiciousness=True)
###Output
_____no_output_____
###Markdown
So far, however, central methods like `suspiciousness()` or `color()` were abstract – that is, to be defined in subclasses. Our `DiscreteSpectrumDebugger` subclass provides concrete implementations for these, with `color()` returning one of the three colors depending on the line number:
###Code
class DiscreteSpectrumDebugger(SpectrumDebugger):
"""Visualize differences between executions using three discrete colors"""
def suspiciousness(self, event: Any) -> Optional[float]:
"""
Return a suspiciousness value [0, 1.0]
for the given event, or `None` if unknown.
"""
passing = self.all_pass_events()
failing = self.all_fail_events()
if event in passing and event in failing:
return 0.5
elif event in failing:
return 1.0
elif event in passing:
return 0.0
else:
return None
def color(self, event: Any) -> Optional[str]:
"""
Return a HTML color for the given event.
"""
suspiciousness = self.suspiciousness(event)
if suspiciousness is None:
return None
if suspiciousness > 0.8:
return 'mistyrose'
if suspiciousness >= 0.5:
return 'lightyellow'
return 'honeydew'
def tooltip(self, event: Any) -> str:
"""Return a tooltip for the given event."""
passing = self.all_pass_events()
failing = self.all_fail_events()
if event in passing and event in failing:
return "in passing and failing runs"
elif event in failing:
return "only in failing runs"
elif event in passing:
return "only in passing runs"
else:
return "never"
###Output
_____no_output_____
###Markdown
This is what the `only_pass_events()` and `only_fail_events()` sets look like when visualized with code. The "culprit" line is clearly highlighted:
###Code
debugger = test_debugger_html(DiscreteSpectrumDebugger())
debugger
###Output
_____no_output_____
###Markdown
We can clearly see that the failure is correlated with the presence of quotes in the input string (which is an important hint!). But does this also show us _immediately_ where the defect to be fixed is?
###Code
quiz("Does the line `quote = not quote` actually contain the defect?",
[
"Yes, it should be fixed",
"No, the defect is elsewhere"
], '164 * 2 % 326')
###Output
_____no_output_____
###Markdown
Indeed, it is the _governing condition_ that is wrong – that is, the condition that caused Line 12 to be executed in the first place. In order to fix a program, we have to find a location that

1. _causes_ the failure (i.e., it can be changed to make the failure go away); and
2. is a _defect_ (i.e., contains an error).

In our example above, the highlighted code line is a _symptom_ of the error. To some extent, it is also a _cause_, since, say, commenting it out would also resolve the given failure, at the cost of causing other failures. However, the preceding condition is also a cause, as is the presence of quotes in the input. Only one of these is also a _defect_, though, and that is the preceding condition. Hence, while correlations can provide important hints, they do not necessarily locate defects. For those of us who may not have color HTML output at hand, simply printing the debugger lists suspiciousness values as percentages.
###Code
print(debugger)
###Output
1 50% def remove_html_markup(s): # type: ignore
2 50% tag = False
3 50% quote = False
4 50% out = ""
5
6 50% for c in s:
7 50% if c == '<' and not quote:
8 0% tag = True
9 50% elif c == '>' and not quote:
10 0% tag = False
11 50% elif c == '"' or c == "'" and tag:
12 100% quote = not quote
13 50% elif not tag:
14 50% out = out + c
15
16 50% return out
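To make this point concrete, here is a sketch of the kind of fix the discussion above points to (the function name is ours; this is not part of the chapter's own code): parenthesizing the condition that governs Line 12 makes the quote handling apply only within tags.

```python
def remove_html_markup_fixed(s):  # sketch of a possible fix; not the chapter's code
    tag = False
    quote = False
    out = ""

    for c in s:
        if c == '<' and not quote:
            tag = True
        elif c == '>' and not quote:
            tag = False
        elif (c == '"' or c == "'") and tag:  # parentheses fix the precedence
            quote = not quote
        elif not tag:
            out = out + c

    return out

remove_html_markup_fixed('"abc"')  # now returns '"abc"'
```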
###Markdown
Continuous Spectrum

The criterion that an event should _only_ occur in failing runs (and not in passing runs) can be too aggressive. In particular, if we have another run that executes the "culprit" lines, but does _not_ fail, our "only in fail" criterion will no longer be helpful. Here is an example. The input

```html
<b color="blue">text</b>
```

will trigger the "culprit" line

```python
quote = not quote
```

but actually produce an output where the tags are properly stripped:
###Code
remove_html_markup('<b color="blue">text</b>')
###Output
_____no_output_____
###Markdown
As a consequence, we no longer have lines that are being executed only in failing runs:
###Code
debugger = test_debugger_html(DiscreteSpectrumDebugger())
with debugger.collect_pass():
remove_html_markup('<b link="blue"></b>')
debugger.only_fail_events()
###Output
_____no_output_____
###Markdown
In our spectrum output, the effect now is that the "culprit" line is as yellow as all others.
###Code
debugger
###Output
_____no_output_____
###Markdown
We therefore introduce a different method for highlighting lines, based on their _relative_ occurrence with respect to all runs: If a line has been _mostly_ executed in failing runs, its color should shift towards red; if a line has been _mostly_ executed in passing runs, its color should shift towards green. This _continuous spectrum_ has been introduced by the seminal _Tarantula_ tool \cite{Jones2002}. In Tarantula, the color _hue_ for each line is defined as follows: $$\textit{color hue}(\textit{line}) = \textit{low color(red)} + \frac{\%\textit{passed}(\textit{line})}{\%\textit{passed}(\textit{line}) + \%\textit{failed}(\textit{line})} \times \textit{color range}$$ Here, `%passed` and `%failed` denote the percentage at which a line has been executed in passing and failing runs, respectively. A hue of 0.0 stands for red, a hue of 1.0 stands for green, and a hue of 0.5 stands for equal fractions of red and green, yielding yellow. We can implement these measures right away as methods in a new `ContinuousSpectrumDebugger` class:
###Code
class ContinuousSpectrumDebugger(DiscreteSpectrumDebugger):
"""Visualize differences between executions using a color spectrum"""
def collectors_with_event(self, event: Any, category: str) -> Set[Collector]:
"""
Return all collectors in a category
that observed the given event.
"""
all_runs = self.collectors[category]
collectors_with_event = set(collector for collector in all_runs
if event in collector.events())
return collectors_with_event
def collectors_without_event(self, event: Any, category: str) -> Set[Collector]:
"""
Return all collectors in a category
that did not observe the given event.
"""
all_runs = self.collectors[category]
collectors_without_event = set(collector for collector in all_runs
if event not in collector.events())
return collectors_without_event
def event_fraction(self, event: Any, category: str) -> float:
if category not in self.collectors:
return 0.0
all_collectors = self.collectors[category]
collectors_with_event = self.collectors_with_event(event, category)
fraction = len(collectors_with_event) / len(all_collectors)
# print(f"%{category}({event}) = {fraction}")
return fraction
def passed_fraction(self, event: Any) -> float:
return self.event_fraction(event, self.PASS)
def failed_fraction(self, event: Any) -> float:
return self.event_fraction(event, self.FAIL)
def hue(self, event: Any) -> Optional[float]:
"""Return a color hue from 0.0 (red) to 1.0 (green)."""
passed = self.passed_fraction(event)
failed = self.failed_fraction(event)
if passed + failed > 0:
return passed / (passed + failed)
else:
return None
###Output
_____no_output_____
###Markdown
Having a continuous hue also implies a continuous suspiciousness and associated tooltips:
###Code
class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger):
def suspiciousness(self, event: Any) -> Optional[float]:
hue = self.hue(event)
if hue is None:
return None
return 1 - hue
def tooltip(self, event: Any) -> str:
return self.percentage(event)
###Output
_____no_output_____
###Markdown
The hue for lines executed only in failing runs is (deep) red, as expected:
###Code
debugger = test_debugger_html(ContinuousSpectrumDebugger())
for location in debugger.only_fail_events():
print(location, debugger.hue(location))
###Output
('remove_html_markup', 12) 0.0
###Markdown
Likewise, the hue for lines executed in passing runs is (deep) green:
###Code
for location in debugger.only_pass_events():
print(location, debugger.hue(location))
###Output
('remove_html_markup', 10) 1.0
('remove_html_markup', 8) 1.0
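These hues follow directly from the formula. As a small worked sketch with plain numbers (two passing runs and one failing run, as above; the helper function is ours, not part of the chapter's code):

```python
# Worked example of the Tarantula hue for three representative lines.
def tarantula_hue(passed_fraction: float, failed_fraction: float) -> float:
    return passed_fraction / (passed_fraction + failed_fraction)

print(tarantula_hue(0 / 2, 1 / 1))  # Line 12: failing run only -> 0.0 (red)
print(tarantula_hue(1 / 2, 0 / 1))  # Line 8: one passing run only -> 1.0 (green)
print(tarantula_hue(2 / 2, 1 / 1))  # Line 7: all runs -> 0.5 (yellow)
```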
###Markdown
The Tarantula tool not only sets the hue for a line, but also uses _brightness_ as a measure of support – that is, how often the line was executed at all. The brighter a line, the stronger the correlation with a passing or failing outcome. The brightness is defined as follows: $$\textit{brightness}(line) = \max(\%\textit{passed}(\textit{line}), \%\textit{failed}(\textit{line}))$$ and it is easily implemented, too:
###Code
class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger):
def brightness(self, event: Any) -> float:
return max(self.passed_fraction(event), self.failed_fraction(event))
###Output
_____no_output_____
###Markdown
Our single "only in fail" line has a brightness of 1.0 (the maximum).
###Code
debugger = test_debugger_html(ContinuousSpectrumDebugger())
for location in debugger.only_fail_events():
print(location, debugger.brightness(location))
###Output
('remove_html_markup', 12) 1.0
###Markdown
With this, we can now define a color for each line. To this end, we override the (previously discrete) `color()` method such that it returns a color specification giving hue and brightness. We use the HTML format `hsl(hue, saturation, lightness)` where the hue is given as a value between 0 and 360 (0 is red, 120 is green) and saturation and lightness are provided as percentages.
###Code
class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger):
def color(self, event: Any) -> Optional[str]:
hue = self.hue(event)
if hue is None:
return None
saturation = self.brightness(event)
# HSL color values are specified with:
# hsl(hue, saturation, lightness).
return f"hsl({hue * 120}, {saturation * 100}%, 80%)"
debugger = test_debugger_html(ContinuousSpectrumDebugger())
###Output
_____no_output_____
###Markdown
Lines executed only in failing runs are still shown in red:
###Code
for location in debugger.only_fail_events():
print(location, debugger.color(location))
###Output
('remove_html_markup', 12) hsl(0.0, 100.0%, 80%)
###Markdown
... whereas lines executed only in passing runs are still shown in green:
###Code
for location in debugger.only_pass_events():
print(location, debugger.color(location))
debugger
###Output
_____no_output_____
###Markdown
What happens with our `quote = not quote` "culprit" line if it is executed in passing runs, too?
###Code
with debugger.collect_pass():
out = remove_html_markup('<b link="blue"></b>')
quiz('In which color will the `quote = not quote` "culprit" line '
'be shown after executing the above code?',
[
'<span style="background-color: hsl(120.0, 50.0%, 80%)">Green</span>',
'<span style="background-color: hsl(60.0, 100.0%, 80%)">Yellow</span>',
'<span style="background-color: hsl(30.0, 100.0%, 80%)">Orange</span>',
'<span style="background-color: hsl(0.0, 100.0%, 80%)">Red</span>'
], '999 // 333')
###Output
_____no_output_____
###Markdown
We see that it still is shown with an orange-red tint.
###Code
debugger
###Output
_____no_output_____
###Markdown
Here's another example, coming right from the Tarantula paper. The `middle()` function takes three numbers `x`, `y`, and `z`, and returns the one that is neither the minimum nor the maximum of the three:
###Code
def middle(x, y, z): # type: ignore
if y < z:
if x < y:
return y
elif x < z:
return y
else:
if x > y:
return y
elif x > z:
return x
return z
middle(1, 2, 3)
###Output
_____no_output_____
###Markdown
Unfortunately, `middle()` can fail:
###Code
middle(2, 1, 3)
###Output
_____no_output_____
###Markdown
Let us see whether we can find the bug with a few additional test cases:
###Code
# ignore
T3 = TypeVar('T3', bound='DifferenceDebugger')
def test_debugger_middle(debugger: T3) -> T3:
with debugger.collect_pass():
middle(3, 3, 5)
with debugger.collect_pass():
middle(1, 2, 3)
with debugger.collect_pass():
middle(3, 2, 1)
with debugger.collect_pass():
middle(5, 5, 5)
with debugger.collect_pass():
middle(5, 3, 4)
with debugger.collect_fail():
middle(2, 1, 3)
return debugger
###Output
_____no_output_____
###Markdown
Note that in order to collect data from multiple function invocations, you need to have a separate `with` clause for every invocation. The following will _not_ work correctly:

```python
with debugger.collect_pass():
    middle(3, 3, 5)
    middle(1, 2, 3)
    ...
```
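The reason is that each `with` block creates exactly one collector, whose identity is taken from the _first_ call it sees; any later calls in the same block are merged into that single run. A quick sketch, reusing the classes defined above:

```python
# Sketch: within one block, only the first call defines the run's identity.
with CoverageCollector() as single_run:
    middle(3, 3, 5)
    middle(1, 2, 3)

single_run.id()  # 'middle(x=3, y=3, z=5)' – the second call is folded into this run
```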
###Code
debugger = test_debugger_middle(ContinuousSpectrumDebugger())
debugger.event_table(args=True)
###Output
_____no_output_____
###Markdown
Here comes the visualization. We see that the `return y` line is the culprit here – and actually also the one to be fixed.
###Code
debugger
quiz("Which of the above lines should be fixed?",
[
'<span style="background-color: hsl(45.0, 100%, 80%)">Line 3: `elif x < y`</span>',
'<span style="background-color: hsl(34.28571428571429, 100.0%, 80%)">Line 5: `elif x < z`</span>',
'<span style="background-color: hsl(20.000000000000004, 100.0%, 80%)">Line 6: `return y`</span>',
'<span style="background-color: hsl(120.0, 20.0%, 80%)">Line 9: `return y`</span>',
], r'len(" middle ".strip()[:3])')
###Output
_____no_output_____
###Markdown
Indeed, in the `middle()` example, the "reddest" line is also the one to be fixed. Here is the fixed version:
###Code
def middle_fixed(x, y, z): # type: ignore
if y < z:
if x < y:
return y
elif x < z:
return x
else:
if x > y:
return y
elif x > z:
return x
return z
middle_fixed(2, 1, 3)
###Output
_____no_output_____
###Markdown
Ranking Lines by Suspiciousness

In a large program, there can be several locations (and events) that could be flagged as suspicious. If some large code block of, say, 1,000 lines is mostly executed in failing runs, then all of this code block will be visualized in some shade of red. To further highlight the "most suspicious" events, one idea is to use a _ranking_ – that is, coming up with a list of events in which those events most correlated with failures are shown at the top. The programmer would then examine these events one by one and proceed down the list. We will show how this works for two "correlation" metrics – first the _Tarantula_ metric, as introduced above, and then the _Ochiai_ metric, which has been shown to be one of the best "ranking" metrics. We introduce a base class `RankingDebugger` with an abstract method `suspiciousness()` to be overloaded in subclasses. The method `rank()` returns a list of all events observed, sorted by suspiciousness, highest first.
###Code
class RankingDebugger(DiscreteSpectrumDebugger):
"""Rank events by their suspiciousness"""
def rank(self) -> List[Any]:
"""Return a list of events, sorted by suspiciousness, highest first."""
def susp(event: Any) -> float:
suspiciousness = self.suspiciousness(event)
assert suspiciousness is not None
return suspiciousness
events = list(self.all_events())
events.sort(key=susp, reverse=True)
return events
def __repr__(self) -> str:
return repr(self.rank())
###Output
_____no_output_____
###Markdown
The Tarantula Metric

We can use the Tarantula metric to sort lines according to their suspiciousness. The "redder" a line (a hue of 0.0), the more suspicious it is. We can simply define $$\textit{suspiciousness}_\textit{tarantula}(\textit{event}) = 1 - \textit{color hue}(\textit{event})$$ where $\textit{color hue}$ is as defined above. This is exactly the `suspiciousness()` function as already implemented in our `ContinuousSpectrumDebugger`. We introduce the `TarantulaDebugger` class, inheriting visualization capabilities from the `ContinuousSpectrumDebugger` class as well as the ranking features from the `RankingDebugger` class.
###Code
class TarantulaDebugger(ContinuousSpectrumDebugger, RankingDebugger):
"""Spectrum-based Debugger using the Tarantula metric for suspiciousness"""
pass
###Output
_____no_output_____
###Markdown
Let us list `remove_html_markup()` with highlighted lines again:
###Code
tarantula_html = test_debugger_html(TarantulaDebugger())
tarantula_html
###Output
_____no_output_____
###Markdown
Here's our ranking of lines, from most suspicious to least suspicious:
###Code
tarantula_html.rank()
tarantula_html.suspiciousness(tarantula_html.rank()[0])
###Output
_____no_output_____
###Markdown
We see that the first line in the list is indeed the most suspicious; the two "green" lines come at the very end. For the `middle()` function, we also obtain a ranking from "reddest" to "greenest".
###Code
tarantula_middle = test_debugger_middle(TarantulaDebugger())
tarantula_middle
tarantula_middle.rank()
tarantula_middle.suspiciousness(tarantula_middle.rank()[0])
###Output
_____no_output_____
###Markdown
The Ochiai Metric

The _Ochiai_ metric \cite{Ochiai1957}, first introduced in the biology domain \cite{daSilvaMeyer2004} and later applied to fault localization by Abreu et al. \cite{Abreu2009}, is defined as follows:

$$\textit{suspiciousness}_\textit{ochiai} = \frac{\textit{failed}(\textit{event})}{\sqrt{\bigl(\textit{failed}(\textit{event}) + \textit{not-in-failed}(\textit{event})\bigr)\times\bigl(\textit{failed}(\textit{event}) + \textit{passed}(\textit{event})\bigr)}}$$

where

* $\textit{failed}(\textit{event})$ is the number of times the event occurred in _failing_ runs,
* $\textit{not-in-failed}(\textit{event})$ is the number of times the event did _not_ occur in failing runs, and
* $\textit{passed}(\textit{event})$ is the number of times the event occurred in _passing_ runs.

We can easily implement this formula:
###Code
import math
class OchiaiDebugger(ContinuousSpectrumDebugger, RankingDebugger):
"""Spectrum-based Debugger using the Ochiai metric for suspiciousness"""
def suspiciousness(self, event: Any) -> Optional[float]:
failed = len(self.collectors_with_event(event, self.FAIL))
not_in_failed = len(self.collectors_without_event(event, self.FAIL))
passed = len(self.collectors_with_event(event, self.PASS))
try:
return failed / math.sqrt((failed + not_in_failed) * (failed + passed))
except ZeroDivisionError:
return None
def hue(self, event: Any) -> Optional[float]:
suspiciousness = self.suspiciousness(event)
if suspiciousness is None:
return None
return 1 - suspiciousness
###Output
_____no_output_____
###Markdown
Applied to the `remove_html_markup()` function, the individual suspiciousness scores differ from those of Tarantula. However, we obtain a very similar visualization, and the same ranking.
###Code
ochiai_html = test_debugger_html(OchiaiDebugger())
ochiai_html
ochiai_html.rank()
ochiai_html.suspiciousness(ochiai_html.rank()[0])
###Output
_____no_output_____
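As a small worked sketch of the Ochiai formula with plain numbers (one failing and two passing HTML runs, as above; the helper function is ours, not part of the chapter's code):

```python
import math

def ochiai(failed: int, not_in_failed: int, passed: int) -> float:
    # failed: failing runs with the event; not_in_failed: failing runs without it;
    # passed: passing runs with the event.
    return failed / math.sqrt((failed + not_in_failed) * (failed + passed))

print(ochiai(failed=1, not_in_failed=0, passed=0))  # Line 12: failing run only -> 1.0
print(ochiai(failed=1, not_in_failed=0, passed=2))  # Line 7: all three runs -> ~0.58
```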
###Markdown
The same observations also apply for the `middle()` function.
###Code
ochiai_middle = test_debugger_middle(OchiaiDebugger())
ochiai_middle
ochiai_middle.rank()
ochiai_middle.suspiciousness(ochiai_middle.rank()[0])
###Output
_____no_output_____
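One simple way to judge such a ranking (a sketch, assuming the debuggers constructed above) is to check at which position the known faulty line – `('middle', 6)`, the `return y` that should be `return x` – shows up; the earlier, the better:

```python
# Position (1 = top) of the known faulty line in each ranking – a sketch.
for middle_debugger in [tarantula_middle, ochiai_middle]:
    position = middle_debugger.rank().index(('middle', 6)) + 1
    print(middle_debugger.__class__.__name__, position)
```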
###Markdown
How Useful is Ranking?

So, which metric is better? The standard method to evaluate such rankings is to determine a _ground truth_ – that is, the set of locations that eventually are fixed – and to check at which point in the ranking any such location occurs – the earlier, the better. In our `remove_html_markup()` and `middle()` examples, both the Tarantula and the Ochiai metric perform flawlessly, as the "culprit" line is always ranked at the top. However, this need not always be the case; the exact performance depends on the nature of the code and the observed runs. (Also, the question of whether there always is exactly one possible location where the program can be fixed is open for discussion.) You may be surprised that over time, _several dozen_ metrics have been proposed \cite{Wong2016}, each performing somewhat better or somewhat worse depending on the benchmark they were applied to. The two metrics discussed above each have their merits – the Tarantula metric was among the first such metrics, and the Ochiai metric is generally shown to be among the most effective ones \cite{Abreu2009}.

While rankings can be easily _evaluated_, it is not necessarily clear whether and how much they serve programmers. As stated above, the assumption behind rankings is that developers examine one potentially defective statement after another until they find the actually defective one. However, in a series of human studies with developers, Parnin and Orso \cite{Parnin2011} found that this assumption may not hold:

> It is unclear whether developers can actually determine the faulty nature of a statement by simply looking at it, without any additional information (e.g., the state of the program when the statement was executed or the statements that were executed before or after that one).

In their study, they found that rankings could help complete a task faster, but this effect was limited to experienced developers and simpler code. Artificially changing the rank of faulty statements had little to no effect, implying that developers would not strictly follow the ranked list of statements, but rather search through the code to understand it. At this point, a _visualization_ as in the Tarantula tool can be helpful to programmers as it _guides_ the search, but a _ranking_ that _defines_ where to search may be less useful. Having said that, ranking has its merits – notably when it comes to informing _automated_ debugging techniques. In the [chapter on program repair](Repairer.ipynb), we will see how ranked lists of potentially faulty statements tell automated repair techniques where to try to repair the program first. And once such a repair is successful, we have a very strong indication of where and how the program could be fixed!

Using Large Test Suites

In fault localization, the larger and the more thorough the test suite, the higher the precision. Let us try out what happens if we extend the `middle()` test suite with additional test cases. The function `middle_testcase()` returns a random input for `middle()`:
###Code
import random
def middle_testcase() -> Tuple[int, int, int]:
x = random.randrange(10)
y = random.randrange(10)
z = random.randrange(10)
return x, y, z
[middle_testcase() for i in range(5)]
###Output
_____no_output_____
###Markdown
The function `middle_test()` simply checks if `middle()` operates correctly – by placing `x`, `y`, and `z` in a list, sorting it, and checking the middle argument. If `middle()` fails, `middle_test()` raises an exception.
###Code
def middle_test(x: int, y: int, z: int) -> None:
m = middle(x, y, z)
assert m == sorted([x, y, z])[1]
middle_test(4, 5, 6)
from ExpectError import ExpectError
with ExpectError():
middle_test(2, 1, 3)
###Output
Traceback (most recent call last):
File "<ipython-input-1-ae2957225406>", line 2, in <module>
middle_test(2, 1, 3)
File "<ipython-input-1-e1407680b9f2>", line 3, in middle_test
assert m == sorted([x, y, z])[1]
AssertionError (expected)
###Markdown
The function `middle_passing_testcase()` searches and returns a triple `x`, `y`, `z` that causes `middle_test()` to pass.
###Code
def middle_passing_testcase() -> Tuple[int, int, int]:
while True:
try:
x, y, z = middle_testcase()
middle_test(x, y, z)
return x, y, z
except AssertionError:
pass
(x, y, z) = middle_passing_testcase()
m = middle(x, y, z)
print(f"middle({x}, {y}, {z}) = {m}")
###Output
middle(2, 6, 7) = 6
###Markdown
The function `middle_failing_testcase()` does the same; but its triple `x`, `y`, `z` causes `middle_test()` to fail.
###Code
def middle_failing_testcase() -> Tuple[int, int, int]:
while True:
try:
x, y, z = middle_testcase()
middle_test(x, y, z)
except AssertionError:
return x, y, z
(x, y, z) = middle_failing_testcase()
m = middle(x, y, z)
print(f"middle({x}, {y}, {z}) = {m}")
###Output
middle(5, 4, 6) = 4
###Markdown
With these, we can define two sets of test cases, each with 100 inputs.
###Code
MIDDLE_TESTS = 100
MIDDLE_PASSING_TESTCASES = [middle_passing_testcase()
for i in range(MIDDLE_TESTS)]
MIDDLE_FAILING_TESTCASES = [middle_failing_testcase()
for i in range(MIDDLE_TESTS)]
###Output
_____no_output_____
###Markdown
Let us run the `OchiaiDebugger` with these two test sets.
###Code
ochiai_middle = OchiaiDebugger()
for x, y, z in MIDDLE_PASSING_TESTCASES:
with ochiai_middle.collect_pass():
middle(x, y, z)
for x, y, z in MIDDLE_FAILING_TESTCASES:
with ochiai_middle.collect_fail():
middle(x, y, z)
ochiai_middle
###Output
_____no_output_____
###Markdown
We see that the "culprit" line is still the most likely to be fixed, but the two conditions leading to the error (`x < y` and `x < z`) are also listed as potentially faulty. That is because the error might also be fixed be changing these conditions – although this would result in a more complex fix. Other Events besides CoverageWe close this chapter with two directions for further thought. If you wondered why in the above code, we were mostly talking about `events` rather than lines covered, that is because our framework allows for tracking arbitrary events, not just coverage. In fact, any data item a collector can extract from the execution can be used for correlation analysis. (It may not be so easily visualized, though.) Here's an example. We define a `ValueCollector` class that collects pairs of (local) variables and their values during execution. Its `events()` method then returns the set of all these pairs.
###Code
class ValueCollector(Collector):
""""A class to collect local variables and their values."""
def __init__(self) -> None:
"""Constructor."""
super().__init__()
self.vars: Set[str] = set()
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
local_vars = frame.f_locals
for var in local_vars:
value = local_vars[var]
self.vars.add(f"{var} = {repr(value)}")
def events(self) -> Set[str]:
"""A set of (variable, value) pairs observed"""
return self.vars
###Output
_____no_output_____
###Markdown
If we apply this collector on our set of HTML test cases, these are all the events that we obtain – essentially all variables and all values ever seen:
###Code
debugger = test_debugger_html(ContinuousSpectrumDebugger(ValueCollector))
for event in debugger.all_events():
print(event)
###Output
quote = True
c = '"'
quote = False
s = '"abc"'
c = '<'
out = 'a'
c = 'a'
tag = False
s = 'abc'
c = 'c'
tag = True
out = 'ab'
c = 'b'
s = '<b>abc</b>'
c = '>'
out = 'abc'
out = ''
c = '/'
###Markdown
However, some of these events only occur in the failing run:
###Code
for event in debugger.only_fail_events():
print(event)
###Output
quote = True
s = '"abc"'
c = '"'
###Markdown
Some of these differences are spurious – the string `"abc"` (with quotes) only occurs in the failing run – but others, such as `quote` being True and `c` holding a quote character, are actually relevant for explaining how the failure comes to be. We can even visualize the suspiciousness of the individual events, setting the (so far undiscussed) `color` flag for producing an event table:
###Code
debugger.event_table(color=True, args=True)
###Output
_____no_output_____
###Markdown
There are many ways one can continue from here.

* Rather than checking for concrete values, one could check for more _abstract properties_ – for instance, what is the sign of the value? What is the length of the string?
* One could check for specifics of the _control flow_ – is the loop taken? How many times?
* One could check for specifics of the _information flow_ – which values flow from one variable to another?

There are lots of properties that could all be related to failures – and if we happen to check for the right one, we may obtain a much crisper definition of what causes the failure. We will come up with more ideas on properties to check when it comes to [mining specifications](SpecificationMining.ipynb).

Training Classifiers

The metrics we have discussed so far are pretty _generic_ – that is, they are fixed no matter how the actual event space is structured. The field of _machine learning_ has come up with techniques that learn _classifiers_ from a given set of data – classifiers that are trained on labeled data and then can predict labels for new data sets. In our case, the labels are test outcomes (PASS and FAIL), whereas the data would be features of the events observed. A classifier by itself is not immediately useful for debugging (although it could predict whether future inputs will fail or not). Some classifiers, however, have great _diagnostic_ quality; that is, they can _explain_ how their classification comes to be. [Decision trees](https://scikit-learn.org/stable/modules/tree.html) fall into this very category. A decision tree contains a number of _nodes_, each one associated with a predicate. Depending on whether the predicate is true or false, we follow the given "true" or "false" branch to end up in the next node, which again contains a predicate. Eventually, we end up in the outcome predicted by the tree. The neat thing is that the node predicates actually give important hints on the circumstances that are _most relevant_ for deciding the outcome.

Let us illustrate this with an example. We build a class `ClassifyingDebugger` that trains a decision tree from the events collected. To this end, we need to set up our input data such that it can be fed into a classifier. We start with identifying our _samples_ (runs) and the respective _labels_ (outcomes). All values have to be encoded into numerical values.
###Code
class ClassifyingDebugger(DifferenceDebugger):
"""A debugger implementing a decision tree for events"""
PASS_VALUE = +1.0
FAIL_VALUE = -1.0
def samples(self) -> Dict[str, float]:
samples = {}
for collector in self.pass_collectors():
samples[collector.id()] = self.PASS_VALUE
        for collector in self.fail_collectors():
samples[collector.id()] = self.FAIL_VALUE
return samples
debugger = test_debugger_html(ClassifyingDebugger())
debugger.samples()
###Output
_____no_output_____
###Markdown
Next, we identify the _features_, which in our case are the sets of lines executed in each sample:
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def features(self) -> Dict[str, Any]:
features = {}
        for collector in self.pass_collectors():
features[collector.id()] = collector.events()
        for collector in self.fail_collectors():
features[collector.id()] = collector.events()
return features
debugger = test_debugger_html(ClassifyingDebugger())
debugger.features()
###Output
_____no_output_____
###Markdown
All our features have names, which must be strings.
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def feature_names(self) -> List[str]:
return [repr(feature) for feature in self.all_events()]
debugger = test_debugger_html(ClassifyingDebugger())
debugger.feature_names()
###Output
_____no_output_____
###Markdown
Next, we define the _shape_ for an individual sample, which is a value of +1 or -1 for each feature seen (i.e., +1 if the line was covered, -1 if not).
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def shape(self, sample: str) -> List[float]:
x = []
features = self.features()
for f in self.all_events():
if f in features[sample]:
x += [+1.0]
else:
x += [-1.0]
return x
debugger = test_debugger_html(ClassifyingDebugger())
debugger.shape("remove_html_markup(s='abc')")
###Output
_____no_output_____
###Markdown
Our input X for the classifier now is a list of such shapes, one for each sample.
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def X(self) -> List[List[float]]:
X = []
samples = self.samples()
for key in samples:
X += [self.shape(key)]
return X
debugger = test_debugger_html(ClassifyingDebugger())
debugger.X()
###Output
_____no_output_____
###Markdown
Our input Y for the classifier, in contrast, is the list of labels, again indexed by sample.
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def Y(self) -> List[float]:
Y = []
samples = self.samples()
for key in samples:
Y += [samples[key]]
return Y
debugger = test_debugger_html(ClassifyingDebugger())
debugger.Y()
###Output
_____no_output_____
###Markdown
We now have all our data ready to be fit into a tree classifier. The method `classifier()` creates and returns the (tree) classifier for the observed runs.
###Code
from sklearn.tree import DecisionTreeClassifier, export_text, export_graphviz
class ClassifyingDebugger(ClassifyingDebugger):
def classifier(self) -> DecisionTreeClassifier:
classifier = DecisionTreeClassifier()
classifier = classifier.fit(self.X(), self.Y())
return classifier
###Output
_____no_output_____
###Markdown
We define a special method to show classifiers:
###Code
import graphviz
class ClassifyingDebugger(ClassifyingDebugger):
def show_classifier(self, classifier: DecisionTreeClassifier) -> Any:
dot_data = export_graphviz(classifier, out_file=None,
filled=False, rounded=True,
feature_names=self.feature_names(),
class_names=["FAIL", "PASS"],
label='none',
node_ids=False,
impurity=False,
proportion=True,
special_characters=True)
return graphviz.Source(dot_data)
###Output
_____no_output_____
###Markdown
This is the tree we get for our `remove_html_markup()` tests. The top predicate is whether the "culprit" line was executed (-1 means no, +1 means yes). If not (-1), the outcome is PASS. Otherwise, the outcome is FAIL.
###Code
debugger = test_debugger_html(ClassifyingDebugger())
classifier = debugger.classifier()
debugger.show_classifier(classifier)
###Output
_____no_output_____
###Markdown
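If Graphviz is not available, the same tree can also be rendered as plain text – a small sketch using the `export_text()` function imported above (assuming a reasonably recent scikit-learn):

```python
# Plain-text rendering of the decision tree (sketch; uses sklearn.tree.export_text,
# which was imported together with DecisionTreeClassifier above).
print(export_text(classifier, feature_names=debugger.feature_names()))
```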
We can even use our classifier to predict the outcome of additional runs. If, for instance, we execute all lines except for, say, Line 7, 9, and 11, our tree classifier would predict failure – because the "culprit" line 12 is executed.
###Code
classifier.predict([[1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1]])
###Output
_____no_output_____
###Markdown
Again, there are many ways to continue from here. Which events should we train the classifier from? How do classifiers compare in their performance and diagnostic quality? There are lots of possibilities left to explore, and we are only beginning to realize the potential for automated debugging. SynopsisThis chapter introduces classes and techniques for _statistical debugging_ – that is, correlating specific events, such as lines covered, with passing and failing outcomes.To make use of the code in this chapter, use one of the provided `StatisticalDebugger` subclasses such as `TarantulaDebugger` or `OchiaiDebugger`. Both are instantiated with a `Collector` denoting the type of events you want to correlate outcomes with. The default is `CoverageCollector`, collecting line coverage. Collecting Events from CallsTo collect events from calls that are labeled manually, use
###Code
debugger = TarantulaDebugger()
with debugger.collect_pass():
remove_html_markup("abc")
with debugger.collect_pass():
remove_html_markup('<b>abc</b>')
with debugger.collect_fail():
remove_html_markup('"abc"')
###Output
_____no_output_____
###Markdown
Within each `with` block, the _first function call_ is collected and tracked for coverage. (Note that _only_ the first call is tracked.) Collecting Events from TestsTo collect events from _tests_ that use exceptions to indicate failure, use the simpler `with` form:
###Code
debugger = TarantulaDebugger()
with debugger:
remove_html_markup("abc")
with debugger:
remove_html_markup('<b>abc</b>')
with debugger:
remove_html_markup('"abc"')
assert False # raise an exception
###Output
_____no_output_____
###Markdown
`with` blocks that raise an exception will be classified as failing, blocks that do not will be classified as passing. Note that exceptions raised are "swallowed" by the debugger. Visualizing Events as a TableAfter collecting events, you can print out the observed events – in this case, line numbers – in a table, showing in which runs they occurred (`X`), and with colors highlighting the suspiciousness of the event. A "red" event means that the event predominantly occurs in failing runs.
###Code
debugger.event_table(args=True, color=True)
###Output
_____no_output_____
###Markdown
Visualizing Suspicious CodeIf you collected coverage with `CoverageCollector`, you can also visualize the code with similar colors, highlighting suspicious lines:
###Code
debugger
###Output
_____no_output_____
###Markdown
Ranking EventsThe method `rank()` returns a ranked list of events, starting with the most suspicious. This is useful for automated techniques that need potential defect locations.
###Code
debugger.rank()
###Output
_____no_output_____
###Markdown
Classes and MethodsHere are all classes defined in this chapter:
###Code
# ignore
from ClassDiagram import display_class_hierarchy
# ignore
display_class_hierarchy([TarantulaDebugger, OchiaiDebugger],
abstract_classes=[
StatisticalDebugger,
DifferenceDebugger,
RankingDebugger
],
public_methods=[
StatisticalDebugger.__init__,
StatisticalDebugger.all_events,
StatisticalDebugger.event_table,
StatisticalDebugger.function,
StatisticalDebugger.coverage,
StatisticalDebugger.covered_functions,
DifferenceDebugger.__enter__,
DifferenceDebugger.__exit__,
DifferenceDebugger.all_pass_events,
DifferenceDebugger.all_fail_events,
DifferenceDebugger.collect_pass,
DifferenceDebugger.collect_fail,
DifferenceDebugger.only_pass_events,
DifferenceDebugger.only_fail_events,
SpectrumDebugger.code,
SpectrumDebugger.__repr__,
SpectrumDebugger.__str__,
SpectrumDebugger._repr_html_,
ContinuousSpectrumDebugger.code,
ContinuousSpectrumDebugger.__repr__,
RankingDebugger.rank
],
project='debuggingbook')
# ignore
display_class_hierarchy([CoverageCollector, ValueCollector],
public_methods=[
Tracer.__init__,
Tracer.__enter__,
Tracer.__exit__,
Tracer.changed_vars, # type: ignore
Collector.__init__,
Collector.__repr__,
Collector.function,
Collector.args,
Collector.argstring,
Collector.exception,
Collector.id,
Collector.collect,
CoverageCollector.coverage,
CoverageCollector.covered_functions,
CoverageCollector.events,
ValueCollector.__init__,
ValueCollector.events
],
project='debuggingbook')
###Output
_____no_output_____
###Markdown
Lessons Learned* _Correlations_ between execution events and outcomes (pass/fail) can provide important hints for debugging* Events occurring only (or mostly) during failing runs can be _highlighted_ and _ranked_ to guide the search* Important hints include whether the _execution of specific code locations_ correlates with failure Next StepsChapters that build on this one include* [how to determine invariants that correlate with failures](DynamicInvariants.ipynb)* [how to automatically repair programs](Repairer.ipynb) BackgroundThe seminal works on statistical debugging are two papers:* "Visualization of Test Information to Assist Fault Localization" \cite{Jones2002} by James Jones, Mary Jean Harrold, and John Stasko, introducing Tarantula and its visualization. The paper won an ACM SIGSOFT 10-year impact award.* "Bug Isolation via Remote Program Sampling" \cite{Liblit2003} by Ben Liblit, Alex Aiken, Alice X. Zheng, and Michael I. Jordan, introducing the term "Statistical debugging". Liblit won the ACM Doctoral Dissertation Award for this work.The Ochiai metric for fault localization was introduced by \cite{Abreu2009}. The survey by Wong et al. \cite{Wong2016} gives a comprehensive overview of the field of statistical fault localization.The study by Parnin and Orso \cite{Parnin2011} is a must-read to understand the limitations of the technique. Exercises Exercise 1: A Postcondition for MiddleWhat would be a postcondition for `middle()`? How can you check it? **Solution.** A simple postcondition for `middle()` would be```pythonassert m == sorted([x, y, z])[1]```where `m` is the value returned by `middle()`. `sorted()` sorts the given list, and the index `[1]` returns, well, the middle element. (This might also be a much shorter, but possibly slightly more expensive, implementation of `middle()`.) Since `middle()` has several `return` statements, the easiest way to check the result is to create a wrapper around `middle()`:
###Code
def middle_checked(x, y, z): # type: ignore
m = middle(x, y, z)
assert m == sorted([x, y, z])[1]
return m
###Output
_____no_output_____
###Markdown
`middle_checked()` catches the error:
###Code
from ExpectError import ExpectError
with ExpectError():
m = middle_checked(2, 1, 3)
###Output
Traceback (most recent call last):
File "<ipython-input-1-3c03371d2614>", line 2, in <module>
m = middle_checked(2, 1, 3)
File "<ipython-input-1-7a70e9d5c211>", line 3, in middle_checked
assert m == sorted([x, y, z])[1]
AssertionError (expected)
###Markdown
Statistical DebuggingIn this chapter, we introduce _statistical debugging_ – the idea that specific events during execution could be _statistically correlated_ with failures. We start with coverage of individual lines and then proceed towards further execution features.
###Code
from bookutils import YouTubeVideo
YouTubeVideo("UNuso00zYiI")
###Output
_____no_output_____
###Markdown
**Prerequisites*** You should have read the [chapter on tracing executions](Tracer.ipynb).
###Code
import bookutils
###Output
_____no_output_____
###Markdown
SynopsisTo [use the code provided in this chapter](Importing.ipynb), write```python>>> from debuggingbook.StatisticalDebugger import ```and then make use of the following features.This chapter introduces classes and techniques for _statistical debugging_ – that is, correlating specific events, such as lines covered, with passing and failing outcomes.To make use of the code in this chapter, use one of the provided `StatisticalDebugger` subclasses such as `TarantulaDebugger` or `OchiaiDebugger`. Both are instantiated with a `Collector` denoting the type of events you want to correlate outcomes with. The default is `CoverageCollector`, collecting line coverage. Collecting Events from CallsTo collect events from calls that are labeled manually, use```python>>> debugger = TarantulaDebugger()>>> with debugger.collect_pass():>>> remove_html_markup("abc")>>> with debugger.collect_pass():>>> remove_html_markup('<b>abc</b>')>>> with debugger.collect_fail():>>> remove_html_markup('"abc"')```Within each `with` block, the _first function call_ is collected and tracked for coverage. (Note that _only_ the first call is tracked.) Collecting Events from TestsTo collect events from _tests_ that use exceptions to indicate failure, use the simpler `with` form:```python>>> debugger = TarantulaDebugger()>>> with debugger:>>> remove_html_markup("abc")>>> with debugger:>>> remove_html_markup('<b>abc</b>')>>> with debugger:>>> remove_html_markup('"abc"')>>> assert False  # raise an exception````with` blocks that raise an exception will be classified as failing, blocks that do not will be classified as passing. Note that exceptions raised are "swallowed" by the debugger. Visualizing Events as a TableAfter collecting events, you can print out the observed events – in this case, line numbers – in a table, showing in which runs they occurred (`X`), and with colors highlighting the suspiciousness of the event. 
A "red" event means that the event predominantly occurs in failing runs.```python>>> debugger.event_table(args=True, color=True)```| `remove_html_markup` | `s='abc'` | `s='abc'` | `s='"abc"'` | | --------------------- | ---- | ---- | ---- | | remove_html_markup:1 | X | X | X | | remove_html_markup:2 | X | X | X | | remove_html_markup:3 | X | X | X | | remove_html_markup:4 | X | X | X | | remove_html_markup:6 | X | X | X | | remove_html_markup:7 | X | X | X | | remove_html_markup:8 | - | X | - | | remove_html_markup:9 | X | X | X | | remove_html_markup:10 | - | X | - | | remove_html_markup:11 | X | X | X | | remove_html_markup:12 | - | - | X | | remove_html_markup:13 | X | X | X | | remove_html_markup:14 | X | X | X | | remove_html_markup:16 | X | X | X | Visualizing Suspicious CodeIf you collected coverage with `CoverageCollector`, you can also visualize the code with similar colors, highlighting suspicious lines:```python>>> debugger```<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 1: 50%"> 1 def remove_html_markup(s): type: ignore<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 2: 50%"> 2 tag = False<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 3: 50%"> 3 quote = False<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 4: 50%"> 4 out = "" 5 <pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 6: 50%"> 6 for c in s:<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 7: 50%"> 7 if c == &x27;<&x27; and not quote:<pre style="background-color:hsl(120.0, 50.0%, 80%)" title="Line 8: 0%"> 8 tag = True<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 9: 50%"> 9 elif c == &x27;>&x27; and not quote:<pre style="background-color:hsl(120.0, 50.0%, 80%)" title="Line 10: 0%"> 10 tag = False<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 11: 50%"> 11 elif c == &x27;"&x27; or c == "&x27;" and tag:<pre style="background-color:hsl(0.0, 100.0%, 80%)" title="Line 12: 100%"> 12 quote = not quote<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 13: 50%"> 13 elif not tag:<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 14: 50%"> 14 out = out + c 15 <pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 16: 50%"> 16 return out Ranking EventsThe method `rank()` returns a ranked list of events, starting with the most suspicious. This is useful for automated techniques that need potential defect locations.```python>>> debugger.rank()[('remove_html_markup', 12), ('remove_html_markup', 16), ('remove_html_markup', 2), ('remove_html_markup', 14), ('remove_html_markup', 11), ('remove_html_markup', 3), ('remove_html_markup', 6), ('remove_html_markup', 9), ('remove_html_markup', 1), ('remove_html_markup', 7), ('remove_html_markup', 4), ('remove_html_markup', 13), ('remove_html_markup', 8), ('remove_html_markup', 10)]``` Classes and MethodsHere are all classes defined in this chapter: IntroductionThe idea behind _statistical debugging_ is fairly simple. We have a program that sometimes passes and sometimes fails. This outcome can be _correlated_ with events that precede it – properties of the input, properties of the execution, properties of the program state. If we, for instance, can find that "the program always fails when Line 123 is executed, and it always passes when Line 123 is _not_ executed", then we have a strong correlation between Line 123 being executed and failure.Such _correlation_ does not necessarily mean _causation_. 
For this, we would have to prove that executing Line 123 _always_ leads to failure, and that _not_ executing it does not lead to (this) failure. Also, a correlation (or even a causation) does not mean that Line 123 contains the defect – for this, we would have to show that it actually is an error. Still, correlations make excellent hints when it comes to searching for failure causes – in all generality, if you let your search be guided by _events that correlate with failures_, you are more likely to find _important hints on how the failure comes to be_. Collecting EventsHow can we determine events that correlate with failure? We start with a general mechanism to actually _collect_ events during execution. The abstract `Collector` class provides* a `collect()` method made for collecting events, called from the `traceit()` tracer; and* an `events()` method made for retrieving these events.Both of these are _abstract_ and will be defined further in subclasses.
###Code
from Tracer import Tracer
# ignore
from typing import Any, Callable, Optional, Type, Tuple
from typing import Dict, Set, List, TypeVar, Union
from types import FrameType, TracebackType
class Collector(Tracer):
"""A class to record events during execution."""
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
"""Collecting function. To be overridden in subclasses."""
pass
def events(self) -> Set:
"""Return a collection of events. To be overridden in subclasses."""
return set()
def traceit(self, frame: FrameType, event: str, arg: Any) -> None:
self.collect(frame, event, arg)
###Output
_____no_output_____
###Markdown
A `Collector` class is used like `Tracer`, using a `with` statement. Let us apply it on the buggy variant of `remove_html_markup()` from the [Introduction to Debugging](Intro_Debugging.ipynb):
###Code
def remove_html_markup(s): # type: ignore
tag = False
quote = False
out = ""
for c in s:
if c == '<' and not quote:
tag = True
elif c == '>' and not quote:
tag = False
elif c == '"' or c == "'" and tag:
quote = not quote
elif not tag:
out = out + c
return out
with Collector() as c:
out = remove_html_markup('"abc"')
out
###Output
_____no_output_____
###Markdown
There's not much we can do with our collector, as the `collect()` and `events()` methods are yet empty. However, we can introduce an `id()` method which returns a string identifying the collector. This string is defined from the _first function call_ encountered.
###Code
Coverage = Set[Tuple[Callable, int]]
class Collector(Collector):
def __init__(self) -> None:
"""Constructor."""
self._function: Optional[Callable] = None
self._args: Optional[Dict[str, Any]] = None
self._argstring: Optional[str] = None
self._exception: Optional[Type] = None
self.items_to_ignore: List[Union[Type, Callable]] = [self.__class__]
def traceit(self, frame: FrameType, event: str, arg: Any) -> None:
"""
Tracing function.
Saves the first function and calls collect().
"""
for item in self.items_to_ignore:
if (isinstance(item, type) and 'self' in frame.f_locals and
isinstance(frame.f_locals['self'], item)):
# Ignore this class
return
if item.__name__ == frame.f_code.co_name:
# Ignore this function
return
if self._function is None and event == 'call':
# Save function
self._function = self.create_function(frame)
self._args = frame.f_locals.copy()
self._argstring = ", ".join([f"{var}={repr(self._args[var])}"
for var in self._args])
self.collect(frame, event, arg)
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
"""Collector function. To be overloaded in subclasses."""
pass
def id(self) -> str:
"""Return an identifier for the collector,
created from the first call"""
return f"{self.function().__name__}({self.argstring()})"
def function(self) -> Callable:
"""Return the function from the first call, as a function object"""
if not self._function:
raise ValueError("No call collected")
return self._function
def argstring(self) -> str:
"""
Return the list of arguments from the first call,
as a printable string
"""
if not self._argstring:
raise ValueError("No call collected")
return self._argstring
def args(self) -> Dict[str, Any]:
"""Return a dict of argument names and values from the first call"""
if not self._args:
raise ValueError("No call collected")
return self._args
def exception(self) -> Optional[Type]:
"""Return the exception class from the first call,
or None if no exception was raised."""
return self._exception
def __repr__(self) -> str:
"""Return a string representation of the collector"""
# We use the ID as default representation when printed
return self.id()
def covered_functions(self) -> Set[Callable]:
"""Set of covered functions. To be overloaded in subclasses."""
return set()
def coverage(self) -> Coverage:
"""
Return a set (function, lineno) with locations covered.
To be overloaded in subclasses.
"""
return set()
###Output
_____no_output_____
###Markdown
Here's how the collector works. We use a `with` clause to collect details on a function call:
###Code
with Collector() as c:
remove_html_markup('abc')
###Output
_____no_output_____
###Markdown
We can now retrieve details such as the function called...
###Code
c.function()
###Output
_____no_output_____
###Markdown
... or its arguments, as a name/value dictionary.
###Code
c.args()
###Output
_____no_output_____
###Markdown
The `id()` method returns a printable representation of the call:
###Code
c.id()
###Output
_____no_output_____
###Markdown
The `argstring()` method does the same for the argument string only.
###Code
c.argstring()
###Output
_____no_output_____
###Markdown
With this, we can collect the basic information to identify calls – such that we can later correlate their events with success or failure. Error Prevention While collecting, we'd like to avoid collecting events in the collection infrastructure. The `items_to_ignore` attribute takes care of this.
###Code
class Collector(Collector):
def add_items_to_ignore(self,
items_to_ignore: List[Union[Type, Callable]]) \
-> None:
"""
Define additional classes and functions to ignore during collection
(typically `Debugger` classes using these collectors).
"""
self.items_to_ignore += items_to_ignore
###Output
_____no_output_____
###Markdown
If we exit a block without having collected anything, that's likely an error.
###Code
class Collector(Collector):
def __exit__(self, exc_tp: Type, exc_value: BaseException,
exc_traceback: TracebackType) -> Optional[bool]:
"""Exit the `with` block."""
ret = super().__exit__(exc_tp, exc_value, exc_traceback)
if not self._function:
if exc_tp:
return False # re-raise exception
else:
raise ValueError("No call collected")
return ret
###Output
_____no_output_____
###Markdown
Collecting CoverageSo far, our `Collector` class does not collect any events. Let us extend it such that it collects _coverage_ information – that is, the set of locations executed. To this end, we introduce a `CoverageCollector` subclass which saves the coverage in a set containing functions and line numbers.
###Code
from types import FrameType
from StackInspector import StackInspector
class CoverageCollector(Collector, StackInspector):
"""A class to record covered locations during execution."""
def __init__(self) -> None:
"""Constructor."""
super().__init__()
self._coverage: Coverage = set()
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
"""
Save coverage for an observed event.
"""
name = frame.f_code.co_name
function = self.search_func(name, frame)
if function is None:
function = self.create_function(frame)
location = (function, frame.f_lineno)
self._coverage.add(location)
###Output
_____no_output_____
###Markdown
We also override `events()` such that it returns the set of covered locations.
###Code
class CoverageCollector(CoverageCollector):
def events(self) -> Set[Tuple[str, int]]:
"""
Return the set of locations covered.
Each location comes as a pair (`function_name`, `lineno`).
"""
return {(func.__name__, lineno) for func, lineno in self._coverage}
###Output
_____no_output_____
###Markdown
The methods `coverage()` and `covered_functions()` allow precise access to the coverage obtained.
###Code
class CoverageCollector(CoverageCollector):
def covered_functions(self) -> Set[Callable]:
"""Return a set with all functions covered."""
return {func for func, lineno in self._coverage}
def coverage(self) -> Coverage:
"""Return a set (function, lineno) with all locations covered."""
return self._coverage
###Output
_____no_output_____
###Markdown
Here is how we can use `CoverageCollector` to determine the lines executed during a run of `remove_html_markup()`:
###Code
with CoverageCollector() as c:
remove_html_markup('abc')
c.events()
###Output
_____no_output_____
###Markdown
Sets of line numbers alone are not too revealing. They provide more insights if we actually list the code, highlighting these numbers:
###Code
import inspect
from bookutils import getsourcelines # like inspect.getsourcelines(), but in color
def code_with_coverage(function: Callable, coverage: Coverage) -> None:
source_lines, starting_line_number = \
getsourcelines(function)
line_number = starting_line_number
for line in source_lines:
marker = '*' if (function, line_number) in coverage else ' '
print(f"{line_number:4} {marker} {line}", end='')
line_number += 1
code_with_coverage(remove_html_markup, c.coverage())
###Output
1 * [34mdef[39;49;00m [32mremove_html_markup[39;49;00m(s): [37m# type: ignore[39;49;00m
2 * tag = [34mFalse[39;49;00m
3 * quote = [34mFalse[39;49;00m
4 * out = [33m"[39;49;00m[33m"[39;49;00m
5
6 * [34mfor[39;49;00m c [35min[39;49;00m s:
7 * [34mif[39;49;00m c == [33m'[39;49;00m[33m<[39;49;00m[33m'[39;49;00m [35mand[39;49;00m [35mnot[39;49;00m quote:
8 tag = [34mTrue[39;49;00m
9 * [34melif[39;49;00m c == [33m'[39;49;00m[33m>[39;49;00m[33m'[39;49;00m [35mand[39;49;00m [35mnot[39;49;00m quote:
10 tag = [34mFalse[39;49;00m
11 * [34melif[39;49;00m c == [33m'[39;49;00m[33m"[39;49;00m[33m'[39;49;00m [35mor[39;49;00m c == [33m"[39;49;00m[33m'[39;49;00m[33m"[39;49;00m [35mand[39;49;00m tag:
12 quote = [35mnot[39;49;00m quote
13 * [34melif[39;49;00m [35mnot[39;49;00m tag:
14 * out = out + c
15
16 * [34mreturn[39;49;00m out
###Markdown
Remember that the input `s` was `"abc"`? In this listing, we can see which lines were covered and which lines were not. From the listing already, we can see that `s` has neither tags nor quotes. Such coverage computation plays a big role in _testing_, as one wants tests to cover as many different aspects of program execution (and notably code) as possible. But also during debugging, code coverage is essential: If some code was not even executed in the failing run, then any change to it will have no effect.
###Code
from bookutils import quiz
quiz('Let the input be `"<b>Don\'t do this!</b>"`. '
"Which of these lines are executed? Use the code to find out!",
[
"`tag = True`",
"`tag = False`",
"`quote = not quote`",
"`out = out + c`"
], "[ord(c) - ord('a') - 1 for c in 'cdf']")
###Output
_____no_output_____
###Markdown
To find the solution, try this out yourself:
###Code
with CoverageCollector() as c:
remove_html_markup("<b>Don't do this!</b>")
# code_with_coverage(remove_html_markup, c.coverage)
###Output
_____no_output_____
###Markdown
Computing DifferencesLet us get back to the idea that we want to _correlate_ events with passing and failing outcomes. For this, we need to examine events in both _passing_ and _failing_ runs, and determine their _differences_ – since it is these differences we want to associate with their respective outcome. A Base Class for Statistical DebuggingThe `StatisticalDebugger` base class takes a collector class (such as `CoverageCollector`). Its `collect()` method creates a new collector of that very class, which will be maintained by the debugger. As argument, `collect()` takes a string characterizing the outcome (such as `'PASS'` or `'FAIL'`). This is how one would use it:```pythondebugger = StatisticalDebugger()with debugger.collect('PASS'): some_passing_run()with debugger.collect('PASS'): another_passing_run()with debugger.collect('FAIL'): some_failing_run()``` Let us implement `StatisticalDebugger`. The base class gets a collector class as argument:
###Code
class StatisticalDebugger:
"""A class to collect events for multiple outcomes."""
def __init__(self, collector_class: Type = CoverageCollector, log: bool = False):
"""Constructor. Use instances of `collector_class` to collect events."""
self.collector_class = collector_class
self.collectors: Dict[str, List[Collector]] = {}
self.log = log
###Output
_____no_output_____
###Markdown
The `collect()` method creates (and stores) a collector for the given outcome, using the given outcome to characterize the run. Any additional arguments are passed to the collector.
###Code
class StatisticalDebugger(StatisticalDebugger):
def collect(self, outcome: str, *args: Any, **kwargs: Any) -> Collector:
"""Return a collector for the given outcome.
Additional args are passed to the collector."""
collector = self.collector_class(*args, **kwargs)
collector.add_items_to_ignore([self.__class__])
return self.add_collector(outcome, collector)
def add_collector(self, outcome: str, collector: Collector) -> Collector:
if outcome not in self.collectors:
self.collectors[outcome] = []
self.collectors[outcome].append(collector)
return collector
###Output
_____no_output_____
###Markdown
The `all_events()` method produces a union of all events observed. If an outcome is given, it produces a union of all events with that outcome:
###Code
class StatisticalDebugger(StatisticalDebugger):
def all_events(self, outcome: Optional[str] = None) -> Set[Any]:
"""Return a set of all events observed."""
all_events = set()
if outcome:
if outcome in self.collectors:
for collector in self.collectors[outcome]:
all_events.update(collector.events())
else:
for outcome in self.collectors:
for collector in self.collectors[outcome]:
all_events.update(collector.events())
return all_events
###Output
_____no_output_____
###Markdown
Here's a simple example of `StatisticalDebugger` in action:
###Code
s = StatisticalDebugger()
with s.collect('PASS'):
remove_html_markup("abc")
with s.collect('PASS'):
remove_html_markup('<b>abc</b>')
with s.collect('FAIL'):
remove_html_markup('"abc"')
###Output
_____no_output_____
###Markdown
The method `all_events()` returns all events collected:
###Code
s.all_events()
###Output
_____no_output_____
###Markdown
If given an outcome as argument, we obtain all events with the given outcome.
###Code
s.all_events('FAIL')
###Output
_____no_output_____
###Markdown
The attribute `collectors` maps outcomes to lists of collectors:
###Code
s.collectors
###Output
_____no_output_____
###Markdown
Here's the collector of the first passing run:
###Code
s.collectors['PASS'][0].id()
s.collectors['PASS'][0].events()
###Output
_____no_output_____
###Markdown
To better highlight the differences between the collected events, we introduce a method `event_table()` that prints out whether an event took place in a run. Excursion: Printing an Event Table
###Code
from IPython.display import Markdown
import html
class StatisticalDebugger(StatisticalDebugger):
def function(self) -> Optional[Callable]:
"""
Return the entry function from the events observed,
or None if ambiguous.
"""
names_seen = set()
functions = []
for outcome in self.collectors:
for collector in self.collectors[outcome]:
# We may have multiple copies of the function,
# but sharing the same name
func = collector.function()
if func.__name__ not in names_seen:
functions.append(func)
names_seen.add(func.__name__)
if len(functions) != 1:
return None # ambiguous
return functions[0]
def covered_functions(self) -> Set[Callable]:
"""Return a set of all functions observed."""
functions = set()
for outcome in self.collectors:
for collector in self.collectors[outcome]:
functions |= collector.covered_functions()
return functions
def coverage(self) -> Coverage:
"""Return a set of all (functions, line_numbers) observed"""
coverage = set()
for outcome in self.collectors:
for collector in self.collectors[outcome]:
coverage |= collector.coverage()
return coverage
def color(self, event: Any) -> Optional[str]:
"""
Return a color for the given event, or None.
To be overloaded in subclasses.
"""
return None
def tooltip(self, event: Any) -> Optional[str]:
"""
Return a tooltip string for the given event, or None.
To be overloaded in subclasses.
"""
return None
def event_str(self, event: Any) -> str:
"""Format the given event. To be overloaded in subclasses."""
if isinstance(event, str):
return event
if isinstance(event, tuple):
return ":".join(self.event_str(elem) for elem in event)
return str(event)
def event_table_text(self, *, args: bool = False, color: bool = False) -> str:
"""
Print out a table of events observed.
If `args` is True, use arguments as headers.
If `color` is True, use colors.
"""
sep = ' | '
all_events = self.all_events()
longest_event = max(len(f"{self.event_str(event)}")
for event in all_events)
out = ""
# Header
if args:
out += '| '
func = self.function()
if func:
out += '`' + func.__name__ + '`'
out += sep
for name in self.collectors:
for collector in self.collectors[name]:
out += '`' + collector.argstring() + '`' + sep
out += '\n'
else:
out += '| ' + ' ' * longest_event + sep
for name in self.collectors:
for i in range(len(self.collectors[name])):
out += name + sep
out += '\n'
out += '| ' + '-' * longest_event + sep
for name in self.collectors:
for i in range(len(self.collectors[name])):
out += '-' * len(name) + sep
out += '\n'
# Data
for event in sorted(all_events):
event_name = self.event_str(event).rjust(longest_event)
tooltip = self.tooltip(event)
if tooltip:
title = f' title="{tooltip}"'
else:
title = ''
if color:
color_name = self.color(event)
if color_name:
event_name = \
f'<samp style="background-color: {color_name}"{title}>' \
f'{html.escape(event_name)}' \
f'</samp>'
out += f"| {event_name}" + sep
for name in self.collectors:
for collector in self.collectors[name]:
out += ' ' * (len(name) - 1)
if event in collector.events():
out += "X"
else:
out += "-"
out += sep
out += '\n'
return out
def event_table(self, **_args: Any) -> Any:
"""Print out event table in Markdown format."""
return Markdown(self.event_table_text(**_args))
def __repr__(self) -> str:
return self.event_table_text()
def _repr_markdown_(self) -> str:
return self.event_table_text(args=True, color=True)
###Output
_____no_output_____
###Markdown
End of Excursion
###Code
s = StatisticalDebugger()
with s.collect('PASS'):
remove_html_markup("abc")
with s.collect('PASS'):
remove_html_markup('<b>abc</b>')
with s.collect('FAIL'):
remove_html_markup('"abc"')
s.event_table(args=True)
quiz("How many lines are executed in the failing run only?",
[
"One",
"Two",
"Three"
], 'len([12])')
###Output
_____no_output_____
###Markdown
Indeed, Line 12, which is executed only in the failing run, would be a correlation to look for. Collecting Passing and Failing RunsWhile our `StatisticalDebugger` class allows arbitrary outcomes, we are typically only interested in two outcomes, namely _passing_ vs. _failing_ runs. We therefore introduce a specialized `DifferenceDebugger` class that provides customized methods to collect and access passing and failing runs.
###Code
class DifferenceDebugger(StatisticalDebugger):
"""A class to collect events for passing and failing outcomes."""
PASS = 'PASS'
FAIL = 'FAIL'
def collect_pass(self, *args: Any, **kwargs: Any) -> Collector:
"""Return a collector for passing runs."""
return self.collect(self.PASS, *args, **kwargs)
def collect_fail(self, *args: Any, **kwargs: Any) -> Collector:
"""Return a collector for failing runs."""
return self.collect(self.FAIL, *args, **kwargs)
def pass_collectors(self) -> List[Collector]:
return self.collectors[self.PASS]
def fail_collectors(self) -> List[Collector]:
return self.collectors[self.FAIL]
def all_fail_events(self) -> Set[Any]:
"""Return all events observed in failing runs."""
return self.all_events(self.FAIL)
def all_pass_events(self) -> Set[Any]:
"""Return all events observed in passing runs."""
return self.all_events(self.PASS)
def only_fail_events(self) -> Set[Any]:
"""Return all events observed only in failing runs."""
return self.all_fail_events() - self.all_pass_events()
def only_pass_events(self) -> Set[Any]:
"""Return all events observed only in passing runs."""
return self.all_pass_events() - self.all_fail_events()
###Output
_____no_output_____
###Markdown
We can use `DifferenceDebugger` just as a `StatisticalDebugger`:
###Code
# ignore
T1 = TypeVar('T1', bound='DifferenceDebugger')
def test_debugger_html_simple(debugger: T1) -> T1:
with debugger.collect_pass():
remove_html_markup('abc')
with debugger.collect_pass():
remove_html_markup('<b>abc</b>')
with debugger.collect_fail():
remove_html_markup('"abc"')
return debugger
###Output
_____no_output_____
###Markdown
However, since the outcome of tests may not always be predetermined, we provide a simpler interface for tests that can fail (= raise an exception) or pass (not raise an exception).
###Code
class DifferenceDebugger(DifferenceDebugger):
def __enter__(self) -> Any:
"""Enter a `with` block. Collect coverage and outcome;
classify as FAIL if the block raises an exception,
and PASS if it does not.
"""
self.collector = self.collector_class()
self.collector.add_items_to_ignore([self.__class__])
self.collector.__enter__()
return self
def __exit__(self, exc_tp: Type, exc_value: BaseException,
exc_traceback: TracebackType) -> Optional[bool]:
"""Exit the `with` block."""
status = self.collector.__exit__(exc_tp, exc_value, exc_traceback)
if status is None:
pass
else:
return False # Internal error; re-raise exception
if exc_tp is None:
outcome = self.PASS
else:
outcome = self.FAIL
self.add_collector(outcome, self.collector)
return True # Ignore exception, if any
###Output
_____no_output_____
###Markdown
Using this interface, we can rewrite `test_debugger_html()`:
###Code
# ignore
T2 = TypeVar('T2', bound='DifferenceDebugger')
def test_debugger_html(debugger: T2) -> T2:
with debugger:
remove_html_markup('abc')
with debugger:
remove_html_markup('<b>abc</b>')
with debugger:
remove_html_markup('"abc"')
assert False # Mark test as failing
return debugger
test_debugger_html(DifferenceDebugger())
###Output
_____no_output_____
###Markdown
Analyzing EventsLet us now focus on _analyzing_ events collected. Since events come back as _sets_, we can compute _unions_ and _differences_ between these sets. For instance, we can compute which lines were executed in _any_ of the passing runs of `test_debugger_html()`, above:
###Code
debugger = test_debugger_html(DifferenceDebugger())
pass_1_events = debugger.pass_collectors()[0].events()
pass_2_events = debugger.pass_collectors()[1].events()
in_any_pass = pass_1_events | pass_2_events
in_any_pass
###Output
_____no_output_____
###Markdown
Likewise, we can determine which lines were _only_ executed in the failing run:
###Code
fail_events = debugger.fail_collectors()[0].events()
only_in_fail = fail_events - in_any_pass
only_in_fail
###Output
_____no_output_____
###Markdown
And we see that the "failing" run is characterized by processing quotes:
###Code
code_with_coverage(remove_html_markup, only_in_fail)
debugger = test_debugger_html(DifferenceDebugger())
debugger.all_events()
###Output
_____no_output_____
###Markdown
These are the lines executed only in the failing run:
###Code
debugger.only_fail_events()
###Output
_____no_output_____
###Markdown
These are the lines executed only in the passing runs:
###Code
debugger.only_pass_events()
###Output
_____no_output_____
###Markdown
Again, having these lines individually is neat, but things become much more interesting if we can see the associated code lines just as well. That's what we will do in the next section. Visualizing DifferencesTo show correlations of line coverage in context, we introduce a number of _visualization_ techniques that _highlight_ code with different colors. Discrete SpectrumThe first idea is to use a _discrete_ spectrum of three colors:* _red_ for code executed in failing runs only* _green_ for code executed in passing runs only* _yellow_ for code executed in both passing and failing runs.Code that is not executed stays unhighlighted. We first introduce an abstract class `SpectrumDebugger` that provides the essential functions. `suspiciousness()` returns a value between 0 and 1 indicating the suspiciousness of the given event - or `None` if unknown.
###Code
class SpectrumDebugger(DifferenceDebugger):
def suspiciousness(self, event: Any) -> Optional[float]:
"""
Return a suspiciousness value in the range [0, 1.0]
for the given event, or `None` if unknown.
To be overloaded in subclasses.
"""
return None
###Output
_____no_output_____
###Markdown
The `tooltip()` and `percentage()` methods convert the suspiciousness into a human-readable form.
###Code
class SpectrumDebugger(SpectrumDebugger):
def tooltip(self, event: Any) -> str:
"""
Return a tooltip for the given event (default: percentage).
To be overloaded in subclasses.
"""
return self.percentage(event)
def percentage(self, event: Any) -> str:
"""
Return the suspiciousness for the given event as percentage string.
"""
suspiciousness = self.suspiciousness(event)
if suspiciousness is not None:
return str(int(suspiciousness * 100)).rjust(3) + '%'
else:
return ' ' * len('100%')
###Output
_____no_output_____
###Markdown
The `code()` method takes a function and shows each of its source code lines using the given spectrum, using HTML markup:
###Code
class SpectrumDebugger(SpectrumDebugger):
def code(self, functions: Optional[Set[Callable]] = None, *,
color: bool = False, suspiciousness: bool = False,
line_numbers: bool = True) -> str:
"""
Return a listing of `functions` (default: covered functions).
If `color` is True, render as HTML, using suspiciousness colors.
If `suspiciousness` is True, include suspiciousness values.
If `line_numbers` is True (default), include line numbers.
"""
if not functions:
functions = self.covered_functions()
out = ""
seen = set()
for function in functions:
source_lines, starting_line_number = \
inspect.getsourcelines(function)
if (function.__name__, starting_line_number) in seen:
continue
seen.add((function.__name__, starting_line_number))
if out:
out += '\n'
if color:
out += '<p/>'
line_number = starting_line_number
for line in source_lines:
if color:
line = html.escape(line)
if line.strip() == '':
line = ' '
location = (function.__name__, line_number)
location_suspiciousness = self.suspiciousness(location)
if location_suspiciousness is not None:
tooltip = f"Line {line_number}: {self.tooltip(location)}"
else:
tooltip = f"Line {line_number}: not executed"
if suspiciousness:
line = self.percentage(location) + ' ' + line
if line_numbers:
line = str(line_number).rjust(4) + ' ' + line
line_color = self.color(location)
if color and line_color:
line = f'''<pre style="background-color:{line_color}"
title="{tooltip}">{line.rstrip()}</pre>'''
elif color:
line = f'<pre title="{tooltip}">{line}</pre>'
else:
line = line.rstrip()
out += line + '\n'
line_number += 1
return out
###Output
_____no_output_____
###Markdown
We introduce a few helper methods to visualize the code with colors in various forms.
###Code
class SpectrumDebugger(SpectrumDebugger):
def _repr_html_(self) -> str:
"""When output in Jupyter, visualize as HTML"""
return self.code(color=True)
def __str__(self) -> str:
"""Show code as string"""
return self.code(color=False, suspiciousness=True)
def __repr__(self) -> str:
"""Show code as string"""
return self.code(color=False, suspiciousness=True)
###Output
_____no_output_____
###Markdown
So far, however, central methods like `suspiciousness()` or `color()` were abstract – that is, to be defined in subclasses. Our `DiscreteSpectrumDebugger` subclass provides concrete implementations for these, with `color()` returning one of the three colors depending on the line number:
###Code
class DiscreteSpectrumDebugger(SpectrumDebugger):
"""Visualize differences between executions using three discrete colors"""
def suspiciousness(self, event: Any) -> Optional[float]:
"""
Return a suspiciousness value [0, 1.0]
for the given event, or `None` if unknown.
"""
passing = self.all_pass_events()
failing = self.all_fail_events()
if event in passing and event in failing:
return 0.5
elif event in failing:
return 1.0
elif event in passing:
return 0.0
else:
return None
def color(self, event: Any) -> Optional[str]:
"""
Return a HTML color for the given event.
"""
suspiciousness = self.suspiciousness(event)
if suspiciousness is None:
return None
if suspiciousness > 0.8:
return 'mistyrose'
if suspiciousness >= 0.5:
return 'lightyellow'
return 'honeydew'
def tooltip(self, event: Any) -> str:
"""Return a tooltip for the given event."""
passing = self.all_pass_events()
failing = self.all_fail_events()
if event in passing and event in failing:
return "in passing and failing runs"
elif event in failing:
return "only in failing runs"
elif event in passing:
return "only in passing runs"
else:
return "never"
###Output
_____no_output_____
###Markdown
This is what the `only_pass_events()` and `only_fail_events()` sets look like when visualized with the code. The "culprit" line is clearly highlighted:
###Code
debugger = test_debugger_html(DiscreteSpectrumDebugger())
debugger
###Output
_____no_output_____
###Markdown
We can clearly see that the failure is correlated with the presence of quotes in the input string (which is an important hint!). But does this also show us _immediately_ where the defect to be fixed is?
###Code
quiz("Does the line `quote = not quote` actually contain the defect?",
[
"Yes, it should be fixed",
"No, the defect is elsewhere"
], '164 * 2 % 326')
###Output
_____no_output_____
###Markdown
Indeed, it is the _governing condition_ that is wrong – that is, the condition that caused Line 12 to be executed in the first place. In order to fix a program, we have to find a location that1. _causes_ the failure (i.e., it can be changed to make the failure go away); and2. is a _defect_ (i.e., contains an error).In our example above, the highlighted code line is a _symptom_ for the error. To some extent, it is also a _cause_, since, say, commenting it out would also resolve the given failure, at the cost of causing other failures. However, the preceding condition also is a cause, as is the presence of quotes in the input.Only one of these also is a _defect_, though, and that is the preceding condition. Hence, while correlations can provide important hints, they do not necessarily locate defects. For those of us who may not have color HTML output ready, simply printing the debugger lists suspiciousness values as percentages.
###Code
print(debugger)
###Output
1 50% def remove_html_markup(s): # type: ignore
2 50% tag = False
3 50% quote = False
4 50% out = ""
5
6 50% for c in s:
7 50% if c == '<' and not quote:
8 0% tag = True
9 50% elif c == '>' and not quote:
10 0% tag = False
11 50% elif c == '"' or c == "'" and tag:
12 100% quote = not quote
13 50% elif not tag:
14 50% out = out + c
15
16 50% return out
###Markdown
Continuous SpectrumThe criterion that an event should _only_ occur in failing runs (and not in passing runs) can be too aggressive. In particular, if we have another run that executes the "culprit" lines, but does _not_ fail, our "only in fail" criterion will no longer be helpful. Here is an example. The input```html<b color="blue">text</b>```will trigger the "culprit" line```pythonquote = not quote```but actually produce an output where the tags are properly stripped:
###Code
remove_html_markup('<b color="blue">text</b>')
###Output
_____no_output_____
###Markdown
As a consequence, we no longer have lines that are being executed only in failing runs:
###Code
debugger = test_debugger_html(DiscreteSpectrumDebugger())
with debugger.collect_pass():
remove_html_markup('<b link="blue"></b>')
debugger.only_fail_events()
###Output
_____no_output_____
###Markdown
In our spectrum output, the effect now is that the "culprit" line is as yellow as all others.
###Code
debugger
###Output
_____no_output_____
###Markdown
We therefore introduce a different method for highlighting lines, based on their _relative_ occurrence with respect to all runs: If a line has been _mostly_ executed in failing runs, its color should shift towards red; if a line has been _mostly_ executed in passing runs, its color should shift towards green. This _continuous spectrum_ has been introduced by the seminal _Tarantula_ tool \cite{Jones2002}. In Tarantula, the color _hue_ for each line is defined as follows: $$\textit{color hue}(\textit{line}) = \textit{low color(red)} + \frac{\%\textit{passed}(\textit{line})}{\%\textit{passed}(\textit{line}) + \%\textit{failed}(\textit{line})} \times \textit{color range}$$ Here, `%passed` and `%failed` denote the percentage at which a line has been executed in passing and failing runs, respectively. A hue of 0.0 stands for red, a hue of 1.0 stands for green, and a hue of 0.5 stands for equal fractions of red and green, yielding yellow. We can implement these measures right away as methods in a new `ContinuousSpectrumDebugger` class:
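As a quick hand check of the fraction $\frac{\%\textit{passed}}{\%\textit{passed} + \%\textit{failed}}$ (the normalized hue, 0.0 = red, 1.0 = green), using the three `test_debugger_html()` runs from above: the "culprit" Line 12 is executed in the one failing run but in neither passing run, whereas the loop head in Line 6 is executed in every run. Hence $$\textit{hue}(\textit{Line 12}) = \frac{0/2}{0/2 + 1/1} = 0.0 \quad\text{(red)} \qquad \textit{hue}(\textit{Line 6}) = \frac{2/2}{2/2 + 1/1} = 0.5 \quad\text{(yellow)}$$ These are exactly the values the implementation below computes.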
###Code
class ContinuousSpectrumDebugger(DiscreteSpectrumDebugger):
"""Visualize differences between executions using a color spectrum"""
def collectors_with_event(self, event: Any, category: str) -> Set[Collector]:
"""
Return all collectors in a category
that observed the given event.
"""
all_runs = self.collectors[category]
collectors_with_event = set(collector for collector in all_runs
if event in collector.events())
return collectors_with_event
def collectors_without_event(self, event: Any, category: str) -> Set[Collector]:
"""
Return all collectors in a category
that did not observe the given event.
"""
all_runs = self.collectors[category]
collectors_without_event = set(collector for collector in all_runs
if event not in collector.events())
return collectors_without_event
def event_fraction(self, event: Any, category: str) -> float:
if category not in self.collectors:
return 0.0
all_collectors = self.collectors[category]
collectors_with_event = self.collectors_with_event(event, category)
fraction = len(collectors_with_event) / len(all_collectors)
# print(f"%{category}({event}) = {fraction}")
return fraction
def passed_fraction(self, event: Any) -> float:
return self.event_fraction(event, self.PASS)
def failed_fraction(self, event: Any) -> float:
return self.event_fraction(event, self.FAIL)
def hue(self, event: Any) -> Optional[float]:
"""Return a color hue from 0.0 (red) to 1.0 (green)."""
passed = self.passed_fraction(event)
failed = self.failed_fraction(event)
if passed + failed > 0:
return passed / (passed + failed)
else:
return None
###Output
_____no_output_____
###Markdown
Having a continuous hue also implies a continuous suspiciousness and associated tooltips:
###Code
class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger):
def suspiciousness(self, event: Any) -> Optional[float]:
hue = self.hue(event)
if hue is None:
return None
return 1 - hue
def tooltip(self, event: Any) -> str:
return self.percentage(event)
###Output
_____no_output_____
###Markdown
The hue for lines executed only in failing runs is (deep) red, as expected:
###Code
debugger = test_debugger_html(ContinuousSpectrumDebugger())
for location in debugger.only_fail_events():
print(location, debugger.hue(location))
###Output
('remove_html_markup', 12) 0.0
###Markdown
Likewise, the hue for lines executed in passing runs is (deep) green:
###Code
for location in debugger.only_pass_events():
print(location, debugger.hue(location))
###Output
('remove_html_markup', 8) 1.0
('remove_html_markup', 10) 1.0
###Markdown
The Tarantula tool not only sets the hue for a line, but also uses _brightness_ as a measure of support – that is, how often the line was executed at all. The brighter a line, the stronger the correlation with a passing or failing outcome. The brightness is defined as follows: $$\textit{brightness}(\textit{line}) = \max(\%\textit{passed}(\textit{line}), \%\textit{failed}(\textit{line}))$$ and it is easily implemented, too:
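(Hand check with the `remove_html_markup()` runs from above: Line 12 is executed in neither of the two passing runs but in the one failing run, so $\textit{brightness}(\textit{Line 12}) = \max(0, 1.0) = 1.0$ – full saturation – as the code below will confirm.)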
###Code
class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger):
def brightness(self, event: Any) -> float:
return max(self.passed_fraction(event), self.failed_fraction(event))
###Output
_____no_output_____
###Markdown
Our single "only in fail" line has a brightness of 1.0 (the maximum).
###Code
debugger = test_debugger_html(ContinuousSpectrumDebugger())
for location in debugger.only_fail_events():
print(location, debugger.brightness(location))
###Output
('remove_html_markup', 12) 1.0
###Markdown
With this, we can now define a color for each line. To this end, we override the (previously discrete) `color()` method such that it returns a color specification giving hue and brightness. We use the HTML format `hsl(hue, saturation, lightness)` where the hue is given as a value between 0 and 360 (0 is red, 120 is green) and saturation and lightness are provided as percentages.
###Code
class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger):
def color(self, event: Any) -> Optional[str]:
hue = self.hue(event)
if hue is None:
return None
saturation = self.brightness(event)
# HSL color values are specified with:
# hsl(hue, saturation, lightness).
return f"hsl({hue * 120}, {saturation * 100}%, 80%)"
debugger = test_debugger_html(ContinuousSpectrumDebugger())
###Output
_____no_output_____
###Markdown
Lines executed only in failing runs are still shown in red:
###Code
for location in debugger.only_fail_events():
print(location, debugger.color(location))
###Output
('remove_html_markup', 12) hsl(0.0, 100.0%, 80%)
###Markdown
... whereas lines executed only in passing runs are still shown in green:
###Code
for location in debugger.only_pass_events():
print(location, debugger.color(location))
debugger
###Output
_____no_output_____
###Markdown
What happens with our `quote = not quote` "culprit" line if it is executed in passing runs, too?
###Code
with debugger.collect_pass():
out = remove_html_markup('<b link="blue"></b>')
quiz('In which color will the `quote = not quote` "culprit" line '
'be shown after executing the above code?',
[
'<span style="background-color: hsl(120.0, 50.0%, 80%)">Green</span>',
'<span style="background-color: hsl(60.0, 100.0%, 80%)">Yellow</span>',
'<span style="background-color: hsl(30.0, 100.0%, 80%)">Orange</span>',
'<span style="background-color: hsl(0.0, 100.0%, 80%)">Red</span>'
], '999 // 333')
###Output
_____no_output_____
###Markdown
We see that it still is shown with an orange-red tint.
###Code
debugger
###Output
_____no_output_____
###Markdown
Here's another example, coming right from the Tarantula paper. The `middle()` function takes three numbers `x`, `y`, and `z`, and returns the one that is neither the minimum nor the maximum of the three:
###Code
def middle(x, y, z): # type: ignore
if y < z:
if x < y:
return y
elif x < z:
return y
else:
if x > y:
return y
elif x > z:
return x
return z
middle(1, 2, 3)
###Output
_____no_output_____
###Markdown
Unfortunately, `middle()` can fail:
###Code
middle(2, 1, 3)
###Output
_____no_output_____
###Markdown
Let us see whether we can find the bug with a few additional test cases:
###Code
# ignore
T3 = TypeVar('T3', bound='DifferenceDebugger')
def test_debugger_middle(debugger: T3) -> T3:
with debugger.collect_pass():
middle(3, 3, 5)
with debugger.collect_pass():
middle(1, 2, 3)
with debugger.collect_pass():
middle(3, 2, 1)
with debugger.collect_pass():
middle(5, 5, 5)
with debugger.collect_pass():
middle(5, 3, 4)
with debugger.collect_fail():
middle(2, 1, 3)
return debugger
###Output
_____no_output_____
###Markdown
Note that in order to collect data from multiple function invocations, you need to have a separate `with` clause for every invocation. The following will _not_ work correctly:```python with debugger.collect_pass(): middle(3, 3, 5) middle(1, 2, 3) ...```
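For contrast, the working pattern – exactly what `test_debugger_middle()` above does – uses one `with` block per invocation (given a `debugger` such as the one created below):

```python
# One `with` block per call, so that each invocation is recorded
# by its own collector (mirrors test_debugger_middle() above).
with debugger.collect_pass():
    middle(3, 3, 5)
with debugger.collect_pass():
    middle(1, 2, 3)
```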
###Code
debugger = test_debugger_middle(ContinuousSpectrumDebugger())
debugger.event_table(args=True)
###Output
_____no_output_____
###Markdown
Here comes the visualization. We see that the `return y` line is the culprit here – and actually also the one to be fixed.
###Code
debugger
quiz("Which of the above lines should be fixed?",
[
'<span style="background-color: hsl(45.0, 100%, 80%)">Line 3: `if x < y`</span>',
'<span style="background-color: hsl(34.28571428571429, 100.0%, 80%)">Line 5: `elif x < z`</span>',
'<span style="background-color: hsl(20.000000000000004, 100.0%, 80%)">Line 6: `return y`</span>',
'<span style="background-color: hsl(120.0, 20.0%, 80%)">Line 9: `return y`</span>',
], r'len(" middle ".strip()[:3])')
###Output
_____no_output_____
###Markdown
Indeed, in the `middle()` example, the "reddest" line is also the one to be fixed. Here is the fixed version:
###Code
def middle_fixed(x, y, z): # type: ignore
if y < z:
if x < y:
return y
elif x < z:
return x
else:
if x > y:
return y
elif x > z:
return x
return z
middle_fixed(2, 1, 3)
###Output
_____no_output_____
###Markdown
Ranking Lines by SuspiciousnessIn a large program, there can be several locations (and events) that could be flagged as suspicious. It suffices that some large code block of, say, 1,000 lines is mostly executed in failing runs for all of this code block to be visualized in some shade of red. To further highlight the "most suspicious" events, one idea is to use a _ranking_ – that is, a list of events in which those most correlated with failures are shown at the top. The programmer would then examine these events one by one and proceed down the list. We will show how this works for two "correlation" metrics – first the _Tarantula_ metric, as introduced above, and then the _Ochiai_ metric, which has been shown to be one of the best "ranking" metrics. We introduce a base class `RankingDebugger` with an abstract method `suspiciousness()` to be overloaded in subclasses. The method `rank()` returns a list of all events observed, sorted by suspiciousness, highest first.
###Code
class RankingDebugger(DiscreteSpectrumDebugger):
"""Rank events by their suspiciousness"""
def rank(self) -> List[Any]:
"""Return a list of events, sorted by suspiciousness, highest first."""
def susp(event: Any) -> float:
suspiciousness = self.suspiciousness(event)
assert suspiciousness is not None
return suspiciousness
events = list(self.all_events())
events.sort(key=susp, reverse=True)
return events
def __repr__(self) -> str:
return repr(self.rank())
###Output
_____no_output_____
###Markdown
The Tarantula MetricWe can use the Tarantula metric to sort lines according to their suspiciousness. The "redder" a line (a hue of 0.0), the more suspicious it is. We can simply define $$\textit{suspiciousness}_\textit{tarantula}(\textit{event}) = 1 - \textit{color hue}(\textit{event})$$ where $\textit{color hue}$ is as defined above. This is exactly the `suspiciousness()` function as already implemented in our `ContinuousSpectrumDebugger`. We introduce the `TarantulaDebugger` class, inheriting the visualization and suspiciousness capabilities from the `ContinuousSpectrumDebugger` class as well as the ranking features from the `RankingDebugger` class.
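As a quick sanity check, using the values observed above: the culprit line was shown as `hsl(0.0, 100.0%, 80%)`, i.e. a hue of 0, so its suspiciousness is $1 - 0 = 1.0$; a line executed only in passing runs has a hue of 1 and thus a suspiciousness of $1 - 1 = 0.0$.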
###Code
class TarantulaDebugger(ContinuousSpectrumDebugger, RankingDebugger):
"""Spectrum-based Debugger using the Tarantula metric for suspiciousness"""
pass
###Output
_____no_output_____
###Markdown
Let us list `remove_html_markup()` with highlighted lines again:
###Code
tarantula_html = test_debugger_html(TarantulaDebugger())
tarantula_html
###Output
_____no_output_____
###Markdown
Here's our ranking of lines, from most suspicious to least suspicious:
###Code
tarantula_html.rank()
tarantula_html.suspiciousness(tarantula_html.rank()[0])
###Output
_____no_output_____
###Markdown
We see that the first line in the list is indeed the most suspicious; the two "green" lines come at the very end. For the `middle()` function, we also obtain a ranking from "reddest" to "greenest".
###Code
tarantula_middle = test_debugger_middle(TarantulaDebugger())
tarantula_middle
tarantula_middle.rank()
tarantula_middle.suspiciousness(tarantula_middle.rank()[0])
###Output
_____no_output_____
###Markdown
The Ochiai MetricThe _Ochiai_ Metric \cite{Ochiai1957}, first introduced in the biology domain \cite{daSilvaMeyer2004} and later applied for fault localization by Abreu et al. \cite{Abreu2009}, is defined as follows: $$\textit{suspiciousness}_\textit{ochiai} = \frac{\textit{failed}(\textit{event})}{\sqrt{\bigl(\textit{failed}(\textit{event}) + \textit{not-in-failed}(\textit{event})\bigr)\times\bigl(\textit{failed}(\textit{event}) + \textit{passed}(\textit{event})\bigr)}}$$ where* $\textit{failed}(\textit{event})$ is the number of times the event occurred in _failing_ runs* $\textit{not-in-failed}(\textit{event})$ is the number of times the event did _not_ occur in failing runs* $\textit{passed}(\textit{event})$ is the number of times the event occurred in _passing_ runs.We can easily implement this formula:
###Code
import math
class OchiaiDebugger(ContinuousSpectrumDebugger, RankingDebugger):
"""Spectrum-based Debugger using the Ochiai metric for suspiciousness"""
def suspiciousness(self, event: Any) -> Optional[float]:
failed = len(self.collectors_with_event(event, self.FAIL))
not_in_failed = len(self.collectors_without_event(event, self.FAIL))
passed = len(self.collectors_with_event(event, self.PASS))
try:
return failed / math.sqrt((failed + not_in_failed) * (failed + passed))
except ZeroDivisionError:
return None
def hue(self, event: Any) -> Optional[float]:
suspiciousness = self.suspiciousness(event)
if suspiciousness is None:
return None
return 1 - suspiciousness
###Output
_____no_output_____
###Markdown
Applied on the `remove_html_markup()` function, the individual suspiciousness scores differ from Tarantula. However, we obtain a very similar visualization, and the same ranking.
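To see where the numbers come from: the culprit line is executed in the one failing run and in neither of the two passing runs, so $\textit{failed} = 1$, $\textit{not-in-failed} = 0$, and $\textit{passed} = 0$, giving a suspiciousness of $1/\sqrt{(1+0)\times(1+0)} = 1.0$. A line executed in all three runs has $\textit{failed} = 1$, $\textit{not-in-failed} = 0$, and $\textit{passed} = 2$, giving $1/\sqrt{1 \times 3} \approx 0.58$ rather than the 0.5 that Tarantula assigns it – which is why the individual scores differ while the ranking stays the same.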
###Code
ochiai_html = test_debugger_html(OchiaiDebugger())
ochiai_html
ochiai_html.rank()
ochiai_html.suspiciousness(ochiai_html.rank()[0])
###Output
_____no_output_____
###Markdown
The same observations also apply for the `middle()` function.
###Code
ochiai_middle = test_debugger_middle(OchiaiDebugger())
ochiai_middle
ochiai_middle.rank()
ochiai_middle.suspiciousness(ochiai_middle.rank()[0])
###Output
_____no_output_____
###Markdown
How Useful is Ranking?So, which metric is better? The standard method to evaluate such rankings is to determine a _ground truth_ – that is, the set of locations that are eventually fixed – and to check at which point in the ranking any such location occurs – the earlier, the better. In our `remove_html_markup()` and `middle()` examples, both the Tarantula and the Ochiai metric perform flawlessly, as the "culprit" line is always ranked at the top. However, this need not always be the case; the exact performance depends on the nature of the code and the observed runs. (Also, the question of whether there always is exactly one possible location where the program can be fixed is open for discussion.) You may be surprised to learn that, over time, _several dozen_ metrics have been proposed \cite{Wong2016}, each performing somewhat better or somewhat worse depending on which benchmark they were applied on. The two metrics discussed above each have their merits – the Tarantula metric was among the first such metrics, and the Ochiai metric has generally been shown to be among the most effective ones \cite{Abreu2009}. While rankings can be easily _evaluated_, it is not necessarily clear whether and how much they serve programmers. As stated above, the assumption of rankings is that developers examine one potentially defective statement after another until they find the actually defective one. However, in a series of human studies with developers, Parnin and Orso \cite{Parnin2011} found that this assumption may not hold:> It is unclear whether developers can actually determine the faulty nature of a statement by simply looking at it, without any additional information (e.g., the state of the program when the statement was executed or the statements that were executed before or after that one).In their study, they found that rankings could help developers complete a task faster, but this effect was limited to experienced developers and simpler code. Artificially changing the rank of faulty statements had little to no effect, implying that developers would not strictly follow the ranked list of statements, but rather search through the code to understand it. At this point, a _visualization_ as in the Tarantula tool can be helpful to programmers as it _guides_ the search, but a _ranking_ that _defines_ where to search may be less useful. Having said that, ranking has its merits – notably when it comes to informing _automated_ debugging techniques. In the [chapter on program repair](Repairer.ipynb), we will see how ranked lists of potentially faulty statements tell automated repair techniques where to try to repair the program first. And once such a repair is successful, we have a very strong indication of where and how the program could be fixed! Using Large Test Suites In fault localization, the larger and the more thorough the test suite, the higher the precision. Let us try out what happens if we extend the `middle()` test suite with additional test cases. The function `middle_testcase()` returns a random input for `middle()`:
###Code
import random
def middle_testcase() -> Tuple[int, int, int]:
x = random.randrange(10)
y = random.randrange(10)
z = random.randrange(10)
return x, y, z
[middle_testcase() for i in range(5)]
###Output
_____no_output_____
###Markdown
The function `middle_test()` simply checks whether `middle()` operates correctly – by placing `x`, `y`, and `z` in a list, sorting it, and comparing the return value of `middle()` against the middle element of the sorted list. If `middle()` returns a wrong value, `middle_test()` raises an exception.
###Code
def middle_test(x: int, y: int, z: int) -> None:
m = middle(x, y, z)
assert m == sorted([x, y, z])[1]
middle_test(4, 5, 6)
from ExpectError import ExpectError
with ExpectError():
middle_test(2, 1, 3)
###Output
Traceback (most recent call last):
File "/var/folders/n2/xd9445p97rb3xh7m1dfx8_4h0006ts/T/ipykernel_57283/3661663124.py", line 2, in <module>
middle_test(2, 1, 3)
File "/var/folders/n2/xd9445p97rb3xh7m1dfx8_4h0006ts/T/ipykernel_57283/40742806.py", line 3, in middle_test
assert m == sorted([x, y, z])[1]
AssertionError (expected)
###Markdown
The function `middle_passing_testcase()` searches and returns a triple `x`, `y`, `z` that causes `middle_test()` to pass.
###Code
def middle_passing_testcase() -> Tuple[int, int, int]:
while True:
try:
x, y, z = middle_testcase()
middle_test(x, y, z)
return x, y, z
except AssertionError:
pass
(x, y, z) = middle_passing_testcase()
m = middle(x, y, z)
print(f"middle({x}, {y}, {z}) = {m}")
###Output
middle(1, 6, 1) = 1
###Markdown
The function `middle_failing_testcase()` does the same; but its triple `x`, `y`, `z` causes `middle_test()` to fail.
###Code
def middle_failing_testcase() -> Tuple[int, int, int]:
while True:
try:
x, y, z = middle_testcase()
middle_test(x, y, z)
except AssertionError:
return x, y, z
(x, y, z) = middle_failing_testcase()
m = middle(x, y, z)
print(f"middle({x}, {y}, {z}) = {m}")
###Output
middle(5, 2, 6) = 2
###Markdown
With these, we can define two sets of test cases, each with 100 inputs.
###Code
MIDDLE_TESTS = 100
MIDDLE_PASSING_TESTCASES = [middle_passing_testcase()
for i in range(MIDDLE_TESTS)]
MIDDLE_FAILING_TESTCASES = [middle_failing_testcase()
for i in range(MIDDLE_TESTS)]
###Output
_____no_output_____
###Markdown
Let us run the `OchiaiDebugger` with these two test sets.
###Code
ochiai_middle = OchiaiDebugger()
for x, y, z in MIDDLE_PASSING_TESTCASES:
with ochiai_middle.collect_pass():
middle(x, y, z)
for x, y, z in MIDDLE_FAILING_TESTCASES:
with ochiai_middle.collect_fail():
middle(x, y, z)
ochiai_middle
###Output
_____no_output_____
###Markdown
We see that the "culprit" line is still the most likely to be fixed, but the two conditions leading to the error (`x < y` and `x < z`) are also listed as potentially faulty. That is because the error might also be fixed be changing these conditions – although this would result in a more complex fix. Other Events besides CoverageWe close this chapter with two directions for further thought. If you wondered why in the above code, we were mostly talking about `events` rather than lines covered, that is because our framework allows for tracking arbitrary events, not just coverage. In fact, any data item a collector can extract from the execution can be used for correlation analysis. (It may not be so easily visualized, though.) Here's an example. We define a `ValueCollector` class that collects pairs of (local) variables and their values during execution. Its `events()` method then returns the set of all these pairs.
###Code
class ValueCollector(Collector):
""""A class to collect local variables and their values."""
def __init__(self) -> None:
"""Constructor."""
super().__init__()
self.vars: Set[str] = set()
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
local_vars = frame.f_locals
for var in local_vars:
value = local_vars[var]
self.vars.add(f"{var} = {repr(value)}")
def events(self) -> Set[str]:
"""A set of (variable, value) pairs observed"""
return self.vars
###Output
_____no_output_____
###Markdown
If we apply this collector on our set of HTML test cases, these are all the events that we obtain – essentially all variables and all values ever seen:
###Code
debugger = test_debugger_html(ContinuousSpectrumDebugger(ValueCollector))
for event in debugger.all_events():
print(event)
###Output
tag = True
c = '/'
s = 'abc'
s = '<b>abc</b>'
c = 'a'
quote = True
c = '"'
c = 'c'
s = '"abc"'
c = 'b'
tag = False
out = 'a'
c = '>'
out = 'ab'
out = 'abc'
c = '<'
quote = False
out = ''
###Markdown
However, some of these events only occur in the failing run:
###Code
for event in debugger.only_fail_events():
print(event)
###Output
c = '"'
s = '"abc"'
quote = True
###Markdown
Some of these differences are spurious – the string `"abc"` (with quotes) only occurs in the failing run – but others, such as `quote` being True and `c` containing a single quote are actually relevant for explaining when the failure comes to be. We can even visualize the suspiciousness of the individual events, setting the (so far undiscussed) `color` flag for producing an event table:
###Code
debugger.event_table(color=True, args=True)
###Output
_____no_output_____
###Markdown
There are many ways one can continue from here.* Rather than checking for concrete values, one could check for more _abstract properties_, for instance – what is the sign of the value? What is the length of the string? * One could check for specifics of the _control flow_ – is the loop taken? How many times?* One could check for specifics of the _information flow_ – which values flow from one variable to another?There are lots of properties that all could be related to failures – and if we happen to check for the right one, we may obtain a much crisper definition of what causes the failure. We will come up with more ideas on properties to check when it comes to [mining specifications](SpecificationMining.ipynb). Training ClassifiersThe metrics we have discussed so far are pretty _generic_ – that is, they are fixed no matter how the actual event space is structured. The field of _machine learning_ has come up with techniques that learn _classifiers_ from a given set of data – classifiers that are trained from labeled data and then can predict labels for new data sets. In our case, the labels are test outcomes (PASS and FAIL), whereas the data would be features of the events observed. A classifier by itself is not immediately useful for debugging (although it could predict whether future inputs will fail or not). Some classifiers, however, have great _diagnostic_ quality; that is, they can _explain_ how their classification comes to be. [Decision trees](https://scikit-learn.org/stable/modules/tree.html) fall into this very category. A decision tree contains a number of _nodes_, each one associated with a predicate. Depending on whether the predicate is true or false, we follow the given "true" or "false" branch to end up in the next node, which again contains a predicate. Eventually, we end up in the outcome predicted by the tree. The neat thing is that the node predicates actually give important hints on the circumstances that are _most relevant_ for deciding the outcome. Let us illustrate this with an example. We build a class `ClassifyingDebugger` that trains a decision tree from the events collected. To this end, we need to set up our input data such that it can be fed into a classifier. We start with identifying our _samples_ (runs) and the respective _labels_ (outcomes). All values have to be encoded into numerical values.
###Code
class ClassifyingDebugger(DifferenceDebugger):
"""A debugger implementing a decision tree for events"""
PASS_VALUE = +1.0
FAIL_VALUE = -1.0
def samples(self) -> Dict[str, float]:
samples = {}
for collector in self.pass_collectors():
samples[collector.id()] = self.PASS_VALUE
        for collector in self.fail_collectors():
samples[collector.id()] = self.FAIL_VALUE
return samples
debugger = test_debugger_html(ClassifyingDebugger())
debugger.samples()
###Output
_____no_output_____
###Markdown
Next, we identify the _features_, which in our case is the set of lines executed in each sample:
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def features(self) -> Dict[str, Any]:
features = {}
        for collector in self.pass_collectors():
features[collector.id()] = collector.events()
        for collector in self.fail_collectors():
features[collector.id()] = collector.events()
return features
debugger = test_debugger_html(ClassifyingDebugger())
debugger.features()
###Output
_____no_output_____
###Markdown
All our features have names, which must be strings.
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def feature_names(self) -> List[str]:
return [repr(feature) for feature in self.all_events()]
debugger = test_debugger_html(ClassifyingDebugger())
debugger.feature_names()
###Output
_____no_output_____
###Markdown
Next, we define the _shape_ for an individual sample: a list with one value per feature, namely +1 if the line was covered in that sample and -1 if not.
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def shape(self, sample: str) -> List[float]:
x = []
features = self.features()
for f in self.all_events():
if f in features[sample]:
x += [+1.0]
else:
x += [-1.0]
return x
debugger = test_debugger_html(ClassifyingDebugger())
debugger.shape("remove_html_markup(s='abc')")
###Output
_____no_output_____
###Markdown
Our input X for the classifier now is a list of such shapes, one for each sample.
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def X(self) -> List[List[float]]:
X = []
samples = self.samples()
for key in samples:
X += [self.shape(key)]
return X
debugger = test_debugger_html(ClassifyingDebugger())
debugger.X()
###Output
_____no_output_____
###Markdown
Our input Y for the classifier, in contrast, is the list of labels, again indexed by sample.
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def Y(self) -> List[float]:
Y = []
samples = self.samples()
for key in samples:
Y += [samples[key]]
return Y
debugger = test_debugger_html(ClassifyingDebugger())
debugger.Y()
###Output
_____no_output_____
###Markdown
We now have all our data ready to be fit into a tree classifier. The method `classifier()` creates and returns the (tree) classifier for the observed runs.
###Code
from sklearn.tree import DecisionTreeClassifier, export_text, export_graphviz
class ClassifyingDebugger(ClassifyingDebugger):
def classifier(self) -> DecisionTreeClassifier:
classifier = DecisionTreeClassifier()
classifier = classifier.fit(self.X(), self.Y())
return classifier
###Output
_____no_output_____
###Markdown
We define a special method to show classifiers:
###Code
import graphviz
class ClassifyingDebugger(ClassifyingDebugger):
def show_classifier(self, classifier: DecisionTreeClassifier) -> Any:
dot_data = export_graphviz(classifier, out_file=None,
filled=False, rounded=True,
feature_names=self.feature_names(),
class_names=["FAIL", "PASS"],
label='none',
node_ids=False,
impurity=False,
proportion=True,
special_characters=True)
return graphviz.Source(dot_data)
###Output
_____no_output_____
###Markdown
This is the tree we get for our `remove_html_markup()` tests. The top predicate is whether the "culprit" line was executed (-1 means no, +1 means yes). If not (-1), the outcome is PASS. Otherwise, the outcome is FAIL.
###Code
debugger = test_debugger_html(ClassifyingDebugger())
classifier = debugger.classifier()
debugger.show_classifier(classifier)
###Output
_____no_output_____
###Markdown
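Besides the rendered tree above, scikit-learn can also print a decision tree as plain text. Here is a small sketch using the `export_text()` helper that was imported together with `DecisionTreeClassifier` (the exact output format depends on the scikit-learn version):

```python
# Plain-text view of the same decision tree (sketch; output format may vary)
print(export_text(classifier, feature_names=debugger.feature_names()))
```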
We can even use our classifier to predict the outcome of additional runs. If, for instance, we execute all lines except for, say, Lines 7, 9, and 11, our tree classifier would predict failure – because the "culprit" Line 12 is executed.
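The hard-coded vector in the next cell mirrors the +1/-1 encoding of `shape()`. As a hedged sketch (an assumption, not part of the original text), one could also construct such a vector programmatically; this relies on `all_events()` yielding the same feature order here as it did when the classifier was trained, which holds within a single session:

```python
# Sketch (assumption): a run that executes every observed line except 7, 9, and 11
events = list(debugger.all_events())      # events are pairs like ('remove_html_markup', 12)
skipped = {7, 9, 11}
vector = [+1.0 if lineno not in skipped else -1.0
          for (name, lineno) in events]
classifier.predict([vector])              # -1.0, i.e. FAIL, since Line 12 counts as executed
```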
###Code
classifier.predict([[1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1]])
###Output
_____no_output_____
###Markdown
Again, there are many ways to continue from here. Which events should we train the classifier from? How do classifiers compare in their performance and diagnostic quality? There are lots of possibilities left to explore, and we are only beginning to realize the potential for automated debugging. SynopsisThis chapter introduces classes and techniques for _statistical debugging_ – that is, correlating specific events, such as lines covered, with passing and failing outcomes.To make use of the code in this chapter, use one of the provided `StatisticalDebugger` subclasses such as `TarantulaDebugger` or `OchiaiDebugger`. Both are instantiated with a `Collector` denoting the type of events you want to correlate outcomes with. The default is `CoverageCollector`, collecting line coverage. Collecting Events from CallsTo collect events from calls that are labeled manually, use
###Code
debugger = TarantulaDebugger()
with debugger.collect_pass():
remove_html_markup("abc")
with debugger.collect_pass():
remove_html_markup('<b>abc</b>')
with debugger.collect_fail():
remove_html_markup('"abc"')
###Output
_____no_output_____
###Markdown
Within each `with` block, the _first function call_ is collected and tracked for coverage. (Note that _only_ the first call is tracked.) Collecting Events from TestsTo collect events from _tests_ that use exceptions to indicate failure, use the simpler `with` form:
###Code
debugger = TarantulaDebugger()
with debugger:
remove_html_markup("abc")
with debugger:
remove_html_markup('<b>abc</b>')
with debugger:
remove_html_markup('"abc"')
assert False # raise an exception
###Output
_____no_output_____
###Markdown
`with` blocks that raise an exception will be classified as failing, blocks that do not will be classified as passing. Note that exceptions raised are "swallowed" by the debugger. Visualizing Events as a TableAfter collecting events, you can print out the observed events – in this case, line numbers – in a table, showing in which runs they occurred (`X`), and with colors highlighting the suspiciousness of the event. A "red" event means that the event predominantly occurs in failing runs.
###Code
debugger.event_table(args=True, color=True)
###Output
_____no_output_____
###Markdown
Visualizing Suspicious CodeIf you collected coverage with `CoverageCollector`, you can also visualize the code with similar colors, highlighting suspicious lines:
###Code
debugger
###Output
_____no_output_____
###Markdown
Ranking EventsThe method `rank()` returns a ranked list of events, starting with the most suspicious. This is useful for automated techniques that need potential defect locations.
###Code
debugger.rank()
###Output
_____no_output_____
###Markdown
Classes and MethodsHere are all classes defined in this chapter:
###Code
# ignore
from ClassDiagram import display_class_hierarchy
# ignore
display_class_hierarchy([TarantulaDebugger, OchiaiDebugger],
abstract_classes=[
StatisticalDebugger,
DifferenceDebugger,
RankingDebugger
],
public_methods=[
StatisticalDebugger.__init__,
StatisticalDebugger.all_events,
StatisticalDebugger.event_table,
StatisticalDebugger.function,
StatisticalDebugger.coverage,
StatisticalDebugger.covered_functions,
DifferenceDebugger.__enter__,
DifferenceDebugger.__exit__,
DifferenceDebugger.all_pass_events,
DifferenceDebugger.all_fail_events,
DifferenceDebugger.collect_pass,
DifferenceDebugger.collect_fail,
DifferenceDebugger.only_pass_events,
DifferenceDebugger.only_fail_events,
SpectrumDebugger.code,
SpectrumDebugger.__repr__,
SpectrumDebugger.__str__,
SpectrumDebugger._repr_html_,
ContinuousSpectrumDebugger.code,
ContinuousSpectrumDebugger.__repr__,
RankingDebugger.rank
],
project='debuggingbook')
# ignore
display_class_hierarchy([CoverageCollector, ValueCollector],
public_methods=[
Tracer.__init__,
Tracer.__enter__,
Tracer.__exit__,
Tracer.changed_vars, # type: ignore
Collector.__init__,
Collector.__repr__,
Collector.function,
Collector.args,
Collector.argstring,
Collector.exception,
Collector.id,
Collector.collect,
CoverageCollector.coverage,
CoverageCollector.covered_functions,
CoverageCollector.events,
ValueCollector.__init__,
ValueCollector.events
],
project='debuggingbook')
###Output
_____no_output_____
###Markdown
Lessons Learned* _Correlations_ between execution events and outcomes (pass/fail) can provide important hints for debugging* Events occurring only (or mostly) during failing runs can be _highlighted_ and _ranked_ to guide the search* Important hints include whether the _execution of specific code locations_ correlates with failure Next StepsChapters that build on this one include* [how to determine invariants that correlate with failures](DynamicInvariants.ipynb)* [how to automatically repair programs](Repairer.ipynb) BackgroundThe seminal works on statistical debugging are two papers:* "Visualization of Test Information to Assist Fault Localization" \cite{Jones2002} by James Jones, Mary Jean Harrold, and John Stasko, introducing Tarantula and its visualization. The paper won an ACM SIGSOFT 10-year impact award.* "Bug Isolation via Remote Program Sampling" \cite{Liblit2003} by Ben Liblit, Alex Aiken, Alice X. Zheng, and Michael I. Jordan, introducing the term "Statistical debugging". Liblit won the ACM Doctoral Dissertation Award for this work.The Ochiai metric for fault localization was introduced by \cite{Abreu2009}. The survey by Wong et al. \cite{Wong2016} gives a comprehensive overview of the field of statistical fault localization.The study by Parnin and Orso \cite{Parnin2011} is a must-read to understand the limitations of the technique. Exercises Exercise 1: A Postcondition for MiddleWhat would be a postcondition for `middle()`? How can you check it? **Solution.** A simple postcondition for `middle()` would be```pythonassert m == sorted([x, y, z])[1]```where `m` is the value returned by `middle()`. `sorted()` sorts the given list, and the index `[1]` returns, well, the middle element. (This might also be a much shorter, but possibly slightly more expensive, implementation of `middle()`.) Since `middle()` has several `return` statements, the easiest way to check the result is to create a wrapper around `middle()`:
###Code
def middle_checked(x, y, z): # type: ignore
m = middle(x, y, z)
assert m == sorted([x, y, z])[1]
return m
###Output
_____no_output_____
###Markdown
`middle_checked()` catches the error:
###Code
from ExpectError import ExpectError
with ExpectError():
m = middle_checked(2, 1, 3)
###Output
Traceback (most recent call last):
File "/var/folders/n2/xd9445p97rb3xh7m1dfx8_4h0006ts/T/ipykernel_57283/3016629944.py", line 2, in <module>
m = middle_checked(2, 1, 3)
File "/var/folders/n2/xd9445p97rb3xh7m1dfx8_4h0006ts/T/ipykernel_57283/1374660292.py", line 3, in middle_checked
assert m == sorted([x, y, z])[1]
AssertionError (expected)
###Markdown
Statistical DebuggingIn this chapter, we introduce _statistical debugging_ – the idea that specific events during execution could be _statistically correlated_ with failures. We start with coverage of individual lines and then proceed towards further execution features.
###Code
from bookutils import YouTubeVideo
YouTubeVideo("UNuso00zYiI")
###Output
_____no_output_____
###Markdown
**Prerequisites*** You should have read the [chapter on tracing executions](Tracer.ipynb).
###Code
import bookutils
###Output
_____no_output_____
###Markdown
SynopsisTo [use the code provided in this chapter](Importing.ipynb), write```python>>> from debuggingbook.StatisticalDebugger import ```and then make use of the following features.This chapter introduces classes and techniques for _statistical debugging_ – that is, correlating specific events, such as lines covered, with passing and failing outcomes.To make use of the code in this chapter, use one of the provided `StatisticalDebugger` subclasses such as `TarantulaDebugger` or `OchiaiDebugger`. Both are instantiated with a `Collector` denoting the type of events you want to correlate outcomes with. The default is `CoverageCollector`, collecting line coverage. Collecting Events from CallsTo collect events from calls that are labeled manually, use```python>>> debugger = TarantulaDebugger()>>> with debugger.collect_pass():>>> remove_html_markup("abc")>>> with debugger.collect_pass():>>> remove_html_markup('<b>abc</b>')>>> with debugger.collect_fail():>>> remove_html_markup('"abc"')```Within each `with` block, the _first function call_ is collected and tracked for coverage. (Note that _only_ the first call is tracked.) Collecting Events from TestsTo collect events from _tests_ that use exceptions to indicate failure, use the simpler `with` form:```python>>> debugger = TarantulaDebugger()>>> with debugger:>>> remove_html_markup("abc")>>> with debugger:>>> remove_html_markup('<b>abc</b>')>>> with debugger:>>> remove_html_markup('"abc"')>>> assert False  # raise an exception````with` blocks that raise an exception will be classified as failing, blocks that do not will be classified as passing. Note that exceptions raised are "swallowed" by the debugger. Visualizing Events as a TableAfter collecting events, you can print out the observed events – in this case, line numbers – in a table, showing in which runs they occurred (`X`), and with colors highlighting the suspiciousness of the event. 
A "red" event means that the event predominantly occurs in failing runs.```python>>> debugger.event_table(args=True, color=True)```| `remove_html_markup` | `s='abc'` | `s='abc'` | `s='"abc"'` | | --------------------- | ---- | ---- | ---- | | remove_html_markup:1 | X | X | X | | remove_html_markup:2 | X | X | X | | remove_html_markup:3 | X | X | X | | remove_html_markup:4 | X | X | X | | remove_html_markup:6 | X | X | X | | remove_html_markup:7 | X | X | X | | remove_html_markup:8 | - | X | - | | remove_html_markup:9 | X | X | X | | remove_html_markup:10 | - | X | - | | remove_html_markup:11 | X | X | X | | remove_html_markup:12 | - | - | X | | remove_html_markup:13 | X | X | X | | remove_html_markup:14 | X | X | X | | remove_html_markup:16 | X | X | X | Visualizing Suspicious CodeIf you collected coverage with `CoverageCollector`, you can also visualize the code with similar colors, highlighting suspicious lines:```python>>> debugger```<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 1: 50%"> 1 def remove_html_markup(s): type: ignore<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 2: 50%"> 2 tag = False<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 3: 50%"> 3 quote = False<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 4: 50%"> 4 out = "" 5 <pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 6: 50%"> 6 for c in s:<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 7: 50%"> 7 if c == &x27;<&x27; and not quote:<pre style="background-color:hsl(120.0, 50.0%, 80%)" title="Line 8: 0%"> 8 tag = True<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 9: 50%"> 9 elif c == &x27;>&x27; and not quote:<pre style="background-color:hsl(120.0, 50.0%, 80%)" title="Line 10: 0%"> 10 tag = False<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 11: 50%"> 11 elif c == &x27;"&x27; or c == "&x27;" and tag:<pre style="background-color:hsl(0.0, 100.0%, 80%)" title="Line 12: 100%"> 12 quote = not quote<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 13: 50%"> 13 elif not tag:<pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 14: 50%"> 14 out = out + c 15 <pre style="background-color:hsl(60.0, 100.0%, 80%)" title="Line 16: 50%"> 16 return out Ranking EventsThe method `rank()` returns a ranked list of events, starting with the most suspicious. This is useful for automated techniques that need potential defect locations.```python>>> debugger.rank()[('remove_html_markup', 12), ('remove_html_markup', 4), ('remove_html_markup', 1), ('remove_html_markup', 7), ('remove_html_markup', 16), ('remove_html_markup', 13), ('remove_html_markup', 2), ('remove_html_markup', 11), ('remove_html_markup', 14), ('remove_html_markup', 3), ('remove_html_markup', 9), ('remove_html_markup', 6), ('remove_html_markup', 10), ('remove_html_markup', 8)]``` Classes and MethodsHere are all classes defined in this chapter: IntroductionThe idea behind _statistical debugging_ is fairly simple. We have a program that sometimes passes and sometimes fails. This outcome can be _correlated_ with events that precede it – properties of the input, properties of the execution, properties of the program state. If we, for instance, can find that "the program always fails when Line 123 is executed, and it always passes when Line 123 is _not_ executed", then we have a strong correlation between Line 123 being executed and failure.Such _correlation_ does not necessarily mean _causation_. 
For this, we would have to prove that executing Line 123 _always_ leads to failure, and that _not_ executing it does not lead to (this) failure. Also, a correlation (or even a causation) does not mean that Line 123 contains the defect – for this, we would have to show that it actually is an error. Still, correlations make excellent hints when it comes to searching for failure causes – in all generality, if you let your search be guided by _events that correlate with failures_, you are more likely to find _important hints on how the failure comes to be_. Collecting EventsHow can we determine events that correlate with failure? We start with a general mechanism to actually _collect_ events during execution. The abstract `Collector` class provides* a `collect()` method made for collecting events, called from the `traceit()` tracer; and* an `events()` method made for retrieving these events.Both of these are _abstract_ and will be defined further in subclasses.
###Code
from Tracer import Tracer
# ignore
from typing import Any, Callable, Optional, Type, Tuple
from typing import Dict, Set, List, TypeVar, Union
from types import FrameType, TracebackType
class Collector(Tracer):
"""A class to record events during execution."""
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
"""Collecting function. To be overridden in subclasses."""
pass
def events(self) -> Set:
"""Return a collection of events. To be overridden in subclasses."""
return set()
def traceit(self, frame: FrameType, event: str, arg: Any) -> None:
self.collect(frame, event, arg)
###Output
_____no_output_____
###Markdown
A `Collector` class is used like `Tracer`, using a `with` statement. Let us apply it on the buggy variant of `remove_html_markup()` from the [Introduction to Debugging](Intro_Debugging.ipynb):
###Code
def remove_html_markup(s): # type: ignore
tag = False
quote = False
out = ""
for c in s:
if c == '<' and not quote:
tag = True
elif c == '>' and not quote:
tag = False
elif c == '"' or c == "'" and tag:
quote = not quote
elif not tag:
out = out + c
return out
with Collector() as c:
out = remove_html_markup('"abc"')
out
###Output
_____no_output_____
###Markdown
There's not much we can do with our collector, as the `collect()` and `events()` methods are yet empty. However, we can introduce an `id()` method which returns a string identifying the collector. This string is defined from the _first function call_ encountered.
###Code
Coverage = Set[Tuple[Callable, int]]
class Collector(Collector):
def __init__(self) -> None:
"""Constructor."""
self._function: Optional[Callable] = None
self._args: Optional[Dict[str, Any]] = None
self._argstring: Optional[str] = None
self._exception: Optional[Type] = None
self.items_to_ignore: List[Union[Type, Callable]] = [self.__class__]
def traceit(self, frame: FrameType, event: str, arg: Any) -> None:
"""
Tracing function.
Saves the first function and calls collect().
"""
for item in self.items_to_ignore:
if (isinstance(item, type) and 'self' in frame.f_locals and
isinstance(frame.f_locals['self'], item)):
# Ignore this class
return
if item.__name__ == frame.f_code.co_name:
# Ignore this function
return
if self._function is None and event == 'call':
# Save function
self._function = self.create_function(frame)
self._args = frame.f_locals.copy()
self._argstring = ", ".join([f"{var}={repr(self._args[var])}"
for var in self._args])
self.collect(frame, event, arg)
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
"""Collector function. To be overloaded in subclasses."""
pass
def id(self) -> str:
"""Return an identifier for the collector,
created from the first call"""
return f"{self.function().__name__}({self.argstring()})"
def function(self) -> Callable:
"""Return the function from the first call, as a function object"""
if not self._function:
raise ValueError("No call collected")
return self._function
def argstring(self) -> str:
"""
Return the list of arguments from the first call,
as a printable string
"""
if not self._argstring:
raise ValueError("No call collected")
return self._argstring
def args(self) -> Dict[str, Any]:
"""Return a dict of argument names and values from the first call"""
if not self._args:
raise ValueError("No call collected")
return self._args
def exception(self) -> Optional[Type]:
"""Return the exception class from the first call,
or None if no exception was raised."""
return self._exception
def __repr__(self) -> str:
"""Return a string representation of the collector"""
# We use the ID as default representation when printed
return self.id()
def covered_functions(self) -> Set[Callable]:
"""Set of covered functions. To be overloaded in subclasses."""
return set()
def coverage(self) -> Coverage:
"""
Return a set (function, lineno) with locations covered.
To be overloaded in subclasses.
"""
return set()
###Output
_____no_output_____
###Markdown
Here's how the collector works. We use a `with` clause to collect details on a function call:
###Code
with Collector() as c:
remove_html_markup('abc')
###Output
_____no_output_____
###Markdown
We can now retrieve details such as the function called...
###Code
c.function()
###Output
_____no_output_____
###Markdown
... or its arguments, as a name/value dictionary.
###Code
c.args()
###Output
_____no_output_____
###Markdown
The `id()` method returns a printable representation of the call:
###Code
c.id()
###Output
_____no_output_____
###Markdown
The `argstring()` method does the same for the argument string only.
###Code
c.argstring()
###Output
_____no_output_____
###Markdown
With this, we can collect the basic information to identify calls – such that we can later correlate their events with success or failure. Error Prevention While collecting, we'd like to avoid collecting events in the collection infrastructure. The `items_to_ignore` attribute takes care of this.
###Code
class Collector(Collector):
def add_items_to_ignore(self,
items_to_ignore: List[Union[Type, Callable]]) \
-> None:
"""
Define additional classes and functions to ignore during collection
(typically `Debugger` classes using these collectors).
"""
self.items_to_ignore += items_to_ignore
###Output
_____no_output_____
###Markdown
If we exit a block without having collected anything, that's likely an error.
###Code
class Collector(Collector):
def __exit__(self, exc_tp: Type, exc_value: BaseException,
exc_traceback: TracebackType) -> Optional[bool]:
"""Exit the `with` block."""
ret = super().__exit__(exc_tp, exc_value, exc_traceback)
if not self._function:
if exc_tp:
return False # re-raise exception
else:
raise ValueError("No call collected")
return ret
###Output
_____no_output_____
###Markdown
Collecting CoverageSo far, our `Collector` class does not collect any events. Let us extend it such that it collects _coverage_ information – that is, the set of locations executed. To this end, we introduce a `CoverageCollector` subclass which saves the coverage in a set containing functions and line numbers.
###Code
from types import FrameType
from StackInspector import StackInspector
class CoverageCollector(Collector, StackInspector):
"""A class to record covered locations during execution."""
def __init__(self) -> None:
"""Constructor."""
super().__init__()
self._coverage: Coverage = set()
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
"""
Save coverage for an observed event.
"""
name = frame.f_code.co_name
function = self.search_func(name, frame)
if function is None:
function = self.create_function(frame)
location = (function, frame.f_lineno)
self._coverage.add(location)
###Output
_____no_output_____
###Markdown
We also override `events()` such that it returns the set of covered locations.
###Code
class CoverageCollector(CoverageCollector):
def events(self) -> Set[Tuple[str, int]]:
"""
Return the set of locations covered.
Each location comes as a pair (`function_name`, `lineno`).
"""
return {(func.__name__, lineno) for func, lineno in self._coverage}
###Output
_____no_output_____
###Markdown
The methods `coverage()` and `covered_functions()` allow precise access to the coverage obtained.
###Code
class CoverageCollector(CoverageCollector):
def covered_functions(self) -> Set[Callable]:
"""Return a set with all functions covered."""
return {func for func, lineno in self._coverage}
def coverage(self) -> Coverage:
"""Return a set (function, lineno) with all locations covered."""
return self._coverage
###Output
_____no_output_____
###Markdown
Here is how we can use `CoverageCollector` to determine the lines executed during a run of `remove_html_markup()`:
###Code
with CoverageCollector() as c:
remove_html_markup('abc')
c.events()
###Output
_____no_output_____
###Markdown
Sets of line numbers alone are not too revealing. They provide more insights if we actually list the code, highlighting these numbers:
###Code
import inspect
from bookutils import getsourcelines # like inspect.getsourcelines(), but in color
def code_with_coverage(function: Callable, coverage: Coverage) -> None:
source_lines, starting_line_number = \
getsourcelines(function)
line_number = starting_line_number
for line in source_lines:
marker = '*' if (function, line_number) in coverage else ' '
print(f"{line_number:4} {marker} {line}", end='')
line_number += 1
code_with_coverage(remove_html_markup, c.coverage())
###Output
1 * [34mdef[39;49;00m [32mremove_html_markup[39;49;00m(s): [37m# type: ignore[39;49;00m
2 * tag = [34mFalse[39;49;00m
3 * quote = [34mFalse[39;49;00m
4 * out = [33m"[39;49;00m[33m"[39;49;00m
5
6 * [34mfor[39;49;00m c [35min[39;49;00m s:
7 * [34mif[39;49;00m c == [33m'[39;49;00m[33m<[39;49;00m[33m'[39;49;00m [35mand[39;49;00m [35mnot[39;49;00m quote:
8 tag = [34mTrue[39;49;00m
9 * [34melif[39;49;00m c == [33m'[39;49;00m[33m>[39;49;00m[33m'[39;49;00m [35mand[39;49;00m [35mnot[39;49;00m quote:
10 tag = [34mFalse[39;49;00m
11 * [34melif[39;49;00m c == [33m'[39;49;00m[33m"[39;49;00m[33m'[39;49;00m [35mor[39;49;00m c == [33m"[39;49;00m[33m'[39;49;00m[33m"[39;49;00m [35mand[39;49;00m tag:
12 quote = [35mnot[39;49;00m quote
13 * [34melif[39;49;00m [35mnot[39;49;00m tag:
14 * out = out + c
15
16 * [34mreturn[39;49;00m out
###Markdown
Remember that the input `s` was `"abc"`? In this listing, we can see which lines were covered and which lines were not. From the listing already, we can see that `s` has neither tags nor quotes. Such coverage computation plays a big role in _testing_, as one wants tests to cover as many different aspects of program execution (and notably code) as possible. But also during debugging, code coverage is essential: If some code was not even executed in the failing run, then any change to it will have no effect.
###Code
from bookutils import quiz
quiz('Let the input be `"<b>Don\'t do this!</b>"`. '
"Which of these lines are executed? Use the code to find out!",
[
"`tag = True`",
"`tag = False`",
"`quote = not quote`",
"`out = out + c`"
], "[ord(c) - ord('a') - 1 for c in 'cdf']")
###Output
_____no_output_____
###Markdown
To find the solution, try this out yourself:
###Code
with CoverageCollector() as c:
remove_html_markup("<b>Don't do this!</b>")
# code_with_coverage(remove_html_markup, c.coverage())
###Output
_____no_output_____
###Markdown
Computing DifferencesLet us get back to the idea that we want to _correlate_ events with passing and failing outcomes. For this, we need to examine events in both _passing_ and _failing_ runs, and determine their _differences_ – since it is these differences we want to associate with their respective outcome. A Base Class for Statistical DebuggingThe `StatisticalDebugger` base class takes a collector class (such as `CoverageCollector`). Its `collect()` method creates a new collector of that very class, which will be maintained by the debugger. As argument, `collect()` takes a string characterizing the outcome (such as `'PASS'` or `'FAIL'`). This is how one would use it:```pythondebugger = StatisticalDebugger()with debugger.collect('PASS'): some_passing_run()with debugger.collect('PASS'): another_passing_run()with debugger.collect('FAIL'): some_failing_run()``` Let us implement `StatisticalDebugger`. The base class gets a collector class as argument:
###Code
class StatisticalDebugger:
"""A class to collect events for multiple outcomes."""
def __init__(self, collector_class: Type = CoverageCollector, log: bool = False):
"""Constructor. Use instances of `collector_class` to collect events."""
self.collector_class = collector_class
self.collectors: Dict[str, List[Collector]] = {}
self.log = log
###Output
_____no_output_____
###Markdown
The `collect()` method creates (and stores) a collector for the given outcome, using the given outcome to characterize the run. Any additional arguments are passed to the collector.
###Code
class StatisticalDebugger(StatisticalDebugger):
def collect(self, outcome: str, *args: Any, **kwargs: Any) -> Collector:
"""Return a collector for the given outcome.
Additional args are passed to the collector."""
collector = self.collector_class(*args, **kwargs)
collector.add_items_to_ignore([self.__class__])
return self.add_collector(outcome, collector)
def add_collector(self, outcome: str, collector: Collector) -> Collector:
if outcome not in self.collectors:
self.collectors[outcome] = []
self.collectors[outcome].append(collector)
return collector
###Output
_____no_output_____
###Markdown
The `all_events()` method produces a union of all events observed. If an outcome is given, it produces a union of all events with that outcome:
###Code
class StatisticalDebugger(StatisticalDebugger):
def all_events(self, outcome: Optional[str] = None) -> Set[Any]:
"""Return a set of all events observed."""
all_events = set()
if outcome:
if outcome in self.collectors:
for collector in self.collectors[outcome]:
all_events.update(collector.events())
else:
for outcome in self.collectors:
for collector in self.collectors[outcome]:
all_events.update(collector.events())
return all_events
###Output
_____no_output_____
###Markdown
Here's a simple example of `StatisticalDebugger` in action:
###Code
s = StatisticalDebugger()
with s.collect('PASS'):
remove_html_markup("abc")
with s.collect('PASS'):
remove_html_markup('<b>abc</b>')
with s.collect('FAIL'):
remove_html_markup('"abc"')
###Output
_____no_output_____
###Markdown
The method `all_events()` returns all events collected:
###Code
s.all_events()
###Output
_____no_output_____
###Markdown
If given an outcome as argument, we obtain all events with the given outcome.
###Code
s.all_events('FAIL')
###Output
_____no_output_____
###Markdown
The attribute `collectors` maps outcomes to lists of collectors:
###Code
s.collectors
###Output
_____no_output_____
###Markdown
Here's the collector of the one (and first) passing run:
###Code
s.collectors['PASS'][0].id()
s.collectors['PASS'][0].events()
###Output
_____no_output_____
###Markdown
To better highlight the differences between the collected events, we introduce a method `event_table()` that prints out whether an event took place in a run. Excursion: Printing an Event Table
###Code
from IPython.display import Markdown
import html
class StatisticalDebugger(StatisticalDebugger):
def function(self) -> Optional[Callable]:
"""
Return the entry function from the events observed,
or None if ambiguous.
"""
names_seen = set()
functions = []
for outcome in self.collectors:
for collector in self.collectors[outcome]:
# We may have multiple copies of the function,
# but sharing the same name
func = collector.function()
if func.__name__ not in names_seen:
functions.append(func)
names_seen.add(func.__name__)
if len(functions) != 1:
return None # ambiguous
return functions[0]
def covered_functions(self) -> Set[Callable]:
"""Return a set of all functions observed."""
functions = set()
for outcome in self.collectors:
for collector in self.collectors[outcome]:
functions |= collector.covered_functions()
return functions
def coverage(self) -> Coverage:
"""Return a set of all (functions, line_numbers) observed"""
coverage = set()
for outcome in self.collectors:
for collector in self.collectors[outcome]:
coverage |= collector.coverage()
return coverage
def color(self, event: Any) -> Optional[str]:
"""
Return a color for the given event, or None.
To be overloaded in subclasses.
"""
return None
def tooltip(self, event: Any) -> Optional[str]:
"""
Return a tooltip string for the given event, or None.
To be overloaded in subclasses.
"""
return None
def event_str(self, event: Any) -> str:
"""Format the given event. To be overloaded in subclasses."""
if isinstance(event, str):
return event
if isinstance(event, tuple):
return ":".join(self.event_str(elem) for elem in event)
return str(event)
def event_table_text(self, *, args: bool = False, color: bool = False) -> str:
"""
Print out a table of events observed.
If `args` is True, use arguments as headers.
If `color` is True, use colors.
"""
sep = ' | '
all_events = self.all_events()
longest_event = max(len(f"{self.event_str(event)}")
for event in all_events)
out = ""
# Header
if args:
out += '| '
func = self.function()
if func:
out += '`' + func.__name__ + '`'
out += sep
for name in self.collectors:
for collector in self.collectors[name]:
out += '`' + collector.argstring() + '`' + sep
out += '\n'
else:
out += '| ' + ' ' * longest_event + sep
for name in self.collectors:
for i in range(len(self.collectors[name])):
out += name + sep
out += '\n'
out += '| ' + '-' * longest_event + sep
for name in self.collectors:
for i in range(len(self.collectors[name])):
out += '-' * len(name) + sep
out += '\n'
# Data
for event in sorted(all_events):
event_name = self.event_str(event).rjust(longest_event)
tooltip = self.tooltip(event)
if tooltip:
title = f' title="{tooltip}"'
else:
title = ''
if color:
color_name = self.color(event)
if color_name:
event_name = \
f'<samp style="background-color: {color_name}"{title}>' \
f'{html.escape(event_name)}' \
f'</samp>'
out += f"| {event_name}" + sep
for name in self.collectors:
for collector in self.collectors[name]:
out += ' ' * (len(name) - 1)
if event in collector.events():
out += "X"
else:
out += "-"
out += sep
out += '\n'
return out
def event_table(self, **_args: Any) -> Any:
"""Print out event table in Markdown format."""
return Markdown(self.event_table_text(**_args))
def __repr__(self) -> str:
return self.event_table_text()
def _repr_markdown_(self) -> str:
return self.event_table_text(args=True, color=True)
###Output
_____no_output_____
###Markdown
End of Excursion
###Code
s = StatisticalDebugger()
with s.collect('PASS'):
remove_html_markup("abc")
with s.collect('PASS'):
remove_html_markup('<b>abc</b>')
with s.collect('FAIL'):
remove_html_markup('"abc"')
s.event_table(args=True)
quiz("How many lines are executed in the failing run only?",
[
"One",
"Two",
"Three"
], 'len([12])')
###Output
_____no_output_____
###Markdown
Indeed, Line 12 executed in the failing run only would be a correlation to look for. Collecting Passing and Failing RunsWhile our `StatisticalDebugger` class allows arbitrary outcomes, we are typically only interested in two outcomes, namely _passing_ vs. _failing_ runs. We therefore introduce a specialized `DifferenceDebugger` class that provides customized methods to collect and access passing and failing runs.
###Code
class DifferenceDebugger(StatisticalDebugger):
"""A class to collect events for passing and failing outcomes."""
PASS = 'PASS'
FAIL = 'FAIL'
def collect_pass(self, *args: Any, **kwargs: Any) -> Collector:
"""Return a collector for passing runs."""
return self.collect(self.PASS, *args, **kwargs)
def collect_fail(self, *args: Any, **kwargs: Any) -> Collector:
"""Return a collector for failing runs."""
return self.collect(self.FAIL, *args, **kwargs)
def pass_collectors(self) -> List[Collector]:
return self.collectors[self.PASS]
def fail_collectors(self) -> List[Collector]:
return self.collectors[self.FAIL]
def all_fail_events(self) -> Set[Any]:
"""Return all events observed in failing runs."""
return self.all_events(self.FAIL)
def all_pass_events(self) -> Set[Any]:
"""Return all events observed in passing runs."""
return self.all_events(self.PASS)
def only_fail_events(self) -> Set[Any]:
"""Return all events observed only in failing runs."""
return self.all_fail_events() - self.all_pass_events()
def only_pass_events(self) -> Set[Any]:
"""Return all events observed only in passing runs."""
return self.all_pass_events() - self.all_fail_events()
###Output
_____no_output_____
###Markdown
We can use `DifferenceDebugger` just as a `StatisticalDebugger`:
###Code
# ignore
T1 = TypeVar('T1', bound='DifferenceDebugger')
def test_debugger_html_simple(debugger: T1) -> T1:
with debugger.collect_pass():
remove_html_markup('abc')
with debugger.collect_pass():
remove_html_markup('<b>abc</b>')
with debugger.collect_fail():
remove_html_markup('"abc"')
return debugger
###Output
_____no_output_____
###Markdown
However, since the outcome of tests may not always be predetermined, we provide a simpler interface for tests that can fail (= raise an exception) or pass (not raise an exception).
###Code
class DifferenceDebugger(DifferenceDebugger):
def __enter__(self) -> Any:
"""Enter a `with` block. Collect coverage and outcome;
classify as FAIL if the block raises an exception,
and PASS if it does not.
"""
self.collector = self.collector_class()
self.collector.add_items_to_ignore([self.__class__])
self.collector.__enter__()
return self
def __exit__(self, exc_tp: Type, exc_value: BaseException,
exc_traceback: TracebackType) -> Optional[bool]:
"""Exit the `with` block."""
status = self.collector.__exit__(exc_tp, exc_value, exc_traceback)
if status is None:
pass
else:
return False # Internal error; re-raise exception
if exc_tp is None:
outcome = self.PASS
else:
outcome = self.FAIL
self.add_collector(outcome, self.collector)
return True # Ignore exception, if any
###Output
_____no_output_____
###Markdown
Using this interface, we can rewrite `test_debugger_html()`:
###Code
# ignore
T2 = TypeVar('T2', bound='DifferenceDebugger')
def test_debugger_html(debugger: T2) -> T2:
with debugger:
remove_html_markup('abc')
with debugger:
remove_html_markup('<b>abc</b>')
with debugger:
remove_html_markup('"abc"')
assert False # Mark test as failing
return debugger
test_debugger_html(DifferenceDebugger())
###Output
_____no_output_____
###Markdown
Analyzing EventsLet us now focus on _analyzing_ events collected. Since events come back as _sets_, we can compute _unions_ and _differences_ between these sets. For instance, we can compute which lines were executed in _any_ of the passing runs of `test_debugger_html()`, above:
###Code
debugger = test_debugger_html(DifferenceDebugger())
pass_1_events = debugger.pass_collectors()[0].events()
pass_2_events = debugger.pass_collectors()[1].events()
in_any_pass = pass_1_events | pass_2_events
in_any_pass
###Output
_____no_output_____
###Markdown
Likewise, we can determine which lines were _only_ executed in the failing run:
###Code
fail_events = debugger.fail_collectors()[0].events()
only_in_fail = fail_events - in_any_pass
only_in_fail
###Output
_____no_output_____
###Markdown
And we see that the "failing" run is characterized by processing quotes:
###Code
code_with_coverage(remove_html_markup, only_in_fail)
debugger = test_debugger_html(DifferenceDebugger())
debugger.all_events()
###Output
_____no_output_____
###Markdown
These are the lines executed only in the failing run:
###Code
debugger.only_fail_events()
###Output
_____no_output_____
###Markdown
These are the lines executed only in the passing runs:
###Code
debugger.only_pass_events()
###Output
_____no_output_____
###Markdown
Again, having these lines individually is neat, but things become much more interesting if we can see the associated code lines just as well. That's what we will do in the next section.

Visualizing Differences

To show correlations of line coverage in context, we introduce a number of _visualization_ techniques that _highlight_ code with different colors.

Discrete Spectrum

The first idea is to use a _discrete_ spectrum of three colors:

* _red_ for code executed in failing runs only
* _green_ for code executed in passing runs only
* _yellow_ for code executed in both passing and failing runs.

Code that is not executed stays unhighlighted.

We first introduce an abstract class `SpectrumDebugger` that provides the essential functions. `suspiciousness()` returns a value between 0 and 1 indicating the suspiciousness of the given event - or `None` if unknown.
###Code
class SpectrumDebugger(DifferenceDebugger):
def suspiciousness(self, event: Any) -> Optional[float]:
"""
Return a suspiciousness value in the range [0, 1.0]
for the given event, or `None` if unknown.
To be overloaded in subclasses.
"""
return None
###Output
_____no_output_____
###Markdown
The `tooltip()` and `percentage()` methods convert the suspiciousness into a human-readable form.
###Code
class SpectrumDebugger(SpectrumDebugger):
def tooltip(self, event: Any) -> str:
"""
Return a tooltip for the given event (default: percentage).
To be overloaded in subclasses.
"""
return self.percentage(event)
def percentage(self, event: Any) -> str:
"""
Return the suspiciousness for the given event as percentage string.
"""
suspiciousness = self.suspiciousness(event)
if suspiciousness is not None:
return str(int(suspiciousness * 100)).rjust(3) + '%'
else:
return ' ' * len('100%')
###Output
_____no_output_____
###Markdown
The `code()` method takes a function and shows each of its source code lines using the given spectrum, using HTML markup:
###Code
class SpectrumDebugger(SpectrumDebugger):
def code(self, functions: Optional[Set[Callable]] = None, *,
color: bool = False, suspiciousness: bool = False,
line_numbers: bool = True) -> str:
"""
Return a listing of `functions` (default: covered functions).
If `color` is True, render as HTML, using suspiciousness colors.
If `suspiciousness` is True, include suspiciousness values.
If `line_numbers` is True (default), include line numbers.
"""
if not functions:
functions = self.covered_functions()
out = ""
seen = set()
for function in functions:
source_lines, starting_line_number = \
inspect.getsourcelines(function)
if (function.__name__, starting_line_number) in seen:
continue
seen.add((function.__name__, starting_line_number))
if out:
out += '\n'
if color:
out += '<p/>'
line_number = starting_line_number
for line in source_lines:
if color:
line = html.escape(line)
if line.strip() == '':
line = ' '
location = (function.__name__, line_number)
location_suspiciousness = self.suspiciousness(location)
if location_suspiciousness is not None:
tooltip = f"Line {line_number}: {self.tooltip(location)}"
else:
tooltip = f"Line {line_number}: not executed"
if suspiciousness:
line = self.percentage(location) + ' ' + line
if line_numbers:
line = str(line_number).rjust(4) + ' ' + line
line_color = self.color(location)
if color and line_color:
line = f'''<pre style="background-color:{line_color}"
title="{tooltip}">{line.rstrip()}</pre>'''
elif color:
line = f'<pre title="{tooltip}">{line}</pre>'
else:
line = line.rstrip()
out += line + '\n'
line_number += 1
return out
###Output
_____no_output_____
###Markdown
We introduce a few helper methods to visualize the code with colors in various forms.
###Code
class SpectrumDebugger(SpectrumDebugger):
def _repr_html_(self) -> str:
"""When output in Jupyter, visualize as HTML"""
return self.code(color=True)
def __str__(self) -> str:
"""Show code as string"""
return self.code(color=False, suspiciousness=True)
def __repr__(self) -> str:
"""Show code as string"""
return self.code(color=False, suspiciousness=True)
###Output
_____no_output_____
###Markdown
So far, however, central methods like `suspiciousness()` or `color()` were abstract – that is, to be defined in subclasses. Our `DiscreteSpectrumDebugger` subclass provides concrete implementations for these, with `color()` returning one of the three colors depending on the suspiciousness of the given event:
###Code
class DiscreteSpectrumDebugger(SpectrumDebugger):
"""Visualize differences between executions using three discrete colors"""
def suspiciousness(self, event: Any) -> Optional[float]:
"""
Return a suspiciousness value [0, 1.0]
for the given event, or `None` if unknown.
"""
passing = self.all_pass_events()
failing = self.all_fail_events()
if event in passing and event in failing:
return 0.5
elif event in failing:
return 1.0
elif event in passing:
return 0.0
else:
return None
def color(self, event: Any) -> Optional[str]:
"""
Return a HTML color for the given event.
"""
suspiciousness = self.suspiciousness(event)
if suspiciousness is None:
return None
if suspiciousness > 0.8:
return 'mistyrose'
if suspiciousness >= 0.5:
return 'lightyellow'
return 'honeydew'
def tooltip(self, event: Any) -> str:
"""Return a tooltip for the given event."""
passing = self.all_pass_events()
failing = self.all_fail_events()
if event in passing and event in failing:
return "in passing and failing runs"
elif event in failing:
return "only in failing runs"
elif event in passing:
return "only in passing runs"
else:
return "never"
###Output
_____no_output_____
###Markdown
This is what the `only_pass_events()` and `only_fail_events()` sets look like when visualized with code. The "culprit" line is well highlighted:
###Code
debugger = test_debugger_html(DiscreteSpectrumDebugger())
debugger
###Output
_____no_output_____
###Markdown
We can clearly see that the failure is correlated with the presence of quotes in the input string (which is an important hint!). But does this also show us _immediately_ where the defect to be fixed is?
###Code
quiz("Does the line `quote = not quote` actually contain the defect?",
[
"Yes, it should be fixed",
"No, the defect is elsewhere"
], '164 * 2 % 326')
###Output
_____no_output_____
###Markdown
Indeed, it is the _governing condition_ that is wrong – that is, the condition that caused Line 12 to be executed in the first place. In order to fix a program, we have to find a location that1. _causes_ the failure (i.e., it can be changed to make the failure go away); and2. is a _defect_ (i.e., contains an error).In our example above, the highlighted code line is a _symptom_ for the error. To some extent, it is also a _cause_, since, say, commenting it out would also resolve the given failure, at the cost of causing other failures. However, the preceding condition also is a cause, as is the presence of quotes in the input.Only one of these also is a _defect_, though, and that is the preceding condition. Hence, while correlations can provide important hints, they do not necessarily locate defects. For those of us who may not have color HTML output ready, simply printing the debugger lists suspiciousness values as percentages.
###Code
print(debugger)
###Output
1 50% def remove_html_markup(s): # type: ignore
2 50% tag = False
3 50% quote = False
4 50% out = ""
5
6 50% for c in s:
7 50% if c == '<' and not quote:
8 0% tag = True
9 50% elif c == '>' and not quote:
10 0% tag = False
11 50% elif c == '"' or c == "'" and tag:
12 100% quote = not quote
13 50% elif not tag:
14 50% out = out + c
15
16 50% return out
###Markdown
Continuous Spectrum

The criterion that an event should _only_ occur in failing runs (and not in passing runs) can be too aggressive. In particular, if we have another run that executes the "culprit" lines, but does _not_ fail, our "only in fail" criterion will no longer be helpful. Here is an example. The input

```html
<b color="blue">text</b>
```

will trigger the "culprit" line

```python
quote = not quote
```

but actually produce an output where the tags are properly stripped:
###Code
remove_html_markup('<b color="blue">text</b>')
###Output
_____no_output_____
###Markdown
As a consequence, we no longer have lines that are being executed only in failing runs:
###Code
debugger = test_debugger_html(DiscreteSpectrumDebugger())
with debugger.collect_pass():
remove_html_markup('<b link="blue"></b>')
debugger.only_fail_events()
###Output
_____no_output_____
###Markdown
In our spectrum output, the effect now is that the "culprit" line is as yellow as all others.
###Code
debugger
###Output
_____no_output_____
###Markdown
We therefore introduce a different method for highlighting lines, based on their _relative_ occurrence with respect to all runs: If a line has been _mostly_ executed in failing runs, its color should shift towards red; if a line has been _mostly_ executed in passing runs, its color should shift towards green. This _continuous spectrum_ has been introduced by the seminal _Tarantula_ tool \cite{Jones2002}. In Tarantula, the color _hue_ for each line is defined as follows: $$\textit{color hue}(\textit{line}) = \textit{low color(red)} + \frac{\%\textit{passed}(\textit{line})}{\%\textit{passed}(\textit{line}) + \%\textit{failed}(\textit{line})} \times \textit{color range}$$ Here, `%passed` and `%failed` denote the percentage at which a line has been executed in passing and failing runs, respectively. A hue of 0.0 stands for red, a hue of 1.0 stands for green, and a hue of 0.5 stands for equal fractions of red and green, yielding yellow. We can implement these measures right away as methods in a new `ContinuousSpectrumDebugger` class:
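As a quick sanity check of this formula, here is a minimal sketch (a standalone helper of ours, not part of the classes below) that computes the hue for a hypothetical line executed in 50% of the passing runs and in all failing runs:

```python
def tarantula_hue(percent_passed: float, percent_failed: float) -> float:
    # Hue between 0.0 (red) and 1.0 (green), with low color = 0.0
    # and color range = 1.0, as in the formula above
    return percent_passed / (percent_passed + percent_failed)

tarantula_hue(0.5, 1.0)  # ~0.33 – shifted towards red, i.e. rather suspicious
```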
###Code
class ContinuousSpectrumDebugger(DiscreteSpectrumDebugger):
"""Visualize differences between executions using a color spectrum"""
def collectors_with_event(self, event: Any, category: str) -> Set[Collector]:
"""
Return all collectors in a category
that observed the given event.
"""
all_runs = self.collectors[category]
collectors_with_event = set(collector for collector in all_runs
if event in collector.events())
return collectors_with_event
def collectors_without_event(self, event: Any, category: str) -> Set[Collector]:
"""
Return all collectors in a category
that did not observe the given event.
"""
all_runs = self.collectors[category]
collectors_without_event = set(collector for collector in all_runs
if event not in collector.events())
return collectors_without_event
def event_fraction(self, event: Any, category: str) -> float:
if category not in self.collectors:
return 0.0
all_collectors = self.collectors[category]
collectors_with_event = self.collectors_with_event(event, category)
fraction = len(collectors_with_event) / len(all_collectors)
# print(f"%{category}({event}) = {fraction}")
return fraction
def passed_fraction(self, event: Any) -> float:
return self.event_fraction(event, self.PASS)
def failed_fraction(self, event: Any) -> float:
return self.event_fraction(event, self.FAIL)
def hue(self, event: Any) -> Optional[float]:
"""Return a color hue from 0.0 (red) to 1.0 (green)."""
passed = self.passed_fraction(event)
failed = self.failed_fraction(event)
if passed + failed > 0:
return passed / (passed + failed)
else:
return None
###Output
_____no_output_____
###Markdown
Having a continuous hue also implies a continuous suspiciousness and associated tooltips:
###Code
class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger):
def suspiciousness(self, event: Any) -> Optional[float]:
hue = self.hue(event)
if hue is None:
return None
return 1 - hue
def tooltip(self, event: Any) -> str:
return self.percentage(event)
###Output
_____no_output_____
###Markdown
The hue for lines executed only in failing runs is (deep) red, as expected:
###Code
debugger = test_debugger_html(ContinuousSpectrumDebugger())
for location in debugger.only_fail_events():
print(location, debugger.hue(location))
###Output
('remove_html_markup', 12) 0.0
###Markdown
Likewise, the hue for lines executed in passing runs is (deep) green:
###Code
for location in debugger.only_pass_events():
print(location, debugger.hue(location))
###Output
('remove_html_markup', 10) 1.0
('remove_html_markup', 8) 1.0
###Markdown
The Tarantula tool not only sets the hue for a line, but also uses _brightness_ as a measure of support – that is, how often the line was executed at all. The brighter a line, the stronger the correlation with a passing or failing outcome. The brightness is defined as follows: $$\textit{brightness}(line) = \max(\%\textit{passed}(\textit{line}), \%\textit{failed}(\textit{line}))$$ and it is easily implemented, too:
###Code
class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger):
def brightness(self, event: Any) -> float:
return max(self.passed_fraction(event), self.failed_fraction(event))
###Output
_____no_output_____
###Markdown
Our single "only in fail" line has a brightness of 1.0 (the maximum).
###Code
debugger = test_debugger_html(ContinuousSpectrumDebugger())
for location in debugger.only_fail_events():
print(location, debugger.brightness(location))
###Output
('remove_html_markup', 12) 1.0
###Markdown
With this, we can now define a color for each line. To this end, we override the (previously discrete) `color()` method such that it returns a color specification giving hue and brightness. We use the HTML format `hsl(hue, saturation, lightness)` where the hue is given as a value between 0 and 360 (0 is red, 120 is green) and saturation and lightness are provided as percentages.
###Code
class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger):
def color(self, event: Any) -> Optional[str]:
hue = self.hue(event)
if hue is None:
return None
saturation = self.brightness(event)
# HSL color values are specified with:
# hsl(hue, saturation, lightness).
return f"hsl({hue * 120}, {saturation * 100}%, 80%)"
debugger = test_debugger_html(ContinuousSpectrumDebugger())
###Output
_____no_output_____
###Markdown
Lines executed only in failing runs are still shown in red:
###Code
for location in debugger.only_fail_events():
print(location, debugger.color(location))
###Output
('remove_html_markup', 12) hsl(0.0, 100.0%, 80%)
###Markdown
... whereas lines executed only in passing runs are still shown in green:
###Code
for location in debugger.only_pass_events():
print(location, debugger.color(location))
debugger
###Output
_____no_output_____
###Markdown
What happens with our `quote = not quote` "culprit" line if it is executed in passing runs, too?
###Code
with debugger.collect_pass():
out = remove_html_markup('<b link="blue"></b>')
quiz('In which color will the `quote = not quote` "culprit" line '
'be shown after executing the above code?',
[
'<span style="background-color: hsl(120.0, 50.0%, 80%)">Green</span>',
'<span style="background-color: hsl(60.0, 100.0%, 80%)">Yellow</span>',
'<span style="background-color: hsl(30.0, 100.0%, 80%)">Orange</span>',
'<span style="background-color: hsl(0.0, 100.0%, 80%)">Red</span>'
], '999 // 333')
###Output
_____no_output_____
###Markdown
We see that it still is shown with an orange-red tint.
###Code
debugger
###Output
_____no_output_____
###Markdown
Here's another example, coming right from the Tarantula paper. The `middle()` function takes three numbers `x`, `y`, and `z`, and returns the one that is neither the minimum nor the maximum of the three:
###Code
def middle(x, y, z): # type: ignore
if y < z:
if x < y:
return y
elif x < z:
return y
else:
if x > y:
return y
elif x > z:
return x
return z
middle(1, 2, 3)
###Output
_____no_output_____
###Markdown
Unfortunately, `middle()` can fail:
###Code
middle(2, 1, 3)
###Output
_____no_output_____
###Markdown
Let us see whether we can find the bug with a few additional test cases:
###Code
# ignore
T3 = TypeVar('T3', bound='DifferenceDebugger')
def test_debugger_middle(debugger: T3) -> T3:
with debugger.collect_pass():
middle(3, 3, 5)
with debugger.collect_pass():
middle(1, 2, 3)
with debugger.collect_pass():
middle(3, 2, 1)
with debugger.collect_pass():
middle(5, 5, 5)
with debugger.collect_pass():
middle(5, 3, 4)
with debugger.collect_fail():
middle(2, 1, 3)
return debugger
###Output
_____no_output_____
###Markdown
Note that in order to collect data from multiple function invocations, you need to have a separate `with` clause for every invocation. The following will _not_ work correctly:

```python
with debugger.collect_pass():
    middle(3, 3, 5)
    middle(1, 2, 3)
    ...
```
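For contrast, here is the pattern that does work – a sketch with one `with` block per invocation, exactly as in `test_debugger_middle()` above:

```python
with debugger.collect_pass():
    middle(3, 3, 5)
with debugger.collect_pass():
    middle(1, 2, 3)
# ... one `with` block per call
```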
###Code
debugger = test_debugger_middle(ContinuousSpectrumDebugger())
debugger.event_table(args=True)
###Output
_____no_output_____
###Markdown
Here comes the visualization. We see that the `return y` line is the culprit here – and actually also the one to be fixed.
###Code
debugger
quiz("Which of the above lines should be fixed?",
[
'<span style="background-color: hsl(45.0, 100%, 80%)">Line 3: `if x < y`</span>',
'<span style="background-color: hsl(34.28571428571429, 100.0%, 80%)">Line 5: `elif x < z`</span>',
'<span style="background-color: hsl(20.000000000000004, 100.0%, 80%)">Line 6: `return y`</span>',
'<span style="background-color: hsl(120.0, 20.0%, 80%)">Line 9: `return y`</span>',
], r'len(" middle ".strip()[:3])')
###Output
_____no_output_____
###Markdown
Indeed, in the `middle()` example, the "reddest" line is also the one to be fixed. Here is the fixed version:
###Code
def middle_fixed(x, y, z): # type: ignore
if y < z:
if x < y:
return y
elif x < z:
return x
else:
if x > y:
return y
elif x > z:
return x
return z
middle_fixed(2, 1, 3)
###Output
_____no_output_____
###Markdown
Ranking Lines by Suspiciousness

In a large program, there can be several locations (and events) that could be flagged as suspicious. It suffices that some large code block of, say, 1,000 lines is mostly executed in failing runs, and then all of this code block will be visualized in some shade of red. To further highlight the "most suspicious" events, one idea is to use a _ranking_ – that is, coming up with a list of events where those events most correlated with failures would be shown at the top. The programmer would then examine these events one by one and proceed down the list.

We will show how this works for two "correlation" metrics – first the _Tarantula_ metric, as introduced above, and then the _Ochiai_ metric, which has been shown to be one of the best "ranking" metrics. We introduce a base class `RankingDebugger` with an abstract method `suspiciousness()` to be overloaded in subclasses. The method `rank()` returns a list of all events observed, sorted by suspiciousness, highest first.
###Code
class RankingDebugger(DiscreteSpectrumDebugger):
"""Rank events by their suspiciousness"""
def rank(self) -> List[Any]:
"""Return a list of events, sorted by suspiciousness, highest first."""
def susp(event: Any) -> float:
suspiciousness = self.suspiciousness(event)
assert suspiciousness is not None
return suspiciousness
events = list(self.all_events())
events.sort(key=susp, reverse=True)
return events
def __repr__(self) -> str:
return repr(self.rank())
###Output
_____no_output_____
###Markdown
The Tarantula Metric

We can use the Tarantula metric to sort lines according to their suspiciousness. The "redder" a line (a hue of 0.0), the more suspicious it is. We can simply define $$\textit{suspiciousness}_\textit{tarantula}(\textit{event}) = 1 - \textit{color hue}(\textit{event})$$ where $\textit{color hue}$ is as defined above. This is exactly the `suspiciousness()` function as already implemented in our `ContinuousSpectrumDebugger`. We introduce the `TarantulaDebugger` class, inheriting visualization capabilities from the `ContinuousSpectrumDebugger` class as well as the suspiciousness features from the `RankingDebugger` class.
###Code
class TarantulaDebugger(ContinuousSpectrumDebugger, RankingDebugger):
"""Spectrum-based Debugger using the Tarantula metric for suspiciousness"""
pass
###Output
_____no_output_____
###Markdown
Let us list `remove_html_markup()` with highlighted lines again:
###Code
tarantula_html = test_debugger_html(TarantulaDebugger())
tarantula_html
###Output
_____no_output_____
###Markdown
Here's our ranking of lines, from most suspicious to least suspicious:
###Code
tarantula_html.rank()
tarantula_html.suspiciousness(tarantula_html.rank()[0])
###Output
_____no_output_____
###Markdown
We see that the first line in the list is indeed the most suspicious; the two "green" lines come at the very end. For the `middle()` function, we also obtain a ranking from "reddest" to "greenest".
###Code
tarantula_middle = test_debugger_middle(TarantulaDebugger())
tarantula_middle
tarantula_middle.rank()
tarantula_middle.suspiciousness(tarantula_middle.rank()[0])
###Output
_____no_output_____
###Markdown
The Ochiai Metric

The _Ochiai_ Metric \cite{Ochiai1957}, first introduced in the biology domain \cite{daSilvaMeyer2004} and later applied to fault localization by Abreu et al. \cite{Abreu2009}, is defined as follows: $$\textit{suspiciousness}_\textit{ochiai} = \frac{\textit{failed}(\textit{event})}{\sqrt{\bigl(\textit{failed}(\textit{event}) + \textit{not-in-failed}(\textit{event})\bigr)\times\bigl(\textit{failed}(\textit{event}) + \textit{passed}(\textit{event})\bigr)}}$$ where

* $\textit{failed}(\textit{event})$ is the number of times the event occurred in _failing_ runs
* $\textit{not-in-failed}(\textit{event})$ is the number of times the event did _not_ occur in failing runs
* $\textit{passed}(\textit{event})$ is the number of times the event occurred in _passing_ runs.

We can easily implement this formula:
###Code
import math
class OchiaiDebugger(ContinuousSpectrumDebugger, RankingDebugger):
"""Spectrum-based Debugger using the Ochiai metric for suspiciousness"""
def suspiciousness(self, event: Any) -> Optional[float]:
failed = len(self.collectors_with_event(event, self.FAIL))
not_in_failed = len(self.collectors_without_event(event, self.FAIL))
passed = len(self.collectors_with_event(event, self.PASS))
try:
return failed / math.sqrt((failed + not_in_failed) * (failed + passed))
except ZeroDivisionError:
return None
def hue(self, event: Any) -> Optional[float]:
suspiciousness = self.suspiciousness(event)
if suspiciousness is None:
return None
return 1 - suspiciousness
###Output
_____no_output_____
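###Markdown
Before applying it, let us check the formula by hand on a small sketch: in `test_debugger_html()`, the "culprit" line occurs in the single failing run and in no passing run, so $\textit{failed} = 1$, $\textit{not-in-failed} = 0$, and $\textit{passed} = 0$, which yields a suspiciousness of $1/\sqrt{(1+0)\times(1+0)} = 1.0$. (The counts below are assumed from that test setup, not computed by a debugger.)
###Code
failed, not_in_failed, passed = 1, 0, 0  # assumed counts for the "culprit" line
failed / math.sqrt((failed + not_in_failed) * (failed + passed))  # 1.0
###Output
_____no_output_____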
###Markdown
Applied on the `remove_html_markup()` function, the individual suspiciousness scores differ from Tarantula. However, we obtain a very similar visualization, and the same ranking.
###Code
ochiai_html = test_debugger_html(OchiaiDebugger())
ochiai_html
ochiai_html.rank()
ochiai_html.suspiciousness(ochiai_html.rank()[0])
###Output
_____no_output_____
###Markdown
The same observations also apply for the `middle()` function.
###Code
ochiai_middle = test_debugger_middle(OchiaiDebugger())
ochiai_middle
ochiai_middle.rank()
ochiai_middle.suspiciousness(ochiai_middle.rank()[0])
###Output
_____no_output_____
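###Markdown
Both metrics thus place the faulty `return y` line at the top of the ranking. As a small helper of ours (the function `rank_of_location()` is not part of the chapter's classes), here is a sketch that looks up the position of a given location in a ranking – handy when comparing a ranking against a location known to be faulty:
###Code
def rank_of_location(debugger: RankingDebugger, location: Any) -> Optional[int]:
    """Return the 1-based rank of `location`, or None if it was not observed."""
    ranking = debugger.rank()
    return ranking.index(location) + 1 if location in ranking else None

rank_of_location(ochiai_middle, ('middle', 6))  # the faulty `return y` line ranks first
###Output
_____no_output_____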
###Markdown
How Useful is Ranking?

So, which metric is better? The standard method to evaluate such rankings is to determine a _ground truth_ – that is, the set of locations that eventually are fixed – and to check at which point in the ranking any such location occurs – the earlier, the better. In our `remove_html_markup()` and `middle()` examples, both the Tarantula and the Ochiai metric perform flawlessly, as the "culprit" line is always ranked at the top. However, this need not always be the case; the exact performance depends on the nature of the code and the observed runs. (Also, the question of whether there always is exactly one possible location where the program can be fixed is open for discussion.)

You will be surprised that over time, _several dozen_ metrics have been proposed \cite{Wong2016}, each performing somewhat better or somewhat worse depending on which benchmark they were applied on. The two metrics discussed above each have their merits – the Tarantula metric was among the first such metrics, and the Ochiai metric is generally shown to be among the most effective ones \cite{Abreu2009}.

While rankings can be easily _evaluated_, it is not necessarily clear whether and how much they serve programmers. As stated above, the assumption of rankings is that developers examine one potentially defective statement after another until they find the actually defective one. However, in a series of human studies with developers, Parnin and Orso \cite{Parnin2011} found that this assumption may not hold:

> It is unclear whether developers can actually determine the faulty nature of a statement by simply looking at it, without any additional information (e.g., the state of the program when the statement was executed or the statements that were executed before or after that one).

In their study, they found that rankings could help complete a task faster, but this effect was limited to experienced developers and simpler code. Artificially changing the rank of faulty statements had little to no effect, implying that developers would not strictly follow the ranked list of statements, but rather search through the code to understand it. At this point, a _visualization_ as in the Tarantula tool can be helpful to programmers as it _guides_ the search, but a _ranking_ that _defines_ where to search may be less useful.

Having said that, ranking has its merits – notably when it comes to informing _automated_ debugging techniques. In the [chapter on program repair](Repairer.ipynb), we will see how ranked lists of potentially faulty statements tell automated repair techniques where to try to repair the program first. And once such a repair is successful, we have a very strong indication on where and how the program could be fixed!

Using Large Test Suites

In fault localization, the larger and the more thorough the test suite, the higher the precision. Let us try out what happens if we extend the `middle()` test suite with additional test cases. The function `middle_testcase()` returns a random input for `middle()`:
###Code
import random
def middle_testcase() -> Tuple[int, int, int]:
x = random.randrange(10)
y = random.randrange(10)
z = random.randrange(10)
return x, y, z
[middle_testcase() for i in range(5)]
###Output
_____no_output_____
###Markdown
The function `middle_test()` simply checks if `middle()` operates correctly – by placing `x`, `y`, and `z` in a list, sorting it, and checking the middle argument. If `middle()` fails, `middle_test()` raises an exception.
###Code
def middle_test(x: int, y: int, z: int) -> None:
m = middle(x, y, z)
assert m == sorted([x, y, z])[1]
middle_test(4, 5, 6)
from ExpectError import ExpectError
with ExpectError():
middle_test(2, 1, 3)
###Output
Traceback (most recent call last):
File "/var/folders/n2/xd9445p97rb3xh7m1dfx8_4h0006ts/T/ipykernel_46423/3661663124.py", line 2, in <module>
middle_test(2, 1, 3)
File "/var/folders/n2/xd9445p97rb3xh7m1dfx8_4h0006ts/T/ipykernel_46423/40742806.py", line 3, in middle_test
assert m == sorted([x, y, z])[1]
AssertionError (expected)
###Markdown
The function `middle_passing_testcase()` searches and returns a triple `x`, `y`, `z` that causes `middle_test()` to pass.
###Code
def middle_passing_testcase() -> Tuple[int, int, int]:
while True:
try:
x, y, z = middle_testcase()
middle_test(x, y, z)
return x, y, z
except AssertionError:
pass
(x, y, z) = middle_passing_testcase()
m = middle(x, y, z)
print(f"middle({x}, {y}, {z}) = {m}")
###Output
middle(1, 6, 1) = 1
###Markdown
The function `middle_failing_testcase()` does the same; but its triple `x`, `y`, `z` causes `middle_test()` to fail.
###Code
def middle_failing_testcase() -> Tuple[int, int, int]:
while True:
try:
x, y, z = middle_testcase()
middle_test(x, y, z)
except AssertionError:
return x, y, z
(x, y, z) = middle_failing_testcase()
m = middle(x, y, z)
print(f"middle({x}, {y}, {z}) = {m}")
###Output
middle(5, 2, 6) = 2
###Markdown
With these, we can define two sets of test cases, each with 100 inputs.
###Code
MIDDLE_TESTS = 100
MIDDLE_PASSING_TESTCASES = [middle_passing_testcase()
for i in range(MIDDLE_TESTS)]
MIDDLE_FAILING_TESTCASES = [middle_failing_testcase()
for i in range(MIDDLE_TESTS)]
###Output
_____no_output_____
###Markdown
Let us run the `OchiaiDebugger` with these two test sets.
###Code
ochiai_middle = OchiaiDebugger()
for x, y, z in MIDDLE_PASSING_TESTCASES:
with ochiai_middle.collect_pass():
middle(x, y, z)
for x, y, z in MIDDLE_FAILING_TESTCASES:
with ochiai_middle.collect_fail():
middle(x, y, z)
ochiai_middle
###Output
_____no_output_____
###Markdown
We see that the "culprit" line is still the most likely to be fixed, but the two conditions leading to the error (`x < y` and `x < z`) are also listed as potentially faulty. That is because the error might also be fixed be changing these conditions – although this would result in a more complex fix. Other Events besides CoverageWe close this chapter with two directions for further thought. If you wondered why in the above code, we were mostly talking about `events` rather than lines covered, that is because our framework allows for tracking arbitrary events, not just coverage. In fact, any data item a collector can extract from the execution can be used for correlation analysis. (It may not be so easily visualized, though.) Here's an example. We define a `ValueCollector` class that collects pairs of (local) variables and their values during execution. Its `events()` method then returns the set of all these pairs.
###Code
class ValueCollector(Collector):
""""A class to collect local variables and their values."""
def __init__(self) -> None:
"""Constructor."""
super().__init__()
self.vars: Set[str] = set()
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
local_vars = frame.f_locals
for var in local_vars:
value = local_vars[var]
self.vars.add(f"{var} = {repr(value)}")
def events(self) -> Set[str]:
"""A set of (variable, value) pairs observed"""
return self.vars
###Output
_____no_output_____
###Markdown
If we apply this collector on our set of HTML test cases, these are all the events that we obtain – essentially all variables and all values ever seen:
###Code
debugger = test_debugger_html(ContinuousSpectrumDebugger(ValueCollector))
for event in debugger.all_events():
print(event)
###Output
tag = False
quote = False
s = 'abc'
out = 'a'
quote = True
out = ''
c = '/'
s = '<b>abc</b>'
s = '"abc"'
c = '>'
c = 'c'
c = '<'
out = 'abc'
c = '"'
c = 'b'
c = 'a'
out = 'ab'
tag = True
###Markdown
However, some of these events only occur in the failing run:
###Code
for event in debugger.only_fail_events():
print(event)
###Output
c = '"'
quote = True
s = '"abc"'
###Markdown
Some of these differences are spurious – the string `"abc"` (with quotes) only occurs in the failing run – but others, such as `quote` being True and `c` containing a quote character, are actually relevant for explaining when the failure comes to be. We can even visualize the suspiciousness of the individual events, setting the (so far undiscussed) `color` flag for producing an event table:
###Code
debugger.event_table(color=True, args=True)
###Output
_____no_output_____
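###Markdown
Along the same lines, events need not even be concrete values. Here is a small sketch of ours (the class `LengthCollector` and the variable `length_debugger` are not part of the chapter's infrastructure) that records a more abstract property – the length of each string variable – instead of its concrete value:
###Code
class LengthCollector(ValueCollector):
    """Sketch: collect the lengths of string variables rather than their values."""

    def collect(self, frame: FrameType, event: str, arg: Any) -> None:
        for var, value in frame.f_locals.items():
            if isinstance(value, str):
                self.vars.add(f"len({var}) == {len(value)}")

length_debugger = test_debugger_html(ContinuousSpectrumDebugger(LengthCollector))
length_debugger.only_fail_events()
###Output
_____no_output_____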
###Markdown
There are many ways one can continue from here.

* Rather than checking for concrete values, one could check for more _abstract properties_, for instance – what is the sign of the value? What is the length of the string?
* One could check for specifics of the _control flow_ – is the loop taken? How many times?
* One could check for specifics of the _information flow_ – which values flow from one variable to another?

There are lots of properties that all could be related to failures – and if we happen to check for the right one, we may obtain a much crisper definition of what causes the failure. We will come up with more ideas on properties to check when it comes to [mining specifications](SpecificationMining.ipynb).

Training Classifiers

The metrics we have discussed so far are pretty _generic_ – that is, they are fixed no matter how the actual event space is structured. The field of _machine learning_ has come up with techniques that learn _classifiers_ from a given set of data – classifiers that are trained from labeled data and then can predict labels for new data sets. In our case, the labels are test outcomes (PASS and FAIL), whereas the data would be features of the events observed. A classifier by itself is not immediately useful for debugging (although it could predict whether future inputs will fail or not). Some classifiers, however, have great _diagnostic_ quality; that is, they can _explain_ how their classification comes to be.

[Decision trees](https://scikit-learn.org/stable/modules/tree.html) fall into this very category. A decision tree contains a number of _nodes_, each one associated with a predicate. Depending on whether the predicate is true or false, we follow the given "true" or "false" branch to end up in the next node, which again contains a predicate. Eventually, we end up in the outcome predicted by the tree. The neat thing is that the node predicates actually give important hints on the circumstances that are _most relevant_ for deciding the outcome.

Let us illustrate this with an example. We build a class `ClassifyingDebugger` that trains a decision tree from the events collected. To this end, we need to set up our input data such that it can be fed into a classifier. We start with identifying our _samples_ (runs) and the respective _labels_ (outcomes). All values have to be encoded into numerical values.
###Code
class ClassifyingDebugger(DifferenceDebugger):
"""A debugger implementing a decision tree for events"""
PASS_VALUE = +1.0
FAIL_VALUE = -1.0
def samples(self) -> Dict[str, float]:
samples = {}
for collector in self.pass_collectors():
samples[collector.id()] = self.PASS_VALUE
        for collector in self.fail_collectors():
samples[collector.id()] = self.FAIL_VALUE
return samples
debugger = test_debugger_html(ClassifyingDebugger())
debugger.samples()
###Output
_____no_output_____
###Markdown
Next, we identify the _features_, which in our case is the set of lines executed in each sample:
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def features(self) -> Dict[str, Any]:
features = {}
        for collector in self.pass_collectors():
            features[collector.id()] = collector.events()
        for collector in self.fail_collectors():
            features[collector.id()] = collector.events()
return features
debugger = test_debugger_html(ClassifyingDebugger())
debugger.features()
###Output
_____no_output_____
###Markdown
All our features have names, which must be strings.
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def feature_names(self) -> List[str]:
return [repr(feature) for feature in self.all_events()]
debugger = test_debugger_html(ClassifyingDebugger())
debugger.feature_names()
###Output
_____no_output_____
###Markdown
Next, we define the _shape_ for an individual sample, which is a value of +1 or -1 for each feature seen (i.e., +1 if the line was covered, -1 if not).
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def shape(self, sample: str) -> List[float]:
x = []
features = self.features()
for f in self.all_events():
if f in features[sample]:
x += [+1.0]
else:
x += [-1.0]
return x
debugger = test_debugger_html(ClassifyingDebugger())
debugger.shape("remove_html_markup(s='abc')")
###Output
_____no_output_____
###Markdown
Our input X for the classifier now is a list of such shapes, one for each sample.
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def X(self) -> List[List[float]]:
X = []
samples = self.samples()
for key in samples:
X += [self.shape(key)]
return X
debugger = test_debugger_html(ClassifyingDebugger())
debugger.X()
###Output
_____no_output_____
###Markdown
Our input Y for the classifier, in contrast, is the list of labels, again indexed by sample.
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def Y(self) -> List[float]:
Y = []
samples = self.samples()
for key in samples:
Y += [samples[key]]
return Y
debugger = test_debugger_html(ClassifyingDebugger())
debugger.Y()
###Output
_____no_output_____
###Markdown
We now have all our data ready to be fit into a tree classifier. The method `classifier()` creates and returns the (tree) classifier for the observed runs.
###Code
from sklearn.tree import DecisionTreeClassifier, export_text, export_graphviz
class ClassifyingDebugger(ClassifyingDebugger):
def classifier(self) -> DecisionTreeClassifier:
classifier = DecisionTreeClassifier()
classifier = classifier.fit(self.X(), self.Y())
return classifier
###Output
_____no_output_____
###Markdown
We define a special method to show classifiers:
###Code
import graphviz
class ClassifyingDebugger(ClassifyingDebugger):
def show_classifier(self, classifier: DecisionTreeClassifier) -> Any:
dot_data = export_graphviz(classifier, out_file=None,
filled=False, rounded=True,
feature_names=self.feature_names(),
class_names=["FAIL", "PASS"],
label='none',
node_ids=False,
impurity=False,
proportion=True,
special_characters=True)
return graphviz.Source(dot_data)
###Output
_____no_output_____
###Markdown
This is the tree we get for our `remove_html_markup()` tests. The top predicate is whether the "culprit" line was executed (-1 means no, +1 means yes). If not (-1), the outcome is PASS. Otherwise, the outcome is FAIL.
###Code
debugger = test_debugger_html(ClassifyingDebugger())
classifier = debugger.classifier()
debugger.show_classifier(classifier)
###Output
_____no_output_____
###Markdown
We can even use our classifier to predict the outcome of additional runs. If, for instance, we execute all lines except for, say, Line 7, 9, and 11, our tree classifier would predict failure – because the "culprit" line 12 is executed.
###Code
classifier.predict([[1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1]])
###Output
_____no_output_____
###Markdown
Again, there are many ways to continue from here. Which events should we train the classifier from? How do classifiers compare in their performance and diagnostic quality? There are lots of possibilities left to explore, and we only begin to realize the potential for automated debugging.

Synopsis

This chapter introduces classes and techniques for _statistical debugging_ – that is, correlating specific events, such as lines covered, with passing and failing outcomes.

To make use of the code in this chapter, use one of the provided `StatisticalDebugger` subclasses such as `TarantulaDebugger` or `OchiaiDebugger`. Both are instantiated with a `Collector` denoting the type of events you want to correlate outcomes with. The default is `CoverageCollector`, collecting line coverage.

Collecting Events from Calls

To collect events from calls that are labeled manually, use
###Code
debugger = TarantulaDebugger()
with debugger.collect_pass():
remove_html_markup("abc")
with debugger.collect_pass():
remove_html_markup('<b>abc</b>')
with debugger.collect_fail():
remove_html_markup('"abc"')
###Output
_____no_output_____
###Markdown
Within each `with` block, the _first function call_ is collected and tracked for coverage. (Note that _only_ the first call is tracked.)

Collecting Events from Tests

To collect events from _tests_ that use exceptions to indicate failure, use the simpler `with` form:
###Code
debugger = TarantulaDebugger()
with debugger:
remove_html_markup("abc")
with debugger:
remove_html_markup('<b>abc</b>')
with debugger:
remove_html_markup('"abc"')
assert False # raise an exception
###Output
_____no_output_____
###Markdown
`with` blocks that raise an exception will be classified as failing, blocks that do not will be classified as passing. Note that exceptions raised are "swallowed" by the debugger.

Visualizing Events as a Table

After collecting events, you can print out the observed events – in this case, line numbers – in a table, showing in which runs they occurred (`X`), and with colors highlighting the suspiciousness of the event. A "red" event means that the event predominantly occurs in failing runs.
###Code
debugger.event_table(args=True, color=True)
###Output
_____no_output_____
###Markdown
Visualizing Suspicious Code

If you collected coverage with `CoverageCollector`, you can also visualize the code with similar colors, highlighting suspicious lines:
###Code
debugger
###Output
_____no_output_____
###Markdown
Ranking Events

The method `rank()` returns a ranked list of events, starting with the most suspicious. This is useful for automated techniques that need potential defect locations.
###Code
debugger.rank()
###Output
_____no_output_____
###Markdown
Classes and Methods

Here are all classes defined in this chapter:
###Code
# ignore
from ClassDiagram import display_class_hierarchy
# ignore
display_class_hierarchy([TarantulaDebugger, OchiaiDebugger],
abstract_classes=[
StatisticalDebugger,
DifferenceDebugger,
RankingDebugger
],
public_methods=[
StatisticalDebugger.__init__,
StatisticalDebugger.all_events,
StatisticalDebugger.event_table,
StatisticalDebugger.function,
StatisticalDebugger.coverage,
StatisticalDebugger.covered_functions,
DifferenceDebugger.__enter__,
DifferenceDebugger.__exit__,
DifferenceDebugger.all_pass_events,
DifferenceDebugger.all_fail_events,
DifferenceDebugger.collect_pass,
DifferenceDebugger.collect_fail,
DifferenceDebugger.only_pass_events,
DifferenceDebugger.only_fail_events,
SpectrumDebugger.code,
SpectrumDebugger.__repr__,
SpectrumDebugger.__str__,
SpectrumDebugger._repr_html_,
ContinuousSpectrumDebugger.code,
ContinuousSpectrumDebugger.__repr__,
RankingDebugger.rank
],
project='debuggingbook')
# ignore
display_class_hierarchy([CoverageCollector, ValueCollector],
public_methods=[
Tracer.__init__,
Tracer.__enter__,
Tracer.__exit__,
Tracer.changed_vars, # type: ignore
Collector.__init__,
Collector.__repr__,
Collector.function,
Collector.args,
Collector.argstring,
Collector.exception,
Collector.id,
Collector.collect,
CoverageCollector.coverage,
CoverageCollector.covered_functions,
CoverageCollector.events,
ValueCollector.__init__,
ValueCollector.events
],
project='debuggingbook')
###Output
_____no_output_____
###Markdown
Lessons Learned

* _Correlations_ between execution events and outcomes (pass/fail) can provide important hints for debugging
* Events occurring only (or mostly) during failing runs can be _highlighted_ and _ranked_ to guide the search
* Important hints include whether the _execution of specific code locations_ correlates with failure

Next Steps

Chapters that build on this one include

* [how to determine invariants that correlate with failures](DynamicInvariants.ipynb)
* [how to automatically repair programs](Repairer.ipynb)

Background

The seminal works on statistical debugging are two papers:

* "Visualization of Test Information to Assist Fault Localization" \cite{Jones2002} by James Jones, Mary Jean Harrold, and John Stasko, introducing Tarantula and its visualization. The paper won an ACM SIGSOFT 10-year impact award.
* "Bug Isolation via Remote Program Sampling" \cite{Liblit2003} by Ben Liblit, Alex Aiken, Alice X. Zheng, and Michael I. Jordan, introducing the term "Statistical debugging". Liblit won the ACM Doctoral Dissertation Award for this work.

The Ochiai metric for fault localization was introduced by \cite{Abreu2009}. The overview by Wong et al. \cite{Wong2016} gives a comprehensive overview on the field of statistical fault localization.

The study by Parnin and Orso \cite{Parnin2011} is a must to understand the limitations of the technique.

Exercises

Exercise 1: A Postcondition for Middle

What would be a postcondition for `middle()`? How can you check it?

**Solution.** A simple postcondition for `middle()` would be

```python
assert m == sorted([x, y, z])[1]
```

where `m` is the value returned by `middle()`. `sorted()` sorts the given list, and the index `[1]` returns, well, the middle element. (This might also be a much shorter, but possibly slightly more expensive implementation for `middle()`.) Since `middle()` has several `return` statements, the easiest way to check the result is to create a wrapper around `middle()`:
###Code
def middle_checked(x, y, z): # type: ignore
m = middle(x, y, z)
assert m == sorted([x, y, z])[1]
return m
###Output
_____no_output_____
###Markdown
`middle_checked()` catches the error:
###Code
from ExpectError import ExpectError
with ExpectError():
m = middle_checked(2, 1, 3)
###Output
Traceback (most recent call last):
File "/var/folders/n2/xd9445p97rb3xh7m1dfx8_4h0006ts/T/ipykernel_46423/3016629944.py", line 2, in <module>
m = middle_checked(2, 1, 3)
File "/var/folders/n2/xd9445p97rb3xh7m1dfx8_4h0006ts/T/ipykernel_46423/1374660292.py", line 3, in middle_checked
assert m == sorted([x, y, z])[1]
AssertionError (expected)
###Markdown
Statistical Debugging

In this chapter, we introduce _statistical debugging_ – the idea that specific events during execution could be _statistically correlated_ with failures. We start with coverage of individual lines and then proceed towards further execution features.
###Code
from bookutils import YouTubeVideo
YouTubeVideo("UNuso00zYiI")
###Output
_____no_output_____
###Markdown
**Prerequisites**

* You should have read the [chapter on tracing executions](Tracer.ipynb).
###Code
import bookutils
###Output
_____no_output_____
###Markdown
Synopsis

To [use the code provided in this chapter](Importing.ipynb), write

```python
>>> from debuggingbook.StatisticalDebugger import <identifier>
```

and then make use of the following features.

This chapter introduces classes and techniques for _statistical debugging_ – that is, correlating specific events, such as lines covered, with passing and failing outcomes.

To make use of the code in this chapter, use one of the provided `StatisticalDebugger` subclasses such as `TarantulaDebugger` or `OchiaiDebugger`. Both are instantiated with a `Collector` denoting the type of events you want to correlate outcomes with. The default is `CoverageCollector`, collecting line coverage.

Collecting Events from Calls

To collect events from calls that are labeled manually, use

```python
>>> debugger = TarantulaDebugger()
>>> with debugger.collect_pass():
>>>     remove_html_markup("abc")
>>> with debugger.collect_pass():
>>>     remove_html_markup('<b>abc</b>')
>>> with debugger.collect_fail():
>>>     remove_html_markup('"abc"')
```

Within each `with` block, the _first function call_ is collected and tracked for coverage. (Note that _only_ the first call is tracked.)

Collecting Events from Tests

To collect events from _tests_ that use exceptions to indicate failure, use the simpler `with` form:

```python
>>> debugger = TarantulaDebugger()
>>> with debugger:
>>>     remove_html_markup("abc")
>>> with debugger:
>>>     remove_html_markup('<b>abc</b>')
>>> with debugger:
>>>     remove_html_markup('"abc"')
>>>     assert False  # raise an exception
```

`with` blocks that raise an exception will be classified as failing, blocks that do not will be classified as passing. Note that exceptions raised are "swallowed" by the debugger.

Visualizing Events as a Table

After collecting events, you can print out the observed events – in this case, line numbers – in a table, showing in which runs they occurred (`X`), and with colors highlighting the suspiciousness of the event. A "red" event means that the event predominantly occurs in failing runs.

```python
>>> debugger.event_table(args=True, color=True)
```

| `remove_html_markup` | `s='abc'` | `s='<b>abc</b>'` | `s='"abc"'` |
| --------------------- | ---- | ---- | ---- |
| remove_html_markup:1 | X | X | X |
| remove_html_markup:2 | X | X | X |
| remove_html_markup:3 | X | X | X |
| remove_html_markup:4 | X | X | X |
| remove_html_markup:6 | X | X | X |
| remove_html_markup:7 | X | X | X |
| remove_html_markup:8 | - | X | - |
| remove_html_markup:9 | X | X | X |
| remove_html_markup:10 | - | X | - |
| remove_html_markup:11 | X | X | X |
| remove_html_markup:12 | - | - | X |
| remove_html_markup:13 | X | X | X |
| remove_html_markup:14 | X | X | X |
| remove_html_markup:16 | X | X | X |

Visualizing Suspicious Code

If you collected coverage with `CoverageCollector`, you can also visualize the code with similar colors, highlighting suspicious lines:

```python
>>> debugger
```

(Rendered as an HTML listing of `remove_html_markup()` in which each line is colored by suspiciousness – the "culprit" line 12 `quote = not quote` at 100%, lines 8 and 10 executed only in passing runs at 0%, and all other executed lines at 50%.)

Ranking Events

The method `rank()` returns a ranked list of events, starting with the most suspicious. This is useful for automated techniques that need potential defect locations.

```python
>>> debugger.rank()
[('remove_html_markup', 12), ('remove_html_markup', 2), ('remove_html_markup', 13), ('remove_html_markup', 6), ('remove_html_markup', 4), ('remove_html_markup', 3), ('remove_html_markup', 1), ('remove_html_markup', 14), ('remove_html_markup', 7), ('remove_html_markup', 11), ('remove_html_markup', 16), ('remove_html_markup', 9), ('remove_html_markup', 10), ('remove_html_markup', 8)]
```

Classes and Methods

Here are all classes defined in this chapter:

Introduction

The idea behind _statistical debugging_ is fairly simple. We have a program that sometimes passes and sometimes fails. This outcome can be _correlated_ with events that precede it – properties of the input, properties of the execution, properties of the program state. If we, for instance, can find that "the program always fails when Line 123 is executed, and it always passes when Line 123 is _not_ executed", then we have a strong correlation between Line 123 being executed and failure.

Such _correlation_ does not necessarily mean _causation_. For this, we would have to prove that executing Line 123 _always_ leads to failure, and that _not_ executing it does not lead to (this) failure. Also, a correlation (or even a causation) does not mean that Line 123 contains the defect – for this, we would have to show that it actually is an error. Still, correlations make excellent hints when it comes to searching for failure causes – in all generality, if you let your search be guided by _events that correlate with failures_, you are more likely to find _important hints on how the failure comes to be_.

Collecting Events

How can we determine events that correlate with failure? We start with a general mechanism to actually _collect_ events during execution. The abstract `Collector` class provides

* a `collect()` method made for collecting events, called from the `traceit()` tracer; and
* an `events()` method made for retrieving these events.

Both of these are _abstract_ and will be defined further in subclasses.
###Code
from Tracer import Tracer
# ignore
from typing import Any, Callable, Optional, Type, Tuple
from typing import Dict, Set, List, TypeVar, Union
from types import FrameType, TracebackType
class Collector(Tracer):
"""A class to record events during execution."""
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
"""Collecting function. To be overridden in subclasses."""
pass
def events(self) -> Set:
"""Return a collection of events. To be overridden in subclasses."""
return set()
def traceit(self, frame: FrameType, event: str, arg: Any) -> None:
self.collect(frame, event, arg)
###Output
_____no_output_____
###Markdown
A `Collector` class is used like `Tracer`, using a `with` statement. Let us apply it on the buggy variant of `remove_html_markup()` from the [Introduction to Debugging](Intro_Debugging.ipynb):
###Code
def remove_html_markup(s): # type: ignore
tag = False
quote = False
out = ""
for c in s:
if c == '<' and not quote:
tag = True
elif c == '>' and not quote:
tag = False
elif c == '"' or c == "'" and tag:
quote = not quote
elif not tag:
out = out + c
return out
with Collector() as c:
out = remove_html_markup('"abc"')
out
###Output
_____no_output_____
###Markdown
There's not much we can do with our collector, as the `collect()` and `events()` methods are still empty. However, we can introduce an `id()` method which returns a string identifying the collector. This string is defined from the _first function call_ encountered.
###Code
from types import FunctionType
Coverage = Set[Tuple[Callable, int]]
class Collector(Collector):
def __init__(self) -> None:
"""Constructor."""
self._function: Optional[Callable] = None
self._args: Optional[Dict[str, Any]] = None
self._argstring: Optional[str] = None
self._exception: Optional[Type] = None
self.items_to_ignore: List[Union[Type, Callable]] = [self.__class__]
def traceit(self, frame: FrameType, event: str, arg: Any) -> None:
"""
Tracing function.
Saves the first function and calls collect().
"""
for item in self.items_to_ignore:
if (isinstance(item, type) and 'self' in frame.f_locals and
isinstance(frame.f_locals['self'], item)):
# Ignore this class
return
if item.__name__ == frame.f_code.co_name:
# Ignore this function
return
if self._function is None and event == 'call':
# Save function
self._function = self.create_function(frame)
self._args = frame.f_locals.copy()
self._argstring = ", ".join([f"{var}={repr(self._args[var])}"
for var in self._args])
self.collect(frame, event, arg)
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
"""Collector function. To be overloaded in subclasses."""
pass
def id(self) -> str:
"""Return an identifier for the collector,
created from the first call"""
return f"{self.function().__name__}({self.argstring()})"
def function(self) -> Callable:
"""Return the function from the first call, as a function object"""
if not self._function:
raise ValueError("No call collected")
return self._function
def argstring(self) -> str:
"""
Return the list of arguments from the first call,
as a printable string
"""
if not self._argstring:
raise ValueError("No call collected")
return self._argstring
def args(self) -> Dict[str, Any]:
"""Return a dict of argument names and values from the first call"""
if not self._args:
raise ValueError("No call collected")
return self._args
def exception(self) -> Optional[Type]:
"""Return the exception class from the first call,
or None if no exception was raised."""
return self._exception
def __repr__(self) -> str:
"""Return a string representation of the collector"""
# We use the ID as default representation when printed
return self.id()
def covered_functions(self) -> Set[Callable]:
"""Set of covered functions. To be overloaded in subclasses."""
return set()
def coverage(self) -> Coverage:
"""
Return a set (function, lineno) with locations covered.
To be overloaded in subclasses.
"""
return set()
###Output
_____no_output_____
###Markdown
Here's how the collector works. We use a `with` clause to collect details on a function call:
###Code
with Collector() as c:
remove_html_markup('abc')
###Output
_____no_output_____
###Markdown
We can now retrieve details such as the function called...
###Code
c.function()
###Output
_____no_output_____
###Markdown
... or its arguments, as a name/value dictionary.
###Code
c.args()
###Output
_____no_output_____
###Markdown
The `id()` method returns a printable representation of the call:
###Code
c.id()
###Output
_____no_output_____
###Markdown
The `argstring()` method does the same for the argument string only.
###Code
c.argstring()
###Output
_____no_output_____
###Markdown
With this, we can collect the basic information to identify calls – such that we can later correlate their events with success or failure.

Error Prevention

While collecting, we'd like to avoid collecting events in the collection infrastructure. The `items_to_ignore` attribute takes care of this.
###Code
class Collector(Collector):
def add_items_to_ignore(self,
items_to_ignore: List[Union[Type, Callable]]) \
-> None:
"""
Define additional classes and functions to ignore during collection
(typically `Debugger` classes using these collectors).
"""
self.items_to_ignore += items_to_ignore
###Output
_____no_output_____
###Markdown
If we exit a block without having collected anything, that's likely an error.
###Code
class Collector(Collector):
def __exit__(self, exc_tp: Type, exc_value: BaseException,
exc_traceback: TracebackType) -> Optional[bool]:
"""Exit the `with` block."""
ret = super().__exit__(exc_tp, exc_value, exc_traceback)
if not self._function:
if exc_tp:
return False # re-raise exception
else:
raise ValueError("No call collected")
return ret
###Output
_____no_output_____
###Markdown
Collecting CoverageSo far, our `Collector` class does not collect any events. Let us extend it such that it collects _coverage_ information – that is, the set of locations executed. To this end, we introduce a `CoverageCollector` subclass which saves the coverage in a set containing functions and line numbers.
###Code
from types import FrameType
from StackInspector import StackInspector
class CoverageCollector(Collector, StackInspector):
"""A class to record covered locations during execution."""
def __init__(self) -> None:
"""Constructor."""
super().__init__()
self._coverage: Coverage = set()
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
"""
Save coverage for an observed event.
"""
name = frame.f_code.co_name
function = self.search_func(name, frame)
if function is None:
function = self.create_function(frame)
location = (function, frame.f_lineno)
self._coverage.add(location)
###Output
_____no_output_____
###Markdown
We also override `events()` such that it returns the set of covered locations.
###Code
class CoverageCollector(CoverageCollector):
def events(self) -> Set[Tuple[str, int]]:
"""
Return the set of locations covered.
Each location comes as a pair (`function_name`, `lineno`).
"""
return {(func.__name__, lineno) for func, lineno in self._coverage}
###Output
_____no_output_____
###Markdown
The methods `coverage()` and `covered_functions()` allow precise access to the coverage obtained.
###Code
class CoverageCollector(CoverageCollector):
def covered_functions(self) -> Set[Callable]:
"""Return a set with all functions covered."""
return {func for func, lineno in self._coverage}
def coverage(self) -> Coverage:
"""Return a set (function, lineno) with all locations covered."""
return self._coverage
###Output
_____no_output_____
###Markdown
Here is how we can use `CoverageCollector` to determine the lines executed during a run of `remove_html_markup()`:
###Code
with CoverageCollector() as c:
remove_html_markup('abc')
c.events()
###Output
_____no_output_____
###Markdown
Sets of line numbers alone are not too revealing. They provide more insights if we actually list the code, highlighting these numbers:
###Code
import inspect
from bookutils import getsourcelines # like inspect.getsourcelines(), but in color
def code_with_coverage(function: Callable, coverage: Coverage) -> None:
source_lines, starting_line_number = \
getsourcelines(function)
line_number = starting_line_number
for line in source_lines:
marker = '*' if (function, line_number) in coverage else ' '
print(f"{line_number:4} {marker} {line}", end='')
line_number += 1
code_with_coverage(remove_html_markup, c.coverage())
###Output
   1 * def remove_html_markup(s):  # type: ignore
   2 *     tag = False
   3 *     quote = False
   4 *     out = ""
   5
   6 *     for c in s:
   7 *         if c == '<' and not quote:
   8               tag = True
   9 *         elif c == '>' and not quote:
  10               tag = False
  11 *         elif c == '"' or c == "'" and tag:
  12               quote = not quote
  13 *         elif not tag:
  14 *             out = out + c
  15
  16 *     return out
###Markdown
Remember that the input `s` was `"abc"`? In this listing, we can see which lines were covered and which lines were not. From the listing already, we can see that `s` has neither tags nor quotes. Such coverage computation plays a big role in _testing_, as one wants tests to cover as many different aspects of program execution (and notably code) as possible. But also during debugging, code coverage is essential: If some code was not even executed in the failing run, then any change to it will have no effect.
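As a quick, rough illustration of how such coverage data can be put to use, here is a small sketch (our own addition, not part of the `Collector` API) that computes the fraction of `remove_html_markup()` lines covered by the run above, using the collector `c` from before:
```python
import inspect

# Lines of remove_html_markup() that the run actually covered
covered_lines = {lineno for func, lineno in c.coverage()
                 if func.__name__ == 'remove_html_markup'}
source_lines, _ = inspect.getsourcelines(remove_html_markup)
# Note: len(source_lines) also counts blank lines, so this ratio is approximate.
print(f"{len(covered_lines)} of {len(source_lines)} lines covered "
      f"({len(covered_lines) / len(source_lines):.0%})")
```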
###Code
from bookutils import quiz
quiz('Let the input be `"<b>Don\'t do this!</b>"`. '
"Which of these lines are executed? Use the code to find out!",
[
"`tag = True`",
"`tag = False`",
"`quote = not quote`",
"`out = out + c`"
], "[ord(c) - ord('a') - 1 for c in 'cdf']")
###Output
_____no_output_____
###Markdown
To find the solution, try this out yourself:
###Code
with CoverageCollector() as c:
remove_html_markup("<b>Don't do this!</b>")
# code_with_coverage(remove_html_markup, c.coverage())
###Output
_____no_output_____
###Markdown
Computing DifferencesLet us get back to the idea that we want to _correlate_ events with passing and failing outcomes. For this, we need to examine events in both _passing_ and _failing_ runs, and determine their _differences_ – since it is these differences we want to associate with their respective outcome. A Base Class for Statistical DebuggingThe `StatisticalDebugger` base class takes a collector class (such as `CoverageCollector`). Its `collect()` method creates a new collector of that very class, which will be maintained by the debugger. As argument, `collect()` takes a string characterizing the outcome (such as `'PASS'` or `'FAIL'`). This is how one would use it:```pythondebugger = StatisticalDebugger()with debugger.collect('PASS'): some_passing_run()with debugger.collect('PASS'): another_passing_run()with debugger.collect('FAIL'): some_failing_run()``` Let us implement `StatisticalDebugger`. The base class gets a collector class as argument:
###Code
class StatisticalDebugger:
"""A class to collect events for multiple outcomes."""
def __init__(self, collector_class: Type = CoverageCollector, log: bool = False):
"""Constructor. Use instances of `collector_class` to collect events."""
self.collector_class = collector_class
self.collectors: Dict[str, List[Collector]] = {}
self.log = log
###Output
_____no_output_____
###Markdown
The `collect()` method creates (and stores) a collector for the given outcome, using the given outcome to characterize the run. Any additional arguments are passed to the collector.
###Code
class StatisticalDebugger(StatisticalDebugger):
def collect(self, outcome: str, *args: Any, **kwargs: Any) -> Collector:
"""Return a collector for the given outcome.
Additional args are passed to the collector."""
collector = self.collector_class(*args, **kwargs)
collector.add_items_to_ignore([self.__class__])
return self.add_collector(outcome, collector)
def add_collector(self, outcome: str, collector: Collector) -> Collector:
if outcome not in self.collectors:
self.collectors[outcome] = []
self.collectors[outcome].append(collector)
return collector
###Output
_____no_output_____
###Markdown
The `all_events()` method produces a union of all events observed. If an outcome is given, it produces a union of all events with that outcome:
###Code
class StatisticalDebugger(StatisticalDebugger):
def all_events(self, outcome: Optional[str] = None) -> Set[Any]:
"""Return a set of all events observed."""
all_events = set()
if outcome:
if outcome in self.collectors:
for collector in self.collectors[outcome]:
all_events.update(collector.events())
else:
for outcome in self.collectors:
for collector in self.collectors[outcome]:
all_events.update(collector.events())
return all_events
###Output
_____no_output_____
###Markdown
Here's a simple example of `StatisticalDebugger` in action:
###Code
s = StatisticalDebugger()
with s.collect('PASS'):
remove_html_markup("abc")
with s.collect('PASS'):
remove_html_markup('<b>abc</b>')
with s.collect('FAIL'):
remove_html_markup('"abc"')
###Output
_____no_output_____
###Markdown
The method `all_events()` returns all events collected:
###Code
s.all_events()
###Output
_____no_output_____
###Markdown
If given an outcome as argument, we obtain all events with the given outcome.
###Code
s.all_events('FAIL')
###Output
_____no_output_____
###Markdown
The attribute `collectors` maps outcomes to lists of collectors:
###Code
s.collectors
###Output
_____no_output_____
###Markdown
Here's the collector of the one (and first) passing run:
###Code
s.collectors['PASS'][0].id()
s.collectors['PASS'][0].events()
###Output
_____no_output_____
###Markdown
To better highlight the differences between the collected events, we introduce a method `event_table()` that prints out whether an event took place in a run. Excursion: Printing an Event Table
###Code
from IPython.display import Markdown
import html
class StatisticalDebugger(StatisticalDebugger):
def function(self) -> Optional[Callable]:
"""
Return the entry function from the events observed,
or None if ambiguous.
"""
names_seen = set()
functions = []
for outcome in self.collectors:
for collector in self.collectors[outcome]:
# We may have multiple copies of the function,
# but sharing the same name
func = collector.function()
if func.__name__ not in names_seen:
functions.append(func)
names_seen.add(func.__name__)
if len(functions) != 1:
return None # ambiguous
return functions[0]
def covered_functions(self) -> Set[Callable]:
"""Return a set of all functions observed."""
functions = set()
for outcome in self.collectors:
for collector in self.collectors[outcome]:
functions |= collector.covered_functions()
return functions
def coverage(self) -> Coverage:
"""Return a set of all (functions, line_numbers) observed"""
coverage = set()
for outcome in self.collectors:
for collector in self.collectors[outcome]:
coverage |= collector.coverage()
return coverage
def color(self, event: Any) -> Optional[str]:
"""
Return a color for the given event, or None.
To be overloaded in subclasses.
"""
return None
def tooltip(self, event: Any) -> Optional[str]:
"""
Return a tooltip string for the given event, or None.
To be overloaded in subclasses.
"""
return None
def event_str(self, event: Any) -> str:
"""Format the given event. To be overloaded in subclasses."""
if isinstance(event, str):
return event
if isinstance(event, tuple):
return ":".join(self.event_str(elem) for elem in event)
return str(event)
def event_table_text(self, *, args: bool = False, color: bool = False) -> str:
"""
Print out a table of events observed.
If `args` is True, use arguments as headers.
If `color` is True, use colors.
"""
sep = ' | '
all_events = self.all_events()
longest_event = max(len(f"{self.event_str(event)}")
for event in all_events)
out = ""
# Header
if args:
out += '| '
func = self.function()
if func:
out += '`' + func.__name__ + '`'
out += sep
for name in self.collectors:
for collector in self.collectors[name]:
out += '`' + collector.argstring() + '`' + sep
out += '\n'
else:
out += '| ' + ' ' * longest_event + sep
for name in self.collectors:
for i in range(len(self.collectors[name])):
out += name + sep
out += '\n'
out += '| ' + '-' * longest_event + sep
for name in self.collectors:
for i in range(len(self.collectors[name])):
out += '-' * len(name) + sep
out += '\n'
# Data
for event in sorted(all_events):
event_name = self.event_str(event).rjust(longest_event)
tooltip = self.tooltip(event)
if tooltip:
title = f' title="{tooltip}"'
else:
title = ''
if color:
color_name = self.color(event)
if color_name:
event_name = \
f'<samp style="background-color: {color_name}"{title}>' \
f'{html.escape(event_name)}' \
f'</samp>'
out += f"| {event_name}" + sep
for name in self.collectors:
for collector in self.collectors[name]:
out += ' ' * (len(name) - 1)
if event in collector.events():
out += "X"
else:
out += "-"
out += sep
out += '\n'
return out
def event_table(self, **_args: Any) -> Any:
"""Print out event table in Markdown format."""
return Markdown(self.event_table_text(**_args))
def __repr__(self) -> str:
return self.event_table_text()
def _repr_markdown_(self) -> str:
return self.event_table_text(args=True, color=True)
###Output
_____no_output_____
###Markdown
End of Excursion
###Code
s = StatisticalDebugger()
with s.collect('PASS'):
remove_html_markup("abc")
with s.collect('PASS'):
remove_html_markup('<b>abc</b>')
with s.collect('FAIL'):
remove_html_markup('"abc"')
s.event_table(args=True)
quiz("How many lines are executed in the failing run only?",
[
"One",
"Two",
"Three"
], 'len([12])')
###Output
_____no_output_____
###Markdown
Indeed, Line 12 executed in the failing run only would be a correlation to look for. Collecting Passing and Failing RunsWhile our `StatisticalDebugger` class allows arbitrary outcomes, we are typically only interested in two outcomes, namely _passing_ vs. _failing_ runs. We therefore introduce a specialized `DifferenceDebugger` class that provides customized methods to collect and access passing and failing runs.
###Code
class DifferenceDebugger(StatisticalDebugger):
"""A class to collect events for passing and failing outcomes."""
PASS = 'PASS'
FAIL = 'FAIL'
def collect_pass(self, *args: Any, **kwargs: Any) -> Collector:
"""Return a collector for passing runs."""
return self.collect(self.PASS, *args, **kwargs)
def collect_fail(self, *args: Any, **kwargs: Any) -> Collector:
"""Return a collector for failing runs."""
return self.collect(self.FAIL, *args, **kwargs)
def pass_collectors(self) -> List[Collector]:
return self.collectors[self.PASS]
def fail_collectors(self) -> List[Collector]:
return self.collectors[self.FAIL]
def all_fail_events(self) -> Set[Any]:
"""Return all events observed in failing runs."""
return self.all_events(self.FAIL)
def all_pass_events(self) -> Set[Any]:
"""Return all events observed in passing runs."""
return self.all_events(self.PASS)
def only_fail_events(self) -> Set[Any]:
"""Return all events observed only in failing runs."""
return self.all_fail_events() - self.all_pass_events()
def only_pass_events(self) -> Set[Any]:
"""Return all events observed only in passing runs."""
return self.all_pass_events() - self.all_fail_events()
###Output
_____no_output_____
###Markdown
We can use `DifferenceDebugger` just as a `StatisticalDebugger`:
###Code
# ignore
T1 = TypeVar('T1', bound='DifferenceDebugger')
def test_debugger_html_simple(debugger: T1) -> T1:
with debugger.collect_pass():
remove_html_markup('abc')
with debugger.collect_pass():
remove_html_markup('<b>abc</b>')
with debugger.collect_fail():
remove_html_markup('"abc"')
return debugger
###Output
_____no_output_____
###Markdown
However, since the outcome of tests may not always be predetermined, we provide a simpler interface for tests that can fail (= raise an exception) or pass (not raise an exception).
###Code
class DifferenceDebugger(DifferenceDebugger):
def __enter__(self) -> Any:
"""Enter a `with` block. Collect coverage and outcome;
classify as FAIL if the block raises an exception,
and PASS if it does not.
"""
self.collector = self.collector_class()
self.collector.add_items_to_ignore([self.__class__])
self.collector.__enter__()
return self
def __exit__(self, exc_tp: Type, exc_value: BaseException,
exc_traceback: TracebackType) -> Optional[bool]:
"""Exit the `with` block."""
status = self.collector.__exit__(exc_tp, exc_value, exc_traceback)
if status is None:
pass
else:
return False # Internal error; re-raise exception
if exc_tp is None:
outcome = self.PASS
else:
outcome = self.FAIL
self.add_collector(outcome, self.collector)
return True # Ignore exception, if any
###Output
_____no_output_____
###Markdown
Using this interface, we can rewrite `test_debugger_html()`:
###Code
# ignore
T2 = TypeVar('T2', bound='DifferenceDebugger')
def test_debugger_html(debugger: T2) -> T2:
with debugger:
remove_html_markup('abc')
with debugger:
remove_html_markup('<b>abc</b>')
with debugger:
remove_html_markup('"abc"')
assert False # Mark test as failing
return debugger
test_debugger_html(DifferenceDebugger())
###Output
_____no_output_____
###Markdown
Analyzing EventsLet us now focus on _analyzing_ events collected. Since events come back as _sets_, we can compute _unions_ and _differences_ between these sets. For instance, we can compute which lines were executed in _any_ of the passing runs of `test_debugger_html()`, above:
###Code
debugger = test_debugger_html(DifferenceDebugger())
pass_1_events = debugger.pass_collectors()[0].events()
pass_2_events = debugger.pass_collectors()[1].events()
in_any_pass = pass_1_events | pass_2_events
in_any_pass
###Output
_____no_output_____
###Markdown
Likewise, we can determine which lines were _only_ executed in the failing run:
###Code
fail_events = debugger.fail_collectors()[0].events()
only_in_fail = fail_events - in_any_pass
only_in_fail
###Output
_____no_output_____
###Markdown
And we see that the "failing" run is characterized by processing quotes:
###Code
code_with_coverage(remove_html_markup, only_in_fail)
debugger = test_debugger_html(DifferenceDebugger())
debugger.all_events()
###Output
_____no_output_____
###Markdown
These are the lines executed only in the failing run:
###Code
debugger.only_fail_events()
###Output
_____no_output_____
###Markdown
These are the lines executed only in the passing runs:
###Code
debugger.only_pass_events()
###Output
_____no_output_____
###Markdown
Again, having these lines individually is neat, but things become much more interesting if we can see the associated code lines just as well. That's what we will do in the next section. Visualizing DifferencesTo show correlations of line coverage in context, we introduce a number of _visualization_ techniques that _highlight_ code with different colors. Discrete SpectrumThe first idea is to use a _discrete_ spectrum of three colors:* _red_ for code executed in failing runs only* _green_ for code executed in passing runs only* _yellow_ for code executed in both passing and failing runs.Code that is not executed stays unhighlighted. We first introduce an abstract class `SpectrumDebugger` that provides the essential functions. `suspiciousness()` returns a value between 0 and 1 indicating the suspiciousness of the given event - or `None` if unknown.
###Code
class SpectrumDebugger(DifferenceDebugger):
def suspiciousness(self, event: Any) -> Optional[float]:
"""
Return a suspiciousness value in the range [0, 1.0]
for the given event, or `None` if unknown.
To be overloaded in subclasses.
"""
return None
###Output
_____no_output_____
###Markdown
The `tooltip()` and `percentage()` methods convert the suspiciousness into a human-readable form.
###Code
class SpectrumDebugger(SpectrumDebugger):
def tooltip(self, event: Any) -> str:
"""
Return a tooltip for the given event (default: percentage).
To be overloaded in subclasses.
"""
return self.percentage(event)
def percentage(self, event: Any) -> str:
"""
Return the suspiciousness for the given event as percentage string.
"""
suspiciousness = self.suspiciousness(event)
if suspiciousness is not None:
return str(int(suspiciousness * 100)).rjust(3) + '%'
else:
return ' ' * len('100%')
###Output
_____no_output_____
###Markdown
The `code()` method takes a set of functions (default: all covered functions) and renders each of their source code lines in the given spectrum, using HTML markup where requested:
###Code
class SpectrumDebugger(SpectrumDebugger):
def code(self, functions: Optional[Set[Callable]] = None, *,
color: bool = False, suspiciousness: bool = False,
line_numbers: bool = True) -> str:
"""
Return a listing of `functions` (default: covered functions).
If `color` is True, render as HTML, using suspiciousness colors.
If `suspiciousness` is True, include suspiciousness values.
If `line_numbers` is True (default), include line numbers.
"""
if not functions:
functions = self.covered_functions()
out = ""
seen = set()
for function in functions:
source_lines, starting_line_number = \
inspect.getsourcelines(function)
if (function.__name__, starting_line_number) in seen:
continue
seen.add((function.__name__, starting_line_number))
if out:
out += '\n'
if color:
out += '<p/>'
line_number = starting_line_number
for line in source_lines:
if color:
line = html.escape(line)
if line.strip() == '':
line = ' '
location = (function.__name__, line_number)
location_suspiciousness = self.suspiciousness(location)
if location_suspiciousness is not None:
tooltip = f"Line {line_number}: {self.tooltip(location)}"
else:
tooltip = f"Line {line_number}: not executed"
if suspiciousness:
line = self.percentage(location) + ' ' + line
if line_numbers:
line = str(line_number).rjust(4) + ' ' + line
line_color = self.color(location)
if color and line_color:
line = f'''<pre style="background-color:{line_color}"
title="{tooltip}">{line.rstrip()}</pre>'''
elif color:
line = f'<pre title="{tooltip}">{line}</pre>'
else:
line = line.rstrip()
out += line + '\n'
line_number += 1
return out
###Output
_____no_output_____
###Markdown
We introduce a few helper methods to visualize the code with colors in various forms.
###Code
class SpectrumDebugger(SpectrumDebugger):
def _repr_html_(self) -> str:
"""When output in Jupyter, visualize as HTML"""
return self.code(color=True)
def __str__(self) -> str:
"""Show code as string"""
return self.code(color=False, suspiciousness=True)
def __repr__(self) -> str:
"""Show code as string"""
return self.code(color=False, suspiciousness=True)
###Output
_____no_output_____
###Markdown
So far, however, central methods like `suspiciousness()` or `color()` were abstract – that is, to be defined in subclasses. Our `DiscreteSpectrumDebugger` subclass provides concrete implementations for these, with `color()` returning one of the three colors depending on the line number:
###Code
class DiscreteSpectrumDebugger(SpectrumDebugger):
"""Visualize differences between executions using three discrete colors"""
def suspiciousness(self, event: Any) -> Optional[float]:
"""
Return a suspiciousness value [0, 1.0]
for the given event, or `None` if unknown.
"""
passing = self.all_pass_events()
failing = self.all_fail_events()
if event in passing and event in failing:
return 0.5
elif event in failing:
return 1.0
elif event in passing:
return 0.0
else:
return None
def color(self, event: Any) -> Optional[str]:
"""
Return a HTML color for the given event.
"""
suspiciousness = self.suspiciousness(event)
if suspiciousness is None:
return None
if suspiciousness > 0.8:
return 'mistyrose'
if suspiciousness >= 0.5:
return 'lightyellow'
return 'honeydew'
def tooltip(self, event: Any) -> str:
"""Return a tooltip for the given event."""
passing = self.all_pass_events()
failing = self.all_fail_events()
if event in passing and event in failing:
return "in passing and failing runs"
elif event in failing:
return "only in failing runs"
elif event in passing:
return "only in passing runs"
else:
return "never"
###Output
_____no_output_____
###Markdown
This is how the `only_pass_events()` and `only_fail_events()` sets look when visualized with code. The "culprit" line is clearly highlighted:
###Code
debugger = test_debugger_html(DiscreteSpectrumDebugger())
debugger
###Output
_____no_output_____
###Markdown
We can clearly see that the failure is correlated with the presence of quotes in the input string (which is an important hint!). But does this also show us _immediately_ where the defect to be fixed is?
###Code
quiz("Does the line `quote = not quote` actually contain the defect?",
[
"Yes, it should be fixed",
"No, the defect is elsewhere"
], '164 * 2 % 326')
###Output
_____no_output_____
###Markdown
Indeed, it is the _governing condition_ that is wrong – that is, the condition that caused Line 12 to be executed in the first place. In order to fix a program, we have to find a location that1. _causes_ the failure (i.e., it can be changed to make the failure go away); and2. is a _defect_ (i.e., contains an error).In our example above, the highlighted code line is a _symptom_ for the error. To some extent, it is also a _cause_, since, say, commenting it out would also resolve the given failure, at the cost of causing other failures. However, the preceding condition also is a cause, as is the presence of quotes in the input.Only one of these also is a _defect_, though, and that is the preceding condition. Hence, while correlations can provide important hints, they do not necessarily locate defects. For those of us who may not have color HTML output ready, simply printing the debugger lists suspiciousness values as percentages.
###Code
print(debugger)
###Output
1 50% def remove_html_markup(s): # type: ignore
2 50% tag = False
3 50% quote = False
4 50% out = ""
5
6 50% for c in s:
7 50% if c == '<' and not quote:
8 0% tag = True
9 50% elif c == '>' and not quote:
10 0% tag = False
11 50% elif c == '"' or c == "'" and tag:
12 100% quote = not quote
13 50% elif not tag:
14 50% out = out + c
15
16 50% return out
###Markdown
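To make the point about the defective condition concrete, here is a sketch of a repaired version (our own rendering, named `remove_html_markup_fixed()`; the fix adds parentheses, since `and` binds more tightly than `or`, so in the original a double quote toggles `quote` even outside a tag):
```python
def remove_html_markup_fixed(s):  # sketch of a repaired version
    tag = False
    quote = False
    out = ""

    for c in s:
        if c == '<' and not quote:
            tag = True
        elif c == '>' and not quote:
            tag = False
        elif (c == '"' or c == "'") and tag:  # parentheses added
            quote = not quote
        elif not tag:
            out = out + c

    return out

assert remove_html_markup_fixed('"abc"') == '"abc"'  # the failing input now passes
```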
Continuous SpectrumThe criterion that an event should _only_ occur in failing runs (and not in passing runs) can be too aggressive. In particular, if we have another run that executes the "culprit" lines, but does _not_ fail, our "only in fail" criterion will no longer be helpful. Here is an example. The input```html<b color="blue">text</b>```will trigger the "culprit" line```pythonquote = not quote```but actually produce an output where the tags are properly stripped:
###Code
remove_html_markup('<b color="blue">text</b>')
###Output
_____no_output_____
###Markdown
As a consequence, we no longer have lines that are being executed only in failing runs:
###Code
debugger = test_debugger_html(DiscreteSpectrumDebugger())
with debugger.collect_pass():
remove_html_markup('<b link="blue"></b>')
debugger.only_fail_events()
###Output
_____no_output_____
###Markdown
In our spectrum output, the effect now is that the "culprit" line is as yellow as all others.
###Code
debugger
###Output
_____no_output_____
###Markdown
We therefore introduce a different method for highlighting lines, based on their _relative_ occurrence with respect to all runs: If a line has been _mostly_ executed in failing runs, its color should shift towards red; if a line has been _mostly_ executed in passing runs, its color should shift towards green. This _continuous spectrum_ has been introduced by the seminal _Tarantula_ tool \cite{Jones2002}. In Tarantula, the color _hue_ for each line is defined as follows: $$\textit{color hue}(\textit{line}) = \textit{low color(red)} + \frac{\%\textit{passed}(\textit{line})}{\%\textit{passed}(\textit{line}) + \%\textit{failed}(\textit{line})} \times \textit{color range}$$ Here, `%passed` and `%failed` denote the percentage at which a line has been executed in passing and failing runs, respectively. A hue of 0.0 stands for red, a hue of 1.0 stands for green, and a hue of 0.5 stands for equal fractions of red and green, yielding yellow. We can implement these measures right away as methods in a new `ContinuousSpectrumDebugger` class:
###Code
class ContinuousSpectrumDebugger(DiscreteSpectrumDebugger):
"""Visualize differences between executions using a color spectrum"""
def collectors_with_event(self, event: Any, category: str) -> Set[Collector]:
"""
Return all collectors in a category
that observed the given event.
"""
all_runs = self.collectors[category]
collectors_with_event = set(collector for collector in all_runs
if event in collector.events())
return collectors_with_event
def collectors_without_event(self, event: Any, category: str) -> Set[Collector]:
"""
Return all collectors in a category
that did not observe the given event.
"""
all_runs = self.collectors[category]
collectors_without_event = set(collector for collector in all_runs
if event not in collector.events())
return collectors_without_event
def event_fraction(self, event: Any, category: str) -> float:
if category not in self.collectors:
return 0.0
all_collectors = self.collectors[category]
collectors_with_event = self.collectors_with_event(event, category)
fraction = len(collectors_with_event) / len(all_collectors)
# print(f"%{category}({event}) = {fraction}")
return fraction
def passed_fraction(self, event: Any) -> float:
return self.event_fraction(event, self.PASS)
def failed_fraction(self, event: Any) -> float:
return self.event_fraction(event, self.FAIL)
def hue(self, event: Any) -> Optional[float]:
"""Return a color hue from 0.0 (red) to 1.0 (green)."""
passed = self.passed_fraction(event)
failed = self.failed_fraction(event)
if passed + failed > 0:
return passed / (passed + failed)
else:
return None
###Output
_____no_output_____
###Markdown
Having a continuous hue also implies a continuous suspiciousness and associated tooltips:
###Code
class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger):
def suspiciousness(self, event: Any) -> Optional[float]:
hue = self.hue(event)
if hue is None:
return None
return 1 - hue
def tooltip(self, event: Any) -> str:
return self.percentage(event)
###Output
_____no_output_____
###Markdown
The hue for lines executed only in failing runs is (deep) red, as expected:
###Code
debugger = test_debugger_html(ContinuousSpectrumDebugger())
for location in debugger.only_fail_events():
print(location, debugger.hue(location))
###Output
('remove_html_markup', 12) 0.0
###Markdown
Likewise, the hue for lines executed in passing runs is (deep) green:
###Code
for location in debugger.only_pass_events():
print(location, debugger.hue(location))
###Output
('remove_html_markup', 10) 1.0
('remove_html_markup', 8) 1.0
###Markdown
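These hues follow directly from the definition, assuming the three `test_debugger_html()` runs (two passing, one failing): for Line 12, $\%\textit{passed} = 0$ and $\%\textit{failed} = 1.0$, so the hue is $0 / (0 + 1.0) = 0.0$ (red); Lines 8 and 10 are executed in one of the two passing runs and in no failing run, so their hue is $0.5 / (0.5 + 0) = 1.0$ (green); a line executed in every run, such as Line 2, gets a hue of $1.0 / (1.0 + 1.0) = 0.5$ (yellow).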
The Tarantula tool not only sets the hue for a line, but also uses _brightness_ as a measure of support – that is, how often the line was executed at all. The brighter a line, the stronger the correlation with a passing or failing outcome. The brightness is defined as follows: $$\textit{brightness}(line) = \max(\%\textit{passed}(\textit{line}), \%\textit{failed}(\textit{line}))$$ and it is easily implemented, too:
###Code
class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger):
def brightness(self, event: Any) -> float:
return max(self.passed_fraction(event), self.failed_fraction(event))
###Output
_____no_output_____
###Markdown
Our single "only in fail" line has a brightness of 1.0 (the maximum).
###Code
debugger = test_debugger_html(ContinuousSpectrumDebugger())
for location in debugger.only_fail_events():
print(location, debugger.brightness(location))
###Output
('remove_html_markup', 12) 1.0
###Markdown
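By the same token, a line covered in only one of the two passing runs and in no failing run – such as Line 8 – has a brightness of $\max(0.5, 0) = 0.5$, a weaker signal than the culprit's $1.0$.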
With this, we can now define a color for each line. To this end, we override the (previously discrete) `color()` method such that it returns a color specification giving hue and brightness. We use the HTML format `hsl(hue, saturation, lightness)` where the hue is given as a value between 0 and 360 (0 is red, 120 is green) and saturation and lightness are provided as percentages.
###Code
class ContinuousSpectrumDebugger(ContinuousSpectrumDebugger):
def color(self, event: Any) -> Optional[str]:
hue = self.hue(event)
if hue is None:
return None
saturation = self.brightness(event)
# HSL color values are specified with:
# hsl(hue, saturation, lightness).
return f"hsl({hue * 120}, {saturation * 100}%, 80%)"
debugger = test_debugger_html(ContinuousSpectrumDebugger())
###Output
_____no_output_____
###Markdown
Lines executed only in failing runs are still shown in red:
###Code
for location in debugger.only_fail_events():
print(location, debugger.color(location))
###Output
('remove_html_markup', 12) hsl(0.0, 100.0%, 80%)
###Markdown
... whereas lines executed only in passing runs are still shown in green:
###Code
for location in debugger.only_pass_events():
print(location, debugger.color(location))
debugger
###Output
_____no_output_____
###Markdown
What happens with our `quote = not quote` "culprit" line if it is executed in passing runs, too?
###Code
with debugger.collect_pass():
out = remove_html_markup('<b link="blue"></b>')
quiz('In which color will the `quote = not quote` "culprit" line '
'be shown after executing the above code?',
[
'<span style="background-color: hsl(120.0, 50.0%, 80%)">Green</span>',
'<span style="background-color: hsl(60.0, 100.0%, 80%)">Yellow</span>',
'<span style="background-color: hsl(30.0, 100.0%, 80%)">Orange</span>',
'<span style="background-color: hsl(0.0, 100.0%, 80%)">Red</span>'
], '999 // 333')
###Output
_____no_output_____
###Markdown
We see that it still is shown with an orange-red tint.
###Code
debugger
###Output
_____no_output_____
###Markdown
Here's another example, coming right from the Tarantula paper. The `middle()` function takes three numbers `x`, `y`, and `z`, and returns the one that is neither the minimum nor the maximum of the three:
###Code
def middle(x, y, z): # type: ignore
if y < z:
if x < y:
return y
elif x < z:
return y
else:
if x > y:
return y
elif x > z:
return x
return z
middle(1, 2, 3)
###Output
_____no_output_____
###Markdown
Unfortunately, `middle()` can fail:
###Code
middle(2, 1, 3)
###Output
_____no_output_____
###Markdown
Let us see whether we can find the bug with a few additional test cases:
###Code
# ignore
T3 = TypeVar('T3', bound='DifferenceDebugger')
def test_debugger_middle(debugger: T3) -> T3:
with debugger.collect_pass():
middle(3, 3, 5)
with debugger.collect_pass():
middle(1, 2, 3)
with debugger.collect_pass():
middle(3, 2, 1)
with debugger.collect_pass():
middle(5, 5, 5)
with debugger.collect_pass():
middle(5, 3, 4)
with debugger.collect_fail():
middle(2, 1, 3)
return debugger
###Output
_____no_output_____
###Markdown
Note that in order to collect data from multiple function invocations, you need to have a separate `with` clause for every invocation. The following will _not_ work correctly:```python with debugger.collect_pass(): middle(3, 3, 5) middle(1, 2, 3) ...```
###Code
debugger = test_debugger_middle(ContinuousSpectrumDebugger())
debugger.event_table(args=True)
###Output
_____no_output_____
###Markdown
Here comes the visualization. We see that the `return y` line is the culprit here – and actually also the one to be fixed.
###Code
debugger
quiz("Which of the above lines should be fixed?",
[
'<span style="background-color: hsl(45.0, 100%, 80%)">Line 3: `elif x < y`</span>',
'<span style="background-color: hsl(34.28571428571429, 100.0%, 80%)">Line 5: `elif x < z`</span>',
'<span style="background-color: hsl(20.000000000000004, 100.0%, 80%)">Line 6: `return y`</span>',
'<span style="background-color: hsl(120.0, 20.0%, 80%)">Line 9: `return y`</span>',
], r'len(" middle ".strip()[:3])')
###Output
_____no_output_____
###Markdown
Indeed, in the `middle()` example, the "reddest" line is also the one to be fixed. Here is the fixed version:
###Code
def middle_fixed(x, y, z): # type: ignore
if y < z:
if x < y:
return y
elif x < z:
return x
else:
if x > y:
return y
elif x > z:
return x
return z
middle_fixed(2, 1, 3)
###Output
_____no_output_____
###Markdown
Ranking Lines by SuspiciousnessIn a large program, there can be several locations (and events) that could be flagged as suspicious. It suffices that some large code block of, say, 1,000 lines is mostly executed in failing runs, and then all of this code block will be visualized in some shade of red. To further highlight the "most suspicious" events, one idea is to use a _ranking_ – that is, coming up with a list of events where those events most correlated with failures would be shown at the top. The programmer would then examine these events one by one and proceed down the list. We will show how this works for two "correlation" metrics – first the _Tarantula_ metric, as introduced above, and then the _Ochiai_ metric, which has been shown to be one of the best "ranking" metrics. We introduce a base class `RankingDebugger` with an abstract method `suspiciousness()` to be overloaded in subclasses. The method `rank()` returns a list of all events observed, sorted by suspiciousness, highest first.
###Code
class RankingDebugger(DiscreteSpectrumDebugger):
"""Rank events by their suspiciousness"""
def rank(self) -> List[Any]:
"""Return a list of events, sorted by suspiciousness, highest first."""
def susp(event: Any) -> float:
suspiciousness = self.suspiciousness(event)
assert suspiciousness is not None
return suspiciousness
events = list(self.all_events())
events.sort(key=susp, reverse=True)
return events
def __repr__(self) -> str:
return repr(self.rank())
###Output
_____no_output_____
###Markdown
The Tarantula MetricWe can use the Tarantula metric to sort lines according to their suspiciousness. The "redder" a line (a hue of 0.0), the more suspicious it is. We can simply define $$\textit{suspiciousness}_\textit{tarantula}(\textit{event}) = 1 - \textit{color hue}(\textit{event})$$ where $\textit{color hue}$ is as defined above. This is exactly the `suspiciousness()` function as already implemented in our `ContinuousSpectrumDebugger`. We introduce the `TarantulaDebugger` class, inheriting visualization capabilities from the `ContinuousSpectrumDebugger` class as well as the suspiciousness features from the `RankingDebugger` class.
###Code
class TarantulaDebugger(ContinuousSpectrumDebugger, RankingDebugger):
"""Spectrum-based Debugger using the Tarantula metric for suspiciousness"""
pass
###Output
_____no_output_____
###Markdown
Let us list `remove_html_markup()` with highlighted lines again:
###Code
tarantula_html = test_debugger_html(TarantulaDebugger())
tarantula_html
###Output
_____no_output_____
###Markdown
Here's our ranking of lines, from most suspicious to least suspicious:
###Code
tarantula_html.rank()
tarantula_html.suspiciousness(tarantula_html.rank()[0])
###Output
_____no_output_____
###Markdown
We see that the first line in the list is indeed the most suspicious; the two "green" lines come at the very end. For the `middle()` function, we also obtain a ranking from "reddest" to "greenest".
###Code
tarantula_middle = test_debugger_middle(TarantulaDebugger())
tarantula_middle
tarantula_middle.rank()
tarantula_middle.suspiciousness(tarantula_middle.rank()[0])
###Output
_____no_output_____
###Markdown
The Ochiai MetricThe _Ochiai_ Metric \cite{Ochiai1957} first introduced in the biology domain \cite{daSilvaMeyer2004} and later applied for fault localization by Abreu et al. \cite{Abreu2009}, is defined as follows: $$\textit{suspiciousness}_\textit{ochiai} = \frac{\textit{failed}(\textit{event})}{\sqrt{\bigl(\textit{failed}(\textit{event}) + \textit{not-in-failed}(\textit{event})\bigr)\times\bigl(\textit{failed}(\textit{event}) + \textit{passed}(\textit{event})\bigr)}}$$ where* $\textit{failed}(\textit{event})$ is the number of times the event occurred in _failing_ runs* $\textit{not-in-failed}(\textit{event})$ is the number of times the event did _not_ occur in failing runs* $\textit{passed}(\textit{event})$ is the number of times the event occurred in _passing_ runs.We can easily implement this formula:
###Code
import math
class OchiaiDebugger(ContinuousSpectrumDebugger, RankingDebugger):
"""Spectrum-based Debugger using the Ochiai metric for suspiciousness"""
def suspiciousness(self, event: Any) -> Optional[float]:
failed = len(self.collectors_with_event(event, self.FAIL))
not_in_failed = len(self.collectors_without_event(event, self.FAIL))
passed = len(self.collectors_with_event(event, self.PASS))
try:
return failed / math.sqrt((failed + not_in_failed) * (failed + passed))
except ZeroDivisionError:
return None
def hue(self, event: Any) -> Optional[float]:
suspiciousness = self.suspiciousness(event)
if suspiciousness is None:
return None
return 1 - suspiciousness
###Output
_____no_output_____
###Markdown
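As a worked example, again assuming the three `test_debugger_html()` runs (two passing, one failing): for Line 12 we have $\textit{failed} = 1$, $\textit{not-in-failed} = 0$, and $\textit{passed} = 0$, so its suspiciousness is $1 / \sqrt{(1 + 0) \times (1 + 0)} = 1.0$. A line executed in every run, such as Line 2, has $\textit{failed} = 1$, $\textit{not-in-failed} = 0$, and $\textit{passed} = 2$, yielding $1 / \sqrt{1 \times 3} \approx 0.58$ – higher than its Tarantula suspiciousness of $0.5$, but still well below the culprit.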
Applied on the `remove_html_markup()` function, the individual suspiciousness scores differ from Tarantula. However, we obtain a very similar visualization, and the same ranking.
###Code
ochiai_html = test_debugger_html(OchiaiDebugger())
ochiai_html
ochiai_html.rank()
ochiai_html.suspiciousness(ochiai_html.rank()[0])
###Output
_____no_output_____
###Markdown
The same observations also apply for the `middle()` function.
###Code
ochiai_middle = test_debugger_middle(OchiaiDebugger())
ochiai_middle
ochiai_middle.rank()
ochiai_middle.suspiciousness(ochiai_middle.rank()[0])
###Output
_____no_output_____
###Markdown
How Useful is Ranking?So, which metric is better? The standard method to evaluate such rankings is to determine a _ground truth_ – that is, the set of locations that eventually are fixed – and to check at which point in the ranking any such location occurs – the earlier, the better. In our `remove_html_markup()` and `middle()` examples, both the Tarantula and the Ochiai metric perform flawlessly, as the "culprit" line is always ranked at the top. However, this need not always be the case; the exact performance depends on the nature of the code and the observed runs. (Also, the question of whether there always is exactly one possible location where the program can be fixed is open for discussion.) You will be surprised that over time, _several dozen_ metrics have been proposed \cite{Wong2016}, each performing somewhat better or somewhat worse depending on which benchmark they were applied on. The two metrics discussed above each have their merits – the Tarantula metric was among the first such metrics, and the Ochiai metric is generally shown to be among the most effective ones \cite{Abreu2009}. While rankings can be easily _evaluated_, it is not necessarily clear whether and how much they serve programmers. As stated above, the assumption of rankings is that developers examine one potentially defective statement after another until they find the actually defective one. However, in a series of human studies with developers, Parnin and Orso \cite{Parnin2011} found that this assumption may not hold:> It is unclear whether developers can actually determine the faulty nature of a statement by simply looking at it, without any additional information (e.g., the state of the program when the statement was executed or the statements that were executed before or after that one).In their study, they found that rankings could help completing a task faster, but this effect was limited to experienced developers and simpler code. Artificially changing the rank of faulty statements had little to no effect, implying that developers would not strictly follow the ranked list of statements, but rather search through the code to understand it. At this point, a _visualization_ as in the Tarantula tool can be helpful to programmers as it _guides_ the search, but a _ranking_ that _defines_ where to search may be less useful. Having said that, ranking has its merits – notably as it comes to informing _automated_ debugging techniques. In the [chapter on program repair](Repairer.ipynb), we will see how ranked lists of potentially faulty statements tell automated repair techniques where to try to repair the program first. And once such a repair is successful, we have a very strong indication on where and how the program could be fixed! Using Large Test Suites In fault localization, the larger and the more thorough the test suite, the higher the precision. Let us try out what happens if we extend the `middle()` test suite with additional test cases. The function `middle_testcase()` returns a random input for `middle()`:
###Code
import random
def middle_testcase() -> Tuple[int, int, int]:
x = random.randrange(10)
y = random.randrange(10)
z = random.randrange(10)
return x, y, z
[middle_testcase() for i in range(5)]
###Output
_____no_output_____
###Markdown
The function `middle_test()` simply checks if `middle()` operates correctly – by placing `x`, `y`, and `z` in a list, sorting it, and checking the middle argument. If `middle()` fails, `middle_test()` raises an exception.
###Code
def middle_test(x: int, y: int, z: int) -> None:
m = middle(x, y, z)
assert m == sorted([x, y, z])[1]
middle_test(4, 5, 6)
from ExpectError import ExpectError
with ExpectError():
middle_test(2, 1, 3)
###Output
Traceback (most recent call last):
File "<ipython-input-1-ae2957225406>", line 2, in <module>
middle_test(2, 1, 3)
File "<ipython-input-1-e1407680b9f2>", line 3, in middle_test
assert m == sorted([x, y, z])[1]
AssertionError (expected)
###Markdown
The function `middle_passing_testcase()` searches and returns a triple `x`, `y`, `z` that causes `middle_test()` to pass.
###Code
def middle_passing_testcase() -> Tuple[int, int, int]:
while True:
try:
x, y, z = middle_testcase()
middle_test(x, y, z)
return x, y, z
except AssertionError:
pass
(x, y, z) = middle_passing_testcase()
m = middle(x, y, z)
print(f"middle({x}, {y}, {z}) = {m}")
###Output
middle(2, 6, 7) = 6
###Markdown
The function `middle_failing_testcase()` does the same; but its triple `x`, `y`, `z` causes `middle_test()` to fail.
###Code
def middle_failing_testcase() -> Tuple[int, int, int]:
while True:
try:
x, y, z = middle_testcase()
middle_test(x, y, z)
except AssertionError:
return x, y, z
(x, y, z) = middle_failing_testcase()
m = middle(x, y, z)
print(f"middle({x}, {y}, {z}) = {m}")
###Output
middle(5, 4, 6) = 4
###Markdown
With these, we can define two sets of test cases, each with 100 inputs.
###Code
MIDDLE_TESTS = 100
MIDDLE_PASSING_TESTCASES = [middle_passing_testcase()
for i in range(MIDDLE_TESTS)]
MIDDLE_FAILING_TESTCASES = [middle_failing_testcase()
for i in range(MIDDLE_TESTS)]
###Output
_____no_output_____
###Markdown
Let us run the `OchiaiDebugger` with these two test sets.
###Code
ochiai_middle = OchiaiDebugger()
for x, y, z in MIDDLE_PASSING_TESTCASES:
with ochiai_middle.collect_pass():
middle(x, y, z)
for x, y, z in MIDDLE_FAILING_TESTCASES:
with ochiai_middle.collect_fail():
middle(x, y, z)
ochiai_middle
###Output
_____no_output_____
###Markdown
We see that the "culprit" line is still the most likely to be fixed, but the two conditions leading to the error (`x < y` and `x < z`) are also listed as potentially faulty. That is because the error might also be fixed be changing these conditions – although this would result in a more complex fix. Other Events besides CoverageWe close this chapter with two directions for further thought. If you wondered why in the above code, we were mostly talking about `events` rather than lines covered, that is because our framework allows for tracking arbitrary events, not just coverage. In fact, any data item a collector can extract from the execution can be used for correlation analysis. (It may not be so easily visualized, though.) Here's an example. We define a `ValueCollector` class that collects pairs of (local) variables and their values during execution. Its `events()` method then returns the set of all these pairs.
###Code
class ValueCollector(Collector):
""""A class to collect local variables and their values."""
def __init__(self) -> None:
"""Constructor."""
super().__init__()
self.vars: Set[str] = set()
def collect(self, frame: FrameType, event: str, arg: Any) -> None:
local_vars = frame.f_locals
for var in local_vars:
value = local_vars[var]
self.vars.add(f"{var} = {repr(value)}")
def events(self) -> Set[str]:
"""A set of (variable, value) pairs observed"""
return self.vars
###Output
_____no_output_____
###Markdown
If we apply this collector on our set of HTML test cases, these are all the events that we obtain – essentially all variables and all values ever seen:
###Code
debugger = test_debugger_html(ContinuousSpectrumDebugger(ValueCollector))
for event in debugger.all_events():
print(event)
###Output
c = 'c'
c = '>'
c = '"'
tag = False
c = 'b'
c = '/'
out = 'a'
out = ''
tag = True
c = '<'
quote = True
s = '"abc"'
s = '<b>abc</b>'
out = 'ab'
s = 'abc'
quote = False
c = 'a'
out = 'abc'
###Markdown
However, some of these events only occur in the failing run:
###Code
for event in debugger.only_fail_events():
print(event)
###Output
s = '"abc"'
c = '"'
quote = True
###Markdown
Some of these differences are spurious – the string `"abc"` (with quotes) only occurs in the failing run – but others, such as `quote` being True and `c` containing a quote character, are actually relevant for explaining when the failure comes to be. We can even visualize the suspiciousness of the individual events, setting the (so far undiscussed) `color` flag for producing an event table:
###Code
debugger.event_table(color=True, args=True)
###Output
_____no_output_____
###Markdown
There are many ways one can continue from here.* Rather than checking for concrete values, one could check for more _abstract properties_, for instance – what is the sign of the value? What is the length of the string? * One could check for specifics of the _control flow_ – is the loop taken? How many times?* One could check for specifics of the _information flow_ – which values flow from one variable to another?There are lots of properties that all could be related to failures – and if we happen to check for the right one, we may obtain a much crisper definition of what causes the failure. We will come up with more ideas on properties to check as it comes to [mining specifications](SpecificationMining.ipynb). Training ClassifiersThe metrics we have discussed so far are pretty _generic_ – that is, they are fixed no matter how the actual event space is structured. The field of _machine learning_ has come up with techniques that learn _classifiers_ from a given set of data – classifiers that are trained from labeled data and then can predict labels for new data sets. In our case, the labels are test outcomes (PASS and FAIL), whereas the data would be features of the events observed. A classifier by itself is not immediately useful for debugging (although it could predict whether future inputs will fail or not). Some classifiers, however, have great _diagnostic_ quality; that is, they can _explain_ how their classification comes to be. [Decision trees](https://scikit-learn.org/stable/modules/tree.html) fall into this very category. A decision tree contains a number of _nodes_, each one associated with a predicate. Depending on whether the predicate is true or false, we follow the given "true" or "false" branch to end up in the next node, which again contains a predicate. Eventually, we end up in the outcome predicted by the tree. The neat thing is that the node predicates actually give important hints on the circumstances that are _most relevant_ for deciding the outcome. Let us illustrate this with an example. We build a class `ClassifyingDebugger` that trains a decision tree from the events collected. To this end, we need to set up our input data such that it can be fed into a classifier. We start with identifying our _samples_ (runs) and the respective _labels_ (outcomes). All values have to be encoded into numerical values.
###Code
class ClassifyingDebugger(DifferenceDebugger):
"""A debugger implementing a decision tree for events"""
PASS_VALUE = +1.0
FAIL_VALUE = -1.0
def samples(self) -> Dict[str, float]:
samples = {}
for collector in self.pass_collectors():
samples[collector.id()] = self.PASS_VALUE
        for collector in self.fail_collectors():
samples[collector.id()] = self.FAIL_VALUE
return samples
debugger = test_debugger_html(ClassifyingDebugger())
debugger.samples()
###Output
_____no_output_____
###Markdown
Next, we identify the _features_, which in our case is the set of lines executed in each sample:
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def features(self) -> Dict[str, Any]:
features = {}
        for collector in self.pass_collectors():
            features[collector.id()] = collector.events()
        for collector in self.fail_collectors():
features[collector.id()] = collector.events()
return features
debugger = test_debugger_html(ClassifyingDebugger())
debugger.features()
###Output
_____no_output_____
###Markdown
All our features have names, which must be strings.
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def feature_names(self) -> List[str]:
return [repr(feature) for feature in self.all_events()]
debugger = test_debugger_html(ClassifyingDebugger())
debugger.feature_names()
###Output
_____no_output_____
###Markdown
Next, we define the _shape_ for an individual sample: a list with one value per feature seen (+1 if the line was covered in the sample, -1 if not).
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def shape(self, sample: str) -> List[float]:
x = []
features = self.features()
for f in self.all_events():
if f in features[sample]:
x += [+1.0]
else:
x += [-1.0]
return x
debugger = test_debugger_html(ClassifyingDebugger())
debugger.shape("remove_html_markup(s='abc')")
###Output
_____no_output_____
###Markdown
Our input X for the classifier now is a list of such shapes, one for each sample.
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def X(self) -> List[List[float]]:
X = []
samples = self.samples()
for key in samples:
X += [self.shape(key)]
return X
debugger = test_debugger_html(ClassifyingDebugger())
debugger.X()
###Output
_____no_output_____
###Markdown
Our input Y for the classifier, in contrast, is the list of labels, again indexed by sample.
###Code
class ClassifyingDebugger(ClassifyingDebugger):
def Y(self) -> List[float]:
Y = []
samples = self.samples()
for key in samples:
Y += [samples[key]]
return Y
debugger = test_debugger_html(ClassifyingDebugger())
debugger.Y()
###Output
_____no_output_____
###Markdown
We now have all our data ready to be fit into a tree classifier. The method `classifier()` creates and returns the (tree) classifier for the observed runs.
###Code
from sklearn.tree import DecisionTreeClassifier, export_text, export_graphviz
class ClassifyingDebugger(ClassifyingDebugger):
def classifier(self) -> DecisionTreeClassifier:
classifier = DecisionTreeClassifier()
classifier = classifier.fit(self.X(), self.Y())
return classifier
###Output
_____no_output_____
###Markdown
We define a special method to show classifiers:
###Code
import graphviz
class ClassifyingDebugger(ClassifyingDebugger):
def show_classifier(self, classifier: DecisionTreeClassifier) -> Any:
dot_data = export_graphviz(classifier, out_file=None,
filled=False, rounded=True,
feature_names=self.feature_names(),
class_names=["FAIL", "PASS"],
label='none',
node_ids=False,
impurity=False,
proportion=True,
special_characters=True)
return graphviz.Source(dot_data)
###Output
_____no_output_____
###Markdown
This is the tree we get for our `remove_html_markup()` tests. The top predicate is whether the "culprit" line was executed (-1 means no, +1 means yes). If not (-1), the outcome is PASS. Otherwise, the outcome is FAIL.
###Code
debugger = test_debugger_html(ClassifyingDebugger())
classifier = debugger.classifier()
debugger.show_classifier(classifier)
###Output
_____no_output_____
###Markdown
We can even use our classifier to predict the outcome of additional runs. If, for instance, we execute all lines except for, say, Line 7, 9, and 11, our tree classifier would predict failure – because the "culprit" line 12 is executed.
###Code
classifier.predict([[1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1]])
###Output
_____no_output_____
###Markdown
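Rather than hand-crafting the +1/-1 vector as above, a small helper can derive it from the line numbers to be skipped. This is only a sketch (the name `shape_excluding()` is ours); it relies on `all_events()` iterating in the same order as `shape()` and `feature_names()` do within the same session:
```python
from typing import List, Set

def shape_excluding(debugger: ClassifyingDebugger,
                    skipped_lines: Set[int]) -> List[float]:
    """Build a +1/-1 vector marking every observed event as covered,
       except those whose line number is in `skipped_lines`."""
    return [-1.0 if lineno in skipped_lines else +1.0
            for func_name, lineno in debugger.all_events()]

classifier.predict([shape_excluding(debugger, {7, 9, 11})])
```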
Again, there are many ways to continue from here. Which events should we train the classifier from? How do classifiers compare in their performance and diagnostic quality? There are lots of possibilities left to explore, and we only begin to realize the potential for automated debugging. SynopsisThis chapter introduces classes and techniques for _statistical debugging_ – that is, correlating specific events, such as lines covered, with passing and failing outcomes. To make use of the code in this chapter, use one of the provided `StatisticalDebugger` subclasses such as `TarantulaDebugger` or `OchiaiDebugger`. Both are instantiated with a `Collector` denoting the type of events you want to correlate outcomes with. The default is `CoverageCollector`, which collects line coverage. Collecting Events from CallsTo collect events from calls that are labeled manually, use
###Code
debugger = TarantulaDebugger()
with debugger.collect_pass():
remove_html_markup("abc")
with debugger.collect_pass():
remove_html_markup('<b>abc</b>')
with debugger.collect_fail():
remove_html_markup('"abc"')
###Output
_____no_output_____
###Markdown
Within each `with` block, the _first function call_ is collected and tracked for coverage. (Note that _only_ the first call is tracked.) Collecting Events from TestsTo collect events from _tests_ that use exceptions to indicate failure, use the simpler `with` form:
###Code
debugger = TarantulaDebugger()
with debugger:
remove_html_markup("abc")
with debugger:
remove_html_markup('<b>abc</b>')
with debugger:
remove_html_markup('"abc"')
assert False # raise an exception
###Output
_____no_output_____
###Markdown
`with` blocks that raise an exception will be classified as failing, blocks that do not will be classified as passing. Note that exceptions raised are "swallowed" by the debugger. Visualizing Events as a TableAfter collecting events, you can print out the observed events – in this case, line numbers – in a table, showing in which runs they occurred (`X`), and with colors highlighting the suspiciousness of the event. A "red" event means that the event predominantly occurs in failing runs.
###Code
debugger.event_table(args=True, color=True)
###Output
_____no_output_____
###Markdown
Visualizing Suspicious CodeIf you collected coverage with `CoverageCollector`, you can also visualize the code with similar colors, highlighting suspicious lines:
###Code
debugger
###Output
_____no_output_____
###Markdown
Ranking EventsThe method `rank()` returns a ranked list of events, starting with the most suspicious. This is useful for automated techniques that need potential defect locations.
###Code
debugger.rank()
###Output
_____no_output_____
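###Markdown
For reference, the suspiciousness metrics behind `TarantulaDebugger` and `OchiaiDebugger` fit into a few lines. The cell below is a minimal sketch of the standard Tarantula and Ochiai formulas from the literature, added for illustration only; the helper functions and their arguments are named for this sketch and are not part of the chapter's implementation.
###Code
from math import sqrt
from typing import Optional

def tarantula_suspiciousness(failed: int, passed: int,
                             total_failed: int, total_passed: int) -> Optional[float]:
    # Fraction of failing runs covering the event, normalized against
    # the fraction of passing runs covering it.
    if total_failed == 0 or (failed == 0 and passed == 0):
        return None
    fail_ratio = failed / total_failed
    pass_ratio = passed / total_passed if total_passed > 0 else 0.0
    return fail_ratio / (fail_ratio + pass_ratio)

def ochiai_suspiciousness(failed: int, passed: int, total_failed: int) -> Optional[float]:
    # Failing coverage, normalized by a geometric-mean style denominator.
    denominator = sqrt(total_failed * (failed + passed))
    if denominator == 0:
        return None
    return failed / denominator

# An event covered by the one failing run and by no passing run is
# maximally suspicious under both metrics.
print(tarantula_suspiciousness(failed=1, passed=0, total_failed=1, total_passed=2))
print(ochiai_suspiciousness(failed=1, passed=0, total_failed=1))
###Output
_____no_output_____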
###Markdown
Classes and MethodsHere are all classes defined in this chapter:
###Code
# ignore
from ClassDiagram import display_class_hierarchy
# ignore
display_class_hierarchy([TarantulaDebugger, OchiaiDebugger],
abstract_classes=[
StatisticalDebugger,
DifferenceDebugger,
RankingDebugger
],
public_methods=[
StatisticalDebugger.__init__,
StatisticalDebugger.all_events,
StatisticalDebugger.event_table,
StatisticalDebugger.function,
StatisticalDebugger.coverage,
StatisticalDebugger.covered_functions,
DifferenceDebugger.__enter__,
DifferenceDebugger.__exit__,
DifferenceDebugger.all_pass_events,
DifferenceDebugger.all_fail_events,
DifferenceDebugger.collect_pass,
DifferenceDebugger.collect_fail,
DifferenceDebugger.only_pass_events,
DifferenceDebugger.only_fail_events,
SpectrumDebugger.code,
SpectrumDebugger.__repr__,
SpectrumDebugger.__str__,
SpectrumDebugger._repr_html_,
ContinuousSpectrumDebugger.code,
ContinuousSpectrumDebugger.__repr__,
RankingDebugger.rank
],
project='debuggingbook')
# ignore
display_class_hierarchy([CoverageCollector, ValueCollector],
public_methods=[
Tracer.__init__,
Tracer.__enter__,
Tracer.__exit__,
Tracer.changed_vars, # type: ignore
Collector.__init__,
Collector.__repr__,
Collector.function,
Collector.args,
Collector.argstring,
Collector.exception,
Collector.id,
Collector.collect,
CoverageCollector.coverage,
CoverageCollector.covered_functions,
CoverageCollector.events,
ValueCollector.__init__,
ValueCollector.events
],
project='debuggingbook')
###Output
_____no_output_____
###Markdown
Lessons Learned* _Correlations_ between execution events and outcomes (pass/fail) can provide important hints for debugging* Events occurring only (or mostly) during failing runs can be _highlighted_ and _ranked_ to guide the search* Important hints include whether the _execution of specific code locations_ correlates with failure Next StepsChapters that build on this one include* [how to determine invariants that correlate with failures](DynamicInvariants.ipynb)* [how to automatically repair programs](Repairer.ipynb) BackgroundThe seminal works on statistical debugging are two papers:* "Visualization of Test Information to Assist Fault Localization" \cite{Jones2002} by James Jones, Mary Jean Harrold, and John Stasko, introducing Tarantula and its visualization. The paper won an ACM SIGSOFT 10-year impact award.* "Bug Isolation via Remote Program Sampling" \cite{Liblit2003} by Ben Liblit, Alex Aiken, Alice X. Zheng, and Michael I. Jordan, introducing the term "Statistical debugging". Liblit won the ACM Doctoral Dissertation Award for this work.The Ochiai metric for fault localization was introduced by \cite{Abreu2009}. The survey by Wong et al. \cite{Wong2016} gives a comprehensive overview of the field of statistical fault localization.The study by Parnin and Orso \cite{Parnin2011} is a must-read to understand the limitations of the technique. Exercises Exercise 1: A Postcondition for MiddleWhat would be a postcondition for `middle()`? How can you check it? **Solution.** A simple postcondition for `middle()` would be```python assert m == sorted([x, y, z])[1]```where `m` is the value returned by `middle()`. `sorted()` sorts the given list, and the index `[1]` returns, well, the middle element. (This might also be a much shorter, but possibly slightly more expensive implementation for `middle()`.) Since `middle()` has several `return` statements, the easiest way to check the result is to create a wrapper around `middle()`:
###Code
def middle_checked(x, y, z): # type: ignore
m = middle(x, y, z)
assert m == sorted([x, y, z])[1]
return m
###Output
_____no_output_____
###Markdown
`middle_checked()` catches the error:
###Code
from ExpectError import ExpectError
with ExpectError():
m = middle_checked(2, 1, 3)
###Output
Traceback (most recent call last):
File "<ipython-input-1-3c03371d2614>", line 2, in <module>
m = middle_checked(2, 1, 3)
File "<ipython-input-1-7a70e9d5c211>", line 3, in middle_checked
assert m == sorted([x, y, z])[1]
AssertionError (expected)
|
A Beginners Guide to Python/02. Guide FAQ.ipynb | ###Markdown
Guide FAQHi guys, in this lecture I’m going to elaborate a little bit more on the nature of this course, an ‘FAQ’ of sorts. “Why doesn’t this guide cover sets, dicts, tuples?”I’ve left out lots of things for a variety of reasons, the main three reasons being:1. I do not have an infinite amount of time to spend on this project.1. Syntax discussion is, though necessary, really boring to teach.1. You must learn to think for yourselves!Every time I increase the ‘scope’ of this project ‘quality’ is going to suffer; more lectures means more typos and bugs to catch with less time (per lecture) to catch them in! And then there is the third (and most important) point; programming is about *self-learning* as opposed to being *spoon-fed* material. On numerous occasions throughout this guide I will encourage you to learn for yourselves, my job is to give you a set of tools to teach yourself with!In short, this guide was never intended to be fully comprehensive and if you find yourself wanting to know how ‘X’ works (e.g. Sets, Tuples, Dicts) then the answer is merely a google away. “What is the 'Zen of Python' and why should I care?”
###Code
import this
###Output
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
|
site/en/r2/tutorials/keras/basic_regression.ipynb | ###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Regression: Predict fuel efficiency View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook In a *regression* problem, we aim to predict the output of a continuous value, like a price or a probability. Contrast this with a *classification* problem, where we aim to select a class from a list of classes (for example, where a picture contains an apple or an orange, recognizing which fruit is in the picture).This notebook uses the classic [Auto MPG](https://archive.ics.uci.edu/ml/datasets/auto+mpg) Dataset and builds a model to predict the fuel efficiency of late-1970s and early 1980s automobiles. To do this, we'll provide the model with a description of many automobiles from that time period. This description includes attributes like: cylinders, displacement, horsepower, and weight.This example uses the `tf.keras` API, see [this guide](https://www.tensorflow.org/guide/keras) for details.
###Code
# Use seaborn for pairplot
!pip install seaborn
from __future__ import absolute_import, division, print_function, unicode_literals
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
!pip install tensorflow==2.0.0-beta0
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
###Output
_____no_output_____
###Markdown
The Auto MPG datasetThe dataset is available from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/). Get the dataFirst download the dataset.
###Code
dataset_path = keras.utils.get_file("auto-mpg.data", "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
dataset_path
###Output
_____no_output_____
###Markdown
Import it using pandas
###Code
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
###Output
_____no_output_____
###Markdown
Clean the dataThe dataset contains a few unknown values.
###Code
dataset.isna().sum()
###Output
_____no_output_____
###Markdown
To keep this initial tutorial simple, drop those rows.
###Code
dataset = dataset.dropna()
###Output
_____no_output_____
###Markdown
The `"Origin"` column is really categorical, not numeric. So convert that to a one-hot:
###Code
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0
dataset.tail()
###Output
_____no_output_____
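###Markdown
An equivalent way to build the one-hot columns, shown here only for comparison and not used in the rest of the tutorial, is pandas' `get_dummies`; the renaming of the numeric origin codes is an assumption made for this sketch.
###Code
# Alternative one-hot encoding via pandas, renaming the numeric codes
# to the country names used above. Illustration only -- the notebook
# keeps using the manually built USA/Europe/Japan columns.
one_hot = pd.get_dummies(origin).rename(columns={1: 'USA', 2: 'Europe', 3: 'Japan'})
one_hot.tail()
###Output
_____no_output_____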
###Markdown
Split the data into train and testNow split the dataset into a training set and a test set.We will use the test set in the final evaluation of our model.
###Code
train_dataset = dataset.sample(frac=0.8,random_state=0)
test_dataset = dataset.drop(train_dataset.index)
###Output
_____no_output_____
###Markdown
Inspect the dataHave a quick look at the joint distribution of a few pairs of columns from the training set.
###Code
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")
###Output
_____no_output_____
###Markdown
Also look at the overall statistics:
###Code
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats
###Output
_____no_output_____
###Markdown
Split features from labelsSeparate the target value, or "label", from the features. This label is the value that you will train the model to predict.
###Code
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
###Output
_____no_output_____
###Markdown
Normalize the dataLook again at the `train_stats` block above and note how different the ranges of each feature are. It is good practice to normalize features that use different scales and ranges. Although the model *might* converge without feature normalization, it makes training more difficult, and it makes the resulting model dependent on the choice of units used in the input.Note: Although we intentionally generate these statistics from only the training dataset, these statistics will also be used to normalize the test dataset. We need to do that to project the test dataset into the same distribution that the model has been trained on.
###Code
def norm(x):
return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
###Output
_____no_output_____
###Markdown
This normalized data is what we will use to train the model.Caution: The statistics used to normalize the inputs here (mean and standard deviation) need to be applied to any other data that is fed to the model, along with the one-hot encoding that we did earlier. That includes the test set as well as live data when the model is used in production. The model Build the modelLet's build our model. Here, we'll use a `Sequential` model with two densely connected hidden layers, and an output layer that returns a single, continuous value. The model building steps are wrapped in a function, `build_model`, since we'll create a second model, later on.
###Code
def build_model():
model = keras.Sequential([
layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
model = build_model()
###Output
_____no_output_____
###Markdown
Inspect the modelUse the `.summary` method to print a simple description of the model
###Code
model.summary()
###Output
_____no_output_____
###Markdown
Now try out the model. Take a batch of `10` examples from the training data and call `model.predict` on it.
###Code
example_batch = normed_train_data[:10]
example_result = model.predict(example_batch)
example_result
###Output
_____no_output_____
###Markdown
It seems to be working, and it produces a result of the expected shape and type. Train the modelTrain the model for 1000 epochs, and record the training and validation metrics in the `history` object.
###Code
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(
normed_train_data, train_labels,
epochs=EPOCHS, validation_split = 0.2, verbose=0,
callbacks=[PrintDot()])
###Output
_____no_output_____
###Markdown
Visualize the model's training progress using the stats stored in the `history` object.
###Code
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
def plot_history(history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [MPG]')
plt.plot(hist['epoch'], hist['mae'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mae'],
label = 'Val Error')
plt.ylim([0,5])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$MPG^2$]')
plt.plot(hist['epoch'], hist['mse'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mse'],
label = 'Val Error')
plt.ylim([0,20])
plt.legend()
plt.show()
plot_history(history)
###Output
_____no_output_____
###Markdown
This graph shows little improvement, or even degradation in the validation error after about 100 epochs. Let's update the `model.fit` call to automatically stop training when the validation score doesn't improve. We'll use an *EarlyStopping callback* that tests a training condition for every epoch. If a set amount of epochs elapses without showing improvement, then automatically stop the training.You can learn more about this callback [here](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/callbacks/EarlyStopping).
###Code
model = build_model()
# The patience parameter is the number of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(normed_train_data, train_labels, epochs=EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
plot_history(history)
###Output
_____no_output_____
###Markdown
The graph shows that on the validation set, the average error is usually around +/- 2 MPG. Is this good? We'll leave that decision up to you.Let's see how well the model generalizes by using the **test** set, which we did not use when training the model. This tells us how well we can expect the model to predict when we use it in the real world.
###Code
loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae))
###Output
_____no_output_____
###Markdown
Make predictionsFinally, predict MPG values using data in the testing set:
###Code
test_predictions = model.predict(normed_test_data).flatten()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])
###Output
_____no_output_____
###Markdown
It looks like our model predicts reasonably well. Let's take a look at the error distribution.
###Code
error = test_predictions - test_labels
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [MPG]")
_ = plt.ylabel("Count")
###Output
_____no_output_____
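###Markdown
As a final, optional illustration of the caution above about normalization: any new sample has to go through the same one-hot encoding and the same training-set statistics before it is fed to the model. The sketch below is not part of the original tutorial, and the feature values of the hypothetical car are made up.
###Code
# A hypothetical car described with the raw features; 'Origin' (1 = USA)
# is one-hot encoded by hand, mirroring the transformation above.
new_car = pd.DataFrame([{
    'Cylinders': 4,
    'Displacement': 140.0,
    'Horsepower': 86.0,
    'Weight': 2790.0,
    'Acceleration': 15.6,
    'Model Year': 82,
    'USA': 1.0,
    'Europe': 0.0,
    'Japan': 0.0,
}])

# Make sure the column order matches the training data, then reuse the
# training-set statistics via norm() before predicting.
new_car = new_car[train_dataset.keys()]
model.predict(norm(new_car))
###Output
_____no_output_____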
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Regression: Predict fuel efficiency View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook In a *regression* problem, we aim to predict the output of a continuous value, like a price or a probability. Contrast this with a *classification* problem, where we aim to select a class from a list of classes (for example, where a picture contains an apple or an orange, recognizing which fruit is in the picture).This notebook uses the classic [Auto MPG](https://archive.ics.uci.edu/ml/datasets/auto+mpg) Dataset and builds a model to predict the fuel efficiency of late-1970s and early 1980s automobiles. To do this, we'll provide the model with a description of many automobiles from that time period. This description includes attributes like: cylinders, displacement, horsepower, and weight.This example uses the `tf.keras` API, see [this guide](https://www.tensorflow.org/guide/keras) for details.
###Code
# Use seaborn for pairplot
!pip install seaborn
from __future__ import absolute_import, division, print_function, unicode_literals
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
try:
%tensorflow_version 2.x # Colab only.
except Exception:
pass
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
###Output
_____no_output_____
###Markdown
The Auto MPG datasetThe dataset is available from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/). Get the dataFirst download the dataset.
###Code
dataset_path = keras.utils.get_file("auto-mpg.data", "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
dataset_path
###Output
_____no_output_____
###Markdown
Import it using pandas
###Code
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
###Output
_____no_output_____
###Markdown
Clean the dataThe dataset contains a few unknown values.
###Code
dataset.isna().sum()
###Output
_____no_output_____
###Markdown
To keep this initial tutorial simple, drop those rows.
###Code
dataset = dataset.dropna()
###Output
_____no_output_____
###Markdown
The `"Origin"` column is really categorical, not numeric. So convert that to a one-hot:
###Code
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0
dataset.tail()
###Output
_____no_output_____
###Markdown
Split the data into train and testNow split the dataset into a training set and a test set.We will use the test set in the final evaluation of our model.
###Code
train_dataset = dataset.sample(frac=0.8,random_state=0)
test_dataset = dataset.drop(train_dataset.index)
###Output
_____no_output_____
###Markdown
Inspect the dataHave a quick look at the joint distribution of a few pairs of columns from the training set.
###Code
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")
###Output
_____no_output_____
###Markdown
Also look at the overall statistics:
###Code
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats
###Output
_____no_output_____
###Markdown
Split features from labelsSeparate the target value, or "label", from the features. This label is the value that you will train the model to predict.
###Code
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
###Output
_____no_output_____
###Markdown
Normalize the dataLook again at the `train_stats` block above and note how different the ranges of each feature are. It is good practice to normalize features that use different scales and ranges. Although the model *might* converge without feature normalization, it makes training more difficult, and it makes the resulting model dependent on the choice of units used in the input.Note: Although we intentionally generate these statistics from only the training dataset, these statistics will also be used to normalize the test dataset. We need to do that to project the test dataset into the same distribution that the model has been trained on.
###Code
def norm(x):
return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
###Output
_____no_output_____
###Markdown
This normalized data is what we will use to train the model.Caution: The statistics used to normalize the inputs here (mean and standard deviation) need to be applied to any other data that is fed to the model, along with the one-hot encoding that we did earlier. That includes the test set as well as live data when the model is used in production. The model Build the modelLet's build our model. Here, we'll use a `Sequential` model with two densely connected hidden layers, and an output layer that returns a single, continuous value. The model building steps are wrapped in a function, `build_model`, since we'll create a second model, later on.
###Code
def build_model():
model = keras.Sequential([
layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
model = build_model()
###Output
_____no_output_____
###Markdown
Inspect the modelUse the `.summary` method to print a simple description of the model
###Code
model.summary()
###Output
_____no_output_____
###Markdown
Now try out the model. Take a batch of `10` examples from the training data and call `model.predict` on it.
###Code
example_batch = normed_train_data[:10]
example_result = model.predict(example_batch)
example_result
###Output
_____no_output_____
###Markdown
It seems to be working, and it produces a result of the expected shape and type. Train the modelTrain the model for 1000 epochs, and record the training and validation metrics in the `history` object.
###Code
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(
normed_train_data, train_labels,
epochs=EPOCHS, validation_split = 0.2, verbose=0,
callbacks=[PrintDot()])
###Output
_____no_output_____
###Markdown
Visualize the model's training progress using the stats stored in the `history` object.
###Code
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
def plot_history(history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [MPG]')
plt.plot(hist['epoch'], hist['mae'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mae'],
label = 'Val Error')
plt.ylim([0,5])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$MPG^2$]')
plt.plot(hist['epoch'], hist['mse'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mse'],
label = 'Val Error')
plt.ylim([0,20])
plt.legend()
plt.show()
plot_history(history)
###Output
_____no_output_____
###Markdown
This graph shows little improvement, or even degradation in the validation error after about 100 epochs. Let's update the `model.fit` call to automatically stop training when the validation score doesn't improve. We'll use an *EarlyStopping callback* that tests a training condition for every epoch. If a set amount of epochs elapses without showing improvement, then automatically stop the training.You can learn more about this callback [here](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/callbacks/EarlyStopping).
###Code
model = build_model()
# The patience parameter is the number of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(normed_train_data, train_labels, epochs=EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
plot_history(history)
###Output
_____no_output_____
###Markdown
The graph shows that on the validation set, the average error is usually around +/- 2 MPG. Is this good? We'll leave that decision up to you.Let's see how well the model generalizes by using the **test** set, which we did not use when training the model. This tells us how well we can expect the model to predict when we use it in the real world.
###Code
loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae))
###Output
_____no_output_____
###Markdown
Make predictionsFinally, predict MPG values using data in the testing set:
###Code
test_predictions = model.predict(normed_test_data).flatten()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])
###Output
_____no_output_____
###Markdown
It looks like our model predicts reasonably well. Let's take a look at the error distribution.
###Code
error = test_predictions - test_labels
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [MPG]")
_ = plt.ylabel("Count")
###Output
_____no_output_____
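###Markdown
The histogram gives a visual impression of the errors; as a small optional addition (not part of the original tutorial), the same numbers can be summarized directly from the `error` series computed above.
###Code
# Summary statistics of the prediction errors behind the histogram.
print("Mean error:            {:5.2f} MPG".format(error.mean()))
print("Error std deviation:   {:5.2f} MPG".format(error.std()))
print("Largest underestimate: {:5.2f} MPG".format(error.min()))
print("Largest overestimate:  {:5.2f} MPG".format(error.max()))
###Output
_____no_output_____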
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Predict fuel efficiency: regression View on TensorFlow.org Run in Google Colab View source on GitHub In a *regression* problem, we aim to predict the output of a continuous value, like a price or a probability. Contrast this with a *classification* problem, where we aim to select a class from a list of classes (for example, where a picture contains an apple or an orange, recognizing which fruit is in the picture).This notebook uses the classic [Auto MPG](https://archive.ics.uci.edu/ml/datasets/auto+mpg) Dataset and builds a model to predict the fuel efficiency of late-1970s and early 1980s automobiles. To do this, we'll provide the model with a description of many automobiles from that time period. This description includes attributes like: cylinders, displacement, horsepower, and weight.This example uses the `tf.keras` API, see [this guide](https://www.tensorflow.org/guide/keras) for details.
###Code
# Use seaborn for pairplot
!pip install seaborn
from __future__ import absolute_import, division, print_function, unicode_literals
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
!pip install tensorflow==2.0.0-alpha0
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
###Output
_____no_output_____
###Markdown
The Auto MPG datasetThe dataset is available from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/). Get the dataFirst download the dataset.
###Code
dataset_path = keras.utils.get_file("auto-mpg.data", "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
dataset_path
###Output
_____no_output_____
###Markdown
Import it using pandas
###Code
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
###Output
_____no_output_____
###Markdown
Clean the dataThe dataset contains a few unknown values.
###Code
dataset.isna().sum()
###Output
_____no_output_____
###Markdown
To keep this initial tutorial simple, drop those rows.
###Code
dataset = dataset.dropna()
###Output
_____no_output_____
###Markdown
The `"Origin"` column is really categorical, not numeric. So convert that to a one-hot:
###Code
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0
dataset.tail()
###Output
_____no_output_____
###Markdown
Split the data into train and testNow split the dataset into a training set and a test set.We will use the test set in the final evaluation of our model.
###Code
train_dataset = dataset.sample(frac=0.8,random_state=0)
test_dataset = dataset.drop(train_dataset.index)
###Output
_____no_output_____
###Markdown
Inspect the dataHave a quick look at the joint distribution of a few pairs of columns from the training set.
###Code
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")
###Output
_____no_output_____
###Markdown
Also look at the overall statistics:
###Code
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats
###Output
_____no_output_____
###Markdown
Split features from labelsSeparate the target value, or "label", from the features. This label is the value that you will train the model to predict.
###Code
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
###Output
_____no_output_____
###Markdown
Normalize the dataLook again at the `train_stats` block above and note how different the ranges of each feature are. It is good practice to normalize features that use different scales and ranges. Although the model *might* converge without feature normalization, it makes training more difficult, and it makes the resulting model dependent on the choice of units used in the input. Note: Although we intentionally generate these statistics from only the training dataset, these statistics will also be used to normalize the test dataset. We need to do that to project the test dataset into the same distribution that the model has been trained on.
###Code
def norm(x):
return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
###Output
_____no_output_____
###Markdown
This normalized data is what we will use to train the model.Caution: The statistics used to normalize the inputs here (mean and standard deviation) need to be applied to any other data that is fed to the model, along with the one-hot encoding that we did earlier. That includes the test set as well as live data when the model is used in production. The model Build the modelLet's build our model. Here, we'll use a `Sequential` model with two densely connected hidden layers, and an output layer that returns a single, continuous value. The model building steps are wrapped in a function, `build_model`, since we'll create a second model, later on.
###Code
def build_model():
model = keras.Sequential([
layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
model = build_model()
###Output
_____no_output_____
###Markdown
Inspect the modelUse the `.summary` method to print a simple description of the model
###Code
model.summary()
###Output
_____no_output_____
###Markdown
Now try out the model. Take a batch of `10` examples from the training data and call `model.predict` on it.
###Code
example_batch = normed_train_data[:10]
example_result = model.predict(example_batch)
example_result
###Output
_____no_output_____
###Markdown
It seems to be working, and it produces a result of the expected shape and type. Train the modelTrain the model for 1000 epochs, and record the training and validation metrics in the `history` object.
###Code
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(
normed_train_data, train_labels,
epochs=EPOCHS, validation_split = 0.2, verbose=0,
callbacks=[PrintDot()])
###Output
_____no_output_____
###Markdown
Visualize the model's training progress using the stats stored in the `history` object.
###Code
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
def plot_history(history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [MPG]')
plt.plot(hist['epoch'], hist['mae'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mae'],
label = 'Val Error')
plt.ylim([0,5])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$MPG^2$]')
plt.plot(hist['epoch'], hist['mse'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mse'],
label = 'Val Error')
plt.ylim([0,20])
plt.legend()
plt.show()
plot_history(history)
###Output
_____no_output_____
###Markdown
This graph shows little improvement, or even degradation in the validation error after about 100 epochs. Let's update the `model.fit` call to automatically stop training when the validation score doesn't improve. We'll use an *EarlyStopping callback* that tests a training condition for every epoch. If a set amount of epochs elapses without showing improvement, then automatically stop the training.You can learn more about this callback [here](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/callbacks/EarlyStopping).
###Code
model = build_model()
# The patience parameter is the number of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(normed_train_data, train_labels, epochs=EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
plot_history(history)
###Output
_____no_output_____
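###Markdown
As a quick optional check (not part of the original tutorial), the `history` object also tells us how many epochs actually ran before the EarlyStopping callback stopped training:
###Code
# EarlyStopping usually ends training well before the EPOCHS limit.
print("Stopped after {} epochs (limit was {}).".format(len(history.epoch), EPOCHS))
###Output
_____no_output_____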
###Markdown
The graph shows that on the validation set, the average error is usually around +/- 2 MPG. Is this good? We'll leave that decision up to you.Let's see how well the model generalizes by using the **test** set, which we did not use when training the model. This tells us how well we can expect the model to predict when we use it in the real world.
###Code
loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae))
###Output
_____no_output_____
###Markdown
Make predictionsFinally, predict MPG values using data in the testing set:
###Code
test_predictions = model.predict(normed_test_data).flatten()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])
###Output
_____no_output_____
###Markdown
It looks like our model predicts reasonably well. Let's take a look at the error distribution.
###Code
error = test_predictions - test_labels
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [MPG]")
_ = plt.ylabel("Count")
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Predict fuel efficiency: regression View on TensorFlow.org Run in Google Colab View source on GitHub In a *regression* problem, we aim to predict the output of a continuous value, like a price or a probability. Contrast this with a *classification* problem, where we aim to select a class from a list of classes (for example, where a picture contains an apple or an orange, recognizing which fruit is in the picture).This notebook uses the classic [Auto MPG](https://archive.ics.uci.edu/ml/datasets/auto+mpg) Dataset and builds a model to predict the fuel efficiency of late-1970s and early 1980s automobiles. To do this, we'll provide the model with a description of many automobiles from that time period. This description includes attributes like: cylinders, displacement, horsepower, and weight.This example uses the `tf.keras` API, see [this guide](https://www.tensorflow.org/guide/keras) for details.
###Code
# Use seaborn for pairplot
!pip install seaborn
from __future__ import absolute_import, division, print_function
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
!pip install tf-nightly-2.0-preview
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
###Output
_____no_output_____
###Markdown
The Auto MPG datasetThe dataset is available from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/). Get the dataFirst download the dataset.
###Code
dataset_path = keras.utils.get_file("auto-mpg.data", "https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
dataset_path
###Output
_____no_output_____
###Markdown
Import it using pandas
###Code
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
###Output
_____no_output_____
###Markdown
Clean the dataThe dataset contains a few unknown values.
###Code
dataset.isna().sum()
###Output
_____no_output_____
###Markdown
To keep this initial tutorial simple, drop those rows.
###Code
dataset = dataset.dropna()
###Output
_____no_output_____
###Markdown
The `"Origin"` column is really categorical, not numeric. So convert that to a one-hot:
###Code
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0
dataset.tail()
###Output
_____no_output_____
###Markdown
Split the data into train and testNow split the dataset into a training set and a test set.We will use the test set in the final evaluation of our model.
###Code
train_dataset = dataset.sample(frac=0.8,random_state=0)
test_dataset = dataset.drop(train_dataset.index)
###Output
_____no_output_____
###Markdown
Inspect the dataHave a quick look at the joint distribution of a few pairs of columns from the training set.
###Code
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")
###Output
_____no_output_____
###Markdown
Also look at the overall statistics:
###Code
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats
###Output
_____no_output_____
###Markdown
Split features from labelsSeparate the target value, or "label", from the features. This label is the value that you will train the model to predict.
###Code
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
###Output
_____no_output_____
###Markdown
Normalize the dataLook again at the `train_stats` block above and note how different the ranges of each feature are. It is good practice to normalize features that use different scales and ranges. Although the model *might* converge without feature normalization, it makes training more difficult, and it makes the resulting model dependent on the choice of units used in the input. Note: Although we intentionally generate these statistics from only the training dataset, these statistics will also be used to normalize the test dataset. We need to do that to project the test dataset into the same distribution that the model has been trained on.
###Code
def norm(x):
return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
###Output
_____no_output_____
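###Markdown
As an optional sanity check (not part of the original tutorial), the normalized training features should now have roughly zero mean and unit standard deviation:
###Code
# After normalization, each training feature has mean ~0 and std ~1.
normed_train_data.describe().loc[['mean', 'std']].round(2)
###Output
_____no_output_____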
###Markdown
This normalized data is what we will use to train the model.Caution: The statistics used to normalize the inputs here (mean and standard deviation) need to be applied to any other data that is fed to the model, along with the one-hot encoding that we did earlier. That includes the test set as well as live data when the model is used in production. The model Build the modelLet's build our model. Here, we'll use a `Sequential` model with two densely connected hidden layers, and an output layer that returns a single, continuous value. The model building steps are wrapped in a function, `build_model`, since we'll create a second model, later on.
###Code
def build_model():
model = keras.Sequential([
layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
model = build_model()
###Output
_____no_output_____
###Markdown
Inspect the modelUse the `.summary` method to print a simple description of the model
###Code
model.summary()
###Output
_____no_output_____
###Markdown
Now try out the model. Take a batch of `10` examples from the training data and call `model.predict` on it.
###Code
example_batch = normed_train_data[:10]
example_result = model.predict(example_batch)
example_result
###Output
_____no_output_____
###Markdown
It seems to be working, and it produces a result of the expected shape and type. Train the modelTrain the model for 1000 epochs, and record the training and validation metrics in the `history` object.
###Code
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(
normed_train_data, train_labels,
epochs=EPOCHS, validation_split = 0.2, verbose=0,
callbacks=[PrintDot()])
###Output
_____no_output_____
###Markdown
Visualize the model's training progress using the stats stored in the `history` object.
###Code
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
def plot_history(history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [MPG]')
plt.plot(hist['epoch'], hist['mean_absolute_error'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_absolute_error'],
label = 'Val Error')
plt.ylim([0,5])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$MPG^2$]')
plt.plot(hist['epoch'], hist['mean_squared_error'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_squared_error'],
label = 'Val Error')
plt.ylim([0,20])
plt.legend()
plt.show()
plot_history(history)
###Output
_____no_output_____
###Markdown
This graph shows little improvement, or even degradation in the validation error after about 100 epochs. Let's update the `model.fit` call to automatically stop training when the validation score doesn't improve. We'll use an *EarlyStopping callback* that tests a training condition for every epoch. If a set amount of epochs elapses without showing improvement, then automatically stop the training.You can learn more about this callback [here](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/callbacks/EarlyStopping).
###Code
model = build_model()
# The patience parameter is the number of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(normed_train_data, train_labels, epochs=EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
plot_history(history)
###Output
_____no_output_____
###Markdown
The graph shows that on the validation set, the average error is usually around +/- 2 MPG. Is this good? We'll leave that decision up to you.Let's see how well the model generalizes by using the **test** set, which we did not use when training the model. This tells us how well we can expect the model to predict when we use it in the real world.
###Code
loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae))
###Output
_____no_output_____
###Markdown
Make predictionsFinally, predict MPG values using data in the testing set:
###Code
test_predictions = model.predict(normed_test_data).flatten()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])
###Output
_____no_output_____
###Markdown
It looks like our model predicts reasonably well. Let's take a look at the error distribution.
###Code
error = test_predictions - test_labels
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [MPG]")
_ = plt.ylabel("Count")
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Predict fuel efficiency: regression View on TensorFlow.org Run in Google Colab View source on GitHub In a *regression* problem, we aim to predict the output of a continuous value, like a price or a probability. Contrast this with a *classification* problem, where we aim to select a class from a list of classes (for example, where a picture contains an apple or an orange, recognizing which fruit is in the picture).This notebook uses the classic [Auto MPG](https://archive.ics.uci.edu/ml/datasets/auto+mpg) Dataset and builds a model to predict the fuel efficiency of late-1970s and early 1980s automobiles. To do this, we'll provide the model with a description of many automobiles from that time period. This description includes attributes like: cylinders, displacement, horsepower, and weight.This example uses the `tf.keras` API, see [this guide](https://www.tensorflow.org/guide/keras) for details.
###Code
# Use seaborn for pairplot
!pip install seaborn
from __future__ import absolute_import, division, print_function
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
!pip install tf-nightly-2.0-preview
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
###Output
_____no_output_____
###Markdown
The Auto MPG datasetThe dataset is available from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/). Get the dataFirst download the dataset.
###Code
dataset_path = keras.utils.get_file("auto-mpg.data", "https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
dataset_path
###Output
_____no_output_____
###Markdown
Import it using pandas
###Code
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
###Output
_____no_output_____
###Markdown
Clean the dataThe dataset contains a few unknown values.
###Code
dataset.isna().sum()
###Output
_____no_output_____
###Markdown
To keep this initial tutorial simple, drop those rows.
###Code
dataset = dataset.dropna()
###Output
_____no_output_____
###Markdown
The `"Origin"` column is really categorical, not numeric. So convert that to a one-hot:
###Code
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0
dataset.tail()
###Output
_____no_output_____
###Markdown
Split the data into train and testNow split the dataset into a training set and a test set.We will use the test set in the final evaluation of our model.
###Code
train_dataset = dataset.sample(frac=0.8,random_state=0)
test_dataset = dataset.drop(train_dataset.index)
###Output
_____no_output_____
###Markdown
Inspect the dataHave a quick look at the joint distribution of a few pairs of columns from the training set.
###Code
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")
###Output
_____no_output_____
###Markdown
Also look at the overall statistics:
###Code
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats
###Output
_____no_output_____
###Markdown
Split features from labelsSeparate the target value, or "label", from the features. This label is the value that you will train the model to predict.
###Code
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
###Output
_____no_output_____
###Markdown
Normalize the dataLook again at the `train_stats` block above and note how different the ranges of each feature are. It is good practice to normalize features that use different scales and ranges. Although the model *might* converge without feature normalization, it makes training more difficult, and it makes the resulting model dependent on the choice of units used in the input. Note: Although we intentionally generate these statistics from only the training dataset, these statistics will also be used to normalize the test dataset. We need to do that to project the test dataset into the same distribution that the model has been trained on.
###Code
def norm(x):
return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
###Output
_____no_output_____
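###Markdown
As a quick sanity check (an addition to the tutorial), the normalized training features should now have roughly zero mean and unit standard deviation.
###Code
# Each column of the normalized training data should be close to
# mean 0 and standard deviation 1 (up to rounding).
normed_train_data.describe().loc[['mean', 'std']].round(2)
###Output
_____no_output_____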
###Markdown
This normalized data is what we will use to train the model.Caution: The statistics used to normalize the inputs here (mean and standard deviation) need to be applied to any other data that is fed to the model, along with the one-hot encoding that we did earlier. That includes the test set as well as live data when the model is used in production. The model Build the modelLet's build our model. Here, we'll use a `Sequential` model with two densely connected hidden layers, and an output layer that returns a single, continuous value. The model building steps are wrapped in a function, `build_model`, since we'll create a second model, later on.
###Code
def build_model():
model = keras.Sequential([
layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
model = build_model()
###Output
_____no_output_____
###Markdown
Inspect the modelUse the `.summary` method to print a simple description of the model
###Code
model.summary()
###Output
_____no_output_____
###Markdown
Now try out the model. Take a batch of `10` examples from the training data and call `model.predict` on it.
###Code
example_batch = normed_train_data[:10]
example_result = model.predict(example_batch)
example_result
###Output
_____no_output_____
###Markdown
It seems to be working, and it produces a result of the expected shape and type. Train the modelTrain the model for 1000 epochs, and record the training and validation accuracy in the `history` object.
###Code
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(
normed_train_data, train_labels,
epochs=EPOCHS, validation_split = 0.2, verbose=0,
callbacks=[PrintDot()])
###Output
_____no_output_____
###Markdown
Visualize the model's training progress using the stats stored in the `history` object.
###Code
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
def plot_history(history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [MPG]')
plt.plot(hist['epoch'], hist['mae'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mae'],
label = 'Val Error')
plt.ylim([0,5])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$MPG^2$]')
plt.plot(hist['epoch'], hist['mse'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mse'],
label = 'Val Error')
plt.ylim([0,20])
plt.legend()
plt.show()
plot_history(history)
###Output
_____no_output_____
###Markdown
This graph shows little improvement, or even degradation in the validation error after about 100 epochs. Let's update the `model.fit` call to automatically stop training when the validation score doesn't improve. We'll use an *EarlyStopping callback* that tests a training condition for every epoch. If a set amount of epochs elapses without showing improvement, then automatically stop the training.You can learn more about this callback [here](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/callbacks/EarlyStopping).
###Code
model = build_model()
# The patience parameter is the amount of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(normed_train_data, train_labels, epochs=EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
plot_history(history)
###Output
_____no_output_____
###Markdown
The graph shows that on the validation set, the average error is usually around +/- 2 MPG. Is this good? We'll leave that decision up to you.Let's see how well the model generalizes by using the **test** set, which we did not use when training the model. This tells us how well we can expect the model to predict when we use it in the real world.
###Code
loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae))
###Output
_____no_output_____
###Markdown
Make predictionsFinally, predict MPG values using data in the testing set:
###Code
test_predictions = model.predict(normed_test_data).flatten()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])
###Output
_____no_output_____
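###Markdown
To make the earlier caution concrete, here is a minimal sketch (the feature values are made up for illustration) of scoring a single new car: build a one-row DataFrame with the same columns as the training data, apply the same `norm` function, and only then call `model.predict`.
###Code
# Hypothetical new car; the values below are invented for illustration.
new_car = pd.DataFrame([{
    'Cylinders': 4, 'Displacement': 140.0, 'Horsepower': 86.0,
    'Weight': 2790.0, 'Acceleration': 15.6, 'Model Year': 82,
    'USA': 1.0, 'Europe': 0.0, 'Japan': 0.0
}], columns=train_dataset.columns)
# Apply the training-set statistics before predicting.
model.predict(norm(new_car))
###Output
_____no_output_____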
###Markdown
It looks like our model predicts reasonably well. Let's take a look at the error distribution.
###Code
error = test_predictions - test_labels
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [MPG]")
_ = plt.ylabel("Count")
###Output
_____no_output_____
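###Markdown
A small addition to the tutorial: beyond the histogram, it can help to summarize the prediction errors numerically. The mean shows any systematic bias, while the standard deviation shows the typical spread.
###Code
# Summarize the error distribution shown in the histogram above.
print("Mean error: {:5.2f} MPG".format(error.mean()))
print("Error std:  {:5.2f} MPG".format(error.std()))
###Output
_____no_output_____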
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Predict fuel efficiency: regression View on TensorFlow.org Run in Google Colab View source on GitHub In a *regression* problem, we aim to predict the output of a continuous value, like a price or a probability. Contrast this with a *classification* problem, where we aim to predict a discrete label (for example, where a picture contains an apple or an orange). This notebook uses the classic [Auto MPG](https://archive.ics.uci.edu/ml/datasets/auto+mpg) Dataset and builds a model to predict the fuel efficiency of late-1970s and early 1980s automobiles. To do this, we'll provide the model with a description of many automobiles from that time period. This description includes attributes like: cylinders, displacement, horsepower, and weight.This example uses the `tf.keras` API, see [this guide](https://www.tensorflow.org/guide/keras) for details.
###Code
# Use seaborn for pairplot
!pip install seaborn
from __future__ import absolute_import, division, print_function
import pathlib
import pandas as pd
import seaborn as sns
!pip install tf-nightly-2.0-preview
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
###Output
_____no_output_____
###Markdown
The Auto MPG datasetThe dataset is available from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/). Get the dataFirst download the dataset.
###Code
dataset_path = keras.utils.get_file("auto-mpg.data", "https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
dataset_path
###Output
_____no_output_____
###Markdown
Import it using pandas
###Code
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
###Output
_____no_output_____
###Markdown
Clean the dataThe dataset contains a few unknown values.
###Code
dataset.isna().sum()
###Output
_____no_output_____
###Markdown
To keep this initial tutorial simple, drop those rows.
###Code
dataset = dataset.dropna()
###Output
_____no_output_____
###Markdown
The `"Origin"` column is really categorical, not numeric. So convert that to a one-hot:
###Code
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0
dataset.tail()
###Output
_____no_output_____
###Markdown
Split the data into train and testNow split the data into a train and a test set.We will use the test set in the final evaluation of our model.
###Code
train_dataset = dataset.sample(frac=0.8,random_state=0)
test_dataset = dataset.drop(train_dataset.index)
###Output
_____no_output_____
###Markdown
Inspect the dataHave a quick look at the joint distribution of a few pairs of columns from the training set.
###Code
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")
###Output
_____no_output_____
###Markdown
Also look at the overall statistics:
###Code
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats
###Output
_____no_output_____
###Markdown
Split features from labelsNow separate the target value, or "label", from the features. This label is the value that we will train our model to predict.
###Code
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
###Output
_____no_output_____
###Markdown
Normalize the dataLook again at the `train_stats` block above and note how different the ranges of each feature are. It is good practice to normalize features that use different scales and ranges. Although the model *might* converge without feature normalization, it makes training more difficult, and it makes the resulting model dependent on the choice of units used in the input. Note: We intentionally use the statistics from only the training set; these same statistics will also be used for evaluation. This is so that the model doesn't have any information about the test set.
###Code
def norm(x):
return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
###Output
_____no_output_____
###Markdown
This normalized data is what we will use to train the model.Caution: The statistics used to normalize the inputs here are as important as the model weights. The model Build the modelLet's build our model. Here, we'll use a `Sequential` model with two densely connected hidden layers, and an output layer that returns a single, continuous value. The model building steps are wrapped in a function, `build_model`, since we'll create a second model, later on.
###Code
def build_model():
model = keras.Sequential([
layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
model = build_model()
###Output
_____no_output_____
###Markdown
Inspect the modelUse the `.summary` method to print a simple description of the model
###Code
model.summary()
###Output
_____no_output_____
###Markdown
Now try out the model. Take a batch of `10` examples from the training data and call `model.predict` on it.
###Code
example_batch = normed_train_data[:10]
example_result = model.predict(example_batch)
example_result
###Output
_____no_output_____
###Markdown
It seems to be working, and it produces a result of the expected shape and type. Train the modelTrain the model for 1000 epochs, and record the training and validation accuracy in the `history` object.
###Code
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(
normed_train_data, train_labels,
epochs=EPOCHS, validation_split = 0.2, verbose=0,
callbacks=[PrintDot()])
###Output
_____no_output_____
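###Markdown
The keys recorded in `history.history` follow the metric names passed to `compile` (here `'mae'` and `'mse'`, plus their `val_` counterparts), so it is worth printing them before plotting. This check is an addition to the tutorial.
###Code
# Inspect which metrics were recorded during training.
print(history.history.keys())
###Output
_____no_output_____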
###Markdown
Visualize the model's training progress using the stats stored in the `history` object.
###Code
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
import matplotlib.pyplot as plt
def plot_history(history):
  # Rebuild the stats from the history object that was passed in, so the
  # function also works for later training runs, not just the first one.
  hist = pd.DataFrame(history.history)
  hist['epoch'] = history.epoch

  plt.figure()
  plt.xlabel('Epoch')
  plt.ylabel('Mean Abs Error [MPG]')
  # With metrics=['mae', 'mse'] above, the recorded keys are 'mae'/'val_mae'.
  plt.plot(hist['epoch'], hist['mae'], label='Train Error')
  plt.plot(hist['epoch'], hist['val_mae'], label='Val Error')
  plt.legend()
  plt.ylim([0,5])

  plt.figure()
  plt.xlabel('Epoch')
  plt.ylabel('Mean Square Error [$MPG^2$]')
  plt.plot(hist['epoch'], hist['mse'], label='Train Error')
  plt.plot(hist['epoch'], hist['val_mse'], label='Val Error')
  plt.legend()
  plt.ylim([0,20])
plot_history(history)
###Output
_____no_output_____
###Markdown
This graph shows little improvement, or even degradation in the validation error after a few hundred epochs. Let's update the `model.fit` method to automatically stop training when the validation score doesn't improve. We'll use a *callback* that tests a training condition for every epoch. If a set amount of epochs elapses without showing improvement, then automatically stop the training.You can learn more about this callback [here](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/callbacks/EarlyStopping).
###Code
model = build_model()
# The patience parameter is the amount of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=50)
history = model.fit(normed_train_data, train_labels, epochs=EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
plot_history(history)
###Output
_____no_output_____
###Markdown
The graph shows that on the validation set, the average error is usually around +/- 2 MPG. Is this good? We'll leave that decision up to you.Let's see how the model performs on the **test** set, which we did not use when training the model:
###Code
loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae))
###Output
_____no_output_____
###Markdown
Make predictionsFinally, predict fuel efficiency using data in the testing set:
###Code
test_predictions = model.predict(normed_test_data).flatten()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])
error = test_predictions - test_labels
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [MPG]")
_ = plt.ylabel("Count")
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Regression: Predict fuel efficiency View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook In a *regression* problem, we aim to predict the output of a continuous value, like a price or a probability. Contrast this with a *classification* problem, where we aim to select a class from a list of classes (for example, where a picture contains an apple or an orange, recognizing which fruit is in the picture).This notebook uses the classic [Auto MPG](https://archive.ics.uci.edu/ml/datasets/auto+mpg) Dataset and builds a model to predict the fuel efficiency of late-1970s and early 1980s automobiles. To do this, we'll provide the model with a description of many automobiles from that time period. This description includes attributes like: cylinders, displacement, horsepower, and weight.This example uses the `tf.keras` API, see [this guide](https://www.tensorflow.org/guide/keras) for details.
###Code
# Use seaborn for pairplot
!pip install seaborn
from __future__ import absolute_import, division, print_function, unicode_literals
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
!pip install tensorflow==2.0.0-beta1
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
###Output
_____no_output_____
###Markdown
The Auto MPG datasetThe dataset is available from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/). Get the dataFirst download the dataset.
###Code
dataset_path = keras.utils.get_file("auto-mpg.data", "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
dataset_path
###Output
_____no_output_____
###Markdown
Import it using pandas
###Code
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
###Output
_____no_output_____
###Markdown
Clean the dataThe dataset contains a few unknown values.
###Code
dataset.isna().sum()
###Output
_____no_output_____
###Markdown
To keep this initial tutorial simple, drop those rows.
###Code
dataset = dataset.dropna()
###Output
_____no_output_____
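###Markdown
Dropping rows is the simplest option. As an aside not used in the rest of this tutorial, a common alternative is to impute the missing values instead, for example with the column median:
###Code
# Illustrative sketch (not used by the rest of the tutorial): impute the
# missing horsepower values with the column median instead of dropping rows.
filled = raw_dataset.copy()
filled['Horsepower'] = filled['Horsepower'].fillna(filled['Horsepower'].median())
filled.isna().sum()
###Output
_____no_output_____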
###Markdown
The `"Origin"` column is really categorical, not numeric. So convert that to a one-hot:
###Code
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0
dataset.tail()
###Output
_____no_output_____
###Markdown
Split the data into train and testNow split the dataset into a training set and a test set.We will use the test set in the final evaluation of our model.
###Code
train_dataset = dataset.sample(frac=0.8,random_state=0)
test_dataset = dataset.drop(train_dataset.index)
###Output
_____no_output_____
###Markdown
Inspect the dataHave a quick look at the joint distribution of a few pairs of columns from the training set.
###Code
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")
###Output
_____no_output_____
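###Markdown
As a small addition to the pairplot, a correlation matrix gives a compact numeric view of the same pairwise relationships:
###Code
# Pairwise correlations between MPG and a few features (addition to the tutorial).
train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]].corr()
###Output
_____no_output_____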
###Markdown
Also look at the overall statistics:
###Code
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats
###Output
_____no_output_____
###Markdown
Split features from labelsSeparate the target value, or "label", from the features. This label is the value that you will train the model to predict.
###Code
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
###Output
_____no_output_____
###Markdown
Normalize the dataLook again at the `train_stats` block above and note how different the ranges of each feature are. It is good practice to normalize features that use different scales and ranges. Although the model *might* converge without feature normalization, it makes training more difficult, and it makes the resulting model dependent on the choice of units used in the input.Note: Although we intentionally generate these statistics from only the training dataset, these statistics will also be used to normalize the test dataset. We need to do that to project the test dataset into the same distribution that the model has been trained on.
###Code
def norm(x):
return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
###Output
_____no_output_____
###Markdown
This normalized data is what we will use to train the model.Caution: The statistics used to normalize the inputs here (mean and standard deviation) need to be applied to any other data that is fed to the model, along with the one-hot encoding that we did earlier. That includes the test set as well as live data when the model is used in production. The model Build the modelLet's build our model. Here, we'll use a `Sequential` model with two densely connected hidden layers, and an output layer that returns a single, continuous value. The model building steps are wrapped in a function, `build_model`, since we'll create a second model, later on.
###Code
def build_model():
model = keras.Sequential([
layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
model = build_model()
###Output
_____no_output_____
###Markdown
Inspect the modelUse the `.summary` method to print a simple description of the model
###Code
model.summary()
###Output
_____no_output_____
###Markdown
Now try out the model. Take a batch of `10` examples from the training data and call `model.predict` on it.
###Code
example_batch = normed_train_data[:10]
example_result = model.predict(example_batch)
example_result
###Output
_____no_output_____
###Markdown
It seems to be working, and it produces a result of the expected shape and type. Train the modelTrain the model for 1000 epochs, and record the training and validation accuracy in the `history` object.
###Code
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(
normed_train_data, train_labels,
epochs=EPOCHS, validation_split = 0.2, verbose=0,
callbacks=[PrintDot()])
###Output
_____no_output_____
###Markdown
Visualize the model's training progress using the stats stored in the `history` object.
###Code
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
def plot_history(history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [MPG]')
plt.plot(hist['epoch'], hist['mae'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mae'],
label = 'Val Error')
plt.ylim([0,5])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$MPG^2$]')
plt.plot(hist['epoch'], hist['mse'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mse'],
label = 'Val Error')
plt.ylim([0,20])
plt.legend()
plt.show()
plot_history(history)
###Output
_____no_output_____
###Markdown
This graph shows little improvement, or even degradation in the validation error after about 100 epochs. Let's update the `model.fit` call to automatically stop training when the validation score doesn't improve. We'll use an *EarlyStopping callback* that tests a training condition for every epoch. If a set amount of epochs elapses without showing improvement, then automatically stop the training.You can learn more about this callback [here](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/callbacks/EarlyStopping).
###Code
model = build_model()
# The patience parameter is the amount of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(normed_train_data, train_labels, epochs=EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
plot_history(history)
###Output
_____no_output_____
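###Markdown
A possible refinement, not used in the original tutorial: `EarlyStopping` can also roll the model back to the weights from its best epoch, rather than keeping the weights from the final (worse) epochs, via its `restore_best_weights` argument.
###Code
# Sketch of early stopping that also restores the best weights seen.
# (Addition to the tutorial; assumes the same patience as above.)
early_stop_best = keras.callbacks.EarlyStopping(
    monitor='val_loss', patience=10, restore_best_weights=True)
###Output
_____no_output_____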
###Markdown
The graph shows that on the validation set, the average error is usually around +/- 2 MPG. Is this good? We'll leave that decision up to you.Let's see how well the model generalizes by using the **test** set, which we did not use when training the model. This tells us how well we can expect the model to predict when we use it in the real world.
###Code
loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae))
###Output
_____no_output_____
###Markdown
Make predictionsFinally, predict MPG values using data in the testing set:
###Code
test_predictions = model.predict(normed_test_data).flatten()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])
###Output
_____no_output_____
###Markdown
It looks like our model predicts reasonably well. Let's take a look at the error distribution.
###Code
error = test_predictions - test_labels
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [MPG]")
_ = plt.ylabel("Count")
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Regression: Predict fuel efficiency View on TensorFlow.org Run in Google Colab View source on GitHub In a *regression* problem, we aim to predict the output of a continuous value, like a price or a probability. Contrast this with a *classification* problem, where we aim to select a class from a list of classes (for example, where a picture contains an apple or an orange, recognizing which fruit is in the picture).This notebook uses the classic [Auto MPG](https://archive.ics.uci.edu/ml/datasets/auto+mpg) Dataset and builds a model to predict the fuel efficiency of late-1970s and early 1980s automobiles. To do this, we'll provide the model with a description of many automobiles from that time period. This description includes attributes like: cylinders, displacement, horsepower, and weight.This example uses the `tf.keras` API, see [this guide](https://www.tensorflow.org/guide/keras) for details.
###Code
# Use seaborn for pairplot
!pip install seaborn
from __future__ import absolute_import, division, print_function, unicode_literals
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
!pip install tensorflow==2.0.0-alpha0
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
###Output
_____no_output_____
###Markdown
The Auto MPG datasetThe dataset is available from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/). Get the dataFirst download the dataset.
###Code
dataset_path = keras.utils.get_file("auto-mpg.data", "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
dataset_path
###Output
_____no_output_____
###Markdown
Import it using pandas
###Code
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
###Output
_____no_output_____
###Markdown
Clean the dataThe dataset contains a few unknown values.
###Code
dataset.isna().sum()
###Output
_____no_output_____
###Markdown
To keep this initial tutorial simple, drop those rows.
###Code
dataset = dataset.dropna()
###Output
_____no_output_____
###Markdown
The `"Origin"` column is really categorical, not numeric. So convert that to a one-hot:
###Code
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0
dataset.tail()
###Output
_____no_output_____
###Markdown
Split the data into train and testNow split the dataset into a training set and a test set.We will use the test set in the final evaluation of our model.
###Code
train_dataset = dataset.sample(frac=0.8,random_state=0)
test_dataset = dataset.drop(train_dataset.index)
###Output
_____no_output_____
###Markdown
Inspect the dataHave a quick look at the joint distribution of a few pairs of columns from the training set.
###Code
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")
###Output
_____no_output_____
###Markdown
Also look at the overall statistics:
###Code
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats
###Output
_____no_output_____
###Markdown
Split features from labelsSeparate the target value, or "label", from the features. This label is the value that you will train the model to predict.
###Code
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
###Output
_____no_output_____
###Markdown
Normalize the dataLook again at the `train_stats` block above and note how different the ranges of each feature are. It is good practice to normalize features that use different scales and ranges. Although the model *might* converge without feature normalization, it makes training more difficult, and it makes the resulting model dependent on the choice of units used in the input.Note: Although we intentionally generate these statistics from only the training dataset, these statistics will also be used to normalize the test dataset. We need to do that to project the test dataset into the same distribution that the model has been trained on.
###Code
def norm(x):
return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
###Output
_____no_output_____
###Markdown
This normalized data is what we will use to train the model.Caution: The statistics used to normalize the inputs here (mean and standard deviation) need to be applied to any other data that is fed to the model, along with the one-hot encoding that we did earlier. That includes the test set as well as live data when the model is used in production. The model Build the modelLet's build our model. Here, we'll use a `Sequential` model with two densely connected hidden layers, and an output layer that returns a single, continuous value. The model building steps are wrapped in a function, `build_model`, since we'll create a second model, later on.
###Code
def build_model():
model = keras.Sequential([
layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
model = build_model()
###Output
_____no_output_____
###Markdown
Inspect the modelUse the `.summary` method to print a simple description of the model
###Code
model.summary()
###Output
_____no_output_____
###Markdown
Now try out the model. Take a batch of `10` examples from the training data and call `model.predict` on it.
###Code
example_batch = normed_train_data[:10]
example_result = model.predict(example_batch)
example_result
###Output
_____no_output_____
###Markdown
It seems to be working, and it produces a result of the expected shape and type. Train the modelTrain the model for 1000 epochs, and record the training and validation accuracy in the `history` object.
###Code
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(
normed_train_data, train_labels,
epochs=EPOCHS, validation_split = 0.2, verbose=0,
callbacks=[PrintDot()])
###Output
_____no_output_____
###Markdown
Visualize the model's training progress using the stats stored in the `history` object.
###Code
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
def plot_history(history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [MPG]')
plt.plot(hist['epoch'], hist['mae'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mae'],
label = 'Val Error')
plt.ylim([0,5])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$MPG^2$]')
plt.plot(hist['epoch'], hist['mse'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mse'],
label = 'Val Error')
plt.ylim([0,20])
plt.legend()
plt.show()
plot_history(history)
###Output
_____no_output_____
###Markdown
This graph shows little improvement, or even degradation in the validation error after about 100 epochs. Let's update the `model.fit` call to automatically stop training when the validation score doesn't improve. We'll use an *EarlyStopping callback* that tests a training condition for every epoch. If a set amount of epochs elapses without showing improvement, then automatically stop the training.You can learn more about this callback [here](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/callbacks/EarlyStopping).
###Code
model = build_model()
# The patience parameter is the amount of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(normed_train_data, train_labels, epochs=EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
plot_history(history)
###Output
_____no_output_____
###Markdown
The graph shows that on the validation set, the average error is usually around +/- 2 MPG. Is this good? We'll leave that decision up to you.Let's see how well the model generalizes by using the **test** set, which we did not use when training the model. This tells us how well we can expect the model to predict when we use it in the real world.
###Code
loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae))
###Output
_____no_output_____
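###Markdown
Since the squared error is harder to interpret, it can be handy to also report the root mean squared error, which is back in MPG units (this line is an addition to the tutorial):
###Code
# RMSE in the same units as the target (MPG).
print("Testing set RMSE: {:5.2f} MPG".format(mse ** 0.5))
###Output
_____no_output_____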
###Markdown
Make predictionsFinally, predict MPG values using data in the testing set:
###Code
test_predictions = model.predict(normed_test_data).flatten()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])
###Output
_____no_output_____
###Markdown
It looks like our model predicts reasonably well. Let's take a look at the error distribution.
###Code
error = test_predictions - test_labels
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [MPG]")
_ = plt.ylabel("Count")
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Regression: Predict fuel efficiency View on TensorFlow.org Run in Google Colab View source on GitHub In a *regression* problem, we aim to predict the output of a continuous value, like a price or a probability. Contrast this with a *classification* problem, where we aim to select a class from a list of classes (for example, where a picture contains an apple or an orange, recognizing which fruit is in the picture).This notebook uses the classic [Auto MPG](https://archive.ics.uci.edu/ml/datasets/auto+mpg) Dataset and builds a model to predict the fuel efficiency of late-1970s and early 1980s automobiles. To do this, we'll provide the model with a description of many automobiles from that time period. This description includes attributes like: cylinders, displacement, horsepower, and weight.This example uses the `tf.keras` API, see [this guide](https://www.tensorflow.org/guide/keras) for details.
###Code
# Use seaborn for pairplot
!pip install seaborn
from __future__ import absolute_import, division, print_function, unicode_literals
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
!pip install tensorflow==2.0.0-beta0
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
###Output
_____no_output_____
###Markdown
The Auto MPG datasetThe dataset is available from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/). Get the dataFirst download the dataset.
###Code
dataset_path = keras.utils.get_file("auto-mpg.data", "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
dataset_path
###Output
_____no_output_____
###Markdown
Import it using pandas
###Code
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
###Output
_____no_output_____
###Markdown
Clean the dataThe dataset contains a few unknown values.
###Code
dataset.isna().sum()
###Output
_____no_output_____
###Markdown
To keep this initial tutorial simple, drop those rows.
###Code
dataset = dataset.dropna()
###Output
_____no_output_____
###Markdown
The `"Origin"` column is really categorical, not numeric. So convert that to a one-hot:
###Code
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0
dataset.tail()
###Output
_____no_output_____
###Markdown
Split the data into train and testNow split the dataset into a training set and a test set.We will use the test set in the final evaluation of our model.
###Code
train_dataset = dataset.sample(frac=0.8,random_state=0)
test_dataset = dataset.drop(train_dataset.index)
###Output
_____no_output_____
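###Markdown
As a quick check (an addition to the tutorial), confirm the 80/20 split sizes:
###Code
# Number of examples in each split.
print(len(train_dataset), 'train examples /', len(test_dataset), 'test examples')
###Output
_____no_output_____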
###Markdown
Inspect the dataHave a quick look at the joint distribution of a few pairs of columns from the training set.
###Code
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")
###Output
_____no_output_____
###Markdown
Also look at the overall statistics:
###Code
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats
###Output
_____no_output_____
###Markdown
Split features from labelsSeparate the target value, or "label", from the features. This label is the value that you will train the model to predict.
###Code
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
###Output
_____no_output_____
###Markdown
Normalize the dataLook again at the `train_stats` block above and note how different the ranges of each feature are. It is good practice to normalize features that use different scales and ranges. Although the model *might* converge without feature normalization, it makes training more difficult, and it makes the resulting model dependent on the choice of units used in the input.Note: Although we intentionally generate these statistics from only the training dataset, these statistics will also be used to normalize the test dataset. We need to do that to project the test dataset into the same distribution that the model has been trained on.
###Code
def norm(x):
return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
###Output
_____no_output_____
###Markdown
This normalized data is what we will use to train the model.Caution: The statistics used to normalize the inputs here (mean and standard deviation) need to be applied to any other data that is fed to the model, along with the one-hot encoding that we did earlier. That includes the test set as well as live data when the model is used in production. The model Build the modelLet's build our model. Here, we'll use a `Sequential` model with two densely connected hidden layers, and an output layer that returns a single, continuous value. The model building steps are wrapped in a function, `build_model`, since we'll create a second model, later on.
###Code
def build_model():
model = keras.Sequential([
layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
model = build_model()
###Output
_____no_output_____
###Markdown
Inspect the modelUse the `.summary` method to print a simple description of the model
###Code
model.summary()
###Output
_____no_output_____
###Markdown
Now try out the model. Take a batch of `10` examples from the training data and call `model.predict` on it.
###Code
example_batch = normed_train_data[:10]
example_result = model.predict(example_batch)
example_result
###Output
_____no_output_____
###Markdown
It seems to be working, and it produces a result of the expected shape and type. Train the modelTrain the model for 1000 epochs, and record the training and validation accuracy in the `history` object.
###Code
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(
normed_train_data, train_labels,
epochs=EPOCHS, validation_split = 0.2, verbose=0,
callbacks=[PrintDot()])
###Output
_____no_output_____
###Markdown
Visualize the model's training progress using the stats stored in the `history` object.
###Code
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
def plot_history(history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [MPG]')
plt.plot(hist['epoch'], hist['mae'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mae'],
label = 'Val Error')
plt.ylim([0,5])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$MPG^2$]')
plt.plot(hist['epoch'], hist['mse'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mse'],
label = 'Val Error')
plt.ylim([0,20])
plt.legend()
plt.show()
plot_history(history)
###Output
_____no_output_____
###Markdown
This graph shows little improvement, or even degradation in the validation error after about 100 epochs. Let's update the `model.fit` call to automatically stop training when the validation score doesn't improve. We'll use an *EarlyStopping callback* that tests a training condition for every epoch. If a set amount of epochs elapses without showing improvement, then automatically stop the training.You can learn more about this callback [here](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/callbacks/EarlyStopping).
###Code
model = build_model()
# The patience parameter is the amount of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(normed_train_data, train_labels, epochs=EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
plot_history(history)
###Output
_____no_output_____
###Markdown
The graph shows that on the validation set, the average error is usually around +/- 2 MPG. Is this good? We'll leave that decision up to you.Let's see how well the model generalizes by using the **test** set, which we did not use when training the model. This tells us how well we can expect the model to predict when we use it in the real world.
###Code
loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae))
###Output
_____no_output_____
###Markdown
Make predictionsFinally, predict MPG values using data in the testing set:
###Code
test_predictions = model.predict(normed_test_data).flatten()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])
###Output
_____no_output_____
###Markdown
It looks like our model predicts reasonably well. Let's take a look at the error distribution.
###Code
error = test_predictions - test_labels
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [MPG]")
_ = plt.ylabel("Count")
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Predict fuel efficiency: regression View on TensorFlow.org Run in Google Colab View source on GitHub In a *regression* problem, we aim to predict the output of a continuous value, like a price or a probability. Contrast this with a *classification* problem, where we aim to select a class from a list of classes (for example, where a picture contains an apple or an orange, recognizing which fruit is in the picture).This notebook uses the classic [Auto MPG](https://archive.ics.uci.edu/ml/datasets/auto+mpg) Dataset and builds a model to predict the fuel efficiency of late-1970s and early 1980s automobiles. To do this, we'll provide the model with a description of many automobiles from that time period. This description includes attributes like: cylinders, displacement, horsepower, and weight.This example uses the `tf.keras` API, see [this guide](https://www.tensorflow.org/guide/keras) for details.
###Code
# Use seaborn for pairplot
!pip install seaborn
from __future__ import absolute_import, division, print_function, unicode_literals
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
!pip install tensorflow==2.0.0-alpha0
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
###Output
_____no_output_____
###Markdown
The Auto MPG datasetThe dataset is available from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/). Get the dataFirst download the dataset.
###Code
dataset_path = keras.utils.get_file("auto-mpg.data", "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
dataset_path
###Output
_____no_output_____
###Markdown
Import it using pandas
###Code
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
###Output
_____no_output_____
###Markdown
Clean the dataThe dataset contains a few unknown values.
###Code
dataset.isna().sum()
###Output
_____no_output_____
###Markdown
To keep this initial tutorial simple drop those rows.
###Code
dataset = dataset.dropna()
###Output
_____no_output_____
###Markdown
The `"Origin"` column is really categorical, not numeric. So convert that to a one-hot:
###Code
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0
dataset.tail()
###Output
_____no_output_____
###Markdown
Split the data into train and testNow split the dataset into a training set and a test set.We will use the test set in the final evaluation of our model.
###Code
train_dataset = dataset.sample(frac=0.8,random_state=0)
test_dataset = dataset.drop(train_dataset.index)
###Output
_____no_output_____
###Markdown
Inspect the dataHave a quick look at the joint distribution of a few pairs of columns from the training set.
###Code
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")
###Output
_____no_output_____
###Markdown
Also look at the overall statistics:
###Code
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats
###Output
_____no_output_____
###Markdown
Split features from labelsSeparate the target value, or "label", from the features. This label is the value that you will train the model to predict.
###Code
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
###Output
_____no_output_____
###Markdown
Normalize the dataLook again at the `train_stats` block above and note how different the ranges of each feature are. It is good practice to normalize features that use different scales and ranges. Although the model *might* converge without feature normalization, it makes training more difficult, and it makes the resulting model dependent on the choice of units used in the input.Note: Although we intentionally generate these statistics from only the training dataset, these statistics will also be used to normalize the test dataset. We need to do that to project the test dataset into the same distribution that the model has been trained on.
###Code
def norm(x):
return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
###Output
_____no_output_____
###Markdown
This normalized data is what we will use to train the model.Caution: The statistics used to normalize the inputs here (mean and standard deviation) need to be applied to any other data that is fed to the model, along with the one-hot encoding that we did earlier. That includes the test set as well as live data when the model is used in production. The model Build the modelLet's build our model. Here, we'll use a `Sequential` model with two densely connected hidden layers, and an output layer that returns a single, continuous value. The model building steps are wrapped in a function, `build_model`, since we'll create a second model, later on.
###Code
def build_model():
model = keras.Sequential([
layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
model = build_model()
###Output
_____no_output_____
###Markdown
Inspect the modelUse the `.summary` method to print a simple description of the model
###Code
model.summary()
###Output
_____no_output_____
###Markdown
Now try out the model. Take a batch of `10` examples from the training data and call `model.predict` on it.
###Code
example_batch = normed_train_data[:10]
example_result = model.predict(example_batch)
example_result
###Output
_____no_output_____
###Markdown
It seems to be working, and it produces a result of the expected shape and type. Train the modelTrain the model for 1000 epochs, and record the training and validation accuracy in the `history` object.
###Code
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(
normed_train_data, train_labels,
epochs=EPOCHS, validation_split = 0.2, verbose=0,
callbacks=[PrintDot()])
###Output
_____no_output_____
###Markdown
Visualize the model's training progress using the stats stored in the `history` object.
###Code
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
def plot_history(history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [MPG]')
plt.plot(hist['epoch'], hist['mae'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mae'],
label = 'Val Error')
plt.ylim([0,5])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$MPG^2$]')
plt.plot(hist['epoch'], hist['mse'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mse'],
label = 'Val Error')
plt.ylim([0,20])
plt.legend()
plt.show()
plot_history(history)
###Output
_____no_output_____
###Markdown
This graph shows little improvement, or even degradation in the validation error after about 100 epochs. Let's update the `model.fit` call to automatically stop training when the validation score doesn't improve. We'll use an *EarlyStopping callback* that tests a training condition for every epoch. If a set amount of epochs elapses without showing improvement, then automatically stop the training.You can learn more about this callback [here](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/callbacks/EarlyStopping).
###Code
model = build_model()
# The patience parameter is the amount of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(normed_train_data, train_labels, epochs=EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
plot_history(history)
###Output
_____no_output_____
###Markdown
The graph shows that on the validation set, the average error is usually around +/- 2 MPG. Is this good? We'll leave that decision up to you.Let's see how well the model generalizes by using the **test** set, which we did not use when training the model. This tells us how well we can expect the model to predict when we use it in the real world.
###Code
loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae))
###Output
_____no_output_____
###Markdown
Make predictionsFinally, predict MPG values using data in the testing set:
###Code
test_predictions = model.predict(normed_test_data).flatten()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])
###Output
_____no_output_____
###Markdown
It looks like our model predicts reasonably well. Let's take a look at the error distribution.
###Code
error = test_predictions - test_labels
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [MPG]")
_ = plt.ylabel("Count")
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Predict fuel efficiency: regression View on TensorFlow.org Run in Google Colab View source on GitHub In a *regression* problem, we aim to predict the output of a continuous value, like a price or a probability. Contrast this with a *classification* problem, where we aim to select a class from a list of classes (for example, where a picture contains an apple or an orange, recognizing which fruit is in the picture).This notebook uses the classic [Auto MPG](https://archive.ics.uci.edu/ml/datasets/auto+mpg) Dataset and builds a model to predict the fuel efficiency of late-1970s and early 1980s automobiles. To do this, we'll provide the model with a description of many automobiles from that time period. This description includes attributes like: cylinders, displacement, horsepower, and weight.This example uses the `tf.keras` API, see [this guide](https://www.tensorflow.org/guide/keras) for details.
###Code
# Use seaborn for pairplot
!pip install seaborn
from __future__ import absolute_import, division, print_function
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
!pip install tensorflow==2.0.0-alpha0
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
###Output
_____no_output_____
###Markdown
The Auto MPG datasetThe dataset is available from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/). Get the dataFirst download the dataset.
###Code
dataset_path = keras.utils.get_file("auto-mpg.data", "https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
dataset_path
###Output
_____no_output_____
###Markdown
Import it using pandas
###Code
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
###Output
_____no_output_____
###Markdown
Clean the dataThe dataset contains a few unknown values.
###Code
dataset.isna().sum()
###Output
_____no_output_____
###Markdown
To keep this initial tutorial simple drop those rows.
###Code
dataset = dataset.dropna()
###Output
_____no_output_____
###Markdown
The `"Origin"` column is really categorical, not numeric. So convert that to a one-hot:
###Code
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0
dataset.tail()
###Output
_____no_output_____
###Markdown
Split the data into train and testNow split the dataset into a training set and a test set.We will use the test set in the final evaluation of our model.
###Code
train_dataset = dataset.sample(frac=0.8,random_state=0)
test_dataset = dataset.drop(train_dataset.index)
###Output
_____no_output_____
###Markdown
Inspect the dataHave a quick look at the joint distribution of a few pairs of columns from the training set.
###Code
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")
###Output
_____no_output_____
###Markdown
Also look at the overall statistics:
###Code
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats
###Output
_____no_output_____
###Markdown
Split features from labelsSeparate the target value, or "label", from the features. This label is the value that you will train the model to predict.
###Code
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
###Output
_____no_output_____
###Markdown
Normalize the dataLook again at the `train_stats` block above and note how different the ranges of each feature are. It is good practice to normalize features that use different scales and ranges. Although the model *might* converge without feature normalization, it makes training more difficult, and it makes the resulting model dependent on the choice of units used in the input. Note: Although we intentionally generate these statistics from only the training dataset, these statistics will also be used to normalize the test dataset. We need to do that to project the test dataset into the same distribution that the model has been trained on.
###Code
def norm(x):
return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
###Output
_____no_output_____
###Markdown
This normalized data is what we will use to train the model.Caution: The statistics used to normalize the inputs here (mean and standard deviation) need to be applied to any other data that is fed to the model, along with the one-hot encoding that we did earlier. That includes the test set as well as live data when the model is used in production. The model Build the modelLet's build our model. Here, we'll use a `Sequential` model with two densely connected hidden layers, and an output layer that returns a single, continuous value. The model building steps are wrapped in a function, `build_model`, since we'll create a second model, later on.
###Code
def build_model():
model = keras.Sequential([
layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
model = build_model()
###Output
_____no_output_____
###Markdown
Inspect the modelUse the `.summary` method to print a simple description of the model
###Code
model.summary()
###Output
_____no_output_____
###Markdown
Now try out the model. Take a batch of `10` examples from the training data and call `model.predict` on it.
###Code
example_batch = normed_train_data[:10]
example_result = model.predict(example_batch)
example_result
###Output
_____no_output_____
###Markdown
It seems to be working, and it produces a result of the expected shape and type. Train the modelTrain the model for 1000 epochs, and record the training and validation accuracy in the `history` object.
###Code
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(
normed_train_data, train_labels,
epochs=EPOCHS, validation_split = 0.2, verbose=0,
callbacks=[PrintDot()])
###Output
_____no_output_____
###Markdown
Visualize the model's training progress using the stats stored in the `history` object.
###Code
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
def plot_history(history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [MPG]')
plt.plot(hist['epoch'], hist['mae'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mae'],
label = 'Val Error')
plt.ylim([0,5])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$MPG^2$]')
plt.plot(hist['epoch'], hist['mse'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mse'],
label = 'Val Error')
plt.ylim([0,20])
plt.legend()
plt.show()
plot_history(history)
###Output
_____no_output_____
###Markdown
This graph shows little improvement, or even degradation in the validation error after about 100 epochs. Let's update the `model.fit` call to automatically stop training when the validation score doesn't improve. We'll use an *EarlyStopping callback* that tests a training condition for every epoch. If a set amount of epochs elapses without showing improvement, then automatically stop the training.You can learn more about this callback [here](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/callbacks/EarlyStopping).
###Code
model = build_model()
# The patience parameter is the amount of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(normed_train_data, train_labels, epochs=EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
plot_history(history)
###Output
_____no_output_____
###Markdown
The graph shows that on the validation set, the average error is usually around +/- 2 MPG. Is this good? We'll leave that decision up to you.Let's see how well the model generalizes by using the **test** set, which we did not use when training the model. This tells us how well we can expect the model to predict when we use it in the real world.
###Code
loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae))
###Output
_____no_output_____
###Markdown
Make predictionsFinally, predict MPG values using data in the testing set:
###Code
test_predictions = model.predict(normed_test_data).flatten()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])
###Output
_____no_output_____
###Markdown
It looks like our model predicts reasonably well. Let's take a look at the error distribution.
###Code
error = test_predictions - test_labels
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [MPG]")
_ = plt.ylabel("Count")
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Regression: Predict fuel efficiency View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook In a *regression* problem, we aim to predict the output of a continuous value, like a price or a probability. Contrast this with a *classification* problem, where we aim to select a class from a list of classes (for example, where a picture contains an apple or an orange, recognizing which fruit is in the picture).This notebook uses the classic [Auto MPG](https://archive.ics.uci.edu/ml/datasets/auto+mpg) Dataset and builds a model to predict the fuel efficiency of late-1970s and early 1980s automobiles. To do this, we'll provide the model with a description of many automobiles from that time period. This description includes attributes like: cylinders, displacement, horsepower, and weight.This example uses the `tf.keras` API, see [this guide](https://www.tensorflow.org/guide/keras) for details.
###Code
# Use seaborn for pairplot
!pip install seaborn
from __future__ import absolute_import, division, print_function, unicode_literals
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
###Output
_____no_output_____
###Markdown
The Auto MPG datasetThe dataset is available from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/). Get the dataFirst download the dataset.
###Code
dataset_path = keras.utils.get_file("auto-mpg.data", "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
dataset_path
###Output
_____no_output_____
###Markdown
Import it using pandas
###Code
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
###Output
_____no_output_____
###Markdown
Clean the dataThe dataset contains a few unknown values.
###Code
dataset.isna().sum()
###Output
_____no_output_____
###Markdown
To keep this initial tutorial simple drop those rows.
###Code
dataset = dataset.dropna()
###Output
_____no_output_____
###Markdown
The `"Origin"` column is really categorical, not numeric. So convert that to a one-hot:
###Code
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0
dataset.tail()
###Output
_____no_output_____
###Markdown
Split the data into train and testNow split the dataset into a training set and a test set.We will use the test set in the final evaluation of our model.
###Code
train_dataset = dataset.sample(frac=0.8,random_state=0)
test_dataset = dataset.drop(train_dataset.index)
###Output
_____no_output_____
###Markdown
Inspect the dataHave a quick look at the joint distribution of a few pairs of columns from the training set.
###Code
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")
###Output
_____no_output_____
###Markdown
Also look at the overall statistics:
###Code
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats
###Output
_____no_output_____
###Markdown
Split features from labelsSeparate the target value, or "label", from the features. This label is the value that you will train the model to predict.
###Code
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
###Output
_____no_output_____
###Markdown
Normalize the dataLook again at the `train_stats` block above and note how different the ranges of each feature are. It is good practice to normalize features that use different scales and ranges. Although the model *might* converge without feature normalization, it makes training more difficult, and it makes the resulting model dependent on the choice of units used in the input.Note: Although we intentionally generate these statistics from only the training dataset, these statistics will also be used to normalize the test dataset. We need to do that to project the test dataset into the same distribution that the model has been trained on.
###Code
def norm(x):
return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
###Output
_____no_output_____
###Markdown
This normalized data is what we will use to train the model.Caution: The statistics used to normalize the inputs here (mean and standard deviation) need to be applied to any other data that is fed to the model, along with the one-hot encoding that we did earlier. That includes the test set as well as live data when the model is used in production. The model Build the modelLet's build our model. Here, we'll use a `Sequential` model with two densely connected hidden layers, and an output layer that returns a single, continuous value. The model building steps are wrapped in a function, `build_model`, since we'll create a second model, later on.
###Code
def build_model():
model = keras.Sequential([
layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
model = build_model()
###Output
_____no_output_____
###Markdown
Inspect the modelUse the `.summary` method to print a simple description of the model
###Code
model.summary()
###Output
_____no_output_____
###Markdown
Now try out the model. Take a batch of `10` examples from the training data and call `model.predict` on it.
###Code
example_batch = normed_train_data[:10]
example_result = model.predict(example_batch)
example_result
###Output
_____no_output_____
###Markdown
It seems to be working, and it produces a result of the expected shape and type. Train the modelTrain the model for 1000 epochs, and record the training and validation accuracy in the `history` object.
###Code
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(
normed_train_data, train_labels,
epochs=EPOCHS, validation_split = 0.2, verbose=0,
callbacks=[PrintDot()])
###Output
_____no_output_____
###Markdown
Visualize the model's training progress using the stats stored in the `history` object.
###Code
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
def plot_history(history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [MPG]')
plt.plot(hist['epoch'], hist['mae'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mae'],
label = 'Val Error')
plt.ylim([0,5])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$MPG^2$]')
plt.plot(hist['epoch'], hist['mse'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mse'],
label = 'Val Error')
plt.ylim([0,20])
plt.legend()
plt.show()
plot_history(history)
###Output
_____no_output_____
###Markdown
This graph shows little improvement, or even degradation in the validation error after about 100 epochs. Let's update the `model.fit` call to automatically stop training when the validation score doesn't improve. We'll use an *EarlyStopping callback* that tests a training condition for every epoch. If a set amount of epochs elapses without showing improvement, then automatically stop the training.You can learn more about this callback [here](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/callbacks/EarlyStopping).
###Code
model = build_model()
# The patience parameter is the amount of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(normed_train_data, train_labels, epochs=EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
plot_history(history)
###Output
_____no_output_____
###Markdown
The graph shows that on the validation set, the average error is usually around +/- 2 MPG. Is this good? We'll leave that decision up to you.Let's see how well the model generalizes by using the **test** set, which we did not use when training the model. This tells us how well we can expect the model to predict when we use it in the real world.
###Code
loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae))
###Output
_____no_output_____
###Markdown
Make predictionsFinally, predict MPG values using data in the testing set:
###Code
test_predictions = model.predict(normed_test_data).flatten()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])
###Output
_____no_output_____
###Markdown
It looks like our model predicts reasonably well. Let's take a look at the error distribution.
###Code
error = test_predictions - test_labels
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [MPG]")
_ = plt.ylabel("Count")
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Predict fuel efficiency: regression View on TensorFlow.org Run in Google Colab View source on GitHub In a *regression* problem, we aim to predict the output of a continuous value, like a price or a probability. Contrast this with a *classification* problem, where we aim to predict a discrete label (for example, where a picture contains an apple or an orange). This notebook uses the classic [Auto MPG](https://archive.ics.uci.edu/ml/datasets/auto+mpg) Dataset and builds a model to predict the fuel efficiency of late-1970s and early 1980s automobiles. To do this, we'll provide the model with a description of many automobiles from that time period. This description includes attributes like: cylinders, displacement, horsepower, and weight. This example uses the `tf.keras` API, see [this guide](https://www.tensorflow.org/guide/keras) for details.
###Code
# Use seaborn for pairplot
!pip install seaborn
from __future__ import absolute_import, division, print_function
import pathlib
import pandas as pd
import seaborn as sns
!pip install tf-nightly-2.0-preview
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
###Output
_____no_output_____
###Markdown
The Auto MPG datasetThe dataset is available from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/). Get the dataFirst download the dataset.
###Code
dataset_path = keras.utils.get_file("auto-mpg.data", "https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
dataset_path
###Output
_____no_output_____
###Markdown
Import it using pandas
###Code
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
###Output
_____no_output_____
###Markdown
Clean the dataThe dataset contains a few unknown values.
###Code
dataset.isna().sum()
###Output
_____no_output_____
###Markdown
To keep this initial tutorial simple drop those rows.
###Code
dataset = dataset.dropna()
###Output
_____no_output_____
###Markdown
The `"Origin"` column is really categorical, not numeric. So convert that to a one-hot:
###Code
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0
dataset.tail()
###Output
_____no_output_____
###Markdown
Split the data into train and testNow split the data into a training set and a test set. We will use the test set in the final evaluation of our model.
###Code
train_dataset = dataset.sample(frac=0.8,random_state=0)
test_dataset = dataset.drop(train_dataset.index)
###Output
_____no_output_____
###Markdown
Inspect the dataHave a quick look at the joint distribution of a few pairs of columns from the training set.
###Code
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")
###Output
_____no_output_____
###Markdown
Also look at the overall statistics:
###Code
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats
###Output
_____no_output_____
###Markdown
Split features from labelsNow separate the target value, or "label", from the features. This label is the value that we will train our model to predict.
###Code
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
###Output
_____no_output_____
###Markdown
Normalize the dataLook again at the `train_stats` block above and note how different the ranges of each feature are. It is good practice to normalize features that use different scales and ranges. Although the model *might* converge without feature normalization, it makes training more difficult, and it makes the resulting model dependent on the choice of units used in the input. Note: We intentionally use the statistics from only the training set; these same statistics will also be used for evaluation. This is so that the model doesn't have any information about the test set.
###Code
def norm(x):
return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
###Output
_____no_output_____
###Markdown
This normalized data is what we will use to train the model.Caution: The statistics used to normalize the inputs here are as important as the model weights. The model Build the modelLet's build our model. Here, we'll use a `Sequential` model with two densely connected hidden layers, and an output layer that returns a single, continuous value. The model building steps are wrapped in a function, `build_model`, since we'll create a second model, later on.
###Code
def build_model():
model = keras.Sequential([
layers.Dense(64, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]),
layers.Dense(64, activation=tf.nn.relu),
layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
model = build_model()
###Output
_____no_output_____
###Markdown
Inspect the modelUse the `.summary` method to print a simple description of the model
###Code
model.summary()
###Output
_____no_output_____
###Markdown
Now try out the model. Take a batch of `10` examples from the training data and call `model.predict` on it.
###Code
example_batch = normed_train_data[:10]
example_result = model.predict(example_batch)
example_result
###Output
_____no_output_____
###Markdown
It seems to be working, and it produces a result of the expected shape and type. Train the modelTrain the model for 1000 epochs, and record the training and validation accuracy in the `history` object.
###Code
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(
normed_train_data, train_labels,
epochs=EPOCHS, validation_split = 0.2, verbose=0,
callbacks=[PrintDot()])
###Output
_____no_output_____
###Markdown
Visualize the model's training progress using the stats stored in the `history` object.
###Code
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
import matplotlib.pyplot as plt
def plot_history(history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [MPG]')
plt.plot(hist['epoch'], hist['mean_absolute_error'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_absolute_error'],
label = 'Val Error')
plt.legend()
plt.ylim([0,5])
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$MPG^2$]')
plt.plot(hist['epoch'], hist['mean_squared_error'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_squared_error'],
label = 'Val Error')
plt.legend()
plt.ylim([0,20])
plot_history(history)
###Output
_____no_output_____
###Markdown
This graph shows little improvement, or even degradation in the validation error after a few hundred epochs. Let's update the `model.fit` method to automatically stop training when the validation score doesn't improve. We'll use a *callback* that tests a training condition for every epoch. If a set amount of epochs elapses without showing improvement, then automatically stop the training.You can learn more about this callback [here](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/callbacks/EarlyStopping).
###Code
model = build_model()
# The patience parameter is the amount of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=50)
history = model.fit(normed_train_data, train_labels, epochs=EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
plot_history(history)
###Output
_____no_output_____
###Markdown
The graph shows that on the validation set, the average error is usually around +/- 2 MPG. Is this good? We'll leave that decision up to you. Let's see how the model performs on the **test** set, which we did not use when training the model:
###Code
loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae))
###Output
_____no_output_____
###Markdown
Make predictionsFinally, predict fuel efficiency using data in the testing set:
###Code
test_predictions = model.predict(normed_test_data).flatten()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])
error = test_predictions - test_labels
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [MPG]")
_ = plt.ylabel("Count")
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Regression: Predict fuel efficiency View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook In a *regression* problem, we aim to predict the output of a continuous value, like a price or a probability. Contrast this with a *classification* problem, where we aim to select a class from a list of classes (for example, where a picture contains an apple or an orange, recognizing which fruit is in the picture).This notebook uses the classic [Auto MPG](https://archive.ics.uci.edu/ml/datasets/auto+mpg) Dataset and builds a model to predict the fuel efficiency of late-1970s and early 1980s automobiles. To do this, we'll provide the model with a description of many automobiles from that time period. This description includes attributes like: cylinders, displacement, horsepower, and weight.This example uses the `tf.keras` API, see [this guide](https://www.tensorflow.org/guide/keras) for details.
###Code
# Use seaborn for pairplot
!pip install seaborn
from __future__ import absolute_import, division, print_function, unicode_literals
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
!pip install tensorflow==2.0.0-beta1
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
###Output
_____no_output_____
###Markdown
The Auto MPG datasetThe dataset is available from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/). Get the dataFirst download the dataset.
###Code
dataset_path = keras.utils.get_file("auto-mpg.data", "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
dataset_path
###Output
_____no_output_____
###Markdown
Import it using pandas
###Code
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
###Output
_____no_output_____
###Markdown
Clean the dataThe dataset contains a few unknown values.
###Code
dataset.isna().sum()
###Output
_____no_output_____
###Markdown
To keep this initial tutorial simple drop those rows.
###Code
dataset = dataset.dropna()
###Output
_____no_output_____
###Markdown
The `"Origin"` column is really categorical, not numeric. So convert that to a one-hot:
###Code
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0
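# An equivalent approach (illustrative, not from the original notebook) would let pandas
# build the one-hot columns after mapping the numeric codes to region names:
# dataset['Origin'] = dataset['Origin'].map({1: 'USA', 2: 'Europe', 3: 'Japan'})
# dataset = pd.get_dummies(dataset, columns=['Origin'], prefix='', prefix_sep='')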
dataset.tail()
###Output
_____no_output_____
###Markdown
Split the data into train and testNow split the dataset into a training set and a test set.We will use the test set in the final evaluation of our model.
###Code
train_dataset = dataset.sample(frac=0.8,random_state=0)
test_dataset = dataset.drop(train_dataset.index)
###Output
_____no_output_____
###Markdown
Inspect the dataHave a quick look at the joint distribution of a few pairs of columns from the training set.
###Code
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")
###Output
_____no_output_____
###Markdown
Also look at the overall statistics:
###Code
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats
###Output
_____no_output_____
###Markdown
Split features from labelsSeparate the target value, or "label", from the features. This label is the value that you will train the model to predict.
###Code
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
###Output
_____no_output_____
###Markdown
Normalize the dataLook again at the `train_stats` block above and note how different the ranges of each feature are. It is good practice to normalize features that use different scales and ranges. Although the model *might* converge without feature normalization, it makes training more difficult, and it makes the resulting model dependent on the choice of units used in the input.Note: Although we intentionally generate these statistics from only the training dataset, these statistics will also be used to normalize the test dataset. We need to do that to project the test dataset into the same distribution that the model has been trained on.
###Code
def norm(x):
return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
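# Illustrative note (not part of the original notebook): any future data fed to this model,
# e.g. a hypothetical `new_samples` DataFrame with the same feature columns, must be
# normalized with these same training-set statistics before calling model.predict:
# normed_new_samples = norm(new_samples)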
###Output
_____no_output_____
###Markdown
This normalized data is what we will use to train the model.Caution: The statistics used to normalize the inputs here (mean and standard deviation) need to be applied to any other data that is fed to the model, along with the one-hot encoding that we did earlier. That includes the test set as well as live data when the model is used in production. The model Build the modelLet's build our model. Here, we'll use a `Sequential` model with two densely connected hidden layers, and an output layer that returns a single, continuous value. The model building steps are wrapped in a function, `build_model`, since we'll create a second model, later on.
###Code
def build_model():
model = keras.Sequential([
layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
model = build_model()
###Output
_____no_output_____
###Markdown
Inspect the modelUse the `.summary` method to print a simple description of the model
###Code
model.summary()
###Output
_____no_output_____
###Markdown
Now try out the model. Take a batch of `10` examples from the training data and call `model.predict` on it.
###Code
example_batch = normed_train_data[:10]
example_result = model.predict(example_batch)
example_result
###Output
_____no_output_____
###Markdown
It seems to be working, and it produces a result of the expected shape and type. Train the modelTrain the model for 1000 epochs, and record the training and validation accuracy in the `history` object.
###Code
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(
normed_train_data, train_labels,
epochs=EPOCHS, validation_split = 0.2, verbose=0,
callbacks=[PrintDot()])
###Output
_____no_output_____
###Markdown
Visualize the model's training progress using the stats stored in the `history` object.
###Code
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
def plot_history(history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [MPG]')
plt.plot(hist['epoch'], hist['mae'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mae'],
label = 'Val Error')
plt.ylim([0,5])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$MPG^2$]')
plt.plot(hist['epoch'], hist['mse'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mse'],
label = 'Val Error')
plt.ylim([0,20])
plt.legend()
plt.show()
plot_history(history)
###Output
_____no_output_____
###Markdown
This graph shows little improvement, or even degradation, in the validation error after about 100 epochs. Let's update the `model.fit` call to automatically stop training when the validation score doesn't improve. We'll use an *EarlyStopping callback* that tests a training condition after every epoch. If a set number of epochs elapses without improvement, training stops automatically. You can learn more about this callback [here](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/callbacks/EarlyStopping).
###Code
model = build_model()
# The patience parameter is the amount of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(normed_train_data, train_labels, epochs=EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
plot_history(history)
###Output
_____no_output_____
###Markdown
The graph shows that on the validation set, the average error is usually around +/- 2 MPG. Is this good? We'll leave that decision up to you. Let's see how well the model generalizes by using the **test** set, which we did not use when training the model. This tells us how well we can expect the model to predict when we use it in the real world.
###Code
loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae))
###Output
_____no_output_____
###Markdown
Make predictions. Finally, predict MPG values using data in the testing set:
###Code
test_predictions = model.predict(normed_test_data).flatten()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])
###Output
_____no_output_____
###Markdown
It looks like our model predicts reasonably well. Let's take a look at the error distribution.
###Code
error = test_predictions - test_labels
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [MPG]")
_ = plt.ylabel("Count")
###Output
_____no_output_____ |
problems/problems_1_~_10.ipynb | ###Markdown
1. Two SumGiven an array of integers nums and an integer target, return indices of the two numbers such that they add up to target.You may assume that each input would have exactly one solution, and you may not use the same element twice.You can return the answer in any order.Example 1:```Input: nums = [2,7,11,15], target = 9Output: [0,1]Output: Because nums[0] + nums[1] == 9, we return [0, 1].```Example 2:```Input: nums = [3,2,4], target = 6Output: [1,2]```Example 3:```Input: nums = [3,3], target = 6Output: [0,1]```Constraints:- 2 <= nums.length <= 104- -109 <= nums[i] <= 109- -109 <= target <= 109- Only one valid answer exists.
###Code
def twoSum(nums, target):
output_list = []
i = 0
while True:
num_2 = target - nums[i]
for j in range(len(nums)):
if nums[j] == num_2 and i!=j:
return [i, j]
i += 1
print(twoSum([2,7,11,15], 9))
print(twoSum([3,2,4], 6))
print(twoSum([3,3], 6))
###Output
[0, 1]
[1, 2]
[0, 1]
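###Markdown
The nested scan above is O(n^2). As a hedged sketch (the function name `twoSumHashed` is ours, not part of the original solution), the same problem can be solved in a single O(n) pass with a dictionary that remembers the index of every value seen so far.
###Code
def twoSumHashed(nums, target):
    seen = {}  # value -> index of that value in nums
    for i, v in enumerate(nums):
        complement = target - v
        if complement in seen:
            # the earlier element plus the current one add up to target
            return [seen[complement], i]
        seen[v] = i

print(twoSumHashed([2,7,11,15], 9))  # [0, 1]
print(twoSumHashed([3,2,4], 6))      # [1, 2]
print(twoSumHashed([3,3], 6))        # [0, 1]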
###Markdown
2. Add Two NumbersYou are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order, and each of their nodes contains a single digit. Add the two numbers and return the sum as a linked list.You may assume the two numbers do not contain any leading zero, except the number 0 itself.Example 1:```Input: l1 = [2,4,3], l2 = [5,6,4]Output: [7,0,8]Explanation: 342 + 465 = 807.```Example 2:```Input: l1 = [0], l2 = [0]Output: [0]```Example 3:```Input: l1 = [9,9,9,9,9,9,9], l2 = [9,9,9,9]Output: [8,9,9,9,0,0,0,1]```Constraints:- The number of nodes in each linked list is in the range [1, 100].- 0 <= Node.val <= 9- It is guaranteed that the list represents a number that does not have leading zeros. Reference:- [How to convert list to string [duplicate]](https://stackoverflow.com/questions/5618878/how-to-convert-list-to-string)- [Python List reverse()](https://www.programiz.com/python-programming/methods/list/reverse)- [LeetCode初级算法的Python实现--链表](https://www.cnblogs.com/NSGUF/p/9157903.html)
###Code
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
def listToListNode(input):
    # build a linked list from a Python list (module level so the test calls below can use it)
    numbers = input
    dummyRoot = ListNode(0)
    ptr = dummyRoot
    for number in numbers:
        ptr.next = ListNode(number)
        ptr = ptr.next
    ptr = dummyRoot.next
    return ptr
def listNodeToList(node):
    # flatten a linked list back into a Python list
    if not node:
        return []
    result = []
    while node:
        result.append(node.val)
        node = node.next
    return result
def addTwoNumbers(l1, l2):
# convert list nodes l1 and l2 to lists list_1 and list_2
list_1 = list(listNodeToList(l1))
list_2 = list(listNodeToList(l2))
# convert list_1 and list_2 to n1 and n2
list_1.reverse()
n1 = int(''.join(str(e) for e in list_1))
list_2.reverse()
n2 = int(''.join(str(e) for e in list_2))
# add n1 and n2
sum = n1 + n2
# convert sum to list
sum_list = list(str(sum))
sum_list.reverse()
sum_list = [int(e) for e in sum_list]
# convert list to list nodes
return listToListNode(sum_list)
print(addTwoNumbers(listToListNode([2,4,3]), listToListNode([5,6,4])))
print(addTwoNumbers(listToListNode([0]), listToListNode([0])))
print(addTwoNumbers(listToListNode([9,9,9,9,9,9,9]), listToListNode([9,9,9,9])))
###Output
<__main__.ListNode object at 0x7f1d15d85950>
<__main__.ListNode object at 0x7f1d15d850d0>
<__main__.ListNode object at 0x7f1d15d85990>
###Markdown
3. Longest Substring Without Repeating CharactersGiven a string s, find the length of the longest substring without repeating characters.Example 1:```Input: s = "abcabcbb"Output: 3Explanation: The answer is "abc", with the length of 3.```Example 2:```Input: s = "bbbbb"Output: 1Explanation: The answer is "b", with the length of 1.```Example 3:```Input: s = "pwwkew"Output: 3Explanation: The answer is "wke", with the length of 3.Notice that the answer must be a substring, "pwke" is a subsequence and not a substring.```Example 4:```Input: s = ""Output: 0```Constraints:- 0 <= s.length <= 5 * 1e4- s consists of English letters, digits, symbols and spaces.
###Code
def lengthOfLongestSubstring(s):
potential_str = ''
substring_length = 0
max_length = 0
if len(s) == 0:
return substring_length
else:
for i in range(len(s)):
if i == 0:
potential_str = s[i]
substring_length += 1
elif s[i] not in potential_str:
potential_str += s[i]
if len(potential_str) > substring_length:
substring_length += 1
elif s[i] in potential_str:
potential_str += s[i]
potential_str = potential_str.split(s[i])[1] + s[i]
substring_length = len(potential_str)
# print(s[i])
# print(substring_length)
if substring_length > max_length:
max_length = substring_length
return max_length
print(lengthOfLongestSubstring("abcabcbb"))
print(lengthOfLongestSubstring("bbbbb"))
print(lengthOfLongestSubstring("pwwkew"))
print(lengthOfLongestSubstring(""))
print(lengthOfLongestSubstring("dvdf"))
###Output
3
1
3
0
3
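###Markdown
A common alternative to the substring bookkeeping above is a sliding window that tracks the last index of each character. The sketch below (the name `lengthOfLongestSubstringWindow` is ours) runs in O(n) and should reproduce the same answers.
###Code
def lengthOfLongestSubstringWindow(s):
    last_seen = {}  # char -> most recent index of that char
    start = 0       # left edge of the current window
    best = 0
    for i, ch in enumerate(s):
        # if ch already appears inside the window, move the left edge past it
        if ch in last_seen and last_seen[ch] >= start:
            start = last_seen[ch] + 1
        last_seen[ch] = i
        best = max(best, i - start + 1)
    return best

print(lengthOfLongestSubstringWindow("abcabcbb"))  # 3
print(lengthOfLongestSubstringWindow("dvdf"))      # 3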
###Markdown
4. Median of Two Sorted ArraysGiven two sorted arrays nums1 and nums2 of size m and n respectively, return the median of the two sorted arrays.The overall run time complexity should be O(log (m+n)). Example 1:```Input: nums1 = [1,3], nums2 = [2]Output: 2.00000Explanation: merged array = [1,2,3] and median is 2.```Example 2:```Input: nums1 = [1,2], nums2 = [3,4]Output: 2.50000Explanation: merged array = [1,2,3,4] and median is (2 + 3) / 2 = 2.5.```Example 3:```Input: nums1 = [0,0], nums2 = [0,0]Output: 0.00000```Example 4:```Input: nums1 = [], nums2 = [1]Output: 1.00000```Example 5:```Input: nums1 = [2], nums2 = []Output: 2.00000```Constraints:- nums1.length == m- nums2.length == n- 0 <= m <= 1000- 0 <= n <= 1000- 1 <= m + n <= 2000- -106 <= nums1[i], nums2[i] <= 106
###Code
def findMedianSortedArrays(nums1, nums2):
merged_array = sorted(nums1 + nums2)
length = len(merged_array)
if length%2:
return merged_array[length//2]
else:
return (merged_array[length//2 - 1] + merged_array[length//2])/2
print(findMedianSortedArrays([1,3], [2]))
print(findMedianSortedArrays([1,2], [3,4]))
print(findMedianSortedArrays([0,0], [0,0]))
print(findMedianSortedArrays([], [1]))
print(findMedianSortedArrays([2], []))
###Output
2
2.5
0.0
1
2
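###Markdown
Sorting the merged array is O((m+n)log(m+n)), which ignores the O(log(m+n)) requirement in the statement. As a hedged sketch (the name `findMedianSortedArraysLog` is ours), the standard binary-partition approach searches for a cut of the shorter array such that everything left of both cuts is <= everything right of them.
###Code
def findMedianSortedArraysLog(nums1, nums2):
    # always binary-search over the shorter array
    if len(nums1) > len(nums2):
        nums1, nums2 = nums2, nums1
    m, n = len(nums1), len(nums2)
    half = (m + n + 1) // 2
    lo, hi = 0, m
    while lo <= hi:
        i = (lo + hi) // 2   # elements taken from nums1's left side
        j = half - i         # elements taken from nums2's left side
        left1  = nums1[i-1] if i > 0 else float('-inf')
        right1 = nums1[i]   if i < m else float('inf')
        left2  = nums2[j-1] if j > 0 else float('-inf')
        right2 = nums2[j]   if j < n else float('inf')
        if left1 <= right2 and left2 <= right1:
            if (m + n) % 2:
                return max(left1, left2)
            return (max(left1, left2) + min(right1, right2)) / 2
        elif left1 > right2:
            hi = i - 1
        else:
            lo = i + 1

print(findMedianSortedArraysLog([1,3], [2]))    # 2
print(findMedianSortedArraysLog([1,2], [3,4]))  # 2.5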
###Markdown
5. Longest Palindromic SubstringGiven a string s, return the longest palindromic substring in s.Example 1:```Input: s = "babad"Output: "bab"Note: "aba" is also a valid answer.```Example 2:```Input: s = "cbbd"Output: "bb"```Example 3:```Input: s = "a"Output: "a"```Example 4:```Input: s = "ac"Output: "a"```Constraints:- 1 <= s.length <= 1000- s consist of only digits and English letters.Reference:- [How to convert list to string [duplicate]](https://stackoverflow.com/questions/5618878/how-to-convert-list-to-string)- [How can I reverse a list in Python?](https://stackoverflow.com/questions/3940128/how-can-i-reverse-a-list-in-python)
###Code
def longestPalindrome(s):
if ''.join(reversed(s)) == s:
return s
longest_str = s[0]
max_length = 1
str_length = len(s)
for i in range(0, str_length):
symmetrical_range = 1
while symmetrical_range <= i and i+1+symmetrical_range <= str_length:
potential_str = s[i-symmetrical_range:i+1+symmetrical_range]
if ''.join(reversed(potential_str)) == potential_str and len(potential_str) > max_length:
longest_str = potential_str
max_length = len(potential_str)
if ''.join(reversed(potential_str)) != potential_str:
break
else:
symmetrical_range += 1
symmetrical_range = 0
while symmetrical_range <= i and i+2+symmetrical_range <= str_length:
potential_str = s[i-symmetrical_range:i+2+symmetrical_range]
if ''.join(reversed(potential_str)) == potential_str and len(potential_str) > max_length:
longest_str = potential_str
max_length = len(potential_str)
if ''.join(reversed(potential_str)) != potential_str:
break
else:
symmetrical_range += 1
return longest_str
print(longestPalindrome("babad"))
print(longestPalindrome("cbbd"))
print(longestPalindrome("a"))
print(longestPalindrome("ac"))
print(longestPalindrome("kyyrjtdplseovzwjkykrjwhxquwxsfsorjiumvxjhjmgeueafubtonhlerrgsgohfosqssmizcuqryqomsipovhhodpfyudtusjhonlqabhxfahfcjqxyckycstcqwxvicwkjeuboerkmjshfgiglceycmycadpnvoeaurqatesivajoqdilynbcihnidbizwkuaoegmytopzdmvvoewvhebqzskseeubnretjgnmyjwwgcooytfojeuzcuyhsznbcaiqpwcyusyyywqmmvqzvvceylnuwcbxybhqpvjumzomnabrjgcfaabqmiotlfojnyuolostmtacbwmwlqdfkbfikusuqtupdwdrjwqmuudbcvtpieiwteqbeyfyqejglmxofdjksqmzeugwvuniaxdrunyunnqpbnfbgqemvamaxuhjbyzqmhalrprhnindrkbopwbwsjeqrmyqipnqvjqzpjalqyfvaavyhytetllzupxjwozdfpmjhjlrnitnjgapzrakcqahaqetwllaaiadalmxgvpawqpgecojxfvcgxsbrldktufdrogkogbltcezflyctklpqrjymqzyzmtlssnavzcquytcskcnjzzrytsvawkavzboncxlhqfiofuohehaygxidxsofhmhzygklliovnwqbwwiiyarxtoihvjkdrzqsnmhdtdlpckuayhtfyirnhkrhbrwkdymjrjklonyggqnxhfvtkqxoicakzsxmgczpwhpkzcntkcwhkdkxvfnjbvjjoumczjyvdgkfukfuldolqnauvoyhoheoqvpwoisniv"))
print(longestPalindrome("ccc"))
print(longestPalindrome("abb"))
print(longestPalindrome("aaaa"))
print(longestPalindrome("ababababa"))
print(longestPalindrome("babaddtattarrattatddetartrateedredividerb"))
print(longestPalindrome("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"))
print(longestPalindrome("ccd"))
###Output
bab
bb
a
a
qahaq
ccc
bb
aaaa
ababababa
ddtattarrattatdd
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
cc
###Markdown
6. Zigzag ConversionThe string "PAYPALISHIRING" is written in a zigzag pattern on a given number of rows like this: (you may want to display this pattern in a fixed font for better legibility)P A H N A P L S I I G Y I R And then read line by line: "PAHNAPLSIIGYIR"Write the code that will take a string and make this conversion given a number of rows:string convert(string s, int numRows); Example 1:```Input: s = "PAYPALISHIRING", numRows = 3Output: "PAHNAPLSIIGYIR"```Example 2:```Input: s = "PAYPALISHIRING", numRows = 4Output: "PINALSIGYAHRPI"Explanation:P I NA L S I GY A H RP I```Example 3:```Input: s = "A", numRows = 1Output: "A"```Constraints:- 1 <= s.length <= 1000- s consists of English letters (lower-case and upper-case), ',' and '.'.- 1 <= numRows <= 1000
###Code
def convert(s, numRows):
if numRows == 1:
return s
# Create containers for characters
for i in range(1, numRows+1):
globals()[f'container_{i}'] = ''
# Write characters to containers
down_mode = True
container_i = 0
for i in range(len(s)):
if down_mode:
container_i += 1
globals()[f'container_{container_i}'] += (s[i])
if container_i == numRows:
down_mode = False
else:
container_i -= 1
globals()[f'container_{container_i}'] += (s[i])
if container_i == 1:
down_mode = True
# Read characters from containers
output_str = ''
for i in range(1, numRows+1):
output_str += globals()[f'container_{i}']
return output_str
print(convert("PAYPALISHIRING", 3))
print(convert("PAYPALISHIRING", 4))
print(convert("A", 1))
print(convert("AB", 1))
###Output
PAHNAPLSIIGYIR
PINALSIGYAHRPI
A
AB
###Markdown
7. Reverse IntegerGiven a signed 32-bit integer x, return x with its digits reversed. If reversing x causes the value to go outside the signed 32-bit integer range [$-2^{31}$, $2^{31} - 1$], then return 0.Assume the environment does not allow you to store 64-bit integers (signed or unsigned).Example 1:```Input: x = 123Output: 321```Example 2:```Input: x = -123Output: -321```Example 3:```Input: x = 120Output: 21```Example 4:```Input: x = 0Output: 0``` Constraints:- $-2^{31}$ <= x <= $2^{31} - 1$
###Code
def reverse(x):
positive = True
if x == 0:
return 0
else:
if x<0:
positive = False
input_num = str(x)[1:]
else:
input_num = str(x)
input_list = list(input_num)
input_list.reverse()
input_str = ''.join(input_list)
input_int = int(input_str)
if not positive:
input_int = -input_int
if input_int < -2**31 or input_int > 2**31-1:
return 0
else:
return input_int
print(reverse(123))
print(reverse(-123))
print(reverse(120))
print(reverse(0))
print(reverse(1534236469))
###Output
321
-321
21
0
0
###Markdown
8. String to Integer (atoi)Implement the myAtoi(string s) function, which converts a string to a 32-bit signed integer (similar to C/C++'s atoi function).The algorithm for myAtoi(string s) is as follows:1. Read in and ignore any leading whitespace.2. Check if the next character (if not already at the end of the string) is '-' or '+'. Read this character in if it is either. This determines if the final result is negative or positive respectively. Assume the result is positive if neither is present.3. Read in next the characters until the next non-digit character or the end of the input is reached. The rest of the string is ignored.4. Convert these digits into an integer (i.e. "123" -> 123, "0032" -> 32). If no digits were read, then the integer is 0. Change the sign as necessary (from step 2).5. If the integer is out of the 32-bit signed integer range [$-2^{31}$, $2^{31} - 1$], then clamp the integer so that it remains in the range. Specifically, integers less than $-2^{31}$ should be clamped to $-2^{31}$, and integers greater than $2^{31} - 1$ should be clamped to $2^{31} - 1$.6. Return the integer as the final result.Note:- Only the space character ' ' is considered a whitespace character.- Do not ignore any characters other than the leading whitespace or the rest of the string after the digits. Example 1:```Input: s = "42"Output: 42Explanation: The underlined characters are what is read in, the caret is the current reader position.Step 1: "42" (no characters read because there is no leading whitespace) ^Step 2: "42" (no characters read because there is neither a '-' nor '+') ^Step 3: "42" ("42" is read in) ^The parsed integer is 42.Since 42 is in the range [$-2^{31}$, $2^{31} - 1$], the final result is 42.```Example 2:```Input: s = " -42"Output: -42Explanation:Step 1: " -42" (leading whitespace is read and ignored) ^Step 2: " -42" ('-' is read, so the result should be negative) ^Step 3: " -42" ("42" is read in) ^The parsed integer is -42.Since -42 is in the range [$-2^{31}$, $2^{31} - 1$], the final result is -42.```Example 3:```Input: s = "4193 with words"Output: 4193Explanation:Step 1: "4193 with words" (no characters read because there is no leading whitespace) ^Step 2: "4193 with words" (no characters read because there is neither a '-' nor '+') ^Step 3: "4193 with words" ("4193" is read in; reading stops because the next character is a non-digit) ^The parsed integer is 4193.Since 4193 is in the range [$-2^{31}$, $2^{31} - 1$], the final result is 4193.```Example 4:```Input: s = "words and 987"Output: 0Explanation:Step 1: "words and 987" (no characters read because there is no leading whitespace) ^Step 2: "words and 987" (no characters read because there is neither a '-' nor '+') ^Step 3: "words and 987" (reading stops immediately because there is a non-digit 'w') ^The parsed integer is 0 because no digits were read.Since 0 is in the range [$-2^{31}$, $2^{31} - 1$], the final result is 0.```Example 5:```Input: s = "-91283472332"Output: -2147483648Explanation:Step 1: "-91283472332" (no characters read because there is no leading whitespace) ^Step 2: "-91283472332" ('-' is read, so the result should be negative) ^Step 3: "-91283472332" ("91283472332" is read in) ^The parsed integer is -91283472332.Since -91283472332 is less than the lower bound of the range [$-2^{31}$, $2^{31} - 1$], the final result is clamped to $-2^{31}$ = -2147483648.``` Constraints:- 0 <= s.length <= 200- s consists of English letters (lower-case and upper-case), digits (0-9), ' ', '+', '-', and '.'.
###Code
def myAtoi(s):
is_positive = True
sign_times = 0
num = ''
num_list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.']
non_num_flag = False
num_flag = False
non_leading = False
for i in s:
if i != ' ' or non_leading:
non_leading = True
if i == '+' or i == '-':
sign_times += 1
if not num_flag and sign_times > 1:
return 0
if num_flag and i not in num_list:
break
elif i in num_list:
if not num_flag:
num_flag = True
if not non_num_flag:
num += i
elif i == '+':
pass
elif i == ' ':
break
elif i == '-':
if num_flag:
return 0
else:
is_positive = False
elif not num_flag:
return 0
else:
if not non_num_flag:
non_num_flag = True
if num == '':
return 0
else:
output_int = int(float(num))
if not is_positive:
output_int = -output_int
if output_int < -2**31:
return -2**31
if output_int > 2**31 - 1:
return 2**31 - 1
return output_int
print(myAtoi("42"))
print(myAtoi(" -42"))
print(myAtoi("4193 with words"))
print(myAtoi("words and 987"))
print(myAtoi("-91283472332"))
print(myAtoi("3.14159"))
print(myAtoi("+-12"))
print(myAtoi("00000-42a1234"))
print(myAtoi(" -0012a42"))
print(myAtoi(" +0 123"))
print(myAtoi("-5-"))
###Output
42
-42
4193
0
-2147483648
3
0
0
-12
0
-5
###Markdown
9. Palindrome NumberGiven an integer x, return true if x is palindrome integer.An integer is a palindrome when it reads the same backward as forward. For example, 121 is palindrome while 123 is not. Example 1:```Input: x = 121Output: true```Example 2:```Input: x = -121Output: falseExplanation: From left to right, it reads -121. From right to left, it becomes 121-. Therefore it is not a palindrome.```Example 3:```Input: x = 10Output: falseExplanation: Reads 01 from right to left. Therefore it is not a palindrome.```Example 4:```Input: x = -101Output: false```Constraints:- $-2^{31}$ <= x <= $2^{31} - 1$
###Code
def isPalindrome(x):
return str(x) == ''.join(reversed(list(str(x))))
print(isPalindrome(121))
print(isPalindrome(-121))
print(isPalindrome(10))
print(isPalindrome(-101))
###Output
True
False
False
False
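###Markdown
The string reversal works, but the usual follow-up is to avoid string conversion entirely. A hedged arithmetic sketch (the name `isPalindromeNumeric` is ours): rebuild the second half of the number digit by digit and compare it with the first half.
###Code
def isPalindromeNumeric(x):
    # negatives and non-zero multiples of 10 can never read the same backwards
    if x < 0 or (x % 10 == 0 and x != 0):
        return False
    reverted = 0
    while x > reverted:
        reverted = reverted * 10 + x % 10
        x //= 10
    # for an odd digit count, reverted carries the middle digit; drop it with // 10
    return x == reverted or x == reverted // 10

print(isPalindromeNumeric(121))   # True
print(isPalindromeNumeric(-121))  # False
print(isPalindromeNumeric(10))    # False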
###Markdown
10. Regular Expression MatchingGiven an input string s and a pattern p, implement regular expression matching with support for '.' and '*' where:'.' Matches any single character.'*' Matches zero or more of the preceding element.The matching should cover the entire input string (not partial). Example 1:```Input: s = "aa", p = "a"Output: falseExplanation: "a" does not match the entire string "aa".```Example 2:```Input: s = "aa", p = "a*"Output: trueExplanation: '*' means zero or more of the preceding element, 'a'. Therefore, by repeating 'a' once, it becomes "aa".```Example 3:```Input: s = "ab", p = ".*"Output: trueExplanation: ".*" means "zero or more (*) of any character (.)".```Example 4:```Input: s = "aab", p = "c*a*b"Output: trueExplanation: c can be repeated 0 times, a can be repeated 1 time. Therefore, it matches "aab".```Example 5:```Input: s = "mississippi", p = "mis*is*p*."Output: false```Constraints:- 1 <= s.length <= 20- 1 <= p.length <= 30- s contains only lowercase English letters.- p contains only lowercase English letters, '.', and '*'.- It is guaranteed for each appearance of the character '*', there will be a previous valid character to match.
###Code
def isMatch(s, p):
english_letters = 'qwertyuiopasdfghjklzxcvbnm'
i = 0
while p:
# print(f'i: {i}')
# print(f'p: {p}')
if i > len(s)-1:
if len(p) > 1:
if p[1] != '*':
return False
elif p[2:] == '':
return True
else:
i = len(s)-1
else:
print(0)
return False
if p[0] in english_letters:
if s[i] != p[0] and len(p) > 1:
if p[1] != '*':
print(1)
return False
else:
p = p[2:]
elif s[i] != p[0] and len(p) == 1:
print(2)
return False
elif len(p) > 1:
if p[1] == '*':
while s[i] == p[0]:
if i+1 <= len(s)-1:
i += 1
else:
i += 1
break
p = p[2:]
while p:
if s[i-1] == p[0]:
p = p[1:]
else:
break
# print(f'cutted p: {p}')
else:
i += 1
p = p[1:]
else:
i += 1
p = p[1:]
elif p[0] == '.':
if len(p) == 1 and i == len(s)-1:
print(5)
return True
elif len(p) == 1 and i < len(s)-1:
print(6)
return False
else:
if p[1] == '*':
s = ''
p = p[2:]
if p == '':
print(7)
return True
else:
print(8)
return False
else:
i += 1
p = p[1:]
else:
print(9)
return False
if i == len(s):
print(10)
return True
else:
print(11)
return False
# print(isMatch("aa", "a") is False)
# print(isMatch("aa", "a*") is True)
# print(isMatch("ab", ".*") is True)
# print(isMatch("aab", "c*a*b") is True)
# print(isMatch("mississippi", "mis*is*p*.") is False)
# print(isMatch("mississippi", "mis*is*ip*.") is True)
# print(isMatch("aaa", "aaaa") is False)
# print(isMatch("aaa", "a*a") is True)
print(isMatch("aaa", "ab*a*c*a") is True)
# print(isMatch("aaca", "ab*a*c*a") is True)
# print(isMatch("a", "ab*") is True)
print(isMatch("a", "ab*a") is False)
###Output
_____no_output_____ |
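###Markdown
The hand-rolled matcher above relies on ad-hoc case handling and debug prints. As a hedged alternative sketch (the name `isMatchDP` is ours), the standard dynamic-programming formulation fills a table where dp[i][j] means "s[:i] matches p[:j]".
###Code
def isMatchDP(s, p):
    m, n = len(s), len(p)
    dp = [[False] * (n + 1) for _ in range(m + 1)]
    dp[0][0] = True
    # patterns like a*, a*b* can match the empty string
    for j in range(1, n + 1):
        if p[j-1] == '*':
            dp[0][j] = dp[0][j-2]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if p[j-1] == '*':
                # either drop "x*" entirely, or consume one more matching char of s
                dp[i][j] = dp[i][j-2] or (dp[i-1][j] and p[j-2] in (s[i-1], '.'))
            else:
                dp[i][j] = dp[i-1][j-1] and p[j-1] in (s[i-1], '.')
    return dp[m][n]

print(isMatchDP("aa", "a"))                    # False
print(isMatchDP("ab", ".*"))                   # True
print(isMatchDP("aab", "c*a*b"))               # True
print(isMatchDP("mississippi", "mis*is*p*."))  # False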
site/ja/probability/examples/Probabilistic_Layers_Regression.ipynb | ###Markdown
Copyright 2019 The TensorFlow Probability Authors. Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
TFP Probabilistic Layers: Regression. View on TensorFlow.org | Run in Google Colab | View source on GitHub | Download notebook. This example shows how to fit a regression model using TFP's "probabilistic layers". Dependencies & Prerequisites
###Code
#@title Import { display-mode: "form" }
from pprint import pprint
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import tensorflow_probability as tfp
sns.reset_defaults()
#sns.set_style('whitegrid')
#sns.set_context('talk')
sns.set_context(context='talk',font_scale=0.7)
%matplotlib inline
tfd = tfp.distributions
###Output
_____no_output_____
###Markdown
Make things fast! Before we dive in, make sure we are using a GPU for this demo. Select [Runtime] -> [Change runtime type] -> [Hardware accelerator] -> [GPU]. The following snippet verifies that we have access to a GPU.
###Code
if tf.test.gpu_device_name() != '/device:GPU:0':
print('WARNING: GPU device not found.')
else:
print('SUCCESS: Found GPU: {}'.format(tf.test.gpu_device_name()))
###Output
WARNING: GPU device not found.
###Markdown
Note: if for some reason you cannot access a GPU, this colab will still work (training will just take longer). Motivation: Wouldn't it be great if we could use TFP to specify a probabilistic model and then simply minimize the negative log-likelihood?
###Code
negloglik = lambda y, rv_y: -rv_y.log_prob(y)
###Output
_____no_output_____
###Markdown
In this colab we show how to do just that, in the context of a linear regression problem.
###Code
#@title Synthesize dataset.
w0 = 0.125
b0 = 5.
x_range = [-20, 60]
def load_dataset(n=150, n_tst=150):
np.random.seed(43)
def s(x):
g = (x - x_range[0]) / (x_range[1] - x_range[0])
return 3 * (0.25 + g**2.)
x = (x_range[1] - x_range[0]) * np.random.rand(n) + x_range[0]
eps = np.random.randn(n) * s(x)
y = (w0 * x * (1. + np.sin(x)) + b0) + eps
x = x[..., np.newaxis]
x_tst = np.linspace(*x_range, num=n_tst).astype(np.float32)
x_tst = x_tst[..., np.newaxis]
return y, x, x_tst
y, x, x_tst = load_dataset()
###Output
_____no_output_____
###Markdown
Case 1: No Uncertainty
###Code
# Build model.
model = tf.keras.Sequential([
tf.keras.layers.Dense(1),
tfp.layers.DistributionLambda(lambda t: tfd.Normal(loc=t, scale=1)),
])
# Do inference.
model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.01), loss=negloglik)
model.fit(x, y, epochs=1000, verbose=False);
# Profit.
[print(np.squeeze(w.numpy())) for w in model.weights];
yhat = model(x_tst)
assert isinstance(yhat, tfd.Distribution)
#@title Figure 1: No uncertainty.
w = np.squeeze(model.layers[-2].kernel.numpy())
b = np.squeeze(model.layers[-2].bias.numpy())
plt.figure(figsize=[6, 1.5]) # inches
#plt.figure(figsize=[8, 5]) # inches
plt.plot(x, y, 'b.', label='observed');
plt.plot(x_tst, yhat.mean(),'r', label='mean', linewidth=4);
plt.ylim(-0.,17);
plt.yticks(np.linspace(0, 15, 4)[1:]);
plt.xticks(np.linspace(*x_range, num=9));
ax=plt.gca();
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data', 0))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#ax.spines['left'].set_smart_bounds(True)
#ax.spines['bottom'].set_smart_bounds(True)
plt.legend(loc='center left', fancybox=True, framealpha=0., bbox_to_anchor=(1.05, 0.5))
plt.savefig('/tmp/fig1.png', bbox_inches='tight', dpi=300)
###Output
_____no_output_____
###Markdown
Case 2: Aleatoric Uncertainty
###Code
# Build model.
model = tf.keras.Sequential([
tf.keras.layers.Dense(1 + 1),
tfp.layers.DistributionLambda(
lambda t: tfd.Normal(loc=t[..., :1],
scale=1e-3 + tf.math.softplus(0.05 * t[...,1:]))),
])
# Do inference.
model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.01), loss=negloglik)
model.fit(x, y, epochs=1000, verbose=False);
# Profit.
[print(np.squeeze(w.numpy())) for w in model.weights];
yhat = model(x_tst)
assert isinstance(yhat, tfd.Distribution)
#@title Figure 2: Aleatoric Uncertainty
plt.figure(figsize=[6, 1.5]) # inches
plt.plot(x, y, 'b.', label='observed');
m = yhat.mean()
s = yhat.stddev()
plt.plot(x_tst, m, 'r', linewidth=4, label='mean');
plt.plot(x_tst, m + 2 * s, 'g', linewidth=2, label=r'mean + 2 stddev');
plt.plot(x_tst, m - 2 * s, 'g', linewidth=2, label=r'mean - 2 stddev');
plt.ylim(-0.,17);
plt.yticks(np.linspace(0, 15, 4)[1:]);
plt.xticks(np.linspace(*x_range, num=9));
ax=plt.gca();
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data', 0))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#ax.spines['left'].set_smart_bounds(True)
#ax.spines['bottom'].set_smart_bounds(True)
plt.legend(loc='center left', fancybox=True, framealpha=0., bbox_to_anchor=(1.05, 0.5))
plt.savefig('/tmp/fig2.png', bbox_inches='tight', dpi=300)
###Output
_____no_output_____
###Markdown
Case 3: Epistemic Uncertainty
###Code
# Specify the surrogate posterior over `keras.layers.Dense` `kernel` and `bias`.
def posterior_mean_field(kernel_size, bias_size=0, dtype=None):
n = kernel_size + bias_size
c = np.log(np.expm1(1.))
return tf.keras.Sequential([
tfp.layers.VariableLayer(2 * n, dtype=dtype),
tfp.layers.DistributionLambda(lambda t: tfd.Independent(
tfd.Normal(loc=t[..., :n],
scale=1e-5 + tf.nn.softplus(c + t[..., n:])),
reinterpreted_batch_ndims=1)),
])
# Specify the prior over `keras.layers.Dense` `kernel` and `bias`.
def prior_trainable(kernel_size, bias_size=0, dtype=None):
n = kernel_size + bias_size
return tf.keras.Sequential([
tfp.layers.VariableLayer(n, dtype=dtype),
tfp.layers.DistributionLambda(lambda t: tfd.Independent(
tfd.Normal(loc=t, scale=1),
reinterpreted_batch_ndims=1)),
])
# Build model.
model = tf.keras.Sequential([
tfp.layers.DenseVariational(1, posterior_mean_field, prior_trainable, kl_weight=1/x.shape[0]),
tfp.layers.DistributionLambda(lambda t: tfd.Normal(loc=t, scale=1)),
])
# Do inference.
model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.01), loss=negloglik)
model.fit(x, y, epochs=1000, verbose=False);
# Profit.
[print(np.squeeze(w.numpy())) for w in model.weights];
yhat = model(x_tst)
assert isinstance(yhat, tfd.Distribution)
#@title Figure 3: Epistemic Uncertainty
plt.figure(figsize=[6, 1.5]) # inches
plt.clf();
plt.plot(x, y, 'b.', label='observed');
yhats = [model(x_tst) for _ in range(100)]
avgm = np.zeros_like(x_tst[..., 0])
for i, yhat in enumerate(yhats):
m = np.squeeze(yhat.mean())
s = np.squeeze(yhat.stddev())
if i < 25:
plt.plot(x_tst, m, 'r', label='ensemble means' if i == 0 else None, linewidth=0.5)
avgm += m
plt.plot(x_tst, avgm/len(yhats), 'r', label='overall mean', linewidth=4)
plt.ylim(-0.,17);
plt.yticks(np.linspace(0, 15, 4)[1:]);
plt.xticks(np.linspace(*x_range, num=9));
ax=plt.gca();
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data', 0))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#ax.spines['left'].set_smart_bounds(True)
#ax.spines['bottom'].set_smart_bounds(True)
plt.legend(loc='center left', fancybox=True, framealpha=0., bbox_to_anchor=(1.05, 0.5))
plt.savefig('/tmp/fig3.png', bbox_inches='tight', dpi=300)
###Output
_____no_output_____
###Markdown
Case 4: Aleatoric & Epistemic Uncertainty
###Code
# Build model.
model = tf.keras.Sequential([
tfp.layers.DenseVariational(1 + 1, posterior_mean_field, prior_trainable, kl_weight=1/x.shape[0]),
tfp.layers.DistributionLambda(
lambda t: tfd.Normal(loc=t[..., :1],
scale=1e-3 + tf.math.softplus(0.01 * t[...,1:]))),
])
# Do inference.
model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.01), loss=negloglik)
model.fit(x, y, epochs=1000, verbose=False);
# Profit.
[print(np.squeeze(w.numpy())) for w in model.weights];
yhat = model(x_tst)
assert isinstance(yhat, tfd.Distribution)
#@title Figure 4: Both Aleatoric & Epistemic Uncertainty
plt.figure(figsize=[6, 1.5]) # inches
plt.plot(x, y, 'b.', label='observed');
yhats = [model(x_tst) for _ in range(100)]
avgm = np.zeros_like(x_tst[..., 0])
for i, yhat in enumerate(yhats):
m = np.squeeze(yhat.mean())
s = np.squeeze(yhat.stddev())
if i < 15:
plt.plot(x_tst, m, 'r', label='ensemble means' if i == 0 else None, linewidth=1.)
plt.plot(x_tst, m + 2 * s, 'g', linewidth=0.5, label='ensemble means + 2 ensemble stdev' if i == 0 else None);
plt.plot(x_tst, m - 2 * s, 'g', linewidth=0.5, label='ensemble means - 2 ensemble stdev' if i == 0 else None);
avgm += m
plt.plot(x_tst, avgm/len(yhats), 'r', label='overall mean', linewidth=4)
plt.ylim(-0.,17);
plt.yticks(np.linspace(0, 15, 4)[1:]);
plt.xticks(np.linspace(*x_range, num=9));
ax=plt.gca();
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data', 0))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#ax.spines['left'].set_smart_bounds(True)
#ax.spines['bottom'].set_smart_bounds(True)
plt.legend(loc='center left', fancybox=True, framealpha=0., bbox_to_anchor=(1.05, 0.5))
plt.savefig('/tmp/fig4.png', bbox_inches='tight', dpi=300)
###Output
_____no_output_____
###Markdown
Case 5: Functional Uncertainty
###Code
#@title Custom PSD Kernel
class RBFKernelFn(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(RBFKernelFn, self).__init__(**kwargs)
dtype = kwargs.get('dtype', None)
self._amplitude = self.add_variable(
initializer=tf.constant_initializer(0),
dtype=dtype,
name='amplitude')
self._length_scale = self.add_variable(
initializer=tf.constant_initializer(0),
dtype=dtype,
name='length_scale')
def call(self, x):
# Never called -- this is just a layer so it can hold variables
# in a way Keras understands.
return x
@property
def kernel(self):
return tfp.math.psd_kernels.ExponentiatedQuadratic(
amplitude=tf.nn.softplus(0.1 * self._amplitude),
length_scale=tf.nn.softplus(5. * self._length_scale)
)
# For numeric stability, set the default floating-point dtype to float64
tf.keras.backend.set_floatx('float64')
# Build model.
num_inducing_points = 40
model = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=[1]),
tf.keras.layers.Dense(1, kernel_initializer='ones', use_bias=False),
tfp.layers.VariationalGaussianProcess(
num_inducing_points=num_inducing_points,
kernel_provider=RBFKernelFn(),
event_shape=[1],
inducing_index_points_initializer=tf.constant_initializer(
np.linspace(*x_range, num=num_inducing_points,
dtype=x.dtype)[..., np.newaxis]),
unconstrained_observation_noise_variance_initializer=(
tf.constant_initializer(np.array(0.54).astype(x.dtype))),
),
])
# Do inference.
batch_size = 32
loss = lambda y, rv_y: rv_y.variational_loss(
y, kl_weight=np.array(batch_size, x.dtype) / x.shape[0])
model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.01), loss=loss)
model.fit(x, y, batch_size=batch_size, epochs=1000, verbose=False)
# Profit.
yhat = model(x_tst)
assert isinstance(yhat, tfd.Distribution)
#@title Figure 5: Functional Uncertainty
y, x, _ = load_dataset()
plt.figure(figsize=[6, 1.5]) # inches
plt.plot(x, y, 'b.', label='observed');
num_samples = 7
for i in range(num_samples):
sample_ = yhat.sample().numpy()
plt.plot(x_tst,
sample_[..., 0].T,
'r',
linewidth=0.9,
label='ensemble means' if i == 0 else None);
plt.ylim(-0.,17);
plt.yticks(np.linspace(0, 15, 4)[1:]);
plt.xticks(np.linspace(*x_range, num=9));
ax=plt.gca();
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data', 0))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#ax.spines['left'].set_smart_bounds(True)
#ax.spines['bottom'].set_smart_bounds(True)
plt.legend(loc='center left', fancybox=True, framealpha=0., bbox_to_anchor=(1.05, 0.5))
plt.savefig('/tmp/fig5.png', bbox_inches='tight', dpi=300)
###Output
_____no_output_____ |
applications/solvers/cokeCombustionFoam/SegregatedSteps/runs/porousMedia/preparations/steadyStateFlow/Pe1e_3/validate/validate.ipynb | ###Markdown
Compose the LB velocity, which is located at the cell points (2D)
###Code
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# nx, ny: lattice dimensions of the 2D domain; assumed to be defined earlier in the workflow
ux_LB=np.fromfile("./SRT-LB/0_VelocityX",dtype=float)
uy_LB=np.fromfile("./SRT-LB/0_VelocityY",dtype=float)
ux_LB=ux_LB.reshape(ny+1,nx+1)
uy_LB=uy_LB.reshape(ny+1,nx+1)
average_ux_LB=3.1240818E-05
norm_ux_LB=ux_LB/average_ux_LB
norm_uy_LB=uy_LB/average_ux_LB
fig, ax = plt.subplots()
ax.imshow(norm_ux_LB)
###Output
_____no_output_____
###Markdown
Compose the DBS velocity, which is located at the cell points (3D)
###Code
data=pd.read_csv("./DBS/vel.csv")
data=data[data["Points:2"]==0]
data.drop(['U:2',"Points:2"], axis=1,inplace=True)
data.info()
ux_dbs=np.array(data['U:0']).reshape(ny+1,nx+1)
uy_dbs=np.array(data['U:1']).reshape(ny+1,nx+1)
fig, ax = plt.subplots()
average_ux_DBS=3.81798E-04
norm_ux_DBS=ux_dbs/average_ux_DBS
norm_uy_DBS=uy_dbs/average_ux_DBS
ax.imshow(norm_ux_DBS)
###Output
_____no_output_____
###Markdown
Compare
###Code
abs_error=np.abs(norm_ux_LB-norm_ux_DBS)
print(f"max absolute error: {np.max(abs_error)} ")
relative_error=0
num=0
for i in np.arange(0,ny+1):
for j in np.arange(0,nx+1):
if(norm_ux_LB[i,j]>1e-8):
num +=1
error=abs_error[i,j]/norm_ux_LB[i,j]
relative_error +=pow(error,2)
relative_error=math.sqrt(relative_error)/num
print(f"non-zero value num: {num}")
print(f"relative_error: {relative_error*100}%")
###Output
non-zero value num: 18818
relative_error: 429.76921327118197%
###Markdown
Read the MRT-LB results, which are located at the cell centers
###Code
ux_centerline_mrt=np.loadtxt("./MRT-LB/ux-centerline.txt")
x_centerline_mrt=np.arange(0.5,480)
###Output
_____no_output_____
###Markdown
Compare the centerline
###Code
ux_centerline_LB=norm_ux_LB[50,:]
ux_centerline_DBS=norm_ux_DBS[50,:]
fig, ax = plt.subplots()
ax.plot(ux_centerline_LB,lineStyle="-.",label="SRT-LB")
ax.plot(x_centerline_mrt,ux_centerline_mrt,lineStyle="-",color="r",label="MRT-LB")
ax.plot(ux_centerline_DBS,lineStyle="--",label="DBS")
ax.set_xlabel(f"Dimensionless X")
ax.set_ylabel(f"Dimensionless Ux")
ax.set_title(f"Velocity X: DBS vs LB")
ax.legend(loc="upper right")
ux_centerline_DBS=norm_ux_DBS[50,:]
fig, ax = plt.subplots()
ax.plot(x_centerline_mrt,ux_centerline_mrt,lineStyle="-",color="r",label="MRT-LB")
ax.plot(ux_centerline_DBS,lineStyle="--",label="DBS")
ax.set_xlabel(f"Dimensionless X")
ax.set_ylabel(f"Dimensionless Ux")
ax.set_title(f"Velocity X: DBS vs LB")
ax.legend(loc="upper right")
uy_centerline_LB=norm_uy_LB[50,:]
uy_centerline_DBS=norm_uy_DBS[50,:]
fig, ax = plt.subplots()
ax.plot(uy_centerline_LB,lineStyle="-",label="LB")
ax.plot(uy_centerline_DBS,lineStyle="--",label="DBS")
ax.set_xlabel(f"Dimensionless X")
ax.set_ylabel(f"Dimensionless Uy")
ax.set_title(f"Velocity Y: DBS vs LB")
ax.legend(loc="upper right")
###Output
_____no_output_____ |
display_fitness_keypoint.ipynb | ###Markdown
Plan to Completion

End state:
1. Continuous color segment for the down and up motion of each rep

Play it out:
1. Person will start in the same position they finish
2. Each rep should consist of only 2 segments
3. The raw gradient, lightly processed gradient and segment IDs are insufficient
4. Likely need to write custom motion-processing logic and/or filtering functions

Motion Processing
1. A rep will start at the max or within max +- 10%
   - Max can be determined by sampling t0 or the max
   - Min can be sampled by taking the min of the set? (for test purposes yes, in reality no - failed attempts)
2. Possible steps to measure
   - Determine max and min based on ranges within the total set
   - First pass: determine areas within 50% of min or max?
   - Assign labels as upper and lower
   - Assign position segment IDs based on upper and lower
   - Second pass: identify the local max and min within each segment
   - Assign motion segment IDs based on min/max within each position segment
   - For a given position segment id:
     - determine the max - set to -1 (down) (first occurrence)
     - determine the min - set to 1 (up) (first occurrence)
     - challenge will be to correctly assign back to the original df?
     - all other values

Filtering Funcs
1. Smoothing
   - Total number of segments should be equal to 2x reps
   - A rep is measured as starting from max and returning to max

Plot raw points at every gradient change
###Code
# Plotly Plot
### TO DO - sub set the plot where gradient not equal zero, add transparency to points, should be good.
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
plotly.tools.set_credentials_file(username='aduxbury', api_key='1vW1xxY8a14YJ6cd5Efw')
trace0 = go.Scatter(
x = left_knee_df.loc[left_knee_df['gradient'] != 0, 'x'],
y = left_knee_df.loc[left_knee_df['gradient'] != 0, 'y'],
mode = 'markers',
name = 'Left Knee',
marker=dict(
size=8,
color = left_knee_df.loc[left_knee_df['gradient'] != 0, 'gradient'], #set color equal to a variable
colorscale='RdBu',
showscale=True
)
)
trace1 = go.Scatter(
x = right_knee_df.loc[right_knee_df['gradient'] != 0, 'x'],
y = right_knee_df.loc[right_knee_df['gradient'] != 0, 'y'],
mode = 'markers',
name = 'Right Knee',
marker=dict(
size=8,
color = right_knee_df.loc[right_knee_df['gradient'] != 0, 'gradient'], #set color equal to a variable
colorscale='RdBu',
showscale=True),
)
trace2 = go.Scatter(
x = mid_hip_df.loc[mid_hip_df['gradient'] != 0, 'x'],
y = mid_hip_df.loc[mid_hip_df['gradient'] != 0, 'y'],
mode = 'markers',
name = 'Mid Hip',
marker=dict(
size=8,
color = mid_hip_df.loc[mid_hip_df['gradient'] != 0, 'gradient'], #set color equal to a variable
colorscale='RdBu',
showscale=True),
)
x = right_knee_df.loc[right_knee_df['gradient'] != 0, 'x']
y = right_knee_df.loc[right_knee_df['gradient'] != 0, 'y']
print('length of x ', len(x))
print('length of y ', len(y))
layout = go.Layout(
yaxis=dict(autorange='reversed'))
data = [trace0, trace1, trace2]
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename = 'front_squat')
# Plotly Plot
### TO DO - sub set the plot where gradient not equal zero, add transparency to points, should be good.
plot_var = 'delta'
import plotly
plotly.tools.set_credentials_file(username='aduxbury', api_key='1vW1xxY8a14YJ6cd5Efw')
trace0 = go.Scatter(
x = left_knee_df.loc[left_knee_df[plot_var] != 0, 'x'],
y = left_knee_df.loc[left_knee_df[plot_var] != 0, 'y'],
mode = 'markers',
name = 'Left Knee',
marker=dict(
size=8,
color = left_knee_df.loc[left_knee_df[plot_var] != 0, plot_var], #set color equal to a variable
colorscale='RdBu',
showscale=True
)
)
trace1 = go.Scatter(
x = right_knee_df.loc[right_knee_df[plot_var] != 0, 'x'],
y = right_knee_df.loc[right_knee_df[plot_var] != 0, 'y'],
mode = 'markers',
name = 'Right Knee',
marker=dict(
size=8,
color = right_knee_df.loc[right_knee_df[plot_var] != 0, plot_var], #set color equal to a variable
colorscale='RdBu',
showscale=True),
)
x = right_knee_df.loc[right_knee_df[plot_var] != 0, 'x']
y = right_knee_df.loc[right_knee_df[plot_var] != 0, 'y']
print('length of x ', len(x))
print('length of y ', len(y))
layout = go.Layout(
yaxis=dict(autorange='reversed'))
data = [trace0, trace1]
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename = 'front_squat')
###Output
length of x 219
length of y 219
###Markdown
Plot a histogram of segment 'lengths'. Ideally there should be 1 segment down and 1 segment up for each 'rep' in the exercise, but due to sampling that does not occur. Gradient changes occur when points are sampled at different grid positions, and gradient changes truncate segments. Back-and-forth changes in gradients create an abundance of short segments (~50 segments with fewer than 10 points). Now that a graph has been made, we've datascienced, so this work is legit. What we can do (hopefully) is drop every segment with fewer than 10 points.
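A hedged sketch of that filtering step (the function name is ours; the `delta_id` column follows the histogram cell below, and the 10-point cutoff is the one eyeballed from the histogram):
###Code
def drop_short_segments(df, seg_col='delta_id', min_len=10):
    # keep only the segment ids that have at least `min_len` samples
    seg_sizes = df[seg_col].value_counts()
    keep_ids = seg_sizes[seg_sizes >= min_len].index
    return df[df[seg_col].isin(keep_ids)].copy()

# usage sketch, assuming the knee dataframes from earlier cells:
# right_knee_df = drop_short_segments(right_knee_df)
# left_knee_df = drop_short_segments(left_knee_df)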
###Code
import plotly.plotly as py
import plotly.graph_objs as go
import numpy as np
x = right_knee_df['delta_id'].value_counts()
print('Max length of seg: ',np.max(x))
data = [go.Histogram(x=x)]
py.iplot(data, filename='basic histogram')
import plotly.plotly as py
import plotly.graph_objs as go
import numpy as np
x = right_knee_df['knee_delta'].values
print('Max length of seg: ',np.max(x))
print('Min length of seg: ',np.min(x))
print('Mean abs knee delta: ', np.mean(np.absolute(x)))
print('Mean knee delta: ', np.mean(x))
data = [go.Histogram(x=x)]
py.iplot(data, filename='basic histogram')
# QC point sets against number of frames
print(knee_sep_df.shape)
print(left_knee_df.shape)
print(right_knee_df.shape)
# Func to place text on an image
def draw_label(img, text, color_select=(255,255,255)):
font_face = cv2.FONT_HERSHEY_SIMPLEX
scale = 0.8
bg_color = (0,0,0)
thickness = cv2.FILLED
margin = 10
# image dimensions
img_y = img.shape[0]
img_x = img.shape[1]
txt_size = cv2.getTextSize(text, font_face, scale, thickness)
# Set text print position to lower middle of screen
# This takes the image size and text size, then positions the message centered
pos = (int(img_y*0.98), (int(img_x/2) - int((txt_size[0][0])/2)))
# reverses y,x order for plotting as (x,y)
pos = pos[::-1]
# define end points for text box
# This is used for printing a bounding box
end_x = pos[0] + txt_size[0][0] + margin
end_y = pos[1] - txt_size[0][1] - margin
# background rectangle
#cv2.rectangle(img, (pos[0]-margin,pos[1]+margin), (end_x, end_y), bg_color, thickness)
# text
cv2.putText(img, text, pos, font_face, scale, color_select, 2)
# Func to place text on an image
#Circle(img, center, radius, color, thickness=1, lineType=8, shift=0)
def draw_point(img, point, color_select = (255, 255, 255)):
scale = 0.8
bg_color = (0,0,0)
thickness = 4
radius = 4
pos = (int(point[0]), int(point[1]))
cv2.circle(img, pos, radius, color_select, thickness)
def draw_poly_line(img, pts, color_select = (255,255,255)):
poly_line_thickness = 2
poly_closed = False
pts = pts[:,0:2]
pts = pts.reshape((-1,1,2))
cv2.polylines(img, np.int32([pts]), poly_closed, color_select, thickness=poly_line_thickness)
# take point data grouped by segments and plot individual poly segments
def draw_line_set(img, pts, color_select = (220,220,200)):
# Figure out the number of segments
seg_num = np.max(pts['motion_id_seg'].values)
red = (10, 230, 10)
blue = (255, 255, 255)
previous_seg_color = blue
counter = 0
counter2 = 0
seg = 1
#print('length of pts: ', len(pts), ' seg_num, ', seg_num)
# Plot each segment separately and with it's gradient appropriate color
while seg < seg_num+1:
seg_pts = pts[pts['motion_id_seg']==seg]
if len(seg_pts) > 0:
# based on histogram of points - we want to drop all short points from plotting
#print('inside for loop: ', len(seg_pts))
#print('Seg ', seg)
if(seg_pts['motion_id'].values[0] == 'up'):
# draw a red line for up
draw_poly_line(img, seg_pts.values, red)
counter = counter+ 1
previous_seg_color = blue
#print('Total pass through up: ',counter)
elif(seg_pts['motion_id'].values[0] == 'down'):
# draw a blue line for down
                draw_poly_line(img, seg_pts.values, blue)
previous_seg_color = red
counter2 = counter2 + 1
#print('length of pts ',len(seg_pts.values))
else:
draw_poly_line(img, seg_pts.values, previous_seg_color)
seg+=1
'''
np.max(left_knee_df['polyid'].values)
seg_pts = right_knee_df[right_knee_df['motion_id_seg']<3]
print('counting ', seg_pts['motion_id'].values[0]=='down')
print('counting ', seg_pts.iloc[0:20,6:8])
'''
###Output
_____no_output_____
###Markdown
Plan to sexy MVP

1. Add grey fade + alpha channels for plotting
   - want to call point - 90 points primary
   - zero to 90 as grey with incremental alpha (place in fnc call)
   - White circle on knee point
2. Develop flags for knee movements out of alignment (a sketch of this flag calculation follows below)
   - calc original (standing) center of the knees (x-coord) (a)
   - for each pair of knee points, calc the mid point (x-coord) (b)
   - calculate the displacement (+-) from the current (b) to the center knees (a)
   - calculate a flag based on % displacement from center (a)
   - when the flag is triggered, change the display text and the corresponding knee color
3. Add message for knee out of position
4. Calculate and display velocity
   - Determine pixels-to-distance relationship: x pixels = 1 m
   - We'll have a height measure in m and a pixel value ~ pixels / m
   - Determine the time: length of the input vid in seconds, number of frames ~ frames / second
   - Calculate and display values
     - Current velocity = average velocity over the past quarter second, updated every quarter second
     - Average descent velocity = total distance for all down-flagged motions, and the total frame count
     - Average ascent velocity = total distance for all up flags, and the total frame count
5. Clean up code base
6. Build tooling to load video in OpenPose, extract point data, process point data and plot new video
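A hedged sketch of the knee-flag calculation in item 2 (the column names `x`/`knee_flag` match the dataframes used below; the 10% threshold, the use of the first frame as the standing reference, and assigning the shared flag to both knees are our assumptions, not taken from the notebook):
###Code
import numpy as np

def flag_knee_drift(left_df, right_df, threshold=0.10):
    # assumes both dataframes have one row per frame, in the same order
    left_df, right_df = left_df.copy(), right_df.copy()
    # (a) centre of the knees while standing, sampled from the first frame
    center_x = (left_df['x'].iloc[0] + right_df['x'].iloc[0]) / 2.0
    knee_sep = abs(right_df['x'].iloc[0] - left_df['x'].iloc[0])
    # (b) per-frame midpoint of the two knees
    mid_x = (left_df['x'].values + right_df['x'].values) / 2.0
    # signed displacement of (b) from (a), as a fraction of the standing knee separation
    displacement = (mid_x - center_x) / knee_sep
    flags = (np.abs(displacement) > threshold).astype(int)
    # check_form() below reads a per-knee flag, so assign the shared flag to both sides
    left_df['knee_flag'] = flags
    right_df['knee_flag'] = flags
    return left_df, right_df

# usage sketch: left_knee_df, right_knee_df = flag_knee_drift(left_knee_df, right_knee_df)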
###Code
def check_form(frame, pts_l, pts_r, msg = "Good Form"):
if pts_l['knee_flag'] == 1:
msg = "Form Break! - Left Knee"
draw_point(frame,pts_l,(0,0,255))
draw_label(frame,msg,(0,0,255))
elif pts_r['knee_flag'] == 1:
msg = "Form Break! - Right Knee"
draw_point(frame,pts_r,(0,0,255))
draw_label(frame,msg,(0,0,255))
else:
draw_label(frame,msg,(255,255,255))
return msg
def get_pixel_per_meter():
# subtract toe y position from hip y position
# relate pixel distance to peron's height / 2
pixels = right_leg_df['y'].iloc[3] - right_leg_df['y'].iloc[0]
meters = (persons_height*toe_hip_height_ratio)/100
pix_per_meter = pixels/meters
return pix_per_meter
def get_distance(pts):
pixels_per_meter = get_pixel_per_meter()
distance = 0
total_dist_pix = []
for pt in range(1,len(pts)):
total_dist_pix.append(np.abs(pts['y'].iloc[pt]-pts['y'].iloc[pt-1]))
total_dist_pix = np.sum(total_dist_pix)
# expected distance in meters
distance = total_dist_pix / pixels_per_meter
# set precision 2 decimals
distance = int(distance * 100)/100
return distance
def get_velocity(pts, frame_fps):
# time in seconds
time = len(pts)/frame_fps
distance = get_distance(pts)
velocity = distance / time
# convert to 2 decimals
velocity = int(velocity*100)/100
return velocity
def get_average_velocity(pts, frame_fps):
seg_num = np.max(pts['motion_id_seg'].values)
avrg_asc_vel = [0]
avrg_dec_vel = [0]
for seg in range(1,seg_num+1):
if len(pts[pts['motion_id_seg']==seg]) > 2:
vel = get_velocity(pts[pts['motion_id_seg']==seg], frame_fps)
else:
vel = 0
if seg%2 == 1:
avrg_dec_vel.append(vel)
else:
avrg_asc_vel.append(vel)
avrg_asc_vel = int(np.average(avrg_asc_vel)*100)/100
avrg_dec_vel = int(np.average(avrg_dec_vel)*100)/100
return avrg_asc_vel, avrg_dec_vel
def draw_stats_box(frame, velocity, avrg_ascent, avrg_decent, count):
title_text = "MyStrengthBook.com"
if mid_hip_df['motion_id_seg'].iloc[count] == 1:
rep = 1
elif mid_hip_df['motion_id_seg'].iloc[count] % 2 == 0:
rep = int(mid_hip_df['motion_id_seg'].iloc[count]/2)
else:
rep = int(np.round(mid_hip_df['motion_id_seg'].iloc[count]/2+0.1))
cur_rep = "Rep: "+str(rep)
cur_frame = "Frame: "+str(count)
vel_text = "Movement Speed: "+str(velocity)+" m/s"
avrg_dec_vel = "Avrg Decent Speed: "+str(avrg_decent)+" m/s"
avrg_asc_vel = "Avrg Ascent Speed: "+str(avrg_ascent)+" m/s"
# string used to set text box size
for_size_text = "Avrg Ascent Speed: "+str(10.00)+" m/s"
box_text = [cur_rep, cur_frame, vel_text, avrg_dec_vel, avrg_asc_vel]
font_face = cv2.FONT_HERSHEY_DUPLEX
scale = 0.8
bg_color = (0,0,0)
# MSB Green
box_color = (74, 194, 108)
# charcoal text
text_color = (50, 50, 50)
thickness = cv2.FILLED
margin = 10
# image dimensions
#img_y = img.shape[0]
#img_x = img.shape[1]
# Determine size of text box
text = for_size_text
txt_size = cv2.getTextSize(text, font_face, scale, thickness)
text_height = txt_size[0][1]
text_width = txt_size[0][0]
box_height = len(box_text)*text_height + len(box_text)*margin + margin*8
box_width = text_width + 4*margin
# Set text print position to lower middle of screen
# This takes the image size and text size, then positions the message centered
text = title_text
txt_size = cv2.getTextSize(text, font_face, scale, thickness)
txt_size = txt_size[0][0]
title_pos = (int(box_width/2-txt_size/2),int(2*margin+text_height))
# background rectangle
cv2.rectangle(frame, (0,0), (5+box_width, 5+box_height), text_color, -1)
cv2.rectangle(frame, (0,0), (box_width, box_height), box_color, -1)
counter = 0
y_pos = 3*text_height
for txt in box_text:
cv2.putText(frame, txt, (2*margin,3*margin+y_pos), font_face, scale, text_color, 1)
counter = counter + 1
y_pos = y_pos + text_height+int(margin)
# Title Text
cv2.putText(frame, title_text, title_pos, font_face, scale, text_color, 1)
cap = cv2.VideoCapture('examples\media\mike_front_view_squat.mp4')
###Output
_____no_output_____
###Markdown
Video with Knee Tracking Lines - Testing v2 up/down motion tracking
###Code
cap = cv2.VideoCapture('examples\media\mike_front_view_squat_trim.mp4')
frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
frame_fps = cap.get(cv2.CAP_PROP_FPS)
# 4 samples per second ~ 1 every 250ms
sample_interval = 4
vel_inter = int(np.round(frame_fps/sample_interval))
# Status box values
cur_velocity = 0
avrg_decent = 0
avrg_ascent = 0
print("Number of Frames in Input Video: ",frame_count)
print("Frame Rate Input: ", frame_fps)
alpha = 0
text = "Current Frame: "
# Check if camera opened successfully
if (cap.isOpened()== False):
print("Error opening video stream or file")
width = int(cap.get(3)) # float
height = int(cap.get(4)) # float
cv2.namedWindow('Peach Factor',cv2.WINDOW_AUTOSIZE)
### Init output video
out = cv2.VideoWriter('peach_factory_v2.avi',cv2.VideoWriter_fourcc('M','J','P','G'),
25, (width,height))
count = 0
while(True):
ret, frame = cap.read()
if ret == True:
buffer = frame.copy()
if count > 0:
frame_msec = int(cap.get(cv2.CAP_PROP_POS_MSEC))
if count < 150:
#right_temp_df = right_knee_df.iloc[0:count]
#left_temp_df = left_knee_df.iloc[0:count]
# Expected right knee, red line --> direct of motion is up
draw_line_set(frame, right_knee_df.iloc[0:count])
# Expected left knee, red line --> direct of motion is up
draw_line_set(frame, left_knee_df.iloc[0:count])
else:
#right_temp_df = right_knee_df.iloc[count-89:count]
#left_temp_df = left_knee_df.iloc[count-89:count]
draw_line_set(frame, right_knee_df.iloc[count-149:count])
# Expected left knee, red line --> direct of motion is up
draw_line_set(frame, left_knee_df.iloc[count-149:count])
alpha = 1-(count/frame_count)+0.3
draw_line_set(buffer, right_knee_df.iloc[0:count-149])
# Expected left knee, red line --> direct of motion is up
draw_line_set(buffer, left_knee_df.iloc[0:count-149])
for point in range(0,4):
draw_point(frame, left_leg_df.iloc[(count*4)+(point)], (150,150,0))
draw_point(frame, right_leg_df.iloc[(count*4)+(point)], (150,150,0))
draw_poly_line(buffer, left_leg_df.iloc[count*4:(count*4)+4].values, (255,255,255))
draw_poly_line(buffer, right_leg_df.iloc[count*4:(count*4)+4].values, (255,255,255))
alpha = 0.3
draw_point(buffer, left_knee_df.iloc[count], (0,255,0))
draw_point(buffer, right_knee_df.iloc[count], (0,255,0))
# Make Velocity Calculation
if count%vel_inter == 0 and count > vel_inter:
velocity = get_velocity(mid_hip_df.iloc[(count-vel_inter):count], frame_fps)
avrg_ascent, avrg_decent = get_average_velocity(mid_hip_df.iloc[0:count], frame_fps)
#check_form(frame, left_knee_df.iloc[count], right_knee_df.iloc[count])
#draw_label(frame, text+str(count)+", "+msg)
#cv2.putText(frame,text,(150,500), font, 1,(255,255,255),2,cv2.LINE_AA)
cv2.addWeighted(buffer, alpha, frame,1-alpha, 0, frame)
draw_stats_box(frame,velocity, avrg_ascent, avrg_decent, count)
cv2.imshow('Peach Factor',frame)
### Write output video
out.write(frame)
if cv2.waitKey(30) & 0xFF == ord('q'):
break
else:
break
count+=1
cap.release()
out.release()
cv2.destroyAllWindows()
# Compress output AVI into clean and tiny MP4
from subprocess import check_output
#check_output("del peach_factory_v2.mp4", shell=True).decode()
check_output("ffmpeg -i peach_factory_v2.avi -vcodec h264 -acodec mp2 peach_factory_25fps.mp4", shell=True).decode()
# ffmpeg -i peach_factory_v2.avi -vcodec h264 -acodec mp2 peach_factory_v2.mp4
mid_hip_df
###Output
_____no_output_____
###Markdown
Video with Knee Points only
###Code
#cap = cv2.VideoCapture('examples\media\squat_front_ad_trim.mp4')
cap = cv2.VideoCapture('examples\media\mike_front_view_squat.mp4')
print("Number of frames in video: ",cap.get(cv2.CAP_PROP_FRAME_COUNT))
# Check if camera opened successfully
if (cap.isOpened()== False):
print("Error opening video stream or file")
cv2.namedWindow('Peach Factor',cv2.WINDOW_AUTOSIZE)
text = "Good Form!"
count = 0
while(True):
ret, frame = cap.read()
if ret == True:
if count > 2:
right_temp_df = right_knee_df.iloc[count]
left_temp_df = left_knee_df.iloc[count]
if right_temp_df['gradient']>0:
draw_point(frame, right_temp_df, (255,0,0))
elif right_temp_df['gradient']<0:
draw_point(frame, right_temp_df, (0,0,255))
if left_temp_df['gradient']>0:
draw_point(frame, left_temp_df, (255,0,0))
elif left_temp_df['gradient']<0:
draw_point(frame, left_temp_df, (0,0,255))
#draw_point(frame, left_knee_df.iloc[count])
draw_label(frame, text)
#cv2.putText(frame,text,(150,500), font, 1,(255,255,255),2,cv2.LINE_AA)
cv2.imshow('Peach Factor',frame)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
else:
break
count+=1
cap.release()
cv2.destroyAllWindows()
#cap = cv2.VideoCapture('examples\media\squat_front_ad_trim.mp4')
cap = cv2.VideoCapture('examples\media\mike_front_view_squat.mp4')
print("Number of frames in video: ",cap.get(cv2.CAP_PROP_FRAME_COUNT))
# Check if camera opened successfully
if (cap.isOpened()== False):
print("Error opening video stream or file")
cv2.namedWindow('Peach Factor',cv2.WINDOW_AUTOSIZE)
text = "Good Form!"
count = 0
while(True):
ret, frame = cap.read()
if ret == True:
if count > 2:
#right_temp_df = right_knee_df.iloc[count]
#left_temp_df = left_knee_df.iloc[count]
#draw_point(frame, left_knee_df.iloc[count])
draw_label(frame, text)
#cv2.putText(frame,text,(150,500), font, 1,(255,255,255),2,cv2.LINE_AA)
cv2.imshow('Peach Factor',frame)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
else:
break
count+=1
cap.release()
cv2.destroyAllWindows()
### Ok so next steps (see the sketch in the cell below).
# 1. Line sets: polygon line sets can be determined by starting a new polyline each time the gradient changes sign.
# 2. Lines can be filtered by length and removed from the plotting func.
# 3. Plotting should be adjusted so that the poly line plot call is made separately for each grouping of points.
###Output
_____no_output_____
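###Markdown
The comments above sketch a plan for splitting the knee trajectory into separate polylines whenever the direction of motion flips. The cell below is a rough illustration of that idea (a hypothetical helper, not used elsewhere in this notebook), assuming the knee dataframes keep the signed 'gradient' column used above.
###Code
def split_by_gradient_sign(knee_df):
    """Group consecutive rows into segments, starting a new segment
    whenever the sign of the 'gradient' column flips (illustrative sketch)."""
    segments = []
    current = []
    prev_sign = 0
    for _, row in knee_df.iterrows():
        sign = 1 if row['gradient'] > 0 else (-1 if row['gradient'] < 0 else 0)
        # direction of motion flipped -> close the current segment
        if current and sign != 0 and prev_sign != 0 and sign != prev_sign:
            segments.append(current)
            current = []
        current.append(row)
        if sign != 0:
            prev_sign = sign
    if current:
        segments.append(current)
    return segments
###Output
_____no_output_____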
###Markdown
Video with Knee Tracking Lines - Testing v1 up/down motion tracking
###Code
#cap = cv2.VideoCapture('examples\media\squat_front_ad_trim.mp4')
cap = cv2.VideoCapture('examples\media\mike_front_view_squat.mp4')
print("Number of frames in video: ",cap.get(cv2.CAP_PROP_FRAME_COUNT))
# Check if camera opened successfully
if (cap.isOpened()== False):
print("Error opening video stream or file")
cv2.namedWindow('Peach Factor',cv2.WINDOW_AUTOSIZE)
text = "This man needs help."
count = 0
while(True):
ret, frame = cap.read()
if ret == True:
if count > 80:
right_temp_df = right_knee_df.iloc[count-80:count]
left_temp_df = left_knee_df.iloc[count-80:count]
# Expected right knee, red line --> direct of motion is up
draw_poly_line(frame, right_temp_df.loc[right_temp_df['gradient'] > 0].values, (255,0,0))
# Expected right knee, blue line --> direct of motion is down
draw_poly_line(frame, right_temp_df.loc[right_temp_df['gradient'] < 0].values, (0,0,255))
# Expected left knee, red line --> direct of motion is up
draw_poly_line(frame, left_temp_df.loc[left_temp_df['gradient'] > 0].values, (255,0,0))
# Expected left knee, blue line --> direct of motion is down
draw_poly_line(frame, left_temp_df.loc[left_temp_df['gradient'] < 0].values, (0,0,255))
draw_label(frame, text)
#cv2.putText(frame,text,(150,500), font, 1,(255,255,255),2,cv2.LINE_AA)
cv2.imshow('Peach Factor',frame)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
else:
break
count+=1
cap.release()
cv2.destroyAllWindows()
###Output
_____no_output_____
###Markdown
Video with Knee Tracking Lines - All points
###Code
#cap = cv2.VideoCapture('examples\media\squat_front_ad_trim.mp4')
cap = cv2.VideoCapture('examples\media\mike_front_view_squat.mp4')
print("Number of frames in video: ",cap.get(cv2.CAP_PROP_FRAME_COUNT))
# Check if camera opened successfully
if (cap.isOpened()== False):
print("Error opening video stream or file")
cv2.namedWindow('Peach Factor',cv2.WINDOW_AUTOSIZE)
text = "This man needs help."
count = 0
while(True):
ret, frame = cap.read()
if ret == True:
if count > 2:
draw_poly_line(frame, right_knee_df.iloc[0:count].values)
draw_poly_line(frame, left_knee_df.iloc[0:count].values)
draw_label(frame, text)
#cv2.putText(frame,text,(150,500), font, 1,(255,255,255),2,cv2.LINE_AA)
cv2.imshow('Peach Factor',frame)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
else:
break
count+=1
cap.release()
cv2.destroyAllWindows()
#cap = cv2.VideoCapture('examples\media\squat_front_ad_trim.mp4')
cap = cv2.VideoCapture('examples\media\mike_front_view_squat.mp4')
print("Number of frames in video: ",cap.get(cv2.CAP_PROP_FRAME_COUNT))
# Check if camera opened successfully
if (cap.isOpened()== False):
print("Error opening video stream or file")
cv2.namedWindow('Peach Factor',cv2.WINDOW_AUTOSIZE)
text = "This man needs help."
count = 0
while(True):
ret, frame = cap.read()
if ret == True:
for point in range(0,count):
draw_point(frame, left_knee_df.iloc[point])
draw_point(frame, right_knee_df.iloc[point])
draw_label(frame, text)
#cv2.putText(frame,text,(150,500), font, 1,(255,255,255),2,cv2.LINE_AA)
cv2.imshow('Peach Factor',frame)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
else:
break
count+=1
cap.release()
cv2.destroyAllWindows()
print(frame.shape)
# To Do
# 1. Save a sub set of all left knee, right knee and distance between the two
# 2. plot all the points, diff color for left and right
# 3. look for outliers
# find way to auto-classify
# 4. Plot knee points with line back onto video frame
nasty_file = "squat_front_trim_000000000570_keypoints.json"
try:
temp_df = pd.read_json(path_to_json+nasty_file, orient='record')
temp_df = pd.DataFrame.from_dict(temp_df.values[0][0], orient='index')
except:
print('bad record')
temp_df
# cv2.polylines(img, [pts], True, (0, 255, 255))   # syntax reference only (img/pts are not defined in this cell)
# OpenCV drawing reference (scratch cell) -- draw onto a blank canvas so the calls below run standalone
image = np.zeros((300, 500, 3), np.uint8)
blue = (255, 0, 0)
red = (0, 0, 255)
green = (0, 255, 0)
violet = (180, 0, 180)
yellow = (0, 180, 180)
white = (255, 255, 255)
cv2.line(image, (50, 30), (450, 35), blue, thickness=5)
cv2.circle(image, (240, 205), 23, red, -1)
cv2.rectangle(image, (50, 60), (450, 95), green, -1)
cv2.ellipse(image, (250, 150), (80, 20), 5, 0, 360, violet, -1)
points = np.array([[[140, 230], [380, 230], [320, 250], [250, 280]]], np.int32)
cv2.polylines(image, [points], True, yellow, thickness=3)
font_scale = 1.5
font = cv2.FONT_HERSHEY_PLAIN
# set the rectangle background to white
rectangle_bgr = (255, 255, 255)
# make a black image
img = np.zeros((500, 500, 3))
# set some text
text = "Some text in a box!"
# get the width and height of the text box
(text_width, text_height) = cv2.getTextSize(text, font, fontScale=font_scale, thickness=1)[0]
# set the text start position
text_offset_x = 10
text_offset_y = img.shape[0] - 25
# make the coords of the box with a small padding of two pixels
box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width - 2, text_offset_y - text_height - 2))
cv2.rectangle(img, box_coords[0], box_coords[1], rectangle_bgr, cv2.FILLED)
cv2.putText(img, text, (text_offset_x, text_offset_y), font, fontScale=font_scale, color=(0, 0, 0), thickness=1)
cv2.imshow("A box!", img)
cv2.waitKey(0)
print('Size of img: ', img.shape)
# Right 9, 10, 11, 22
# Left 12, 13, 14, 19
'''
// Result for BODY_25 (25 body parts consisting of COCO + foot)
// const std::map<unsigned int, std::string> POSE_BODY_25_BODY_PARTS {
// {0, "Nose"},
// {1, "Neck"},
// {2, "RShoulder"},
// {3, "RElbow"},
// {4, "RWrist"},
// {5, "LShoulder"},
// {6, "LElbow"},
// {7, "LWrist"},
// {8, "MidHip"},
// {9, "RHip"},
// {10, "RKnee"},
// {11, "RAnkle"},
// {12, "LHip"},
// {13, "LKnee"},
// {14, "LAnkle"},
// {15, "REye"},
// {16, "LEye"},
// {17, "REar"},
// {18, "LEar"},
// {19, "LBigToe"},
// {20, "LSmallToe"},
// {21, "LHeel"},
// {22, "RBigToe"},
// {23, "RSmallToe"},
// {24, "RHeel"},
// {25, "Background"}
// };
'''
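# Illustrative example (added, hypothetical data): extracting the right/left leg
# keypoints from one OpenPose BODY_25 pose. OpenPose stores each person's
# 'pose_keypoints_2d' as a flat list of 25 * (x, y, confidence) values, and the
# indices follow the BODY_25 table above (9, 10, 11, 22 = right leg; 12, 13, 14, 19 = left leg).
example_pose = np.arange(25 * 3, dtype=float).reshape(25, 3)   # stand-in for a real detection
right_leg_pts = example_pose[[9, 10, 11, 22], :2]    # RHip, RKnee, RAnkle, RBigToe (x, y)
left_leg_pts = example_pose[[12, 13, 14, 19], :2]    # LHip, LKnee, LAnkle, LBigToe (x, y)
print(right_leg_pts.shape, left_leg_pts.shape)       # (4, 2) (4, 2)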
'''
multi-select technique
df.loc[(df["B"] > 50) & (df["C"] == 900), "A"] *= 1000
df
A B C
0 9 40 300
1 9 70 700
2 5000 70 900
3 8000 80 900
4 7 50 200
5 9 30 900
'''
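# Tiny runnable demo of the multi-select technique noted above (illustrative data,
# reproducing the small table shown in the docstring)
demo_df = pd.DataFrame({'A': [9, 9, 5, 8, 7, 9],
                        'B': [40, 70, 70, 80, 50, 30],
                        'C': [300, 700, 900, 900, 200, 900]})
demo_df.loc[(demo_df['B'] > 50) & (demo_df['C'] == 900), 'A'] *= 1000
print(demo_df)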
###Output
_____no_output_____ |
content/preface.ipynb | ###Markdown
Learn Quantum Computation using Qiskit Greetings from the Qiskit Community team! This textbook is a university quantum algorithms/computation course supplement based on Qiskit to help learn: The mathematics behind quantum algorithms Details about today's non-fault-tolerant quantum devices Writing code in Qiskit to implement quantum algorithms on IBM's cloud quantum systems Read the textbook About the TextbookThis is a free digital textbook that will teach you the concepts of quantum computing while you learn to use the Qiskit SDK. Run the Code InlineThis textbook is built on a jupyter notebook framework that allows for easy reading, but it also allows readers to edit and run the code right in the textbook. The chapters can also be opened as Jupyter notebooks in the IBM Quantum Experience, no installs required!
###Code
# Click 'try', then 'run' to see the output,
# you can change the code and run it again.
print("This code works!")
from qiskit import QuantumCircuit
qc = QuantumCircuit(2) # Create circuit with 2 qubits
qc.h(0) # Do H-gate on q0
qc.cx(0,1) # Do CNOT on q1 controlled by q0
qc.measure_all()
qc.draw()
###Output
_____no_output_____
###Markdown
Learn Quantum Computation using Qiskit Greetings from the Qiskit Community team! This textbook is a university quantum algorithms/computation course supplement based on Qiskit to help learn: The mathematics behind quantum algorithms Details about today's non-fault-tolerant quantum devices Writing code in Qiskit to implement quantum algorithms on IBM's cloud quantum systems Read the textbook 〉 About the TextbookThis is a free digital textbook that will teach you the concepts of quantum computing while you learn to use the Qiskit SDK. Run the Code InlineThis textbook is built on a jupyter notebook framework that allows for easy reading, but it also allows readers to edit and run the code right in the textbook. The chapters can also be opened as Jupyter notebooks in the IBM Quantum Experience, no installs required!
###Code
# Click 'try', then 'run' to see the output,
# you can change the code and run it again.
print("This code works!")
from qiskit import QuantumCircuit
qc = QuantumCircuit(2) # Create circuit with 2 qubits
qc.h(0) # Do H-gate on q0
qc.cx(0,1) # Do CNOT on q1 controlled by q0
qc.measure_all()
qc.draw()
###Output
_____no_output_____
###Markdown
Learn Quantum-Accelerated Scientific Computation using LibKet Greetings from the Qiskit Community team! This textbook is a university quantum algorithms/computation course supplement based on Qiskit to help learn: The mathematics behind quantum algorithms Details about today's non-fault-tolerant quantum devices Writing code in Qiskit to implement quantum algorithms on IBM's cloud quantum systems Read the textbook About the TextbookThis is a free digital textbook that will teach you the concepts of quantum computing while you learn to use the Qiskit SDK. Run the Code InlineThis textbook is built on a jupyter notebook framework that allows for easy reading, but it also allows readers to edit and run the code right in the textbook. The chapters can also be opened as Jupyter notebooks in the IBM Quantum Experience, no installs required!
###Code
# Click 'try', then 'run' to see the output,
# you can change the code and run it again.
print("This code works!")
from qiskit import QuantumCircuit
qc = QuantumCircuit(2) # Create circuit with 2 qubits
qc.h(0) # Do H-gate on q0
qc.cx(0,1) # Do CNOT on q1 controlled by q0
qc.measure_all()
qc.draw()
###Output
_____no_output_____
###Markdown
Learn Quantum Computation using Qiskit Greetings from the Qiskit Community team! We started this textbook to serve as a supplementary text for university quantum algorithms/computation courses based on Qiskit, covering: The mathematics behind quantum algorithms Details about today's non-fault-tolerant quantum devices Coding in Qiskit to implement quantum algorithms on IBM's cloud quantum systems Read the textbook About the Textbook This textbook is a free online text that teaches you the concepts of quantum computing while you learn to use the Qiskit SDK. Run the Code Inline This textbook is built on an easy-to-read Jupyter notebook framework, but readers can also edit and run the code right inside the textbook. Each chapter can also be opened as a Jupyter notebook in the IBM Quantum Experience, with no installs required.
###Code
# Click 'try', then 'run' to see the output,
# you can change the code and run it again.
print("This code works!")
from qiskit import QuantumCircuit
qc = QuantumCircuit(2) # Create circuit with 2 qubits
qc.h(0) # Do H-gate on q0
qc.cx(0,1) # Do CNOT on q1 controlled by q0
qc.measure_all()
qc.draw()
###Output
_____no_output_____ |
notebooks/t20-TCD_sentiment_analysis.ipynb | ###Markdown
Tutorial 20. Sentiment analysisCreated by Emanuel Flores-Bautista 2019 All content contained in this notebook is licensed under a [Creative Commons License 4.0 BY NC](https://creativecommons.org/licenses/by-nc/4.0/). The code is licensed under a [MIT license](https://opensource.org/licenses/MIT).
###Code
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.neural_network import MLPClassifier
from sklearn.decomposition import PCA
from sklearn.metrics import classification_report,accuracy_score,confusion_matrix
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from keras.datasets import imdb
import TCD19_utils as TCD
TCD.set_plotting_style_2()
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
###Output
Using TensorFlow backend.
###Markdown
We will train a classifier for movie reviews in the IMDB data set.
###Code
import tensorflow as tf
#from tensorflow import keras as tf.keras
#import numpy as np
# save np.load
np_load_old = np.load
# modify the default parameters of np.load
np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)
# call load_data with allow_pickle implicitly set to true
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=5000)
# restore np.load for future normal usage
np.load = np_load_old
###Output
_____no_output_____
###Markdown
from keras.datasets import imdb(x_train, y_train), (x_test, y_test) = imdb.load_data(path="imdb.npz", num_words=5000, skip_top=0, maxlen=None, seed=113, start_char=1, oov_char=2, index_from=3) (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words = vocabulary_size),allow_pickle = True)print('Loaded dataset with {} training samples,{} test samples'.format(len(X_train), len(X_test)))
###Code
len(X_train[0])
print('---review---')
print(X_train[6])
print('---label---')
print(y_train[6])
###Output
---review---
[1, 2, 365, 1234, 5, 1156, 354, 11, 14, 2, 2, 7, 1016, 2, 2, 356, 44, 4, 1349, 500, 746, 5, 200, 4, 4132, 11, 2, 2, 1117, 1831, 2, 5, 4831, 26, 6, 2, 4183, 17, 369, 37, 215, 1345, 143, 2, 5, 1838, 8, 1974, 15, 36, 119, 257, 85, 52, 486, 9, 6, 2, 2, 63, 271, 6, 196, 96, 949, 4121, 4, 2, 7, 4, 2212, 2436, 819, 63, 47, 77, 2, 180, 6, 227, 11, 94, 2494, 2, 13, 423, 4, 168, 7, 4, 22, 5, 89, 665, 71, 270, 56, 5, 13, 197, 12, 161, 2, 99, 76, 23, 2, 7, 419, 665, 40, 91, 85, 108, 7, 4, 2084, 5, 4773, 81, 55, 52, 1901]
---label---
1
###Markdown
Note that the review is stored as a sequence of integers. From the [Keras documentation](https://keras.io/datasets/) we can see that these are words IDs that have been pre-assigned to individual words, and the label is an integer (0 for negative, 1 for positive). We can go ahead and access the words from each review with the `get_word_index()` method from the `imdb` object.
###Code
word2id = imdb.get_word_index()
id2word = {i: word for word, i in word2id.items()}
print('---review with words---')
print([id2word.get(i, ' ') for i in X_train[6]])
print('---label---')
print(y_train[6])
###Output
---review with words---
['the', 'and', 'full', 'involving', 'to', 'impressive', 'boring', 'this', 'as', 'and', 'and', 'br', 'villain', 'and', 'and', 'need', 'has', 'of', 'costumes', 'b', 'message', 'to', 'may', 'of', 'props', 'this', 'and', 'and', 'concept', 'issue', 'and', 'to', "god's", 'he', 'is', 'and', 'unfolds', 'movie', 'women', 'like', "isn't", 'surely', "i'm", 'and', 'to', 'toward', 'in', "here's", 'for', 'from', 'did', 'having', 'because', 'very', 'quality', 'it', 'is', 'and', 'and', 'really', 'book', 'is', 'both', 'too', 'worked', 'carl', 'of', 'and', 'br', 'of', 'reviewer', 'closer', 'figure', 'really', 'there', 'will', 'and', 'things', 'is', 'far', 'this', 'make', 'mistakes', 'and', 'was', "couldn't", 'of', 'few', 'br', 'of', 'you', 'to', "don't", 'female', 'than', 'place', 'she', 'to', 'was', 'between', 'that', 'nothing', 'and', 'movies', 'get', 'are', 'and', 'br', 'yes', 'female', 'just', 'its', 'because', 'many', 'br', 'of', 'overly', 'to', 'descent', 'people', 'time', 'very', 'bland']
---label---
1
###Markdown
Because we cannot feed the index matrix directly to the classifier, we need to perform some data wrangling and feature extraction. We're going to write a couple of functions, in order to 1. Get a list of reviews, consisting of full-length strings. 2. Perform TF-IDF feature extraction on the review documents. Feature engineering
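As a quick reminder (an added note, using the standard formulation rather than scikit-learn's exact smoothed variant), TF-IDF weights a term $t$ in document $d$ as $$\mathrm{tfidf}(t, d) = \mathrm{tf}(t, d) \times \log\frac{N}{\mathrm{df}(t)},$$ where $\mathrm{tf}(t,d)$ is the term frequency within the document, $N$ is the number of documents and $\mathrm{df}(t)$ is the number of documents containing $t$; scikit-learn's `TfidfVectorizer` additionally applies smoothing and L2 normalisation by default.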
###Code
def get_joined_rvw(X):
"""
Given an X_train or X_test dataset from the IMDB reviews
of Keras, return a list of the reviews in string format.
"""
#Get word to index dictionary
word2id = imdb.get_word_index()
#Get index to word mapping dictionary
id2word = {i: word for word, i in word2id.items()}
#Initialize reviews list
doc_list = []
for review in X:
#Extract review
initial_rvw = [id2word.get(i) for i in review]
#Join strings followed by spaces
joined_rvw = " ".join(initial_rvw)
#Append review to the doc_list
doc_list.append(joined_rvw)
return doc_list
train_rvw = get_joined_rvw(X_train)
test_rvw = get_joined_rvw(X_test)
vocabulary_size = 5000  # must match num_words used when loading the IMDB data above
tf_idf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
                                    max_features=vocabulary_size,
                                    stop_words='english')
tf_idf_train = tf_idf_vectorizer.fit_transform(train_rvw)
# transform the test reviews with the vocabulary fitted on the training set
tf_idf_test = tf_idf_vectorizer.transform(test_rvw)
#tf_idf_feature_names = tf_idf_vectorizer.get_feature_names()
#tf_idf = np.vstack([tf_idf_train.toarray(), tf_idf_test.toarray()])
#X_new = pd.DataFrame(tf_idf, columns=tf_idf_feature_names)
X_train_new = tf_idf_train.toarray()
X_test_new = tf_idf_test.toarray()
X_test_new.shape
def get_data_from_keras_imdb():
"""
Extract TF-IDF matrices for the Keras IMDB dataset.
"""
vocabulary_size = 1000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words = vocabulary_size)
#X = np.vstack([X_train[:, None], X_test[:, None]])
X_train_docs = get_joined_rvw(X_train)
X_test_docs = get_joined_rvw(X_test)
tf_idf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
max_features=vocabulary_size,
stop_words='english')
tf_idf_train = tf_idf_vectorizer.fit_transform(X_train_docs)
    # transform with the vocabulary fitted on the training documents
    tf_idf_test = tf_idf_vectorizer.transform(X_test_docs)
#tf_idf_feature_names = tf_idf_vectorizer.get_feature_names()
#tf_idf = np.vstack([tf_idf_train.toarray(), tf_idf_test.toarray()])
#X_new = pd.DataFrame(tf_idf, columns=tf_idf_feature_names)
X_train_new = tf_idf_train.toarray()
X_test_new = tf_idf_test.toarray()
return X_train_new, y_train, X_test_new, y_test
###Output
_____no_output_____
###Markdown
X_train, y_train, X_test, y_test = get_data_from_keras_imdb()
###Code
print('train dataset shape', X_train.shape)
print('test dataset shape', X_test.shape)
###Output
train dataset shape (25000,)
test dataset shape (25000,)
###Markdown
We can see that we are now ready to train our classification algorithm with the TF-IDF matrices. ML Classification: Model building and testing
###Code
model = RandomForestClassifier(n_estimators=200, max_depth=3, random_state=42)
model.fit(X_train_new, y_train)  # use the full TF-IDF matrix so train and test feature counts match
y_pred = model.predict(X_test_new)
print(classification_report(y_test, y_pred))
print('Accuracy score : ', accuracy_score(y_test, y_pred))
model = MLPClassifier()
model.fit(X_train_new, y_train)   # fit on the TF-IDF features, not the raw index sequences
y_pred = model.predict(X_test_new)
print(classification_report(y_test, y_pred))
print('Accuracy score : ', accuracy_score(y_test, y_pred))
from sklearn.model_selection import cross_val_score
cross_val_score(model, X_train_new, y_train, cv=5)
import manu_utils as TCD
palette = TCD.palette(cmap = True)
C = confusion_matrix(y_test, y_pred)
c_normed = C / C.astype(float).sum(axis=1)[:, np.newaxis]
sns.heatmap(c_normed, cmap = palette,
xticklabels=['negative', 'positive'],
yticklabels=['negative', 'positive'],
annot= True, vmin = 0, vmax = 1,
cbar_kws = {'label': 'recall'})
#
plt.ylabel('True label')
plt.xlabel('Predicted label');
###Output
_____no_output_____
###Markdown
Scikit-learn pipelines
###Code
from sklearn.pipeline import make_pipeline
pipe = make_pipeline(TfidfVectorizer(max_df=0.95, min_df=2,
max_features=vocabulary_size,
stop_words='english'), MLPClassifier())
pipe.fit(train_rvw, y_train)
labels = pipe.predict(test_rvw)
targets = ['negative','positive']
def predict_category(s, model=pipe):
    pred = model.predict([s])
return targets[pred[0]]
predict_category('this was a hell of a good movie')
predict_category('this was a freaking crappy time yo')
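# Sanity check (an added illustration): score the pipeline's predictions on the held-out test reviews
print('Pipeline accuracy score : ', accuracy_score(y_test, labels))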
###Output
_____no_output_____ |
Lesson_NeuralNets/Deep_Neural_Network-1.ipynb | ###Markdown
Tutorial: Download data with ```tf.data```
###Code
import tensorflow_datasets as tfds
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
# Mandatory: to launch
#tf.enable_eager_execution()
mnist_data, info = tfds.load("mnist", with_info=True, as_supervised=True)
mnist_train, mnist_test = mnist_data["train"], mnist_data["test"]
mnist_train
mnist_test
mnist_example, = mnist_train.take(1)
image, label = mnist_example  # with as_supervised=True each element is an (image, label) tuple
plt.imshow(image.numpy()[:, :, 0].astype(np.float32), cmap=plt.get_cmap("gray"))
print("Label: %d" % label.numpy())
print(info.features)
print(info.features["label"].num_classes)
print(info.features["label"].names)
print(info.splits["train"].num_examples)
print(info.splits["test"].num_examples)
mnist_builder = tfds.builder("mnist")  # the builder exposes the same metadata as the info returned by tfds.load
info = mnist_builder.info
print(info)
train_ds = tfds.load("mnist", split="train")
train_ds
import tensorflow_datasets as tfds
# batch_size=-1 loads each split as a single batch; tfds.as_numpy converts the tensors to numpy arrays
(train_features, train_labels), (test_features, test_labels) = tfds.as_numpy(
    tfds.load("mnist", split=["train", "test"], batch_size=-1, as_supervised=True))
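# Illustrative input pipeline (an added sketch, not part of the original tutorial):
# shuffle, batch and prefetch the training split loaded above with tf.data.
train_pipeline = (train_ds
                  .shuffle(1024)
                  .batch(32)
                  .prefetch(tf.data.experimental.AUTOTUNE))
train_pipeline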
###Output
_____no_output_____
###Markdown
Multilayer Neural NetworksIn this lesson, you'll learn how to build multilayer neural networks with TensorFlow. Adding a hidden layer to a network allows it to model more complex functions. Also, using a non-linear activation function on the hidden layer lets it model non-linear functions.We shall learn about ReLU (the rectified linear unit), a non-linear activation function. The ReLU function is $0$ for negative inputs and $x$ for all inputs $x > 0$.Next, you'll see how a ReLU hidden layer is implemented in TensorFlow. TensorFlow ReLUsTensorFlow provides the ReLU function as ```tf.nn.relu()```, as shown below.
###Code
import tensorflow as tf
# Hidden Layer with ReLU activation function
hidden_layer = tf.add(tf.matmul(features, hidden_weights), hidden_biases)
hidden_layer = tf.nn.relu(hidden_layer)
output = tf.add(tf.matmul(hidden_layer, output_weights), output_biases)
###Output
_____no_output_____
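###Markdown
As a quick illustration (an added example, not part of the original lesson), the next cell evaluates `tf.nn.relu()` on a small constant tensor so you can see negative values being clipped to zero. It assumes the TF1-style graph/session API used throughout this lesson.
###Code
relu_demo = tf.nn.relu(tf.constant([-2.0, -0.5, 0.0, 1.5, 3.0]))
with tf.Session() as sess:
    print(sess.run(relu_demo))   # negative entries become 0: [0. 0. 0. 1.5 3.]
###Output
_____no_output_____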
###Markdown
The above code applies the ```tf.nn.relu()``` function to the hidden_layer, effectively zeroing out any negative activations and acting like an on/off switch. Adding additional layers, like the output layer, after an activation function turns the model into a nonlinear function. This nonlinearity allows the network to solve more complex problems. Quiz
###Code
# Solution is available in the other "solution.py" tab
import tensorflow as tf
output = None
hidden_layer_weights = [
[0.1, 0.2, 0.4],
[0.4, 0.6, 0.6],
[0.5, 0.9, 0.1],
[0.8, 0.2, 0.8]]
out_weights = [
[0.1, 0.6],
[0.2, 0.1],
[0.7, 0.9]]
# Weights and biases
weights = [
tf.Variable(hidden_layer_weights),
tf.Variable(out_weights)]
biases = [
tf.Variable(tf.zeros(3)),
tf.Variable(tf.zeros(2))]
# Input
features = tf.Variable([[1.0, 2.0, 3.0, 4.0], [-1.0, -2.0, -3.0, -4.0], [11.0, 12.0, 13.0, 14.0]])
# TODO: Create Model
# Hidden Layer with ReLU activation function
hidden_layer = tf.add(tf.matmul(features, weights[0]), biases[0])
hidden_layer = tf.nn.relu(hidden_layer)
output = tf.add(tf.matmul(hidden_layer, weights[1]), biases[1])
# TODO: Print session results
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print(sess.run(output))
###Output
_____no_output_____
###Markdown
1. Deep Neural Network in TensorFlowWhat I have learnt:* ```tf.reshape``` is used to turn a picture of size $n \times m$ into a feature matrix with $n \cdot m$ columns* How to train a one-hidden-layer NNYou've seen how to build a logistic classifier using TensorFlow. Now you're going to see how to use the logistic classifier to build a deep neural network. Step by StepIn the following walkthrough, we'll step through TensorFlow code written to classify the digits in the MNIST database. If you would like to run the network on your computer, the file is provided here. You can find this and many more examples of TensorFlow at [Aymeric Damien's GitHub repository](https://github.com/aymericdamien/TensorFlow-Examples).
###Code
import tensorflow as tf
# Parameters
learning_rate = 0.001
training_epochs = 20
batch_size = 128 # Decrease batch size if you don't have enough memory
display_step = 1
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
from tensorflow.examples.tutorials.mnist import input_data
from keras.utils import to_categorical
import tensorflow as tf
from tensorflow import keras
import numpy as np
mnist = keras.datasets.mnist
(train_features, train_labels), (test_features, test_labels) = mnist.load_data()
train_features = np.reshape(train_features, [-1, n_input])
#test_features = np.reshape(test_features, [-1, n_input])
# to_categorical: one hot encoding
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
#mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
###Output
_____no_output_____
###Markdown
You'll use the MNIST dataset provided by TensorFlow, which batches and One-Hot encodes the data for you. Learning Parameters The focus here is on the architecture of multilayer neural networks, not parameter tuning, so here we'll just give you the learning parameters. Hidden Layer Parameters
###Code
n_hidden_layer = 256 # layer number of features
###Output
_____no_output_____
###Markdown
The variable n_hidden_layer determines the size of the hidden layer in the neural network. This is also known as the width of a layer. Weights and Biases
###Code
# Store layers weight & bias
weights = {
'hidden_layer': tf.Variable(tf.random_normal([n_input, n_hidden_layer])),
'out': tf.Variable(tf.random_normal([n_hidden_layer, n_classes]))
}
biases = {
'hidden_layer': tf.Variable(tf.random_normal([n_hidden_layer])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
###Output
_____no_output_____
###Markdown
Deep neural networks use multiple layers with each layer requiring it's own weight and bias. The ```'hidden_layer'``` weight and bias is for the hidden layer. The ```'out'``` weight and bias is for the output layer. If the neural network were deeper, there would be weights and biases for each additional layer. Input
###Code
# tf Graph input
x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
y = tf.placeholder(tf.float32, shape=[None, n_classes])
x_flat = tf.reshape(x, [-1, n_input])
###Output
_____no_output_____
###Markdown
The MNIST data is made up of 28px by 28px images with a single channel. The ```tf.reshape()``` function above reshapes the 28px by 28px matrices in ```x``` into row vectors of length 784.
###Code
# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x_flat, weights['hidden_layer']),\
biases['hidden_layer'])
layer_1 = tf.nn.relu(layer_1)
# Output layer with linear activation
logits = tf.add(tf.matmul(layer_1, weights['out']), biases['out'])
###Output
_____no_output_____
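###Markdown
To make the earlier "deeper network" point concrete, here is a small hypothetical sketch (layer size and variable names are illustrative, not part of the original lesson) showing how a second hidden layer would be chained onto `layer_1` with its own weights and biases:
###Code
# Hypothetical second hidden layer (illustrative only)
n_hidden_layer_2 = 128
weights_2 = tf.Variable(tf.random_normal([n_hidden_layer, n_hidden_layer_2]))
biases_2 = tf.Variable(tf.random_normal([n_hidden_layer_2]))
layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, weights_2), biases_2))
# The output layer would then read from layer_2 instead of layer_1, e.g.:
# logits = tf.add(tf.matmul(layer_2, out_weights_2), out_biases_2)
###Output
_____no_output_____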
###Markdown
You've seen the linear function ```tf.add(tf.matmul(x_flat, weights['hidden_layer'])```, ```biases['hidden_layer'])``` before, also known as ```xw + b```. Combining linear functions together using a ReLU will give you a two layer network. Optimizer
###Code
# Define loss and optimizer
cost = tf.reduce_mean(\
tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\
.minimize(cost)
###Output
_____no_output_____
###Markdown
This is the same optimization technique used in the Intro to TensorFLow lab. Session
###Code
def batches(batch_size, features, labels):
"""
Create batches of features and labels
:param batch_size: The batch size
:param features: List of features
:param labels: List of labels
:return: Batches of (Features, Labels)
"""
assert len(features) == len(labels)
# TODO: Implement batching
output_batches = []
sample_size = len(features)
for start_i in range(0, sample_size, batch_size):
end_i = start_i + batch_size
batch = [features[start_i:end_i], labels[start_i:end_i]]
output_batches.append(batch)
return output_batches
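# Quick sanity check of batches() (an added illustration): 7 samples with batch_size=3
# should give batches of sizes 3, 3 and 1.
example_batches = batches(3, list(range(7)), ['a', 'b', 'c', 'd', 'e', 'f', 'g'])
print([len(b[0]) for b in example_batches])   # -> [3, 3, 1]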
# train_feed_dict   # defined inside the training loop below; evaluating it here would raise a NameError
# Initializing the variables
init = tf.global_variables_initializer()
train_batches = batches(batch_size, train_features, train_labels)
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
# Loop over all batches
for batch_features, batch_labels in train_batches:
train_feed_dict = {
x_flat: batch_features,
y: batch_labels}
            _, loss = sess.run([optimizer, cost], feed_dict=train_feed_dict)  # fetch cost as well, since the optimizer op itself returns None
# Calculate accuracy for test dataset
#test_accuracy = sess.run(
# accuracy,
# feed_dict={features: test_features, labels: test_labels})
#print('Test Accuracy: {}'.format(test_accuracy))
# Alternative training loop kept for reference: it assumes the deprecated
# input_data.read_data_sets() loader (commented out further up), whose `mnist` object
# provides train.next_batch(); it will not run with the keras-loaded arrays used above.
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
total_batch = int(mnist.train.num_examples/batch_size)
# Loop over all batches
for i in range(total_batch):
batch_x, batch_y = mnist.train.next_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
###Output
_____no_output_____ |
QBioI_Qiskit/qiskit-textbook-master/content/ch-states/representing-qubit-states.ipynb | ###Markdown
Representing Qubit States You now know something about bits, and about how our familiar digital computers work. All the complex variables, objects and data structures used in modern software are basically all just big piles of bits. Those of us who work on quantum computing call these *classical variables.* The computers that use them, like the one you are using to read this article, we call *classical computers*.In quantum computers, our basic variable is the _qubit:_ a quantum variant of the bit. These have exactly the same restrictions as normal bits do: they can store only a single binary piece of information, and can only ever give us an output of `0` or `1`. However, they can also be manipulated in ways that can only be described by quantum mechanics. This gives us new gates to play with, allowing us to find new ways to design algorithms.To fully understand these new gates, we first need to understand how to write down qubit states. For this we will use the mathematics of vectors, matrices, and complex numbers. Though we will introduce these concepts as we go, it would be best if you are comfortable with them already. If you need a more in-depth explanation or a refresher, you can find the guide [here](../ch-prerequisites/linear_algebra.html). Contents1. [Classical vs Quantum Bits](cvsq) 1.1 [Statevectors](statevectors) 1.2 [Qubit Notation](notation) 1.3 [Exploring Qubits with Qiskit](exploring-qubits) 2. [The Rules of Measurement](rules-measurement) 2.1 [A Very Important Rule](important-rule) 2.2 [The Implications of this Rule](implications)3. [The Bloch Sphere](bloch-sphere) 3.1 [Describing the Restricted Qubit State](bloch-sphere-1) 3.2 [Visually Representing a Qubit State](bloch-sphere-2) 1. Classical vs Quantum Bits 1.1 StatevectorsIn quantum physics we use _statevectors_ to describe the state of our system. Say we wanted to describe the position of a car along a track, this is a classical system so we could use a number $x$:$$ x=4 $$Alternatively, we could instead use a collection of numbers in a vector called a _statevector._ Each element in the statevector contains the probability of finding the car in a certain place:$$|x\rangle = \begin{bmatrix} 0\\ \vdots \\ 0 \\ 1 \\ 0 \\ \vdots \\ 0 \end{bmatrix} \begin{matrix} \\ \\ \\ \leftarrow \\ \\ \\ \\ \end{matrix} \begin{matrix} \\ \\ \text{Probability of} \\ \text{car being at} \\ \text{position 4} \\ \\ \\ \end{matrix} $$This isn’t limited to position, we could also keep a statevector of all the possible speeds the car could have, and all the possible colours the car could be. With classical systems (like the car example above), this is a silly thing to do as it requires keeping huge vectors when we only really need one number. But as we will see in this chapter, statevectors happen to be a very good way of keeping track of quantum systems, including quantum computers. 1.2 Qubit Notation Classical bits always have a completely well-defined state: they are either `0` or `1` at every point during a computation. There is no more detail we can add to the state of a bit than this. So to write down the state of a of classical bit (`c`), we can just use these two binary values. For example: c = 0This restriction is lifted for quantum bits. Whether we get a `0` or a `1` from a qubit only needs to be well-defined when a measurement is made to extract an output. At that point, it must commit to one of these two options. 
At all other times, its state will be something more complex than can be captured by a simple binary value.To see how to describe these, we can first focus on the two simplest cases. As we saw in the last section, it is possible to prepare a qubit in a state for which it definitely gives the outcome `0` when measured.We need a name for this state. Let's be unimaginative and call it $0$ . Similarly, there exists a qubit state that is certain to output a `1`. We'll call this $1$. These two states are completely mutually exclusive. Either the qubit definitely outputs a ```0```, or it definitely outputs a ```1```. There is no overlap. One way to represent this with mathematics is to use two orthogonal vectors.$$|0\rangle = \begin{bmatrix} 1 \\ 0 \end{bmatrix} \, \, \, \, |1\rangle =\begin{bmatrix} 0 \\ 1 \end{bmatrix}.$$This is a lot of notation to take in all at once. First, let's unpack the weird $|$ and $\rangle$. Their job is essentially just to remind us that we are talking about the vectors that represent qubit states labelled $0$ and $1$. This helps us distinguish them from things like the bit values ```0``` and ```1``` or the numbers 0 and 1. It is part of the bra-ket notation, introduced by Dirac.If you are not familiar with vectors, you can essentially just think of them as lists of numbers which we manipulate using certain rules. If you are familiar with vectors from your high school physics classes, you'll know that these rules make vectors well-suited for describing quantities with a magnitude and a direction. For example, the velocity of an object is described perfectly with a vector. However, the way we use vectors for quantum states is slightly different to this, so don't hold on too hard to your previous intuition. It's time to do something new!With vectors we can describe more complex states than just $|0\rangle$ and $|1\rangle$. For example, consider the vector$$|q_0\rangle = \begin{bmatrix} \tfrac{1}{\sqrt{2}} \\ \tfrac{i}{\sqrt{2}} \end{bmatrix} .$$To understand what this state means, we'll need to use the mathematical rules for manipulating vectors. Specifically, we'll need to understand how to add vectors together and how to multiply them by scalars. Reminder: Matrix Addition and Multiplication by Scalars (Click here to expand) To add two vectors, we add their elements together: $$|a\rangle = \begin{bmatrix}a_0 \\ a_1 \\ \vdots \\ a_n \end{bmatrix}, \quad |b\rangle = \begin{bmatrix}b_0 \\ b_1 \\ \vdots \\ b_n \end{bmatrix}$$ $$|a\rangle + |b\rangle = \begin{bmatrix}a_0 + b_0 \\ a_1 + b_1 \\ \vdots \\ a_n + b_n \end{bmatrix} $$ And to multiply a vector by a scalar, we multiply each element by the scalar: $$x|a\rangle = \begin{bmatrix}x \times a_0 \\ x \times a_1 \\ \vdots \\ x \times a_n \end{bmatrix}$$ These two rules are used to rewrite the vector $|q_0\rangle$ (as shown above): $$ \begin{aligned} |q_0\rangle & = \tfrac{1}{\sqrt{2}}|0\rangle + \tfrac{i}{\sqrt{2}}|1\rangle \\ & = \tfrac{1}{\sqrt{2}}\begin{bmatrix}1\\0\end{bmatrix} + \tfrac{i}{\sqrt{2}}\begin{bmatrix}0\\1\end{bmatrix}\\ & = \begin{bmatrix}\tfrac{1}{\sqrt{2}}\\0\end{bmatrix} + \begin{bmatrix}0\\\tfrac{i}{\sqrt{2}}\end{bmatrix}\\ & = \begin{bmatrix}\tfrac{1}{\sqrt{2}} \\ \tfrac{i}{\sqrt{2}} \end{bmatrix}\\ \end{aligned} $$ Reminder: Orthonormal Bases (Click here to expand) It was stated before that the two vectors $|0\rangle$ and $|1\rangle$ are orthonormal, this means they are both orthogonal and normalised. 
Orthogonal means the vectors are at right angles: And normalised means their magnitudes (length of the arrow) is equal to 1. The two vectors $|0\rangle$ and $|1\rangle$ are linearly independent, which means we cannot describe $|0\rangle$ in terms of $|1\rangle$, and vice versa. However, using both the vectors $|0\rangle$ and $|1\rangle$, and our rules of addition and multiplication by scalars, we can describe all possible vectors in 2D space: Because the vectors $|0\rangle$ and $|1\rangle$ are linearly independent, and can be used to describe any vector in 2D space using vector addition and scalar multiplication, we say the vectors $|0\rangle$ and $|1\rangle$ form a basis. In this case, since they are both orthogonal and normalised, we call it an orthonormal basis. Since the states $|0\rangle$ and $|1\rangle$ form an orthonormal basis, we can represent any 2D vector with a combination of these two states. This allows us to write the state of our qubit in the alternative form:$$ |q_0\rangle = \tfrac{1}{\sqrt{2}}|0\rangle + \tfrac{i}{\sqrt{2}}|1\rangle $$This vector, $|q_0\rangle$ is called the qubit's _statevector,_ it tells us everything we could possibly know about this qubit. For now, we are only able to draw a few simple conclusions about this particular example of a statevector: it is not entirely $|0\rangle$ and not entirely $|1\rangle$. Instead, it is described by a linear combination of the two. In quantum mechanics, we typically describe linear combinations such as this using the word 'superposition'.Though our example state $|q_0\rangle$ can be expressed as a superposition of $|0\rangle$ and $|1\rangle$, it is no less a definite and well-defined qubit state than they are. To see this, we can begin to explore how a qubit can be manipulated. 1.3 Exploring Qubits with Qiskit First, we need to import all the tools we will need:
###Code
from qiskit import QuantumCircuit, assemble, Aer
from qiskit.visualization import plot_histogram, plot_bloch_vector
from math import sqrt, pi
###Output
_____no_output_____
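###Markdown
Before moving on, here is a quick numerical sanity check (an added illustration, not part of the original text) that the basis vectors above are orthonormal and that the example statevector $|q_0\rangle$ is normalised:
###Code
import numpy as np
ket0 = np.array([1, 0], dtype=complex)
ket1 = np.array([0, 1], dtype=complex)
q0 = ket0/np.sqrt(2) + 1j*ket1/np.sqrt(2)
print(np.vdot(ket0, ket1))        # inner product of orthogonal states -> 0
print(np.vdot(ket0, ket0).real)   # |0> is normalised -> 1.0
print(np.vdot(q0, q0).real)       # |q0> is also normalised -> 1.0
###Output
_____no_output_____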
###Markdown
In Qiskit, we use the `QuantumCircuit` object to store our circuits; this is essentially a list of the quantum operations on our circuit and the qubits they are applied to.
###Code
qc = QuantumCircuit(1) # Create a quantum circuit with one qubit
###Output
_____no_output_____
###Markdown
In our quantum circuits, our qubits always start out in the state $|0\rangle$. We can use the `initialize()` method to transform this into any state. We give `initialize()` the vector we want in the form of a list, and tell it which qubit(s) we want to initialise in this state:
###Code
qc = QuantumCircuit(1) # Create a quantum circuit with one qubit
initial_state = [0,1] # Define initial_state as |1>
qc.initialize(initial_state, 0) # Apply initialisation operation to the 0th qubit
qc.draw() # Let's view our circuit
###Output
_____no_output_____
###Markdown
We can then use one of Qiskit’s simulators to view the resulting state of our qubit. To begin with we will use the statevector simulator, but we will explain the different simulators and their uses later.
###Code
svsim = Aer.get_backend('statevector_simulator') # Tell Qiskit how to simulate our circuit
###Output
_____no_output_____
###Markdown
To get the results from our circuit, we first use `assemble` to turn it into a Qobj, then pass that Qobj to the backend's `run` method. We then use `.result()` to get the result of this:
###Code
qc = QuantumCircuit(1) # Create a quantum circuit with one qubit
initial_state = [0,1] # Define initial_state as |1>
qc.initialize(initial_state, 0) # Apply initialisation operation to the 0th qubit
qobj = assemble(qc) # Create a Qobj from the circuit for the simulator to run
result = svsim.run(qobj).result() # Do the simulation and return the result
###Output
_____no_output_____
###Markdown
From `result`, we can then get the final statevector using `.get_statevector()`:
###Code
out_state = result.get_statevector()
print(out_state) # Display the output state vector
###Output
[0.+0.j 1.+0.j]
###Markdown
**Note:** Python uses `j` to represent $i$ in complex numbers. We see a vector with two complex elements: `0.+0.j` = 0, and `1.+0.j` = 1. Let’s now measure our qubit as we would in a real quantum computer and see the result:
###Code
qc.measure_all()
qc.draw()
###Output
_____no_output_____
###Markdown
This time, instead of the statevector we will get the counts for the `0` and `1` results using `.get_counts()`:
###Code
qobj = assemble(qc)
result = svsim.run(qobj).result()
counts = result.get_counts()
plot_histogram(counts)
###Output
_____no_output_____
###Markdown
We can see that we (unsurprisingly) have a 100% chance of measuring $|1\rangle$. This time, let’s instead put our qubit into a superposition and see what happens. We will use the state $|q_0\rangle$ from earlier in this section:$$ |q_0\rangle = \tfrac{1}{\sqrt{2}}|0\rangle + \tfrac{i}{\sqrt{2}}|1\rangle $$We need to add these amplitudes to a python list. To add a complex amplitude, Python uses `j` for the imaginary unit (we normally call it "$i$" mathematically):
###Code
initial_state = [1/sqrt(2), 1j/sqrt(2)] # Define state |q_0>
###Output
_____no_output_____
###Markdown
And we then repeat the steps for initialising the qubit as before:
###Code
qc = QuantumCircuit(1) # Must redefine qc
qc.initialize(initial_state, 0) # Initialise the 0th qubit in the state `initial_state`
qobj = assemble(qc)
state = svsim.run(qobj).result().get_statevector() # Execute the circuit
print(state) # Print the result
qobj = assemble(qc)
results = svsim.run(qobj).result().get_counts()
plot_histogram(results)
###Output
_____no_output_____
###Markdown
We can see we have equal probability of measuring either $|0\rangle$ or $|1\rangle$. To explain this, we need to talk about measurement. 2. The Rules of Measurement 2.1 A Very Important Rule There is a simple rule for measurement. To find the probability of measuring a state $|\psi \rangle$ in the state $|x\rangle$ we do:$$p(|x\rangle) = | \langle x| \psi \rangle|^2$$The symbols $\langle$ and $|$ tell us $\langle x |$ is a row vector. In quantum mechanics we call the column vectors _kets_ and the row vectors _bras._ Together they make up _bra-ket_ notation. Any ket $|a\rangle$ has a corresponding bra $\langle a|$, and we convert between them using the conjugate transpose. Reminder: The Inner Product (Click here to expand) There are different ways to multiply vectors, here we use the inner product. The inner product is a generalisation of the dot product which you may already be familiar with. In this guide, we use the inner product between a bra (row vector) and a ket (column vector), and it follows this rule: $$\langle a| = \begin{bmatrix}a_0^*, & a_1^*, & \dots & a_n^* \end{bmatrix}, \quad |b\rangle = \begin{bmatrix}b_0 \\ b_1 \\ \vdots \\ b_n \end{bmatrix}$$ $$\langle a|b\rangle = a_0^* b_0 + a_1^* b_1 \dots a_n^* b_n$$ We can see that the inner product of two vectors always gives us a scalar. A useful thing to remember is that the inner product of two orthogonal vectors is 0, for example if we have the orthogonal vectors $|0\rangle$ and $|1\rangle$: $$\langle1|0\rangle = \begin{bmatrix} 0 , & 1\end{bmatrix}\begin{bmatrix}1 \\ 0\end{bmatrix} = 0$$ Additionally, remember that the vectors $|0\rangle$ and $|1\rangle$ are also normalised (magnitudes are equal to 1): $$ \begin{aligned} \langle0|0\rangle & = \begin{bmatrix} 1 , & 0\end{bmatrix}\begin{bmatrix}1 \\ 0\end{bmatrix} = 1 \\ \langle1|1\rangle & = \begin{bmatrix} 0 , & 1\end{bmatrix}\begin{bmatrix}0 \\ 1\end{bmatrix} = 1 \end{aligned}$$ In the equation above, $|x\rangle$ can be any qubit state. To find the probability of measuring $|x\rangle$, we take the inner product of $|x\rangle$ and the state we are measuring (in this case $|\psi\rangle$), then square the magnitude. This may seem a little convoluted, but it will soon become second nature.If we look at the state $|q_0\rangle$ from before, we can see the probability of measuring $|0\rangle$ is indeed $0.5$:$$\begin{aligned}|q_0\rangle & = \tfrac{1}{\sqrt{2}}|0\rangle + \tfrac{i}{\sqrt{2}}|1\rangle \\\langle 0| q_0 \rangle & = \tfrac{1}{\sqrt{2}}\langle 0|0\rangle + \tfrac{i}{\sqrt{2}}\langle 0|1\rangle \\& = \tfrac{1}{\sqrt{2}}\cdot 1 + \tfrac{i}{\sqrt{2}} \cdot 0\\& = \tfrac{1}{\sqrt{2}}\\|\langle 0| q_0 \rangle|^2 & = \tfrac{1}{2}\end{aligned}$$You should verify the probability of measuring $|1\rangle$ as an exercise.This rule governs how we get information out of quantum states. It is therefore very important for everything we do in quantum computation. It also immediately implies several important facts. 2.2 The Implications of this Rule 1 NormalisationThe rule shows us that amplitudes are related to probabilities. If we want the probabilities to add up to 1 (which they should!), we need to ensure that the statevector is properly normalized. Specifically, we need the magnitude of the state vector to be 1.$$ \langle\psi|\psi\rangle = 1 \\ $$Thus if:$$ |\psi\rangle = \alpha|0\rangle + \beta|1\rangle $$Then:$$ \sqrt{|\alpha|^2 + |\beta|^2} = 1 $$This explains the factors of $\sqrt{2}$ you have seen throughout this chapter. 
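As a quick numerical check (a small sketch added here, using plain numpy rather than Qiskit), we can verify both the measurement rule and the normalisation condition for the example state $|q_0\rangle$:
###Code
# Added sketch: verify |<0|q0>|^2 = 0.5 and <q0|q0> = 1 for |q0> = 1/sqrt(2)|0> + i/sqrt(2)|1>
import numpy as np

ket_0 = np.array([1, 0], dtype=complex)
q_0 = np.array([1/np.sqrt(2), 1j/np.sqrt(2)])

prob_0 = abs(np.vdot(ket_0, q_0))**2   # np.vdot conjugates the first argument, i.e. <0|q0>
norm = np.vdot(q_0, q_0).real          # <q0|q0>

print(prob_0)  # expected 0.5
print(norm)    # expected 1.0
###Output
_____no_output_____
###Markdown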
In fact, if we try to give `initialize()` a vector that isn’t normalised, it will give us an error:
###Code
vector = [1,1]
qc.initialize(vector, 0)
###Output
_____no_output_____
###Markdown
Quick Exercise1. Create a state vector that will give a $1/3$ probability of measuring $|0\rangle$.2. Create a different state vector that will give the same measurement probabilities.3. Verify that the probability of measuring $|1\rangle$ for these two states is $2/3$. You can check your answer in the widget below (accepts answers ±1% accuracy, you can use numpy terms such as '`pi`' and '`sqrt()`' in the vector):
###Code
# Run the code in this cell to interact with the widget
from qiskit_textbook.widgets import state_vector_exercise
state_vector_exercise(target=1/3)
###Output
_____no_output_____
###Markdown
2 Alternative measurementThe measurement rule gives us the probability $p(|x\rangle)$ that a state $|\psi\rangle$ is measured as $|x\rangle$. Nowhere does it tell us that $|x\rangle$ can only be either $|0\rangle$ or $|1\rangle$.The measurements we have considered so far are in fact only one of an infinite number of possible ways to measure a qubit. For any orthogonal pair of states, we can define a measurement that would cause a qubit to choose between the two.This possibility will be explored more in the next section. For now, just bear in mind that $|x\rangle$ is not limited to being simply $|0\rangle$ or $|1\rangle$. 3 Global PhaseWe know that measuring the state $|1\rangle$ will give us the output `1` with certainty. But we are also able to write down states such as $$\begin{bmatrix}0 \\ i\end{bmatrix} = i|1\rangle.$$To see how this behaves, we apply the measurement rule.$$ |\langle x| (i|1\rangle) |^2 = | i \langle x|1\rangle|^2 = |\langle x|1\rangle|^2 $$Here we find that the factor of $i$ disappears once we take the magnitude of the complex number. This effect is completely independent of the measured state $|x\rangle$. It does not matter what measurement we are considering, the probabilities for the state $i|1\rangle$ are identical to those for $|1\rangle$. Since measurements are the only way we can extract any information from a qubit, this implies that these two states are equivalent in all ways that are physically relevant.More generally, we refer to any overall factor $\gamma$ on a state for which $|\gamma|=1$ as a 'global phase'. States that differ only by a global phase are physically indistinguishable.$$ |\langle x| ( \gamma |a\rangle) |^2 = | \gamma \langle x|a\rangle|^2 = |\langle x|a\rangle|^2 $$Note that this is distinct from the phase difference _between_ terms in a superposition, which is known as the 'relative phase'. This becomes relevant once we consider different types of measurement and multiple qubits. 4 The Observer EffectWe know that the amplitudes contain information about the probability of us finding the qubit in a specific state, but once we have measured the qubit, we know with certainty what the state of the qubit is. For example, if we measure a qubit in the state:$$ |q\rangle = \alpha|0\rangle + \beta|1\rangle$$And find it in the state $|0\rangle$, if we measure again, there is a 100% chance of finding the qubit in the state $|0\rangle$. This means the act of measuring _changes_ the state of our qubits.$$ |q\rangle = \begin{bmatrix} \alpha \\ \beta \end{bmatrix} \xrightarrow{\text{Measure }|0\rangle} |q\rangle = |0\rangle = \begin{bmatrix} 1 \\ 0 \end{bmatrix}$$We sometimes refer to this as _collapsing_ the state of the qubit. It is a potent effect, and so one that must be used wisely. For example, were we to constantly measure each of our qubits to keep track of their value at each point in a computation, they would always simply be in a well-defined state of either $|0\rangle$ or $|1\rangle$. As such, they would be no different from classical bits and our computation could be easily replaced by a classical computation. To achieve truly quantum computation we must allow the qubits to explore more complex states. Measurements are therefore only used when we need to extract an output. This means that we often place all the measurements at the end of our quantum circuit. We can demonstrate this using Qiskit’s statevector simulator. Let's initialise a qubit in superposition:
###Code
qc = QuantumCircuit(1) # We are redefining qc
initial_state = [0.+1.j/sqrt(2),1/sqrt(2)+0.j]
qc.initialize(initial_state, 0)
qc.draw()
###Output
_____no_output_____
###Markdown
This should initialise our qubit in the state:$$ |q\rangle = \tfrac{i}{\sqrt{2}}|0\rangle + \tfrac{1}{\sqrt{2}}|1\rangle $$We can verify this using the simulator:
###Code
qobj = assemble(qc)
state = svsim.run(qobj).result().get_statevector()
print("Qubit State = " + str(state))
###Output
Qubit State = [0. +0.70710678j 0.70710678+0.j ]
###Markdown
We can see here the qubit is initialised in the state `[0.+0.70710678j 0.70710678+0.j]`, which is the state we expected.Let’s now measure this qubit:
###Code
qc.measure_all()
qc.draw()
###Output
_____no_output_____
###Markdown
When we simulate this entire circuit, we can see that one of the amplitudes is _always_ 0:
###Code
qobj = assemble(qc)
state = svsim.run(qobj).result().get_statevector()
print("State of Measured Qubit = " + str(state))
###Output
State of Measured Qubit = [0.+1.j 0.+0.j]
###Markdown
You can re-run this cell a few times to reinitialise the qubit and measure it again. You will notice that either outcome is equally probable, but that the state of the qubit is never a superposition of $|0\rangle$ and $|1\rangle$. Somewhat interestingly, the global phase on the state $|0\rangle$ survives, but since this is global phase, we can never measure it on a real quantum computer. A Note about Quantum SimulatorsWe can see that writing down a qubit’s state requires keeping track of two complex numbers, but when using a real quantum computer we will only ever receive a yes-or-no (`0` or `1`) answer for each qubit. The output of a 10-qubit quantum computer will look like this:`0110111110`Just 10 bits, no superposition or complex amplitudes. When using a real quantum computer, we cannot see the states of our qubits mid-computation, as this would destroy them! This behaviour is not ideal for learning, so Qiskit provides different quantum simulators: The `qasm_simulator` behaves as if you are interacting with a real quantum computer, and will not allow you to use `.get_statevector()`. Alternatively, `statevector_simulator`, (which we have been using in this chapter) does allow peeking at the quantum states before measurement, as we have seen. 3. The Bloch Sphere 3.1 Describing the Restricted Qubit State We saw earlier in this chapter that the general state of a qubit ($|q\rangle$) is:$$|q\rangle = \alpha|0\rangle + \beta|1\rangle$$$$\alpha, \beta \in \mathbb{C}$$(The second line tells us $\alpha$ and $\beta$ are complex numbers). The first two implications in section 2 tell us that we cannot differentiate between some of these states. This means we can be more specific in our description of the qubit. Firstly, since we cannot measure global phase, we can only measure the difference in phase between the states $|0\rangle$ and $|1\rangle$. Instead of having $\alpha$ and $\beta$ be complex, we can confine them to the real numbers and add a term to tell us the relative phase between them:$$|q\rangle = \alpha|0\rangle + e^{i\phi}\beta|1\rangle$$$$\alpha, \beta, \phi \in \mathbb{R}$$Finally, since the qubit state must be normalised, i.e.$$\sqrt{\alpha^2 + \beta^2} = 1$$we can use the trigonometric identity:$$\sqrt{\sin^2{x} + \cos^2{x}} = 1$$to describe the real $\alpha$ and $\beta$ in terms of one variable, $\theta$:$$\alpha = \cos{\tfrac{\theta}{2}}, \quad \beta=\sin{\tfrac{\theta}{2}}$$From this we can describe the state of any qubit using the two variables $\phi$ and $\theta$:$$|q\rangle = \cos{\tfrac{\theta}{2}}|0\rangle + e^{i\phi}\sin{\tfrac{\theta}{2}}|1\rangle$$$$\theta, \phi \in \mathbb{R}$$ 3.2 Visually Representing a Qubit State We want to plot our general qubit state:$$|q\rangle = \cos{\tfrac{\theta}{2}}|0\rangle + e^{i\phi}\sin{\tfrac{\theta}{2}}|1\rangle$$If we interpret $\theta$ and $\phi$ as spherical co-ordinates ($r = 1$, since the magnitude of the qubit state is $1$), we can plot any single qubit state on the surface of a sphere, known as the _Bloch sphere._Below we have plotted a qubit in the state $|{+}\rangle$. In this case, $\theta = \pi/2$ and $\phi = 0$.(Qiskit has a function to plot a bloch sphere, `plot_bloch_vector()`, but at the time of writing it only takes cartesian coordinates. We have included a function that does the conversion automatically).
###Code
from qiskit_textbook.widgets import plot_bloch_vector_spherical
coords = [pi/2,0,1] # [Theta, Phi, Radius]
plot_bloch_vector_spherical(coords) # Bloch Vector with spherical coordinates
###Output
_____no_output_____
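###Markdown
If you prefer the built-in `plot_bloch_vector()`, the spherical-to-cartesian conversion it needs is short enough to write yourself. This is a small added sketch (not part of the original text); the widget used above does the same job for you:
###Code
# Added sketch: convert Bloch-sphere angles (theta, phi) to the cartesian [x, y, z]
# list that plot_bloch_vector() expects.
from math import sin, cos, pi

def spherical_to_cartesian(theta, phi, r=1):
    return [r * sin(theta) * cos(phi),
            r * sin(theta) * sin(phi),
            r * cos(theta)]

plot_bloch_vector(spherical_to_cartesian(pi/2, 0))  # the |+> state again
###Output
_____no_output_____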
###Markdown
Warning! When first learning about qubit states, it's easy to confuse the qubit's _statevector_ with its _Bloch vector_. Remember the statevector is the vector discussed in [1.1](notation), that holds the amplitudes for the two states our qubit can be in. The Bloch vector is a visualisation tool that maps the 2D, complex statevector onto real, 3D space. Quick Exercise Use `plot_bloch_vector()` or `plot_bloch_vector_spherical()` to plot a qubit in the states:1. $|0\rangle$2. $|1\rangle$3. $\tfrac{1}{\sqrt{2}}(|0\rangle + |1\rangle)$4. $\tfrac{1}{\sqrt{2}}(|0\rangle - i|1\rangle)$5. $\tfrac{1}{\sqrt{2}}\begin{bmatrix}i\\1\end{bmatrix}$ We have also included below a widget that converts from spherical co-ordinates to cartesian, for use with `plot_bloch_vector()`:
###Code
from qiskit_textbook.widgets import bloch_calc
bloch_calc()
import qiskit
qiskit.__qiskit_version__
###Output
_____no_output_____ |
notebooks/Demo/Demo_Forecast.io.ipynb | ###Markdown
What's new in the Forecastwrapper - Solar Irradiance on a tilted plane- Wind on an oriented building face- No more "include this", "include that". Everything is included. (I implemented these flags to speed up some things (which you cannot notice), but it complicates the code so much that it is not worth it)- Daytime aggregates have been deprecated (we don't need this anymore since we have irradiance from Dark Sky. But if anyone insists, I can perhaps re-implement it)- No more special timezone stuff, you get the data in a timezone-aware format, localized to the location of the request. If you want another timezone, use `tz_convert` Demo of the forecast.io wrapper to get past and future weather data Important: you need to register for an apikey here: https://developer.forecast.io/ Put the key you obtain in the opengrid.cfg file as follows: [Forecast.io] apikey: your_key
###Code
import os
import sys
import inspect
import pandas as pd
import charts
###Output
Server running in the folder /usr/local/opengrid/notebooks/Demo at 127.0.0.1:45501
###Markdown
Import API wrapper module
###Code
from opengrid_dev.library import forecastwrapper
###Output
_____no_output_____
###Markdown
Get weather data in daily and hourly resolution To get started, create a Weather object for a certain location and a period
###Code
start = pd.Timestamp('20150813')
end = pd.Timestamp('20150816')
Weather_Ukkel = forecastwrapper.Weather(location='Ukkel', start=start, end=end)
###Output
_____no_output_____
###Markdown
You can use the methods `days()` and `hours()` to get a dataframe in daily or hourly resolution
###Code
Weather_Ukkel.days()
Weather_Ukkel.hours().info()
###Output
_____no_output_____
###Markdown
Degree Days Daily resolution has the option of adding degree days. By default, the temperature equivalent and heating degree days with a base temperature of 16.5°C are added. Heating degree days are calculated as follows:$$heatingDegreeDays = max(0 , baseTemp - (0.6 * T_{today} + 0.3 * T_{today-1} + 0.1 * T_{today-2}) )$$Cooling degree days are calculated in an analogous way:$$coolingDegreeDays = max(0, 0.6 * T_{today} + 0.3 * T_{today-1} + 0.1 * T_{today-2} - baseTemp )$$Add degree days by supplying `heating_base_temperatures` and/or `cooling_base_temperatures` as a list (you can add multiple base temperatures, or just a list of 1 element) Get some more degree days
###Code
Weather_Ukkel.days(heating_base_temperatures = [15,18],
cooling_base_temperatures = [18,24]).filter(like='DegreeDays')
Weather_Ukkel.days()
###Output
_____no_output_____
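###Markdown
To make the degree-day formula above concrete, here is a small added sketch (independent of the wrapper; the input is just a made-up daily temperature series) that reproduces the weighted temperature equivalent and the heating degree days for a base temperature of 16.5 °C:
###Code
# Added sketch: heating degree days from a daily temperature series
import pandas as pd

def heating_degree_days(daily_temp, base=16.5):
    # temperature equivalent: 0.6*T_today + 0.3*T_yesterday + 0.1*T_two_days_ago
    temp_equivalent = 0.6 * daily_temp + 0.3 * daily_temp.shift(1) + 0.1 * daily_temp.shift(2)
    return (base - temp_equivalent).clip(lower=0)

temperatures = pd.Series([10.0, 12.0, 8.0, 15.0, 20.0])
print(heating_degree_days(temperatures))
###Output
_____no_output_____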
###Markdown
Hourly resolution example Location can also be coordinates
###Code
start = pd.Timestamp('20150916')
end = pd.Timestamp('20150918')
Weather_Brussel = forecastwrapper.Weather(location=[50.8503396, 4.3517103], start=start, end=end)
Weather_Boutersem = forecastwrapper.Weather(location='Kapelstraat 1, 3370 Boutersem', start=start, end=end)
df_combined = pd.merge(Weather_Brussel.hours(), Weather_Boutersem.hours(), suffixes=('_Brussel', '_Boutersem'),
left_index=True, right_index=True)
charts.plot(df_combined.filter(like='cloud'), stock=True, show='inline')
###Output
_____no_output_____
###Markdown
Built-In Caching Caching is turned on by default, so when you try and get dataframes the first time it takes a long time...
###Code
start = pd.Timestamp('20170131', tz='Europe/Brussels')
end = pd.Timestamp('20170201', tz='Europe/Brussels')
Weather_Ukkel = forecastwrapper.Weather(location='Ukkel', start=start, end=end)
Weather_Ukkel.days().head(1)
###Output
_____no_output_____
###Markdown
... but now try that again and it goes very fast
###Code
Weather_Ukkel = forecastwrapper.Weather(location='Ukkel', start=start, end=end)
Weather_Ukkel.days().head(1)
###Output
_____no_output_____
###Markdown
You can turn off this behaviour by setting the cache flag to False:
###Code
Weather_Ukkel = forecastwrapper.Weather(location='Ukkel', start=start, end=end, cache=False)
###Output
_____no_output_____
###Markdown
Solar Irradiance! Dark Sky has added Solar Irradiance data as a beta.Note:- The values are calculated, not measured. Dark Sky uses the position of the sun in combination with cloud cover.- Western Europe is not in Dark Sky's "primary region", therefore the data is not super-accurate.- Since it is a beta, the algorithms and therefore the values may change- I (JrtPec) have done a qualitative analysis that compared these values with those measured by KNMI (Netherlands). The differences were significant (27% lower). I have notified Dark Sky and they will investigate and possibly update their algorithms.- You need to delete your cached files in order to include these new values (everything will have to be re-downloaded)- If Dark Sky were to update their values, the cache needs to be deleted again.
###Code
Weather_Ukkel = forecastwrapper.Weather(location='Ukkel', start=start, end=end)
###Output
_____no_output_____
###Markdown
Hourly data
###Code
Weather_Ukkel.hours()[[
'GlobalHorizontalIrradiance',
'DirectNormalIrradiance',
'DiffuseHorizontalIrradiance',
'ExtraTerrestrialRadiation',
'SolarAltitude',
'SolarAzimuth']].dropna().head()
###Output
_____no_output_____
###Markdown
- Global Horizontal Irradiance is the amount of Solar Irradiance that shines on a horizontal surface, direct and diffuse, in Wh/m2. It is calculated by transforming the Direct Normal Irradiance (DNI) to the horizontal plane and adding the Diffuse Horizontal Irradiance (DHI):$$GHI = DNI * cos(90° - Altitude) + DHI$$- The GHI is what you would use to benchmark PV-panels- Direct Normal Irradiance is the amount of solar irradiance that shines directly on a plane tilted towards the sun. In Wh/m2.- Diffuse Horizontal Irradiance is the amount of solar irradiance that is scattered in the atmosphere and by clouds. In Wh/m2.- Extra-Terrestrial Radiation is the GHI a point would receive if there was no atmosphere.- Altitude of the Sun is measured in degrees above the horizon.- Azimuth is the direction of the Sun in degrees, measured from the true north going clockwise. At night, all values will be `NaN` Daily data The daily sum of the GHI is included in the `day` dataframe. Values are in Wh/m2If you need other daily aggregates, give me a shout!
###Code
Weather_Ukkel.days()
###Output
_____no_output_____
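###Markdown
As a quick sanity check (an added sketch, assuming the hourly column names shown above), the relation $GHI = DNI \cdot \cos(90° - Altitude) + DHI$ can be verified directly from the hourly dataframe:
###Code
# Added sketch: recompute GHI from DNI, DHI and the solar altitude and compare with the reported column
import numpy as np

hours = Weather_Ukkel.hours().dropna(subset=['DirectNormalIrradiance'])
ghi_check = (hours['DirectNormalIrradiance'] * np.cos(np.radians(90 - hours['SolarAltitude']))
             + hours['DiffuseHorizontalIrradiance'])
print((ghi_check - hours['GlobalHorizontalIrradiance']).abs().max())
###Output
_____no_output_____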
###Markdown
Add Global Irradiance on a tilted surface! Create a list with all the different irradiances you want. A surface is specified by the orientation and tilt- Orientation in degrees from the north: 0 = North, 90 = East, 180 = South, 270 = West- Tilt in degrees from the horizontal plane: 0 = Horizontal, 90 = Vertical
###Code
# Lets get the vertical faces of a house
irradiances=[
(0, 90), # north vertical
(90, 90), # east vertical
(180, 90), # south vertical
(270, 90), # west vertical
]
Weather_Ukkel.hours(irradiances=irradiances).filter(like='GlobalIrradiance').dropna().head()
###Output
_____no_output_____
###Markdown
The names of the columns reflect the orientation and the tilt
###Code
Weather_Ukkel.days(irradiances=irradiances).filter(like='GlobalIrradiance')
###Output
_____no_output_____
###Markdown
Wind on an oriented building face The hourly wind speed and bearing are projected on an oriented building face. We call this the windComponent for a given orientation. This value is also squared and called windComponentSquared. This can be equated with the force or pressure of the wind on a static surface, like a building face. The value is also cubed and called windComponentCubed. This can be correlated with the power output of a wind turbine. First, define some orientations you want the wind calculated for. Orientation in degrees starting from the north and going clockwise
###Code
orientations = [0, 90, 180, 270]
Weather_Ukkel.hours(wind_orients=orientations).filter(like='wind').head()
Weather_Ukkel.days(wind_orients=orientations).filter(like='wind').head()
###Output
_____no_output_____ |
Ejercicios/02-Clasificador por particiones/.ipynb_checkpoints/clasificador_particiones-checkpoint.ipynb | ###Markdown
Partition-Based Classification - Histogram Method Julian Ferres - Padrón No. 101483 Problem statement Let $R_0$ and $R_1$ be two regions and let $n$ be the number of points, where:- $R_0$ is the triangle with vertices $(1,0)$, $(1,1)$ and $(\frac{1}{2},0)$- $R_1$ is the triangle with vertices $(0,0)$, $(\frac{1}{2},1)$ and $(0,1)$- $n = 10, 100, 1000, 10000, \ldots$ We simulate $n$ points in $\mathbb{R}^2$ as follows: >- Each point belongs to one of the two classes: **_Class 0_** or **_Class 1_**, each with probability $\frac{1}{2}$>- Points of class $i$ are uniformly distributed with support on $R_i$, for $i=0,1$ **Using this sample, build a histogram rule that can classify a point that does not belong to the sample** Solution
###Code
#Import libraries
import numpy as np
#Plots
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
n = 1000 # Sample size
muestra = np.zeros((n,3))
###Output
_____no_output_____
###Markdown
Sampling
###Code
i = 0 # Points included so far.
while(i < n):
x = np.random.uniform(0,1)
y = np.random.uniform(0,1)
    clase = np.random.randint( 0 , 1 + 1 ) # Discrete uniform on {0,1}
if (( clase == 0 and abs(2*x-1) < y) or ( clase == 1 and y < 2*x < 2-y )):
muestra[i][0] = x
muestra[i][1] = y
muestra[i][2] = clase
i+=1
clase0 , clase1 = muestra[(muestra[:,2] == 0.)] , muestra[(muestra[:,2] == 1.)]
g = plt.scatter( clase0[:,0] , clase0[:,1] , alpha='0.5', color='darkgreen' , label = 'Clase 0');
g = plt.scatter( clase1[:,0] , clase1[:,1] ,alpha='0.5', color='darkorange' , label = 'Clase 1');
plt.legend()
plt.title("Distribuciones", fontsize=20)
plt.show()
###Output
_____no_output_____
###Markdown
To build the partition we need the side length of the boxes. As seen in class, if $h_n$ is the side length of the boxes, then:$$h_n = \frac {1}{\sqrt[2d]{n}} = n^{-\frac{1}{2d}}$$satisfies the conditions for the histogram rule to be universally consistent. In this case, with two dimensions, $d=2$:
###Code
d = 2
h_n = n **(-(1/(d*2)))
d_n = int(1/h_n) # 1/h_n might not be an integer
particion = np.ndarray((d_n , d_n), dtype = int )
particion.fill(0)
for i in range(n):
x_p , y_p = int(muestra[i,0]/h_n) , int(muestra[i,1]/h_n)
x_p = d_n - 1 if x_p >=d_n else x_p
y_p = d_n - 1 if y_p >=d_n else y_p
particion[y_p , x_p] += 1 if muestra[i,2] else -1
f = lambda x : 0 if (x>= 0) else 1
f_vec = np.vectorize(f)
for_heatmap = f_vec(particion) # Map every count to 0 or 1
particion
for_heatmap
###Output
_____no_output_____
###Markdown
Classification with the histogram method
###Code
dims = (8, 8)
fig, axs = plt.subplots(figsize=dims)
annotable = (n<100000)
g = sns.heatmap(for_heatmap, annot = annotable , linewidths=.5,cmap=['darkgreen','darkorange'],\
cbar = False, annot_kws={"size": 30},\
xticklabels = [round(x/d_n,2) for x in range(d_n)],\
yticklabels = [(round(1-x/d_n,2)) for x in range(1,d_n+1)])
g.set_title('Classified partitions' , size = 30)
plt.show()
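###Output
_____no_output_____
###Markdown
Finally, the fitted partition can be used as a classification rule for a new point that is not in the sample. This is an added sketch (it reuses `particion`, `h_n` and `d_n` from above and applies a majority vote inside the cell that contains the point):
###Code
# Added sketch: histogram (majority-vote) rule for a new point (x, y).
# Each cell of `particion` stores (#class-1 points) - (#class-0 points).
def classify(x, y):
    x_p = min(int(x / h_n), d_n - 1)
    y_p = min(int(y / h_n), d_n - 1)
    return 1 if particion[y_p, x_p] > 0 else 0

print(classify(0.25, 0.25), classify(0.75, 0.75))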
###Output
_____no_output_____ |
notebooks/Syft Tensor Example Notebook.ipynb | ###Markdown
CPU vs GPU
###Code
gpu = True
###Output
_____no_output_____
###Markdown
Absolute Value
###Code
a = FloatTensor(data)
if(gpu):
a.gpu()
b = a.abs()
b
a = FloatTensor(data)
if(gpu):
a.gpu()
a
a.abs_()
a.id
###Output
_____no_output_____
###Markdown
Addition
###Code
a = FloatTensor(data)
if(gpu):
a.gpu()
a
a = a + a
a
a += a
a
a = a + 3
a
a += 3
a
###Output
_____no_output_____
###Markdown
Subtraction
###Code
a = FloatTensor(data)
b = FloatTensor(data * 2)
if(gpu):
a.gpu()
b.gpu()
a
b
a - b
b - a
a - 1
b - 2
a -= 1
a
b -= 1
b
a -= b
a
###Output
_____no_output_____
###Markdown
Multiplication
###Code
a = a * a
a
a *= a
a
a = a * 3
a
a *= 3
a
###Output
_____no_output_____
###Markdown
Division
###Code
a
b
a = a/a
a
b / a
a / b
a / 1
b / 1
a /= b
a
b /= a
b
a /= a
a
b /= 1
b
b /= 2
b
a *= 2
a
###Output
_____no_output_____
###Markdown
CPU vs GPU
###Code
gpu = True
###Output
_____no_output_____
###Markdown
Absolute Value
###Code
a = FloatTensor(data)
if(gpu):
a.gpu()
b = a.abs()
b
a = FloatTensor(data)
if(gpu):
a.gpu()
a
a.abs_()
a.id
###Output
_____no_output_____
###Markdown
Addition
###Code
a = FloatTensor(data)
if(gpu):
a.gpu()
a
a = a + a
a
a += a
a
a = a + 3
a
a += 3
a
###Output
_____no_output_____
###Markdown
Subtraction
###Code
a = FloatTensor(data)
b = FloatTensor(data * 2)
if(gpu):
a.gpu()
b.gpu()
a
b
a - b
b - a
a - 1
b - 2
a -= 1
a
b -= 1
b
a -= b
a
###Output
_____no_output_____
###Markdown
Multiplication
###Code
a = a * a
a
a *= a
a
a = a * 3
a
a *= 3
a
###Output
_____no_output_____
###Markdown
Division
###Code
a
b
a = a/a
a
b / a
a / b
a / 1
b / 1
a /= b
a
b /= a
b
a /= a
a
b /= 1
b
b /= 2
b
a *= 2
a
###Output
_____no_output_____ |
Case Studies/Resturant Reviews/restaurant-reviews-cnn-tf (3).ipynb | ###Markdown
Restaurant Reviews ***Data Description***> The data consists of 2 columns: Review and Liked> > Review: the text of each restaurant review> > Liked: whether the review is good or bad, encoded as 0 or 1 > > 0 - Bad review> > 1 - Good review
###Code
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
###Output
/kaggle/input/restaurant-reviews/Restaurant_Reviews.tsv
###Markdown
Adding Basic Libraries
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
###Output
_____no_output_____
###Markdown
Loading the Data
###Code
df = pd.read_csv('/kaggle/input/restaurant-reviews/Restaurant_Reviews.tsv', delimiter = '\t', quoting = 3)
df.head()
# Getting the shape of data
df.shape
###Output
_____no_output_____
###Markdown
* **Setting Parameters**
###Code
vocab_size = 500
embedding_dim = 16
max_length = 100
trunc_type='post'
padding_type='post'
oov_tok = "<OOV>"
training_size = 900
###Output
_____no_output_____
###Markdown
***Separating the data columns into sentences and labels***
###Code
sentences = df['Review'].tolist()
labels = df['Liked'].tolist()
###Output
_____no_output_____
###Markdown
Getting Training and Testing Data
###Code
training_sentences = sentences[0:training_size]
testing_sentences = sentences[training_size:]
training_labels = labels[0:training_size]
testing_labels = labels[training_size:]
###Output
_____no_output_____
###Markdown
Setting Tokenizer And Padding data
###Code
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
training_sequences = tokenizer.texts_to_sequences(training_sentences)
training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
###Output
_____no_output_____
###Markdown
***Converting data into arrays***
###Code
import numpy as np
training_padded = np.array(training_padded)
training_labels = np.array(training_labels)
testing_padded = np.array(testing_padded)
testing_labels = np.array(testing_labels)
###Output
_____no_output_____
###Markdown
Creating the CNN Model, Adding Layers, and Compiling It
###Code
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(24, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
# Getting Summary
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding (Embedding) (None, 100, 16) 8000
_________________________________________________________________
global_average_pooling1d (Gl (None, 16) 0
_________________________________________________________________
dense (Dense) (None, 24) 408
_________________________________________________________________
dense_1 (Dense) (None, 1) 25
=================================================================
Total params: 8,433
Trainable params: 8,433
Non-trainable params: 0
_________________________________________________________________
###Markdown
Fitting the CNN Model
###Code
num_epochs = 50
history = model.fit(training_padded, training_labels, epochs=num_epochs, validation_data=(testing_padded, testing_labels), verbose=2)
###Output
Epoch 1/50
29/29 - 1s - loss: 0.6906 - accuracy: 0.5467 - val_loss: 0.7499 - val_accuracy: 0.0400
Epoch 2/50
29/29 - 0s - loss: 0.6876 - accuracy: 0.5511 - val_loss: 0.7771 - val_accuracy: 0.0400
Epoch 3/50
29/29 - 0s - loss: 0.6871 - accuracy: 0.5511 - val_loss: 0.7946 - val_accuracy: 0.0400
Epoch 4/50
29/29 - 0s - loss: 0.6861 - accuracy: 0.5511 - val_loss: 0.7814 - val_accuracy: 0.0400
Epoch 5/50
29/29 - 0s - loss: 0.6857 - accuracy: 0.5511 - val_loss: 0.7974 - val_accuracy: 0.0400
Epoch 6/50
29/29 - 0s - loss: 0.6840 - accuracy: 0.5511 - val_loss: 0.8091 - val_accuracy: 0.0400
Epoch 7/50
29/29 - 0s - loss: 0.6828 - accuracy: 0.5511 - val_loss: 0.7978 - val_accuracy: 0.0400
Epoch 8/50
29/29 - 0s - loss: 0.6808 - accuracy: 0.5511 - val_loss: 0.7940 - val_accuracy: 0.0400
Epoch 9/50
29/29 - 0s - loss: 0.6789 - accuracy: 0.5511 - val_loss: 0.7743 - val_accuracy: 0.0400
Epoch 10/50
29/29 - 0s - loss: 0.6745 - accuracy: 0.5511 - val_loss: 0.7815 - val_accuracy: 0.0500
Epoch 11/50
29/29 - 0s - loss: 0.6679 - accuracy: 0.5556 - val_loss: 0.7701 - val_accuracy: 0.0700
Epoch 12/50
29/29 - 0s - loss: 0.6604 - accuracy: 0.6344 - val_loss: 0.7739 - val_accuracy: 0.1000
Epoch 13/50
29/29 - 0s - loss: 0.6495 - accuracy: 0.5944 - val_loss: 0.7407 - val_accuracy: 0.2700
Epoch 14/50
29/29 - 0s - loss: 0.6360 - accuracy: 0.7044 - val_loss: 0.7851 - val_accuracy: 0.2100
Epoch 15/50
29/29 - 0s - loss: 0.6211 - accuracy: 0.6567 - val_loss: 0.6795 - val_accuracy: 0.4700
Epoch 16/50
29/29 - 0s - loss: 0.6020 - accuracy: 0.7411 - val_loss: 0.7551 - val_accuracy: 0.3400
Epoch 17/50
29/29 - 0s - loss: 0.5849 - accuracy: 0.7367 - val_loss: 0.6602 - val_accuracy: 0.5000
Epoch 18/50
29/29 - 0s - loss: 0.5653 - accuracy: 0.7378 - val_loss: 0.6569 - val_accuracy: 0.5100
Epoch 19/50
29/29 - 0s - loss: 0.5411 - accuracy: 0.7889 - val_loss: 0.7042 - val_accuracy: 0.4300
Epoch 20/50
29/29 - 0s - loss: 0.5204 - accuracy: 0.7778 - val_loss: 0.7312 - val_accuracy: 0.4200
Epoch 21/50
29/29 - 0s - loss: 0.4997 - accuracy: 0.8144 - val_loss: 0.6353 - val_accuracy: 0.5900
Epoch 22/50
29/29 - 0s - loss: 0.4790 - accuracy: 0.8089 - val_loss: 0.5193 - val_accuracy: 0.8000
Epoch 23/50
29/29 - 0s - loss: 0.4567 - accuracy: 0.8389 - val_loss: 0.6236 - val_accuracy: 0.5900
Epoch 24/50
29/29 - 0s - loss: 0.4366 - accuracy: 0.8367 - val_loss: 0.5772 - val_accuracy: 0.7000
Epoch 25/50
29/29 - 0s - loss: 0.4168 - accuracy: 0.8522 - val_loss: 0.5968 - val_accuracy: 0.6700
Epoch 26/50
29/29 - 0s - loss: 0.4005 - accuracy: 0.8689 - val_loss: 0.5868 - val_accuracy: 0.6700
Epoch 27/50
29/29 - 0s - loss: 0.3835 - accuracy: 0.8733 - val_loss: 0.5847 - val_accuracy: 0.6800
Epoch 28/50
29/29 - 0s - loss: 0.3693 - accuracy: 0.8800 - val_loss: 0.5659 - val_accuracy: 0.6800
Epoch 29/50
29/29 - 0s - loss: 0.3548 - accuracy: 0.8789 - val_loss: 0.5110 - val_accuracy: 0.7600
Epoch 30/50
29/29 - 0s - loss: 0.3394 - accuracy: 0.8822 - val_loss: 0.5492 - val_accuracy: 0.7200
Epoch 31/50
29/29 - 0s - loss: 0.3268 - accuracy: 0.8956 - val_loss: 0.5188 - val_accuracy: 0.7700
Epoch 32/50
29/29 - 0s - loss: 0.3169 - accuracy: 0.8900 - val_loss: 0.4821 - val_accuracy: 0.8000
Epoch 33/50
29/29 - 0s - loss: 0.3057 - accuracy: 0.8967 - val_loss: 0.3810 - val_accuracy: 0.8900
Epoch 34/50
29/29 - 0s - loss: 0.2956 - accuracy: 0.8922 - val_loss: 0.4779 - val_accuracy: 0.8000
Epoch 35/50
29/29 - 0s - loss: 0.2858 - accuracy: 0.9078 - val_loss: 0.5171 - val_accuracy: 0.7700
Epoch 36/50
29/29 - 0s - loss: 0.2756 - accuracy: 0.9044 - val_loss: 0.4503 - val_accuracy: 0.8200
Epoch 37/50
29/29 - 0s - loss: 0.2671 - accuracy: 0.9178 - val_loss: 0.4144 - val_accuracy: 0.8500
Epoch 38/50
29/29 - 0s - loss: 0.2590 - accuracy: 0.9133 - val_loss: 0.4407 - val_accuracy: 0.8400
Epoch 39/50
29/29 - 0s - loss: 0.2524 - accuracy: 0.9178 - val_loss: 0.4408 - val_accuracy: 0.8300
Epoch 40/50
29/29 - 0s - loss: 0.2463 - accuracy: 0.9122 - val_loss: 0.5152 - val_accuracy: 0.7800
Epoch 41/50
29/29 - 0s - loss: 0.2412 - accuracy: 0.9167 - val_loss: 0.3800 - val_accuracy: 0.8600
Epoch 42/50
29/29 - 0s - loss: 0.2507 - accuracy: 0.9022 - val_loss: 0.3470 - val_accuracy: 0.8700
Epoch 43/50
29/29 - 0s - loss: 0.2370 - accuracy: 0.9156 - val_loss: 0.3654 - val_accuracy: 0.8600
Epoch 44/50
29/29 - 0s - loss: 0.2279 - accuracy: 0.9167 - val_loss: 0.4165 - val_accuracy: 0.8400
Epoch 45/50
29/29 - 0s - loss: 0.2177 - accuracy: 0.9211 - val_loss: 0.5044 - val_accuracy: 0.7600
Epoch 46/50
29/29 - 0s - loss: 0.2116 - accuracy: 0.9278 - val_loss: 0.5369 - val_accuracy: 0.7600
Epoch 47/50
29/29 - 0s - loss: 0.2080 - accuracy: 0.9289 - val_loss: 0.4152 - val_accuracy: 0.8300
Epoch 48/50
29/29 - 0s - loss: 0.2077 - accuracy: 0.9267 - val_loss: 0.3864 - val_accuracy: 0.8600
Epoch 49/50
29/29 - 0s - loss: 0.1996 - accuracy: 0.9289 - val_loss: 0.3491 - val_accuracy: 0.8800
Epoch 50/50
29/29 - 0s - loss: 0.1975 - accuracy: 0.9267 - val_loss: 0.5408 - val_accuracy: 0.7700
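###Markdown
Before looking at the training curves, we can also get a single held-out score with `model.evaluate()` (a small addition, not in the original notebook):
###Code
# Added: evaluate the trained model on the held-out reviews
loss, accuracy = model.evaluate(testing_padded, testing_labels, verbose=0)
print(f"Test loss: {loss:.4f}, test accuracy: {accuracy:.4f}")
###Output
_____no_output_____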
###Markdown
Plotting accuracy and loss Graph
###Code
import matplotlib.pyplot as plt
def plot_graphs(history, string):
plt.plot(history.history[string])
plt.plot(history.history['val_'+string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.legend([string, 'val_'+string])
plt.show()
plot_graphs(history, "accuracy")
plot_graphs(history, "loss")
###Output
_____no_output_____
###Markdown
* The first graph shows the increase in accuracy compared with val_accuracy * The second graph shows the decrease in loss compared with val_loss ***Decoding Sentences***
###Code
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_sentence(text):
return ' '.join([reverse_word_index.get(i, '?') for i in text])
print(decode_sentence(training_padded[0]))
print(training_sentences[0])
print(labels[0])
###Output
wow loved this place ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ?
Wow... Loved this place.
1
###Markdown
Prediction on Testing Data
###Code
for n in range(10):
print(testing_sentences[n],': ',testing_labels[n])
###Output
Spend your money elsewhere. : 0
Their regular toasted bread was equally satisfying with the occasional pats of butter... Mmmm...! : 1
The Buffet at Bellagio was far from what I anticipated. : 0
And the drinks are WEAK, people! : 0
-My order was not correct. : 0
Also, I feel like the chips are bought, not made in house. : 0
After the disappointing dinner we went elsewhere for dessert. : 0
The chips and sals a here is amazing!!!!!!!!!!!!!!!!!!! : 1
We won't be returning. : 0
This is my new fav Vegas buffet spot. : 1
###Markdown
> Above we list a few test sentences together with their true labels> Bad reviews are marked as 0> Good reviews are marked as 1 Getting Predictions on Randomly Created Reviews
###Code
# Checking Predictions
sentence = ["Awesome Pizza", "I will come here everytime!!!", "Dont come here ever, Worst Food"]
sequences = tokenizer.texts_to_sequences(sentence)
padded = pad_sequences(sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
print(model.predict(padded))
###Output
[[0.95942914]
[0.8467437 ]
[0.07651043]]
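###Markdown
Since the final layer is a sigmoid, each prediction is the probability that the review is positive. A common follow-up (an added sketch, not in the original notebook) is to threshold at 0.5 to turn these probabilities into hard labels:
###Code
# Added sketch: convert the predicted probabilities into Good/Bad labels
probs = model.predict(padded).flatten()
for review, p in zip(sentence, probs):
    label = 'Good' if p > 0.5 else 'Bad'
    print(f"{review!r}: p(positive) = {p:.2f} -> {label} review")
###Output
_____no_output_____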
|
Resources/temp_analysis_bonus_1_starter.ipynb | ###Markdown
Bonus: Temperature Analysis I
###Code
import pandas as pd
from datetime import datetime as dt
# "tobs" is "temperature observations"
df = pd.read_csv('Resources/hawaii_measurements.csv')
df.head()
# Convert the date column format from string to datetime
df['date'] = pd.to_datetime(df['date'])
# Set the date column as the DataFrame index
df.set_index('date', inplace=True)
# Drop the date column
df.head()
###Output
_____no_output_____
###Markdown
Compare June and December data across all years
###Code
from scipy import stats
import numpy as np
# Filter data for desired months
desired_mths_june = df.loc[df.index.month == 6]['tobs']
desired_mths_dec = df.loc[df.index.month == 12]['tobs']
# Identify the average temperature for June
temps_june = np.mean(desired_mths_june)
temps_june
# Identify the average temperature for December
temps_december = np.mean(desired_mths_dec)
temps_december
# Create collections of temperature data
print(len(desired_mths_june ))
print(len(desired_mths_dec))
# Run paired t-test
stats.ttest_ind(desired_mths_june, desired_mths_dec)
###Output
_____no_output_____
###Markdown
Analysis
###Code
# The p-value is 3.90, so I am not seeing a relationship between the June and December average temperatures.
# I was unable to report the average temperatures due to errors; however, June has the larger collection of
# temperature observations (1700 for June compared to 1517 for December).
###Output
_____no_output_____ |
notebooks/11_tree_metrics.ipynb | ###Markdown
Tree
###Code
from typing import List
from pprint import pprint
from operator import add
from functools import reduce
from collections import Counter
import pandas as pd
from new_semantic_parsing import TopSchemaTokenizer
LBR = '['
RBR = ']'
IN = 'IN:'
SL = 'SL:'
class Tree:
def __init__(self, entity, subtrees: List = None):
self.entity = entity
self.subtrees = subtrees
if subtrees is None:
self.subtrees = []
# for per-class metrics
self._counts = Counter([entity])
self._len = 1
if len(self.subtrees) > 0:
self._len += sum(map(len, self.subtrees))
self._counts += reduce(add, (s._counts for s in self.subtrees))
self._dict_repr = {self.entity: [s._dict_repr for s in self.subtrees]}
def __repr__(self):
return repr(self._dict_repr)
def __eq__(self, other):
if isinstance(other, dict):
return self._dict_repr == other
if isinstance(other, Tree):
return self._dict_repr == other._dict_repr
raise ValueError(type(other))
def __len__(self):
return self._len
@property
def counts(self):
return self._counts
@classmethod
def from_tokens(cls, tokens, return_index=False):
"""Builds a parsing tree for labeled bracketing score computation.
Args:
tokens: list of tokens
return_index: used in recursion to provide toke index
Returns:
tuple of size two: Tree, last_index
"""
# every tree should start with
# [ ENTITY_TYPE: ENTITY
if len(tokens) < 3 or tokens[0] != LBR:
raise ValueError(f'Tree starts with {tokens[:4]}')
entity_type = tokens[1]
# ignore invalid subtrees
if entity_type not in [IN, SL]:
return None
entity = entity_type + tokens[2] # e.g. IN:INTENT
subtrees = []
slot_value_tokens = []
i = 3
while i < len(tokens):
token = tokens[i]
if entity_type == IN and token not in [LBR, RBR]:
i += 1
continue
if token == LBR:
subtree, j = cls.from_tokens(tokens[i:], return_index=True)
subtrees.append(subtree)
i += j
continue
if token == RBR:
if slot_value_tokens:
subtrees = [Tree(' '.join(slot_value_tokens))]
slot_value_tokens = []
i += 1
break
if entity_type == SL:
slot_value_tokens.append(token)
i += 1
continue
tree = Tree(entity, subtrees)
if return_index:
return tree, i
return tree
test_case_1 = {
'input': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, RBR],
'output': Tree(IN + 'INTENT1', [Tree(SL + 'SLOT1', [Tree('slot value')])])
}
test_case_2 = {
'input': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, 'more', 'text', LBR, SL, 'SLOT2', 'slot2', 'value', RBR, RBR],
'output': {IN + 'INTENT1': [{SL + 'SLOT1': [Tree('slot value')]}, {SL + 'SLOT2': [Tree('slot2 value')]}]}
}
test_case_3 = {
'input': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, 'more', 'text', LBR, SL, 'SLOT1', 'slot2', 'value', RBR, RBR],
'output': {IN + 'INTENT1': [{SL + 'SLOT1': [Tree('slot value')]}, {SL + 'SLOT1': [Tree('slot2 value')]}]} # this is why you should use lists and not sets/dicts
}
test_case_4 = {
'input': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, 'more', 'text', LBR, SL, 'SLOT1'],
'output': {IN + 'INTENT1': [{SL + 'SLOT1': [Tree('slot value')]}, {SL + 'SLOT1': [Tree('slot2 value')]}]} # this is why you should use lists and not sets/dicts
}
tree = Tree.from_tokens(test_case_1['input'])
print(tree)
print(len(tree))
print(tree.counts)
assert tree == test_case_1['output']
tree = Tree.from_tokens(test_case_2['input'])
print(tree)
print(len(tree))
print(tree.counts)
assert tree == test_case_2['output']
tree = Tree.from_tokens(test_case_3['input'])
print(tree)
print(len(tree))
print(tree.counts)
assert tree == test_case_3['output']
tree = Tree.from_tokens(test_case_4['input'])
print(tree)
print(len(tree))
print(tree.counts)
data = pd.read_table('../data/top-dataset-semantic-parsing/eval.tsv', names=['text', 'tokens', 'schema'])
tokenized_schema = [TopSchemaTokenizer.tokenize(t) for t in data.schema]
i = 10
print(tokenized_schema[i])
print(Tree.from_tokens(tokenized_schema[i]))
complex_example = (
'[IN:GET_EVENT Are there any '
'[SL:CATEGORY_EVENT Concerts ] at '
'[SL:LOCATION [IN:GET_LOCATION [SL:POINT_ON_MAP Chattaqua Amphitheater ] ] ] '
'[SL:DATE_TIME this weekend ] with available tickets ]'
)
complex_example_tokens = TopSchemaTokenizer.tokenize(complex_example)
complex_tree = Tree.from_tokens(complex_example_tokens)
pprint(complex_tree._dict_repr)
###Output
{'IN:GET_EVENT': [{'SL:CATEGORY_EVENT': [{'Concerts': []}]},
{'SL:LOCATION': [{'IN:GET_LOCATION': [{'SL:POINT_ON_MAP': [{'Chattaqua Amphitheater': []}]}]}]},
{'SL:DATE_TIME': [{'this weekend': []}]}]}
###Markdown
Metrics
###Code
test_case_1 = {
'true': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, RBR],
'pred': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, RBR],
'f1': 1,
'precision': 1,
'recall': 1,
}
test_case_2 = {
'true': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, RBR],
'pred': [LBR, IN, 'INTENT2', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, RBR],
'f1': 0,
'precision': 0,
'recall': 0,
}
test_case_3 = {
'true': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, RBR],
'pred': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT2', 'slot', 'value', RBR, RBR],
'f1': 0.5,
'precision': 0.5,
'recall': 0.5,
}
test_case_4 = {
'true': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, RBR],
'pred': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, LBR, SL, 'SLOT2', 'value', RBR, RBR],
'f1': 2/3.,
'precision': 3/4.,
'recall': 1,
}
test_case_5 = {
'true': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, RBR],
'pred': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'wrong value', RBR, RBR],
'f1': 2/3.,
'precision': 2/3.,
'recall': 2/3.,
}
def f1(p, r):
return 2 * p * r / (p + r)
tree1 = Tree.from_tokens(test_case_1['true'])
tree2 = Tree.from_tokens(test_case_1['pred'])
print(tree1)
print(tree2)
def labeled_bracketing_recall(pred_tokens, true_tokens):
"""Compute recall labeling bracketng score"""
pred_tree = Tree.from_tokens(pred_tokens)
true_tree = Tree.from_tokens(true_tokens)
true_positive, false_negative = 0, 0
if pred_tree.entity != true_tree.entity:
false_negative += 1
else:
true_positive += 1
tp, fn = _labeled_bracketing_tp_fn(pred_tree.subtrees, true_tree.subtrees)
true_positive += tp
false_negative += fn
recall = true_positive / (true_positive + false_negative)
return recall
def labeled_bracketing_precision(pred_tokens, true_tokens):
"""Compute precision labeling bracketng score"""
pred_tree = Tree.from_tokens(pred_tokens)
true_tree = Tree.from_tokens(true_tokens)
true_positive, false_positive = 0, 0
if pred_tree.entity != true_tree.entity:
false_positive += 1
else:
true_positive += 1
tp, fp = _labeled_bracketing_tp_fp(pred_tree.subtrees, true_tree.subtrees)
true_positive += tp
false_positive += fp
recall = true_positive / (true_positive + false_positive)
return recall
def _labeled_bracketing_tp_fn(pred_subtrees: List[Tree], true_subtrees: List[Tree]):
"""Compute true positive and false negative labeling bracketng scores"""
true_positive, false_negative = 0, 0
for i, true_tree in enumerate(true_subtrees):
correct_subtree_indices = [i for i, pred_tree in enumerate(pred_subtrees) if pred_tree.entity == true_tree.entity]
if len(correct_subtree_indices) == 0:
false_negative += 1
else:
true_positive += 1
for pred_subtree_idx in correct_subtree_indices:
pred_tree = pred_subtrees[pred_subtree_idx]
tp, fn = _labeled_bracketing_tp_fn(pred_tree.subtrees, true_tree.subtrees)
true_positive += tp
false_negative += fn
return true_positive, false_negative
def _labeled_bracketing_tp_fp(pred_subtrees: List[Tree], true_subtrees: List[Tree]):
"""Compute true positive and false positive labeling bracketng scores"""
return _labeled_bracketing_tp_fn(true_subtrees, pred_subtrees)
test_case = test_case_2
for i, test_case in enumerate([test_case_1, test_case_2, test_case_3, test_case_4, test_case_5]):
recall = labeled_bracketing_recall(test_case['pred'], test_case['true'])
if recall == test_case['recall']:
print(f'test_case_{i+1} passed. Computed recall: {recall}')
else:
print(f'\t test_case_{i+1} FAILED. Computed recall: {recall}')
for i, test_case in enumerate([test_case_1, test_case_2, test_case_3, test_case_4, test_case_5]):
precision = labeled_bracketing_precision(test_case['pred'], test_case['true'])
if precision == test_case['precision']:
print(f'test_case_{i+1} passed. Computed precision: {precision}')
else:
print(f'\t test_case_{i+1} FAILED. Computed precision: {precision}')
###Output
test_case_1 passed. Computed precision: 1.0
test_case_2 passed. Computed precision: 0.0
test_case_3 passed. Computed precision: 0.5
test_case_4 passed. Computed precision: 0.75
test_case_5 passed. Computed precision: 0.6666666666666666
###Markdown
Compare with the official TOP evaluation tool
###Code
data_test = pd.read_table('../data/top-dataset-semantic-parsing/test.tsv', names=['text', 'tokens', 'schema'])
data_pred = pd.read_table('../lightning_out/jul8_20epochs_small/predictions.tsv', names=['schema'])
tokenized_schema_test = [TopSchemaTokenizer.tokenize(t) for t in data_test.schema]
tokenized_schema_pred = [TopSchemaTokenizer.tokenize(t) for t in data_pred.schema]
# TOP script gives the following metrics
{'instance_count': 9042,
'exact_match': 0.25481088254810885,
'labeled_bracketing_scores': {
'precision': 0.6032053706505295,
'recall': 0.3814007712312797,
'f1': 0.46731984250526504
},
'tree_labeled_bracketing_scores': {
'precision': 0.3943362329803328,
'recall': 0.24933488775296686,
'f1': 0.30550315905136893
},
'tree_validity': 0.9382879893828799}
precisions = []
recalls = []
exact_match = 0
for pred, true in zip(tokenized_schema_pred, tokenized_schema_test):
pred_tree = Tree.from_tokens(pred)
true_tree = Tree.from_tokens(true)
if pred_tree == true_tree:
exact_match += 1
precision = labeled_bracketing_precision(pred, true)
recall = labeled_bracketing_recall(pred, true)
precisions.append(precision)
recalls.append(recall)
print(true)
print(true_tree)
mean_precision = sum(precisions) / len(precisions)
mean_recall = sum(recalls) / len(recalls)
exact_match /= len(precisions)
print('Precision: ', mean_precision)
print('Recall : ', mean_recall)
print('F1 : ', f1(mean_precision, mean_recall))
print('exact_match: ', exact_match)
###Output
Precision: 0.640802521534121
Recall : 0.5737675240412504
F1 : 0.6054351126465458
exact_match: 0.2591240875912409
###Markdown
New approach
###Code
def label_bracketing_scores(pred_trees, true_trees):
true_positives = 0
n_predicted = 0
n_expected = 0
for pred_tree, true_tree in zip(pred_trees, true_trees):
n_predicted += len(pred_tree)
n_expected += len(true_tree)
if pred_tree.entity == true_tree.entity:
true_positives += 1 + _tree_true_positive(pred_tree.subtrees, true_tree.subtrees)
precision = true_positives / n_predicted
recall = true_positives / n_expected
f1 = 0
if precision + recall > 0:
f1 = 2 * precision * recall / (precision + recall)
return {'LBS_precision': precision, 'LBS_recall': recall, 'LBS_F1': f1}
def _tree_true_positive(pred_subtrees, true_subtrees):
true_positive = 0
for i, true_tree in enumerate(true_subtrees):
correct_subtree_indices = [i for i, pred_tree in enumerate(pred_subtrees) if pred_tree.entity == true_tree.entity]
if len(correct_subtree_indices) == 0:
continue
true_positive += 1
for pred_subtree_idx in correct_subtree_indices:
pred_tree = pred_subtrees[pred_subtree_idx]
tp = _tree_true_positive(pred_tree.subtrees, true_tree.subtrees)
true_positive += tp
return true_positive
for i, test_case in enumerate([test_case_1, test_case_2, test_case_3, test_case_4, test_case_5]):
tree_true = Tree.from_tokens(test_case['true'])
tree_pred = Tree.from_tokens(test_case['pred'])
metrics = label_bracketing_scores([tree_pred], [tree_true])
print(f'test_case_{i+1}:')
print(metrics)
print()
pred_trees = [Tree.from_tokens(t) for t in tokenized_schema_pred]
true_trees = [Tree.from_tokens(t) for t in tokenized_schema_test]
metrics = label_bracketing_scores(pred_trees, true_trees)
print(metrics)
###Output
{'LBS_precision': 0.6405084598194851, 'LBS_recall': 0.441435314825186, 'LBS_F1': 0.5226575728511716}
###Markdown
Still a bit higher than the official implementation {'precision': 0.603, 'recall': 0.381, 'f1': 0.467}, Per-class scores
###Code
def label_bracketing_scores_for_classes(pred_trees, true_trees, classes):
    """Compute label bracketing scores only considering slots, intents and values from classes."""
    true_positives = 0
    n_predicted = 0
    n_expected = 0
    for pred_tree, true_tree in zip(pred_trees, true_trees):
        # only count the nodes whose entity belongs to the requested classes
        n_predicted += sum(c for entity, c in pred_tree.counts.items() if entity in classes)
        n_expected += sum(c for entity, c in true_tree.counts.items() if entity in classes)
        if pred_tree.entity == true_tree.entity:
            if true_tree.entity in classes:
                true_positives += 1
            true_positives += _tree_true_positive_for_classes(pred_tree.subtrees, true_tree.subtrees, classes)
    precision = 0 if n_predicted == 0 else true_positives / n_predicted
    recall = 0 if n_expected == 0 else true_positives / n_expected
    f1 = 0
    if precision + recall > 0:
        f1 = 2 * precision * recall / (precision + recall)
    return {'cLBS_precision': precision, 'cLBS_recall': recall, 'cLBS_F1': f1}
def _tree_true_positive_for_classes(pred_subtrees, true_subtrees, classes):
true_positive = 0
for i, true_tree in enumerate(true_subtrees):
correct_subtree_indices = [i for i, pred_tree in enumerate(pred_subtrees) if pred_tree.entity == true_tree.entity]
if len(correct_subtree_indices) == 0:
continue
if true_tree.entity in classes:
true_positive += 1
for pred_subtree_idx in correct_subtree_indices:
pred_tree = pred_subtrees[pred_subtree_idx]
tp = _tree_true_positive_for_classes(pred_tree.subtrees, true_tree.subtrees, classes)
true_positive += tp
return true_positive
true_trees = [Tree.from_tokens(t) for t in tokenized_schema_test]
for i, test_case in enumerate([test_case_1, test_case_2, test_case_3, test_case_4, test_case_5]):
    tree_true = Tree.from_tokens(test_case['true'])
    tree_pred = Tree.from_tokens(test_case['pred'])
    # restrict the metric to a single slot class as a sanity check
    metrics = label_bracketing_scores_for_classes([tree_pred], [tree_true], classes={SL + 'SLOT1'})
print(f'test_case_{i+1}:')
print(metrics)
print()
###Output
_____no_output_____
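###Markdown
As a last step (an added sketch; the set of classes below is an arbitrary example, and `pred_trees`/`true_trees` are reused from the comparison with the official tool), the per-class variant can be run on the real TOP predictions:
###Code
# Added sketch: per-class labeled bracketing scores for a few intents/slots of interest
classes_of_interest = {IN + 'GET_EVENT', SL + 'DATE_TIME', SL + 'LOCATION'}
print(label_bracketing_scores_for_classes(pred_trees, true_trees, classes_of_interest))
###Output
_____no_output_____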
###Markdown
Tree
###Code
from typing import List
from pprint import pprint
from operator import add
from functools import reduce
from collections import Counter
import pandas as pd
from new_semantic_parsing import TopSchemaTokenizer
LBR = '['
RBR = ']'
IN = 'IN:'
SL = 'SL:'
class Tree:
def __init__(self, entity, subtrees: List = None):
self.entity = entity
self.subtrees = subtrees
if subtrees is None:
self.subtrees = []
# for per-class metrics
self._counts = Counter([entity])
self._len = 1
if len(self.subtrees) > 0:
self._len += sum(map(len, self.subtrees))
self._counts += reduce(add, (s._counts for s in self.subtrees))
self._dict_repr = {self.entity: [s._dict_repr for s in self.subtrees]}
def __repr__(self):
return repr(self._dict_repr)
def __eq__(self, other):
if isinstance(other, dict):
return self._dict_repr == other
if isinstance(other, Tree):
return self._dict_repr == other._dict_repr
raise ValueError(type(other))
def __len__(self):
return self._len
@property
def counts(self):
return self._counts
@classmethod
def from_tokens(cls, tokens, return_index=False):
"""Builds a parsing tree for labeled bracketing score computation.
Args:
tokens: list of tokens
            return_index: used in recursion to provide token index
Returns:
tuple of size two: Tree, last_index
"""
# every tree should start with
# [ ENTITY_TYPE: ENTITY
if len(tokens) < 3 or tokens[0] != LBR:
raise ValueError(f'Tree starts with {tokens[:4]}')
entity_type = tokens[1]
# ignore invalid subtrees
if entity_type not in [IN, SL]:
return None
entity = entity_type + tokens[2] # e.g. IN:INTENT
subtrees = []
slot_value_tokens = []
i = 3
while i < len(tokens):
token = tokens[i]
if entity_type == IN and token not in [LBR, RBR]:
i += 1
continue
if token == LBR:
subtree, j = cls.from_tokens(tokens[i:], return_index=True)
subtrees.append(subtree)
i += j
continue
if token == RBR:
if slot_value_tokens:
subtrees = [Tree(' '.join(slot_value_tokens))]
slot_value_tokens = []
i += 1
break
if entity_type == SL:
slot_value_tokens.append(token)
i += 1
continue
tree = Tree(entity, subtrees)
if return_index:
return tree, i
return tree
test_case_1 = {
'input': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, RBR],
'output': Tree(IN + 'INTENT1', [Tree(SL + 'SLOT1', [Tree('slot value')])])
}
test_case_2 = {
'input': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, 'more', 'text', LBR, SL, 'SLOT2', 'slot2', 'value', RBR, RBR],
'output': {IN + 'INTENT1': [{SL + 'SLOT1': [Tree('slot value')]}, {SL + 'SLOT2': [Tree('slot2 value')]}]}
}
test_case_3 = {
'input': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, 'more', 'text', LBR, SL, 'SLOT1', 'slot2', 'value', RBR, RBR],
'output': {IN + 'INTENT1': [{SL + 'SLOT1': [Tree('slot value')]}, {SL + 'SLOT1': [Tree('slot2 value')]}]} # this is why you should use lists and not sets/dicts
}
test_case_4 = {
'input': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, 'more', 'text', LBR, SL, 'SLOT1'],
'output': {IN + 'INTENT1': [{SL + 'SLOT1': [Tree('slot value')]}, {SL + 'SLOT1': [Tree('slot2 value')]}]} # this is why you should use lists and not sets/dicts
}
tree = Tree.from_tokens(test_case_1['input'])
print(tree)
print(len(tree))
print(tree.counts)
assert tree == test_case_1['output']
tree = Tree.from_tokens(test_case_2['input'])
print(tree)
print(len(tree))
print(tree.counts)
assert tree == test_case_2['output']
tree = Tree.from_tokens(test_case_3['input'])
print(tree)
print(len(tree))
print(tree.counts)
assert tree == test_case_3['output']
tree = Tree.from_tokens(test_case_4['input'])
print(tree)
print(len(tree))
print(tree.counts)
data = pd.read_table('../data/top-dataset-semantic-parsing/eval.tsv', names=['text', 'tokens', 'schema'])
tokenized_schema = [TopSchemaTokenizer.tokenize(t) for t in data.schema]
i = 10
print(tokenized_schema[i])
print(Tree.from_tokens(tokenized_schema[i]))
complex_example = (
'[IN:GET_EVENT Are there any '
'[SL:CATEGORY_EVENT Concerts ] at '
'[SL:LOCATION [IN:GET_LOCATION [SL:POINT_ON_MAP Chattaqua Amphitheater ] ] ] '
'[SL:DATE_TIME this weekend ] with available tickets ]'
)
complex_example_tokens = TopSchemaTokenizer.tokenize(complex_example)
complex_tree = Tree.from_tokens(complex_example_tokens)
pprint(complex_tree._dict_repr)
###Output
{'IN:GET_EVENT': [{'SL:CATEGORY_EVENT': [{'Concerts': []}]},
{'SL:LOCATION': [{'IN:GET_LOCATION': [{'SL:POINT_ON_MAP': [{'Chattaqua Amphitheater': []}]}]}]},
{'SL:DATE_TIME': [{'this weekend': []}]}]}
###Markdown
Metrics
###Code
test_case_1 = {
'true': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, RBR],
'pred': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, RBR],
'f1': 1,
'precision': 1,
'recall': 1,
}
test_case_2 = {
'true': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, RBR],
'pred': [LBR, IN, 'INTENT2', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, RBR],
'f1': 0,
'precision': 0,
'recall': 0,
}
test_case_3 = {
'true': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, RBR],
'pred': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT2', 'slot', 'value', RBR, RBR],
'f1': 0.5,
'precision': 0.5,
'recall': 0.5,
}
test_case_4 = {
'true': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, RBR],
'pred': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, LBR, SL, 'SLOT2', 'value', RBR, RBR],
'f1': 2/3.,
'precision': 3/4.,
'recall': 1,
}
test_case_5 = {
'true': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'value', RBR, RBR],
'pred': [LBR, IN, 'INTENT1', 'text', LBR, SL, 'SLOT1', 'slot', 'wrong value', RBR, RBR],
'f1': 2/3.,
'precision': 2/3.,
'recall': 2/3.,
}
def f1(p, r):
return 2 * p * r / (p + r)
tree1 = Tree.from_tokens(test_case_1['true'])
tree2 = Tree.from_tokens(test_case_1['pred'])
print(tree1)
print(tree2)
def labeled_bracketing_recall(pred_tokens, true_tokens):
"""Compute recall labeling bracketng score"""
pred_tree = Tree.from_tokens(pred_tokens)
true_tree = Tree.from_tokens(true_tokens)
true_positive, false_negative = 0, 0
if pred_tree.entity != true_tree.entity:
false_negative += 1
else:
true_positive += 1
tp, fn = _labeled_bracketing_tp_fn(pred_tree.subtrees, true_tree.subtrees)
true_positive += tp
false_negative += fn
recall = true_positive / (true_positive + false_negative)
return recall
def labeled_bracketing_precision(pred_tokens, true_tokens):
"""Compute precision labeling bracketng score"""
pred_tree = Tree.from_tokens(pred_tokens)
true_tree = Tree.from_tokens(true_tokens)
true_positive, false_positive = 0, 0
if pred_tree.entity != true_tree.entity:
false_positive += 1
else:
true_positive += 1
tp, fp = _labeled_bracketing_tp_fp(pred_tree.subtrees, true_tree.subtrees)
true_positive += tp
false_positive += fp
    precision = true_positive / (true_positive + false_positive)
    return precision
def _labeled_bracketing_tp_fn(pred_subtrees: List[Tree], true_subtrees: List[Tree]):
"""Compute true positive and false negative labeling bracketng scores"""
true_positive, false_negative = 0, 0
for i, true_tree in enumerate(true_subtrees):
correct_subtree_indices = [i for i, pred_tree in enumerate(pred_subtrees) if pred_tree.entity == true_tree.entity]
if len(correct_subtree_indices) == 0:
false_negative += 1
else:
true_positive += 1
for pred_subtree_idx in correct_subtree_indices:
pred_tree = pred_subtrees[pred_subtree_idx]
tp, fn = _labeled_bracketing_tp_fn(pred_tree.subtrees, true_tree.subtrees)
true_positive += tp
false_negative += fn
return true_positive, false_negative
def _labeled_bracketing_tp_fp(pred_subtrees: List[Tree], true_subtrees: List[Tree]):
"""Compute true positive and false positive labeling bracketng scores"""
return _labeled_bracketing_tp_fn(true_subtrees, pred_subtrees)
test_case = test_case_2
for i, test_case in enumerate([test_case_1, test_case_2, test_case_3, test_case_4, test_case_5]):
recall = labeled_bracketing_recall(test_case['pred'], test_case['true'])
if recall == test_case['recall']:
print(f'test_case_{i+1} passed. Computed recall: {recall}')
else:
print(f'\t test_case_{i+1} FAILED. Computed recall: {recall}')
for i, test_case in enumerate([test_case_1, test_case_2, test_case_3, test_case_4, test_case_5]):
precision = labeled_bracketing_precision(test_case['pred'], test_case['true'])
if precision == test_case['precision']:
print(f'test_case_{i+1} passed. Computed precision: {precision}')
else:
print(f'\t test_case_{i+1} FAILED. Computed precision: {precision}')
###Output
test_case_1 passed. Computed precision: 1.0
test_case_2 passed. Computed precision: 0.0
test_case_3 passed. Computed precision: 0.5
test_case_4 passed. Computed precision: 0.75
test_case_5 passed. Computed precision: 0.6666666666666666
###Markdown
Compare with the official TOP evaluation tool
###Code
data_test = pd.read_table('../data/top-dataset-semantic-parsing/test.tsv', names=['text', 'tokens', 'schema'])
data_pred = pd.read_table('../lightning_out/jul8_20epochs_small/predictions.tsv', names=['schema'])
tokenized_schema_test = [TopSchemaTokenizer.tokenize(t) for t in data_test.schema]
tokenized_schema_pred = [TopSchemaTokenizer.tokenize(t) for t in data_pred.schema]
# TOP script gives the following metrics
{'instance_count': 9042,
'exact_match': 0.25481088254810885,
'labeled_bracketing_scores': {
'precision': 0.6032053706505295,
'recall': 0.3814007712312797,
'f1': 0.46731984250526504
},
'tree_labeled_bracketing_scores': {
'precision': 0.3943362329803328,
'recall': 0.24933488775296686,
'f1': 0.30550315905136893
},
'tree_validity': 0.9382879893828799}
precisions = []
recalls = []
exact_match = 0
for pred, true in zip(tokenized_schema_pred, tokenized_schema_test):
pred_tree = Tree.from_tokens(pred)
true_tree = Tree.from_tokens(true)
if pred_tree == true_tree:
exact_match += 1
precision = labeled_bracketing_precision(pred, true)
recall = labeled_bracketing_recall(pred, true)
precisions.append(precision)
recalls.append(recall)
print(true)
print(true_tree)
mean_precision = sum(precisions) / len(precisions)
mean_recall = sum(recalls) / len(recalls)
exact_match /= len(precisions)
print('Precision: ', mean_precision)
print('Recall : ', mean_recall)
print('F1 : ', f1(mean_precision, mean_recall))
print('exact_match: ', exact_match)
###Output
Precision: 0.640802521534121
Recall : 0.5737675240412504
F1 : 0.6054351126465458
exact_match: 0.2591240875912409
###Markdown
New approach
###Code
def label_bracketing_scores(pred_trees, true_trees):
true_positives = 0
n_predicted = 0
n_expected = 0
for pred_tree, true_tree in zip(pred_trees, true_trees):
n_predicted += len(pred_tree)
n_expected += len(true_tree)
if pred_tree.entity == true_tree.entity:
true_positives += 1 + _tree_true_positive(pred_tree.subtrees, true_tree.subtrees)
precision = true_positives / n_predicted
recall = true_positives / n_expected
f1 = 0
if precision + recall > 0:
f1 = 2 * precision * recall / (precision + recall)
return {'LBS_precision': precision, 'LBS_recall': recall, 'LBS_F1': f1}
def _tree_true_positive(pred_subtrees, true_subtrees):
true_positive = 0
for i, true_tree in enumerate(true_subtrees):
correct_subtree_indices = [i for i, pred_tree in enumerate(pred_subtrees) if pred_tree.entity == true_tree.entity]
if len(correct_subtree_indices) == 0:
continue
true_positive += 1
for pred_subtree_idx in correct_subtree_indices:
pred_tree = pred_subtrees[pred_subtree_idx]
tp = _tree_true_positive(pred_tree.subtrees, true_tree.subtrees)
true_positive += tp
return true_positive
for i, test_case in enumerate([test_case_1, test_case_2, test_case_3, test_case_4, test_case_5]):
tree_true = Tree.from_tokens(test_case['true'])
tree_pred = Tree.from_tokens(test_case['pred'])
metrics = label_bracketing_scores([tree_pred], [tree_true])
print(f'test_case_{i+1}:')
print(metrics)
print()
pred_trees = [Tree.from_tokens(t) for t in tokenized_schema_pred]
true_trees = [Tree.from_tokens(t) for t in tokenized_schema_test]
metrics = label_bracketing_scores(pred_trees, true_trees)
print(metrics)
###Output
{'LBS_precision': 0.6405084598194851, 'LBS_recall': 0.441435314825186, 'LBS_F1': 0.5226575728511716}
###Markdown
Still a bit higher than the official implementation {'precision': 0.603, 'recall': 0.381, 'f1': 0.467}. Per-class scores
###Code
def label_bracketing_scores_for_classes(pred_trees, true_trees, classes):
    """Compute label bracketing scores only considering slots, intents and values from classes."""
    true_positives = 0
    n_predicted = 0
    n_expected = 0
    for pred_tree, true_tree in zip(pred_trees, true_trees):
        # only count the entities that belong to the classes of interest
        n_predicted += sum(pred_tree.counts[c] for c in classes)
        n_expected += sum(true_tree.counts[c] for c in classes)
        if pred_tree.entity == true_tree.entity:
            if true_tree.entity in classes:
                true_positives += 1
            true_positives += _tree_true_positive_for_classes(pred_tree.subtrees, true_tree.subtrees, classes)
    precision = 0 if n_predicted == 0 else true_positives / n_predicted
    recall = 0 if n_expected == 0 else true_positives / n_expected
    f1 = 0
    if precision + recall > 0:
        f1 = 2 * precision * recall / (precision + recall)
    return {'cLBS_precision': precision, 'cLBS_recall': recall, 'cLBS_F1': f1}
def _tree_true_positive_for_classes(pred_subtrees, true_subtrees, classes):
true_positive = 0
for i, true_tree in enumerate(true_subtrees):
correct_subtree_indices = [i for i, pred_tree in enumerate(pred_subtrees) if pred_tree.entity == true_tree.entity]
if len(correct_subtree_indices) == 0:
continue
if true_tree.entity in classes:
true_positive += 1
for pred_subtree_idx in correct_subtree_indices:
pred_tree = pred_subtrees[pred_subtree_idx]
tp = _tree_true_positive_for_classes(pred_tree.subtrees, true_tree.subtrees, classes)
true_positive += tp
return true_positive
true_trees = [Tree.from_tokens(t) for t in tokenized_schema_test]
for i, test_case in enumerate([test_case_1, test_case_2, test_case_3, test_case_4, test_case_5]):
tree_true = Tree.from_tokens(test_case['true'])
tree_pred = Tree.from_tokens(test_case['pred'])
metrics = label_bracketing_scores([tree_pred], [tree_true])
print(f'test_case_{i+1}:')
print(metrics)
print()
###Output
_____no_output_____ |
notebooks/supervised_classification.ipynb | ###Markdown
Supervised Learning (Classification)In supervised learning, the task is to infer hidden structure fromlabeled data, comprised of training examples $\{(x_n, y_n)\}$.Classification means the output $y$ takes discrete values.We demonstrate with an example in Edward. A webpage version is available athttp://edwardlib.org/tutorials/supervised-classification.
###Code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import Bernoulli, MultivariateNormalTriL, Normal
from edward.util import rbf
###Output
_____no_output_____
###Markdown
DataUse the[crabs data set](https://stat.ethz.ch/R-manual/R-devel/library/MASS/html/crabs.html),which consists of morphological measurements on a crab species. Weare interested in predicting whether a given crab has the color formblue or orange.
###Code
ed.set_seed(42)
data = np.loadtxt('data/crabs_train.txt', delimiter=',')
data[data[:, 0] == -1, 0] = 0 # replace -1 label with 0 label
N = data.shape[0] # number of data points
D = data.shape[1] - 1 # number of features
X_train = data[:, 1:]
y_train = data[:, 0]
print("Number of data points: {}".format(N))
print("Number of features: {}".format(D))
###Output
Number of data points: 100
Number of features: 5
###Markdown
ModelA Gaussian process is a powerful object for modeling nonlinearrelationships between pairs of random variables. It defines a distribution over(possibly nonlinear) functions, which can be applied for representingour uncertainty around the true functional relationship.Here we define a Gaussian process model for classification(Rasumussen & Williams, 2006).Formally, a distribution over functions $f:\mathbb{R}^D\to\mathbb{R}$ can be specifiedby a Gaussian process$$\begin{align*} p(f) &= \mathcal{GP}(f\mid \mathbf{0}, k(\mathbf{x}, \mathbf{x}^\prime)),\end{align*}$$whose mean function is the zero function, and whose covariancefunction is some kernel which describes dependence betweenany set of inputs to the function.Given a set of input-output pairs$\{\mathbf{x}_n\in\mathbb{R}^D,y_n\in\mathbb{R}\}$,the likelihood can be written as a multivariate normal\begin{align*} p(\mathbf{y}) &= \text{Normal}(\mathbf{y} \mid \mathbf{0}, \mathbf{K})\end{align*}where $\mathbf{K}$ is a covariance matrix given by evaluating$k(\mathbf{x}_n, \mathbf{x}_m)$ for each pair of inputs in the dataset.The above applies directly for regression where $\mathbb{y}$ is areal-valued response, but not for (binary) classification, where $\mathbb{y}$is a label in $\{0,1\}$. To deal with classification, we interpret theresponse as latent variables which is squashed into $[0,1]$. We thendraw from a Bernoulli to determine the label, with probability givenby the squashed value.Define the likelihood of an observation $(\mathbf{x}_n, y_n)$ as\begin{align*} p(y_n \mid \mathbf{z}, x_n) &= \text{Bernoulli}(y_n \mid \text{logit}^{-1}(\mathbf{x}_n^\top \mathbf{z})).\end{align*}Define the prior to be a multivariate normal\begin{align*} p(\mathbf{z}) &= \text{Normal}(\mathbf{z} \mid \mathbf{0}, \mathbf{K}),\end{align*}with covariance matrix given as previously stated.Let's build the model in Edward. We use a radial basis function (RBF)kernel, also known as the squared exponential or exponentiatedquadratic. It returns the kernel matrix evaluated over all pairs ofdata points; we then Cholesky decompose the matrix to parameterize themultivariate normal distribution.
###Code
X = tf.placeholder(tf.float32, [N, D])
f = MultivariateNormalTriL(loc=tf.zeros(N), scale_tril=tf.cholesky(rbf(X)))
y = Bernoulli(logits=f)
###Output
_____no_output_____
###Markdown
Here, we define a placeholder `X`. During inference, we pass inthe value for this placeholder according to data. InferencePerform variational inference.Define the variational model to be a fully factorized normal.
###Code
qf = Normal(loc=tf.Variable(tf.random_normal([N])),
scale=tf.nn.softplus(tf.Variable(tf.random_normal([N]))))
###Output
_____no_output_____
###Markdown
Run variational inference for `5000` iterations.
###Code
inference = ed.KLqp({f: qf}, data={X: X_train, y: y_train})
inference.run(n_iter=5000)
###Output
5000/5000 [100%] ██████████████████████████████ Elapsed: 9s | Loss: 78.369
###Markdown
Supervised Learning (Classification)In supervised learning, the task is to infer hidden structure fromlabeled data, comprised of training examples $\{(x_n, y_n)\}$.Classification means the output $y$ takes discrete values.We demonstrate with an example in Edward. A webpage version is available athttp://edwardlib.org/tutorials/supervised-classification.
###Code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import Bernoulli, MultivariateNormalTriL, Normal
from edward.util import rbf
from observations import crabs
###Output
_____no_output_____
###Markdown
DataUse the[crabs data set](https://stat.ethz.ch/R-manual/R-devel/library/MASS/html/crabs.html),which consists of morphological measurements on a crab species. Weare interested in predicting whether a given crab has the color formblue (encoded as 0) or orange (encoded as 1). We use all the numeric featuresin the dataset.
###Code
ed.set_seed(42)
data, metadata = crabs("~/data")
X_train = data[:100, 3:]
y_train = data[:100, 1]
N = X_train.shape[0] # number of data points
D = X_train.shape[1] # number of features
print("Number of data points: {}".format(N))
print("Number of features: {}".format(D))
###Output
Number of data points: 100
Number of features: 5
###Markdown
ModelA Gaussian process is a powerful object for modeling nonlinearrelationships between pairs of random variables. It defines a distribution over(possibly nonlinear) functions, which can be applied for representingour uncertainty around the true functional relationship.Here we define a Gaussian process model for classification(Rasumussen & Williams, 2006).Formally, a distribution over functions $f:\mathbb{R}^D\to\mathbb{R}$ can be specifiedby a Gaussian process$$\begin{align*} p(f) &= \mathcal{GP}(f\mid \mathbf{0}, k(\mathbf{x}, \mathbf{x}^\prime)),\end{align*}$$whose mean function is the zero function, and whose covariancefunction is some kernel which describes dependence betweenany set of inputs to the function.Given a set of input-output pairs$\{\mathbf{x}_n\in\mathbb{R}^D,y_n\in\mathbb{R}\}$,the likelihood can be written as a multivariate normal\begin{align*} p(\mathbf{y}) &= \text{Normal}(\mathbf{y} \mid \mathbf{0}, \mathbf{K})\end{align*}where $\mathbf{K}$ is a covariance matrix given by evaluating$k(\mathbf{x}_n, \mathbf{x}_m)$ for each pair of inputs in the dataset.The above applies directly for regression where $\mathbb{y}$ is areal-valued response, but not for (binary) classification, where $\mathbb{y}$is a label in $\{0,1\}$. To deal with classification, we interpret theresponse as latent variables which is squashed into $[0,1]$. We thendraw from a Bernoulli to determine the label, with probability givenby the squashed value.Define the likelihood of an observation $(\mathbf{x}_n, y_n)$ as\begin{align*} p(y_n \mid \mathbf{z}, x_n) &= \text{Bernoulli}(y_n \mid \text{logit}^{-1}(\mathbf{x}_n^\top \mathbf{z})).\end{align*}Define the prior to be a multivariate normal\begin{align*} p(\mathbf{z}) &= \text{Normal}(\mathbf{z} \mid \mathbf{0}, \mathbf{K}),\end{align*}with covariance matrix given as previously stated.Let's build the model in Edward. We use a radial basis function (RBF)kernel, also known as the squared exponential or exponentiatedquadratic. It returns the kernel matrix evaluated over all pairs ofdata points; we then Cholesky decompose the matrix to parameterize themultivariate normal distribution.
###Code
X = tf.placeholder(tf.float32, [N, D])
f = MultivariateNormalTriL(loc=tf.zeros(N), scale_tril=tf.cholesky(rbf(X)))
y = Bernoulli(logits=f)
###Output
_____no_output_____
###Markdown
Here, we define a placeholder `X`. During inference, we pass inthe value for this placeholder according to data. InferencePerform variational inference.Define the variational model to be a fully factorized normal.
###Code
qf = Normal(loc=tf.Variable(tf.random_normal([N])),
scale=tf.nn.softplus(tf.Variable(tf.random_normal([N]))))
###Output
_____no_output_____
###Markdown
Run variational inference for `5000` iterations.
###Code
inference = ed.KLqp({f: qf}, data={X: X_train, y: y_train})
inference.run(n_iter=5000)
###Output
5000/5000 [100%] ██████████████████████████████ Elapsed: 9s | Loss: 78.369
###Markdown
Supervised Learning (Classification)In supervised learning, the task is to infer hidden structure fromlabeled data, comprised of training examples $\{(x_n, y_n)\}$.Classification means the output $y$ takes discrete values.We demonstrate with an example in Edward. A webpage version is available athttp://edwardlib.org/tutorials/supervised-classification.
###Code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import Bernoulli, MultivariateNormalTriL, Normal
from edward.util import rbf
from observations import crabs
###Output
_____no_output_____
###Markdown
DataUse the[crabs data set](https://stat.ethz.ch/R-manual/R-devel/library/MASS/html/crabs.html),which consists of morphological measurements on a crab species. Weare interested in predicting whether a given crab has the color formblue (encoded as 0) or orange (encoded as 1). We use all the numeric featuresin the dataset.
###Code
ed.set_seed(42)
data, metadata = crabs("~/data")
X_train = data[:100, 3:]
y_train = data[:100, 1]
N = X_train.shape[0] # number of data points
D = X_train.shape[1] # number of features
print("Number of data points: {}".format(N))
print("Number of features: {}".format(D))
###Output
Number of data points: 100
Number of features: 5
###Markdown
ModelA Gaussian process is a powerful object for modeling nonlinearrelationships between pairs of random variables. It defines a distribution over(possibly nonlinear) functions, which can be applied for representingour uncertainty around the true functional relationship.Here we define a Gaussian process model for classification(Rasumussen & Williams, 2006).Formally, a distribution over functions $f:\mathbb{R}^D\to\mathbb{R}$ can be specifiedby a Gaussian process$$\begin{align*} p(f) &= \mathcal{GP}(f\mid \mathbf{0}, k(\mathbf{x}, \mathbf{x}^\prime)),\end{align*}$$whose mean function is the zero function, and whose covariancefunction is some kernel which describes dependence betweenany set of inputs to the function.Given a set of input-output pairs$\{\mathbf{x}_n\in\mathbb{R}^D,y_n\in\mathbb{R}\}$,the likelihood can be written as a multivariate normal\begin{align*} p(\mathbf{y}) &= \text{Normal}(\mathbf{y} \mid \mathbf{0}, \mathbf{K})\end{align*}where $\mathbf{K}$ is a covariance matrix given by evaluating$k(\mathbf{x}_n, \mathbf{x}_m)$ for each pair of inputs in the dataset.The above applies directly for regression where $\mathbb{y}$ is areal-valued response, but not for (binary) classification, where $\mathbb{y}$is a label in $\{0,1\}$. To deal with classification, we interpret theresponse as latent variables which is squashed into $[0,1]$. We thendraw from a Bernoulli to determine the label, with probability givenby the squashed value.Define the likelihood of an observation $(\mathbf{x}_n, y_n)$ as\begin{align*} p(y_n \mid \mathbf{z}, x_n) &= \text{Bernoulli}(y_n \mid \text{logit}^{-1}(\mathbf{x}_n^\top \mathbf{z})).\end{align*}Define the prior to be a multivariate normal\begin{align*} p(\mathbf{z}) &= \text{Normal}(\mathbf{z} \mid \mathbf{0}, \mathbf{K}),\end{align*}with covariance matrix given as previously stated.Let's build the model in Edward. We use a radial basis function (RBF)kernel, also known as the squared exponential or exponentiatedquadratic. It returns the kernel matrix evaluated over all pairs ofdata points; we then Cholesky decompose the matrix to parameterize themultivariate normal distribution.
###Code
X = tf.placeholder(tf.float32, [N, D])
f = MultivariateNormalTriL(loc=tf.zeros(N), scale_tril=tf.cholesky(rbf(X)))
y = Bernoulli(logits=f)
###Output
_____no_output_____
###Markdown
Here, we define a placeholder `X`. During inference, we pass inthe value for this placeholder according to data. InferencePerform variational inference.Define the variational model to be a fully factorized normal.
###Code
qf = Normal(loc=tf.get_variable("qf/loc", [N]),
scale=tf.nn.softplus(tf.get_variable("qf/scale", [N])))
###Output
_____no_output_____
###Markdown
Run variational inference for `5000` iterations.
###Code
inference = ed.KLqp({f: qf}, data={X: X_train, y: y_train})
inference.run(n_iter=5000)
###Output
5000/5000 [100%] ██████████████████████████████ Elapsed: 9s | Loss: 78.369
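###Markdown
As a quick check of the fit, one could score training accuracy by swapping the prior `f` for the learned `qf`. This is a minimal sketch that assumes Edward's `ed.copy` and `ed.evaluate` utilities.
###Code
y_post = ed.copy(y, {f: qf})
print(ed.evaluate('binary_accuracy', data={X: X_train, y_post: y_train}))
###Output
_____no_output_____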
|
d2l/chapter_computer-vision/kaggle-dog.ipynb | ###Markdown
Kaggle in Practice: Dog Breed Identification (ImageNet Dogs)In this section we will tackle the dog breed identification problem on Kaggle. The (**competition URL is https://www.kaggle.com/c/dog-breed-identification**). :numref:`fig_kaggle_dog` shows the information on the competition webpage. You need a Kaggle account to submit results. In this competition, we will recognize 120 different breeds of dogs. This dataset is actually a subset of the famous ImageNet dataset, but its images differ from those of the CIFAR-10 dataset in :numref:`sec_kaggle_cifar10`: the images in the ImageNet dataset are taller and wider, and their sizes vary.:width:`400px`:label:`fig_kaggle_dog`
###Code
import os
import torch
import torchvision
from torch import nn
from d2l import torch as d2l
###Output
_____no_output_____
###Markdown
Obtaining and Organizing the DatasetThe competition dataset is split into a training set and a test set, which contain 10222 and 10357 JPEG images with three RGB (color) channels, respectively. The training set contains 120 dog breeds, such as Labrador, Poodle, Dachshund, Samoyed, Husky, Chihuahua, and Yorkshire Terrier. Downloading the DatasetAfter logging in to Kaggle, you can click the "Data" tab on the competition webpage shown in :numref:`fig_kaggle_dog` and download the dataset by clicking the "Download All" button. After unzipping the downloaded file in `../data`, you will find the entire dataset in the following paths: * ../data/dog-breed-identification/labels.csv* ../data/dog-breed-identification/sample_submission.csv* ../data/dog-breed-identification/train* ../data/dog-breed-identification/test You may have noticed that this structure is similar to that of the CIFAR-10 competition in :numref:`sec_kaggle_cifar10`, where the folders `train/` and `test/` contain the training and test dog images respectively, and `labels.csv` contains the labels of the training images. Likewise, to make it easier to get started, [**we provide a small-scale sample of the full dataset**]: `train_valid_test_tiny.zip`. If you are going to use the full dataset for the Kaggle competition, you need to change the `demo` variable below to `False`.
###Code
#@save
d2l.DATA_HUB['dog_tiny'] = (d2l.DATA_URL + 'kaggle_dog_tiny.zip',
'0cb91d09b814ecdc07b50f31f8dcad3e81d6a86d')
# If you use the full dataset for the Kaggle competition, change the variable below to False
demo = True
if demo:
data_dir = d2l.download_extract('dog_tiny')
else:
data_dir = os.path.join('..', 'data', 'dog-breed-identification')
###Output
Downloading ../data/kaggle_dog_tiny.zip from http://d2l-data.s3-accelerate.amazonaws.com/kaggle_dog_tiny.zip...
###Markdown
[**Organizing the Dataset**]We can organize the dataset as we did in :numref:`sec_kaggle_cifar10`, namely split a validation set out of the original training set and move images into subfolders grouped by label. The `reorg_dog_data` function below reads the training data labels, splits out the validation set, and organizes the training set.
###Code
def reorg_dog_data(data_dir, valid_ratio):
labels = d2l.read_csv_labels(os.path.join(data_dir, 'labels.csv'))
d2l.reorg_train_valid(data_dir, labels, valid_ratio)
d2l.reorg_test(data_dir)
batch_size = 32 if demo else 128
valid_ratio = 0.1
reorg_dog_data(data_dir, valid_ratio)
###Output
_____no_output_____
###Markdown
[**Image Augmentation**]Recall that this dog breed dataset is a subset of the ImageNet dataset, whose images are larger than those of the CIFAR-10 dataset in :numref:`sec_kaggle_cifar10`. Below we look at how to apply image augmentation to relatively large images.
###Code
transform_train = torchvision.transforms.Compose([
    # Randomly crop the image to cover between 0.08 and 1 of the original area,
    # with aspect ratio between 3/4 and 4/3; then scale it to create a new 224 x 224 image
torchvision.transforms.RandomResizedCrop(224, scale=(0.08, 1.0),
ratio=(3.0/4.0, 4.0/3.0)),
torchvision.transforms.RandomHorizontalFlip(),
    # Randomly change brightness, contrast, and saturation
torchvision.transforms.ColorJitter(brightness=0.4,
contrast=0.4,
saturation=0.4),
    # Convert the image to a tensor
torchvision.transforms.ToTensor(),
    # Standardize each channel of the image
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
###Output
_____no_output_____
###Markdown
At test time, we only use deterministic image preprocessing operations.
###Code
transform_test = torchvision.transforms.Compose([
torchvision.transforms.Resize(256),
    # Crop a 224 x 224 image from the center of the image
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
###Output
_____no_output_____
###Markdown
[**Reading the Dataset**]As in :numref:`sec_kaggle_cifar10`, we can read the organized dataset consisting of raw image files.
###Code
train_ds, train_valid_ds = [torchvision.datasets.ImageFolder(
os.path.join(data_dir, 'train_valid_test', folder),
transform=transform_train) for folder in ['train', 'train_valid']]
valid_ds, test_ds = [torchvision.datasets.ImageFolder(
os.path.join(data_dir, 'train_valid_test', folder),
transform=transform_test) for folder in ['valid', 'test']]
###Output
_____no_output_____
###Markdown
Below we create data loader instances in the same way as in :numref:`sec_kaggle_cifar10`.
###Code
train_iter, train_valid_iter = [torch.utils.data.DataLoader(
dataset, batch_size, shuffle=True, drop_last=True)
for dataset in (train_ds, train_valid_ds)]
valid_iter = torch.utils.data.DataLoader(valid_ds, batch_size, shuffle=False,
drop_last=True)
test_iter = torch.utils.data.DataLoader(test_ds, batch_size, shuffle=False,
drop_last=False)
###Output
_____no_output_____
###Markdown
[**Fine-Tuning a Pretrained Model**]Again, the dataset for this competition is a subset of the ImageNet dataset. Therefore, we can use the approach discussed in :numref:`sec_fine_tuning` to choose a model pretrained on the full ImageNet dataset and use it to extract image features to be fed into a custom small-scale output network. High-level APIs of deep learning frameworks provide a wide range of models pretrained on the ImageNet dataset. Here we choose a pretrained ResNet-34 model and simply reuse the input of this model's output layer (i.e., the extracted features). We can then replace the original output layer with a small custom output network that can be trained, such as two stacked fully connected layers. Unlike the experiment in :numref:`sec_fine_tuning`, the following does not retrain the pretrained model used for feature extraction, which saves time and memory for gradient descent. Recall that we standardized images using the means and standard deviations of the three RGB channels for the full ImageNet dataset. In fact, this also matches the standardization operation of models pretrained on ImageNet.
###Code
def get_net(devices):
finetune_net = nn.Sequential()
finetune_net.features = torchvision.models.resnet34(pretrained=True)
    # Define a new output network with 120 output classes
finetune_net.output_new = nn.Sequential(nn.Linear(1000, 256),
nn.ReLU(),
nn.Linear(256, 120))
    # Move the model parameters to the CPU or GPU used for computation
finetune_net = finetune_net.to(devices[0])
    # Freeze the parameters
for param in finetune_net.features.parameters():
param.requires_grad = False
return finetune_net
###Output
_____no_output_____
###Markdown
Before [**calculating the loss**], we first obtain the input of the pretrained model's output layer, i.e., the extracted features. Then we use these features as the input to our small custom output network to compute the loss.
###Code
loss = nn.CrossEntropyLoss(reduction='none')
def evaluate_loss(data_iter, net, devices):
l_sum, n = 0.0, 0
for features, labels in data_iter:
features, labels = features.to(devices[0]), labels.to(devices[0])
outputs = net(features)
l = loss(outputs, labels)
l_sum += l.sum()
n += labels.numel()
return l_sum / n
###Output
_____no_output_____
###Markdown
Defining the [**Training Function**]We will select the model and tune hyperparameters according to the model's performance on the validation set. The model training function `train` only iterates over the parameters of the small custom output network.
###Code
def train(net, train_iter, valid_iter, num_epochs, lr, wd, devices, lr_period,
lr_decay):
    # Only train the small custom output network
net = nn.DataParallel(net, device_ids=devices).to(devices[0])
trainer = torch.optim.SGD((param for param in net.parameters()
if param.requires_grad), lr=lr,
momentum=0.9, weight_decay=wd)
scheduler = torch.optim.lr_scheduler.StepLR(trainer, lr_period, lr_decay)
num_batches, timer = len(train_iter), d2l.Timer()
legend = ['train loss']
if valid_iter is not None:
legend.append('valid loss')
animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs],
legend=legend)
for epoch in range(num_epochs):
metric = d2l.Accumulator(2)
for i, (features, labels) in enumerate(train_iter):
timer.start()
features, labels = features.to(devices[0]), labels.to(devices[0])
trainer.zero_grad()
output = net(features)
l = loss(output, labels).sum()
l.backward()
trainer.step()
metric.add(l, labels.shape[0])
timer.stop()
if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
animator.add(epoch + (i + 1) / num_batches,
(metric[0] / metric[1], None))
measures = f'train loss {metric[0] / metric[1]:.3f}'
if valid_iter is not None:
valid_loss = evaluate_loss(valid_iter, net, devices)
animator.add(epoch + 1, (None, valid_loss.detach()))
scheduler.step()
if valid_iter is not None:
measures += f', valid loss {valid_loss:.3f}'
print(measures + f'\n{metric[1] * num_epochs / timer.sum():.1f}'
f' examples/sec on {str(devices)}')
###Output
_____no_output_____
###Markdown
[**Training and Validating the Model**]Now we can train and validate the model. The following hyperparameters are all tunable. For example, the number of epochs can be increased. Since `lr_period` and `lr_decay` are set to 2 and 0.9 respectively, the learning rate of the optimization algorithm will be multiplied by 0.9 after every 2 epochs.
###Code
devices, num_epochs, lr, wd = d2l.try_all_gpus(), 10, 1e-4, 1e-4
lr_period, lr_decay, net = 2, 0.9, get_net(devices)
train(net, train_iter, valid_iter, num_epochs, lr, wd, devices, lr_period,
lr_decay)
###Output
train loss 1.250, valid loss 1.317
561.1 examples/sec on [device(type='cuda', index=0), device(type='cuda', index=1)]
###Markdown
[**Classifying the Test Set**] and Submitting Results on KaggleSimilar to the final step in :numref:`sec_kaggle_cifar10`, in the end all labeled data (including the validation set) is used to train the model and classify the test set. We will use the trained custom output network for classification.
###Code
net = get_net(devices)
train(net, train_valid_iter, None, num_epochs, lr, wd, devices, lr_period,
lr_decay)
preds = []
for data, label in test_iter:
output = torch.nn.functional.softmax(net(data.to(devices[0])), dim=0)
preds.extend(output.cpu().detach().numpy())
ids = sorted(os.listdir(
os.path.join(data_dir, 'train_valid_test', 'test', 'unknown')))
with open('submission.csv', 'w') as f:
f.write('id,' + ','.join(train_valid_ds.classes) + '\n')
for i, output in zip(ids, preds):
f.write(i.split('.')[0] + ',' + ','.join(
[str(num) for num in output]) + '\n')
###Output
train loss 1.188
918.4 examples/sec on [device(type='cuda', index=0), device(type='cuda', index=1)]
|
regression/linear-regression-and-regularization.ipynb | ###Markdown
Linear Regression and RegularizationLoosely following Chapter 10 of Python Machine Learning 3rd Edition, Raschka>Disclaimer: Regression is a huge field. It is impossible to cover it all in one class (or even two).[Image Source](https://scikit-learn.org/stable/auto_examples/linear_model/plot_ols.html)>For a sense of the depth and potential complexity of regression models, see [Mostly Harmless Econometrics](https://www.mostlyharmlesseconometrics.com) Ordinary Least Squares (OLS) Linear Regression>[All models are wrong, but some are useful.](https://en.wikipedia.org/wiki/All_models_are_wrong)George BoxLinear regression is one of the most popular, widely used, and foundational concepts in statistics, econometrics, and machine learning. It boils down to having a numeric target value ($y$) that we want to predict or whose variance drivers we want to understand. We use data ($X$) we think impacts our target to understand the underlying **linear** relationship. Big Assumption: - The regression function $E(Y|X)$ is **linear** in the inputs $X_{1},\dots,X_{p}$- Transformations can be applied to satisfy that requirement.Typically you will see it expressed as $y = \beta X$, or formally: $$f(X)=\beta_{0}+\sum{X_{j}\beta_{j}}$$- The linear model assumes the function is linear or reasonably linear.- The true $\beta_{j}$'s are unknown parameters (coefficients/weights). - The features must be able to be represented within a non-null numeric matrix. Goal - Minimize the mean-squared error. Why?Residuals will be positive and negative, so we need to penalize negative and positive residuals equally.Sum of errors: $\epsilon_{1} + \epsilon_{2} + \dots + \epsilon_{n}$ RSS: $\epsilon_{1}^2 + \epsilon_{2}^2 + \dots + \epsilon_{n}^2$ MSE: $\frac{1}{N}\sum{\epsilon_{i}^2}$Most statistical programs solve for the $\beta$ values using plain old linear algebra, in what is called the closed form: $\hat\beta = (X^TX)^{-1}X^{T}y$ Closed Form Derivation$$RSS(\beta)=\sum{(y_{i}-f(x_{i}))^2}$$$$=\sum(y_{i}-\beta_{0}-\sum{x_{ij}\beta_{j}})^2$$$(x_i,y_i)$ should be independent from other $(x_i,y_i)$'s - Time series models violate this without special treatmentWe are seeking an $f(X)$ that minimizes the sum of squared residuals from $Y$:[Image source: Elements of Statistical Learning, Figure 3.1](https://www.statlearning.com) $$RSS(\beta)=(y-X\beta)^T(y-X\beta)$$ Differentiating:$$\frac{dRSS}{d\beta}=-2X^T(y-X\beta)$$ And again:$$\frac{d^2RSS}{d\beta d \beta^T}=2X^TX$$ Setting the first derivative to zero:$$ X^T(y-X\beta)=0$$ And we get:$$\hat{\beta}=(X^TX)^{-1}X^Ty$$ And our predicted values are:$$\hat{y}=X\hat{\beta}$$ And it relates to $y$ by:$$y = \hat{y} + \epsilon =X\hat{\beta}+\epsilon $$ > A unique solution means we can derive it with pure linear algebra, i.e., no need to use convergence algorithms. Slope and Intercept Remember in its simple form: $$y=mx+b$$[Image source](https://en.wikipedia.org/wiki/Linear_regression#/media/File:Linear_least_squares_example2.png)> Since we need an estimate for the intercept, we'll need to manually add the constant. Not all implementations do this automatically, e.g., statsmodels.- Intercept: where the line goes through the y-axis. - Slope: for a 1-unit change in x, y will change by $\beta$ Example - Credit Data[Data from An Introduction to Statistical Learning](https://www.statlearning.com)
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
credit = pd.read_csv('data/islr-credit.csv')
credit = credit.iloc[:, 1:]
credit.head()
###Output
_____no_output_____
###Markdown
Find Function so $Rating=f(Limit)$We'll need to convert the pandas objects to numpy arrays.
###Code
credit.plot.scatter(x='Limit', y='Rating')
plt.show()
###Output
_____no_output_____
###Markdown
Regression using closed-form:
###Code
X = np.array(credit['Limit']).reshape(-1,1)
y = np.array(credit['Rating']).reshape(-1,1)
X.shape, y.shape
###Output
_____no_output_____
###Markdown
And since we are going to implement a version, we'll need to manually add the constant for the intercept. Why?$y=\beta_{0}(1)+\beta_{i}x_{i}$
###Code
from numpy.linalg import inv
'''
- Manually adding the constant
- Sometimes this is done via the API (check the docs)
'''
const = np.ones(shape=y.shape)
mat = np.concatenate( (const, X), axis=1)
# first 5 examples
mat[:5,:]
###Output
_____no_output_____
###Markdown
BetasWe have a feature matrix that has 2 columns, so we'll get estimates for the constant ($\beta_{0}$) and the credit limit ($\beta_{1}$). Calculate the coefficient estimatesRecall $\hat\beta = (X^TX)^{-1}X^{T}y$
###Code
betas = inv(mat.transpose().dot(mat)).dot(mat.transpose()).dot(y)
b0, b1 = betas
print(f'Beta 0: {np.round(b0[0],3)}')
print(f'Beta 1: {np.round(b1[0],3)}')
###Output
Beta 0: 38.492
Beta 1: 0.067
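###Markdown
The same estimates can be obtained with a least-squares solver, which is numerically more stable than forming the inverse explicitly. A quick sketch using the `mat` and `y` arrays from above:
###Code
# solve the least-squares problem directly instead of inverting X^T X
betas_lstsq, *_ = np.linalg.lstsq(mat, y, rcond=None)
betas_lstsq.reshape(1, -1)[0]
###Output
_____no_output_____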
###Markdown
Predict $\hat{y}$ and plot the fit$$\begin{equation}\hat{y}=\hat{\beta}X=\hat{\beta_{0}}\begin{pmatrix}1 \\\dots \\1\end{pmatrix}+\hat{\beta_{1}}\begin{pmatrix}3606 \\\dots \\5524\end{pmatrix}\end{equation}$$
###Code
yhat = mat.dot(betas)
plt.plot(X, y, 'bo')
plt.plot(X, yhat, 'r')
plt.xlabel('Credit Limit')
plt.ylabel('Credit Rating')
plt.title('$Rating=f(Limit)$', loc='left')
plt.show()
###Output
_____no_output_____
###Markdown
Quantifying fit with metricsCommon metrics: $R^2$ [Wikipedia](https://en.wikipedia.org/wiki/Coefficient_of_determination)$$1 - \frac{\sum (\hat {y}-y)^{2}}{\sum ({\bar y}-y)^{2}}$$ Mean squared error (MSE) [Wikipedia](https://en.wikipedia.org/wiki/Mean_squared_error)$$\frac{\sum (\hat {y}-y)^{2}}{n}$$ Mean Absolute Error (MAE) [Wikipedia](https://en.wikipedia.org/wiki/Mean_absolute_error)$$\frac{1}{n}\sum |\hat {y}-y|$$ Root mean squared error (RMSE) [Wikipedia](https://en.wikipedia.org/wiki/Root_mean_square)$$\sqrt{\frac{\sum (\hat {y}-y)^{2}}{n}}$$ Notes:- $r^2$ expresses the percent of variance explained (bound between 0% and 100%). - RMSE expresses the error in the target's units. - MSE/RMSE are heavily influenced by outliers. - Usually RMSE is chosen for optimization since it penalizes large errors and is expressed in the target's units.- If there are a lot of outliers, MAE may be a better choice. Further reading:[$r^2$ vs. RMSE](https://www.statology.org/rmse-vs-r-squared/) InterpretabilityA nice property of linear regression is the relatively simple interpretations that can be drawn. The implementation in statsmodels includes all the output needed to evaluate and interpret model output.[statsmodels OLS regression](https://www.statsmodels.org/stable/regression.html)
###Code
# betas we calculated:
betas.reshape(1,-1)[0]
###Output
_____no_output_____
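###Markdown
A quick numeric check of the metrics defined above for the simple model, before comparing against statsmodels. This is a minimal sketch; it assumes the `y` and `yhat` arrays from the closed-form fit are still in scope.
###Code
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
# compute the four fit metrics for the simple credit-limit model
print('R^2 :', round(r2_score(y, yhat), 3))
print('MSE :', round(mean_squared_error(y, yhat), 1))
print('MAE :', round(mean_absolute_error(y, yhat), 1))
print('RMSE:', round(float(np.sqrt(mean_squared_error(y, yhat))), 1))
###Output
_____no_output_____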
###Markdown
statsmodels output:
###Code
import statsmodels.api as smf
simpleModel = smf.OLS(y, mat).fit()
print(simpleModel.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: y R-squared: 0.994
Model: OLS Adj. R-squared: 0.994
Method: Least Squares F-statistic: 6.348e+04
Date: Fri, 31 Dec 2021 Prob (F-statistic): 0.00
Time: 09:51:08 Log-Likelihood: -1568.1
No. Observations: 400 AIC: 3140.
Df Residuals: 398 BIC: 3148.
Df Model: 1
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
const 38.4918 1.397 27.555 0.000 35.746 41.238
x1 0.0668 0.000 251.949 0.000 0.066 0.067
==============================================================================
Omnibus: 7.074 Durbin-Watson: 2.077
Prob(Omnibus): 0.029 Jarque-Bera (JB): 5.177
Skew: 0.155 Prob(JB): 0.0751
Kurtosis: 2.537 Cond. No. 1.20e+04
==============================================================================
Notes:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The condition number is large, 1.2e+04. This might indicate that there are
strong multicollinearity or other numerical problems.
###Markdown
Results Discussion:- Overall $r^2$ of 99% - very strong linear relationship as we saw in the initial plot we created. - You can ignore the t-statistic on the constant - it isn't meaningful. - The t-statistic for the credit limit (x1) is very high (252), with a [p-value](https://en.wikipedia.org/wiki/P-value) of essentially 0 - recall a p-value is the probability of seeing an effect at least this large if there were truly no relationship. - We can conclude there is a very strong linear relationship. Further reading:[Rethinking p-values](https://www.vox.com/2016/3/15/11225162/p-value-simple-definition-hacking) [Econometrics](https://en.wikipedia.org/wiki/Econometrics) A Note on p-Hacking>Taken from "Having Too Little or Too Much Time Is Linked to Lower Subjective Well-Being", Sharif et al. scikit-learn's LinearRegression[scikit-learn's LinearRegression](https://scikit-learn.org/stable/modules/linear_model.html#ordinary-least-squares)This will probably be the implementation you'd want to use for comparing various regression models for prediction problems since the API will be similar.scikit-learn is geared more towards prediction and won't have some of the friendly output that statsmodels has - if you are going for an interpretation, you may be better off using statsmodels.
###Code
from sklearn.linear_model import LinearRegression
# we added the intercept manually, so turn that option off
skOLS = LinearRegression(fit_intercept=False).fit(mat,y)
skOLS.coef_
###Output
_____no_output_____
###Markdown
And they all provide the same coefficient/model estimates - assuming the same set-up.
###Code
print(betas.reshape(1,-1)[0])
print(simpleModel.params)
print(skOLS.coef_[0])
###Output
[38.49178871 0.06682326]
[38.49178871 0.06682326]
[38.49178871 0.06682326]
###Markdown
Weakness - Outlier SensitivityThere aren't any outliers in the data, I'm going to introduce one.
###Code
matCopy = mat.copy()
matCopy[4, 1] = matCopy[4, 1] + 150000
outModel = smf.OLS(y, matCopy).fit()
print(outModel.summary())
yhat_out = outModel.predict(matCopy)
plt.plot(matCopy[:,1], y, 'bo')
plt.plot(matCopy[:,1], yhat_out, 'r')
plt.xlabel('Credit Limit')
plt.ylabel('Actual / Expected Credit Rating')
plt.show()
###Output
_____no_output_____
###Markdown
Why the sensitivity?Recall we are minimizing the sum of squared residuals. That really big outlier is going to have a lot of influence. How to combat it?- [RANdom SAmple Consensus (RANSAC)](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RANSACRegressor.html) - Replace or remove the outliers. RANSAC- Selects random samples. - Tests non-sample points and creates an inlier list (opposite of outliers). - Refits models with all inliers. - Evaluates error. - Stops or repeats until iterations/threshold met.- **Not guaranteed to get the same answer each time - why?**> [More details](https://scikit-learn.org/stable/modules/linear_model.html#ransac-regression)[Image Source](https://scikit-learn.org/stable/auto_examples/linear_model/plot_ransac.html)
###Code
from sklearn.linear_model import RANSACRegressor
ransac = RANSACRegressor().fit(matCopy, y)
yhat_ransac = ransac.predict(matCopy)
plt.plot(matCopy[:,1], y, 'bo')
plt.plot(matCopy[:,1], yhat_ransac, 'r')
plt.xlabel('Credit Limit')
plt.ylabel('Actual / Expected Credit Rating')
plt.show()
###Output
_____no_output_____
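###Markdown
The fitted estimator also exposes which points it treated as inliers, which makes the "inlier list" step described above easy to inspect. A quick sketch reusing `ransac` from the cell above:
###Code
# boolean mask of the points RANSAC kept as inliers
inlier_mask = ransac.inlier_mask_
print('inliers :', inlier_mask.sum())
print('outliers:', (~inlier_mask).sum())
###Output
_____no_output_____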
###Markdown
Strength: Robust to Overfitting>Simple is better than complex. (Figures: a model with no overfitting vs. one with severe overfitting.) Multiple RegressionInstead of an $m \times 1$ input matrix, we'll have $m \times n$. $y = w_{0} + w_{1}x_{1} + \dots + w_{m}x_{m} = \sum{w_{i}x_{i}}=w^{T}x$Coefficients still reference the effect on $y$ of a 1-unit change in $x_{i}$ - all else held constant.Example with the California housing dataset: California Housing
###Code
import pandas as pd
import numpy as np
from sklearn.datasets import fetch_california_housing
from matplotlib import pyplot as plt
import warnings
%matplotlib inline
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
X, y = fetch_california_housing(return_X_y=True, as_frame=True)
x0, x1 = X.shape
print(f'Rows: {x0:,}\nFeatures: {x1}')
california_df = pd.concat([X,y], axis=1)
california_df.head()
###Output
_____no_output_____
###Markdown
Run the regression Need to add a constant/intercept term manually as statsmodels doesn't handle this automatically.
###Code
california_df['const'] = 1
featureNames = [x for x in california_df.columns if x != 'MedHouseVal']
featureNames = ['const'] + list(featureNames)
print(featureNames)
###Output
['const', 'MedInc', 'HouseAge', 'AveRooms', 'AveBedrms', 'Population', 'AveOccup', 'Latitude', 'Longitude', 'const']
###Markdown
> Double and triple check that you properly separated your target variable from your features!
###Code
import statsmodels.api as smf
statsModelsCoefs = smf.OLS(california_df['MedHouseVal'], california_df[featureNames]).fit()
print(statsModelsCoefs.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: MedHouseVal R-squared: 0.606
Model: OLS Adj. R-squared: 0.606
Method: Least Squares F-statistic: 3970.
Date: Wed, 29 Dec 2021 Prob (F-statistic): 0.00
Time: 17:41:20 Log-Likelihood: -22624.
No. Observations: 20640 AIC: 4.527e+04
Df Residuals: 20631 BIC: 4.534e+04
Df Model: 8
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
const -18.4710 0.329 -56.067 0.000 -19.117 -17.825
MedInc 0.4367 0.004 104.054 0.000 0.428 0.445
HouseAge 0.0094 0.000 21.143 0.000 0.009 0.010
AveRooms -0.1073 0.006 -18.235 0.000 -0.119 -0.096
AveBedrms 0.6451 0.028 22.928 0.000 0.590 0.700
Population -3.976e-06 4.75e-06 -0.837 0.402 -1.33e-05 5.33e-06
AveOccup -0.0038 0.000 -7.769 0.000 -0.005 -0.003
Latitude -0.4213 0.007 -58.541 0.000 -0.435 -0.407
Longitude -0.4345 0.008 -57.682 0.000 -0.449 -0.420
const -18.4710 0.329 -56.067 0.000 -19.117 -17.825
==============================================================================
Omnibus: 4393.650 Durbin-Watson: 0.885
Prob(Omnibus): 0.000 Jarque-Bera (JB): 14087.596
Skew: 1.082 Prob(JB): 0.00
Kurtosis: 6.420 Cond. No. 1.94e+18
==============================================================================
Notes:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The smallest eigenvalue is 1.82e-26. This might indicate that there are
strong multicollinearity problems or that the design matrix is singular.
###Markdown
Check Residuals
###Code
yhat = statsModelsCoefs.predict(california_df[featureNames])
resid = y - yhat
plt.figure(figsize=(5,5))
plt.plot(y, yhat, 'ro')
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.title('Actual vs. Predicted')
plt.show()
plt.figure(figsize=(5,5))
plt.hist(resid)
plt.title('Residual Distribution')
plt.show()
###Output
_____no_output_____
###Markdown
Patterns in residuals are not ideal. You'll want these to look like normally distributed white noise (ideally). We might be able to make those residuals behave with feature transformations and other techniques we'll talk about later.>In certain cases, it may help to log-transform your target variable, which will compress some of the variance.[Log-linear models](https://en.wikipedia.org/wiki/Log-linear_model)
###Code
import statsmodels.api as smf
logMV = np.log(california_df['MedHouseVal'])
statsModelsCoefs = smf.OLS(logMV, california_df[featureNames]).fit()
print(statsModelsCoefs.summary())
logyhat = statsModelsCoefs.predict(california_df[featureNames])
logresid = logMV - logyhat
plt.figure(figsize=(5,5))
plt.plot(np.exp(logMV), np.exp(logyhat), 'ro', alpha=0.1)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.title('Actual vs. Predicted')
plt.show()
###Output
_____no_output_____
###Markdown
> We'll go over models later that can catch the tail of this data better. What if we had categorical variables? Role of [Dummy Variables](https://en.wikipedia.org/wiki/Dummy_variable_(statistics))A way to incorporate categorical data into modeling, since models require numerical matrices. Essentially acts to change the intercept. Simple Example
###Code
dummy = pd.DataFrame([[2,4,1],
[3,6,1],
[4,8,1],
[6,12,1],
[7,14,1],
[2,6,0],
[4,10,0],
[6,14,0],
[7,16,0],
[3,8,0]], columns=['hgt', 'wgt', 'label'])
dummy
###Output
_____no_output_____
###Markdown
Persistent differences between these lines. Track parallel to one another.
###Code
class1 = dummy.query('label==1')
class2 = dummy.query('label==0')
plt.figure(figsize=(6,6))
plt.plot(class1['hgt'], class1['wgt'], 'bo')
plt.plot(class2['hgt'], class2['wgt'], 'ro')
plt.legend(['Label = 1', 'Label = 0'])
plt.show()
###Output
_____no_output_____
###Markdown
Let's look at the means for the data by group
###Code
dummy.groupby('label')['wgt'].mean().diff()[1]
###Output
_____no_output_____
###Markdown
Compare running a model with and without a dummy variable
###Code
from sklearn.linear_model import LinearRegression
Xa = np.array(dummy['hgt']).reshape(-1,1)
Xb = np.array(dummy[['hgt','label']])
y = np.array(dummy['wgt']).reshape(-1,1)
bothOLS = LinearRegression().fit(Xa,y)
yhat_both = bothOLS.predict(Xa)
sepOLS = LinearRegression().fit(Xb, y)
yhat_sep = sepOLS.predict(Xb)
print('No Dummy:\n')
print(f' Intercept: {np.round(bothOLS.intercept_[0],2)}')
print(f' Slope: {np.round(bothOLS.coef_[0][0],2)}\n')
print('w/ Dummy:\n')
print(f' Intercept: {np.round(sepOLS.intercept_[0], 2)}')
print(f' Slope: {np.round(sepOLS.coef_[0][0],2)}')
print(f' Dummy: {np.round(sepOLS.coef_[0][1], 0)}')
###Output
No Dummy:
Intercept: 1.0
Slope: 2.0
w/ Dummy:
Intercept: 2.0
Slope: 2.0
Dummy: -2.0
###Markdown
The dummy captures the mean difference! Otherwise the slope is identical.With the dummy included in the model:$$y=2.0 + (2)(x_1) + (-2)(\text{label}) $$Without the dummy:$$y=1.0 + (2)(x_1)$$ Incorporating Categorical Variables into a Model w/ Pipelines[Example from "Hands-on Machine Learning with Scikit-Learn, Keras & Tensorflow"](https://github.com/ageron/handson-ml2)This is an expanded, rawer form of the california housing data that is available in scikit-learn.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
housing = pd.read_csv('data/housing.csv')
housing.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 20640 entries, 0 to 20639
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 longitude 20640 non-null float64
1 latitude 20640 non-null float64
2 housing_median_age 20640 non-null int64
3 total_rooms 20640 non-null int64
4 total_bedrooms 20433 non-null float64
5 population 20640 non-null int64
6 households 20640 non-null int64
7 median_income 20640 non-null float64
8 ocean_proximity 20640 non-null object
9 median_house_value 20640 non-null int64
dtypes: float64(4), int64(5), object(1)
memory usage: 1.6+ MB
###Markdown
Goal - Predict Median House Value Things we need to consider:- Ocean Proximity is categorical. - Missing values in Total Bedrooms. - Data of significantly different scales.
###Code
housing.median_house_value.hist()
plt.title('Distribution of Median Home Values')
plt.show()
housing.hist(bins=50, figsize=(8,8))
plt.show()
###Output
_____no_output_____
###Markdown
Interesting items of note:- Wide variances in scales. - Median house value truncated at $500,000 - Outliers are present
###Code
housing.groupby('ocean_proximity')['median_house_value'].median().plot.barh()
plt.xlabel('Median of Median Home Value')
plt.ylabel('')
plt.title('Price differences by Ocean Proximity')
plt.show()
###Output
_____no_output_____
###Markdown
Inland homes have significantly lower prices than homes closer to the water.
###Code
housing.plot(kind='scatter', x='longitude', y='latitude', alpha=0.4, s=housing['population']/100,
label='population', figsize=(10,7), c='median_house_value', cmap=plt.get_cmap('jet'),
colorbar=True)
plt.legend('')
plt.show()
###Output
_____no_output_____
###Markdown
Higher values are largely clustered around the coast.[Example from "Hands-on Machine Learning with Scikit-Learn, Keras & TensorFlow"](https://github.com/ageron/handson-ml2) Missing Values Options:- Drop rows (can be bad, what if incoming data has missing values?). - Drop columns (could be bad, what if there is value in that feature?). - Fill in the missing values (ding ding). - If categorical, might want to add a dummy to indicate it was missing (ding ding). - Best strategy will be situationally dependent. This can be treated as a hyperparameter - no perfect answer. Strategies:- Simple imputers with mean, median, mode, or random values. - Estimate the missing value with another machine learning model (increasing overall complexity). >Not all strategies will work for all data types, so you may need to split it up, e.g., one method for the numerical variables and another for the categorical variables.
###Code
housing.isna().sum()
###Output
_____no_output_____
###Markdown
Filling with median
###Code
from sklearn.impute import SimpleImputer
example_imputer = SimpleImputer(strategy='median')
example_imputed = example_imputer.fit_transform(np.array(housing.total_bedrooms).reshape(-1,1))
example_imputed = pd.Series(example_imputed.ravel())
example_imputed.isna().sum()
###Output
_____no_output_____
###Markdown
If you wanted to add an indicator for the missing value> Probably more useful for categorical variables
###Code
from sklearn.impute import SimpleImputer
example_imputer = SimpleImputer(strategy='median', add_indicator=True)
example_imputer.fit_transform(np.array(housing.total_bedrooms).reshape(-1,1))
###Output
_____no_output_____
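###Markdown
The "estimate the missing value with another model" strategy mentioned earlier can be as simple as a nearest-neighbors imputer. A minimal sketch using scikit-learn's KNNImputer on the numeric columns only (the categorical column has to be handled separately):
###Code
from sklearn.impute import KNNImputer
# impute total_bedrooms (and any other NaNs) from the 5 nearest rows
knn_imputer = KNNImputer(n_neighbors=5)
numeric_cols = housing.drop(columns=['ocean_proximity', 'median_house_value'])
knn_imputed = knn_imputer.fit_transform(numeric_cols)
np.isnan(knn_imputed).sum()
###Output
_____no_output_____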
###Markdown
Note on One-Hot EncodingFrom "Hands-on Machine Learning with Scikit-Learn, Keras & TensorFlow":> If a categorical attribute has a large number of possible categories, then one-hot encoding will result in a large number of input features. This may slow down training and degrade performance. Possible alternatives in that situation:- Recode to a numerical feature, e.g., distance to ocean. - Only use the most frequent $N$ categories. - Convert to embeddings. A risk for us:May not have any islands in the training data, what would happen if we encountered that in our test/evaluation data?
###Code
housing.ocean_proximity.value_counts()
###Output
_____no_output_____
###Markdown
Islands are really rare- Adding a dummy for this won't do much - it'll basically be zero. - Replace with the nearest category?
###Code
from sklearn.preprocessing import OneHotEncoder
example_ohe = OneHotEncoder()
example_ohe = example_ohe.fit_transform(np.array(housing['ocean_proximity']).reshape(-1,1))
example_ohe
###Output
_____no_output_____
###Markdown
> A sparse matrix stores the coordinates of the non-zero entries rather than the full array. It's a more efficient structure:
###Code
print(example_ohe[:5,])
###Output
(0, 3) 1.0
(1, 3) 1.0
(2, 3) 1.0
(3, 3) 1.0
(4, 3) 1.0
###Markdown
>Can be converted back to a dense format:
###Code
example_ohe.toarray()[:5,:]
###Output
_____no_output_____
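###Markdown
One way to address the "category unseen in training" risk raised above is `OneHotEncoder(handle_unknown='ignore')`, which encodes an unknown category as an all-zero row instead of raising an error. A minimal sketch with made-up category values:
###Code
safe_ohe = OneHotEncoder(handle_unknown='ignore')
safe_ohe.fit(np.array(['INLAND', 'NEAR BAY', 'NEAR OCEAN']).reshape(-1, 1))
# 'ISLAND' was never seen during fit, so it becomes all zeros rather than an error
safe_ohe.transform(np.array([['ISLAND'], ['INLAND']])).toarray()
###Output
_____no_output_____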
###Markdown
Should we use this for modeling?>In statistics, multicollinearity (also collinearity) is a phenomenon in which one predictor variable in a multiple regression model can be linearly predicted from the others with a substantial degree of accuracy. In this situation, the coefficient estimates of the multiple regression may change erratically in response to small changes in the model or the data. Multicollinearity does not reduce the predictive power or reliability of the model as a whole, at least within the sample data set; it only affects calculations regarding individual predictors. That is, a multivariate regression model with collinear predictors can indicate how well the entire bundle of predictors predicts the outcome variable, but it may not give valid results about any individual predictor, or about which predictors are redundant with respect to others.[Wikipedia](https://en.wikipedia.org/wiki/Multicollinearity) Could argue that this could be represented as an ordinal variable (island > near ocean > near bay > ...). See [OrdinalEncoder as an example](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html). You could try this using both methods to see if one is better. A Brief Rant on LabelEncoder Should not be used on your feature set! This is commonly done on Kaggle (incorrectly!). Do not use this for your feature processing. Scaling Numerical Variables Don't skip this - most models don't perform well when variables are on different scales.
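(A quick aside before scaling: a minimal sketch of the ordinal-encoding alternative suggested above. The ranking of categories below is an assumption made purely for illustration, not something the data tells us.)
###Code
from sklearn.preprocessing import OrdinalEncoder
# assumed ordering from closest to the water to furthest away (illustrative only)
proximity_order = [['ISLAND', 'NEAR OCEAN', 'NEAR BAY', '<1H OCEAN', 'INLAND']]
ordinal_enc = OrdinalEncoder(categories=proximity_order)
ordinal_enc.fit_transform(housing[['ocean_proximity']])[:5]
###Output
_____no_output_____
###Markdown
Now, on to scaling - a quick look at the spread of the numeric columns first: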
###Code
housing.select_dtypes(['float','integer']).describe().round(0)
###Output
_____no_output_____
###Markdown
Two main methods:> Only fit these to the training data, no leaking information from the test set! Min-max scaling- Simple - Bound between 0 and 1 - a lot of algorithms like that, especially neural networks- scikit-learn gives you some additional flexibility in terms of the range - Very susceptible to outliers$$x_{scaled} = \frac{x - x_{min}}{x_{max}-x_{min}}$$ Standardization- A little more involved. - More robust to outliers. - No specific range boundary. $$x_{scaled} = \frac{x - \bar{x}}{\sigma_{x}}$$ where $\bar{x}$ is the mean of $x$. Since we are doing regression and don't have a scaling boundary requirement and there are probably outliers, we'll use standardization.
###Code
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
example_SS = StandardScaler()
example_SS = example_SS.fit_transform(np.array(housing.total_rooms).reshape(-1,1))
plt.hist(housing.total_rooms, bins=100)
plt.title('Total Rooms', loc='left')
plt.show()
plt.hist(example_SS, bins=100)
plt.title('Total Rooms - Standardized', loc='left')
plt.show()
###Output
_____no_output_____
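###Markdown
For comparison, the min-max alternative on the same column - just an illustrative sketch, since the modeling pipeline below sticks with standardization:
###Code
from sklearn.preprocessing import MinMaxScaler
example_mm = MinMaxScaler()
example_mm_scaled = example_mm.fit_transform(np.array(housing.total_rooms).reshape(-1,1))
# same values, now bounded to [0, 1]
print(example_mm_scaled.min(), example_mm_scaled.max())
###Output
_____no_output_____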
###Markdown
Training/Test Splits>Put the test data aside and never look at it again.- All of the feature transformations and model training should be on the training data. - In production, you wouldn't exactly know what the incoming data would look like ahead of time. - If you use the test data to inform **any** of the feature transformations or modeling, then you are letting that test data leak into the training data and that will (may) bias your evaluations. This is called **leakage** or **data snooping** - neither is good. The simplest form is splitting your data into two parts:- Training: will base feature transforms and modeling on this. - Test: evaluate the models on this data. >There are more robust methods that we'll talk about later. You can think of this simple splitting as a quick and dirty way to evaluate performance, but it isn't a methodology you'd want to use to estimate what your performance is truly likely to be. Split off the features and the target variable
###Code
y = housing.median_house_value
features = ['housing_median_age', 'total_rooms', 'total_bedrooms',
'population', 'households', 'median_income', 'ocean_proximity', 'longitude', 'latitude'
]
X = housing[features]
X.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 20640 entries, 0 to 20639
Data columns (total 9 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 housing_median_age 20640 non-null int64
1 total_rooms 20640 non-null int64
2 total_bedrooms 20433 non-null float64
3 population 20640 non-null int64
4 households 20640 non-null int64
5 median_income 20640 non-null float64
6 ocean_proximity 20640 non-null object
7 longitude 20640 non-null float64
8 latitude 20640 non-null float64
dtypes: float64(4), int64(4), object(1)
memory usage: 1.4+ MB
###Markdown
Split into training and test sets>80/20 split is pretty standard, but not universal. For very large datasets, I've heard of 99/1 splits.
###Code
from sklearn.model_selection import train_test_split
X_training, X_test, y_training, y_test = train_test_split(X, y, test_size=0.20)
print(f'Training samples: {X_training.shape[0]:,}')
print(f'Test samples: {X_test.shape[0]:,}')
###Output
Training samples: 16,512
Test samples: 4,128
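###Markdown
A minimal sketch of "fit the transformations on the training data only": the scaler learns its mean and standard deviation from the training split and is then applied, unchanged, to the test split (`median_income` is used purely as an example column):
###Code
from sklearn.preprocessing import StandardScaler
leak_free_scaler = StandardScaler()
train_scaled = leak_free_scaler.fit_transform(X_training[['median_income']])
test_scaled = leak_free_scaler.transform(X_test[['median_income']]) # transform only - never fit on the test data
print(train_scaled.mean().round(3), test_scaled.mean().round(3))
###Output
_____no_output_____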
###Markdown
>Remember, the test data is only for evaluation.
###Code
X_training.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 16512 entries, 9505 to 15571
Data columns (total 9 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 housing_median_age 16512 non-null int64
1 total_rooms 16512 non-null int64
2 total_bedrooms 16354 non-null float64
3 population 16512 non-null int64
4 households 16512 non-null int64
5 median_income 16512 non-null float64
6 ocean_proximity 16512 non-null object
7 longitude 16512 non-null float64
8 latitude 16512 non-null float64
dtypes: float64(4), int64(4), object(1)
memory usage: 1.3+ MB
###Markdown
Pipelines- Fill missing values. - Create dummies for categorical. - Standardize numerical variables. - Fit the model. Pipelines can be made of collections of pipelines
###Code
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
cat_vars = ['ocean_proximity']
num_vars = ['housing_median_age', 'total_rooms', 'total_bedrooms', 'population',
'households', 'median_income', 'longitude', 'latitude']
num_pipeline = Pipeline([('impute_missing', SimpleImputer(strategy='median')),
('standardize_num', StandardScaler())
])
cat_pipeline = Pipeline([('impute_missing_cats', SimpleImputer(strategy='most_frequent')),
('create_dummies_cats', OneHotEncoder(handle_unknown='ignore', drop='first'))])
processing_pipeline = ColumnTransformer(transformers=[('proc_numeric', num_pipeline, num_vars),
('create_dummies', cat_pipeline, cat_vars)])
print(processing_pipeline)
from sklearn.linear_model import LinearRegression
modeling_pipeline = Pipeline([('data_processing', processing_pipeline), ('lm', LinearRegression())])
modeling_pipeline.fit(X_training, y_training)
###Output
_____no_output_____
###Markdown
Evaluating the model>Really evaluating the entire preprocessing pipeline and the model itself.
###Code
housing_predictions = modeling_pipeline.predict(X_test)
###Output
_____no_output_____
###Markdown
Get the mean squared error, root mean squared error, and $R^2$
###Code
from sklearn.metrics import mean_squared_error
mse = mean_squared_error(y_test, housing_predictions)
mse
rmse = np.sqrt(mse)
rmse
from sklearn.metrics import r2_score
r2 = r2_score(y_test, housing_predictions)
r2
###Output
_____no_output_____
###Markdown
Plot the test and predictions
###Code
import matplotlib.pyplot as plt
plt.plot(y_test, housing_predictions, 'ro')
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.show()
###Output
_____no_output_____
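###Markdown
Before reading too much into the residual plot, a quick check for overfitting: compare the pipeline's $R^2$ on the data it was fit on against the held-out test split (the exact gap will vary with the random split):
###Code
print(f"Training R^2: {modeling_pipeline.score(X_training, y_training):.3f}")
print(f"Test R^2: {modeling_pipeline.score(X_test, y_test):.3f}")
###Output
_____no_output_____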
###Markdown
Observations- If this was a perfect model, the residuals would be 0 for all actual/predicted values. - Residuals should look like white noise across all values - seeing some patterns. - Some information is leaking into the residuals that the model isn't capturing. - Could be a feature we don't have access to. - Could be noise in the data. - Could be that the underlying relationships aren't linear. - Insert any number of additional explanations. There may be some overfitting - the training data fits better than the test data. We can explore other models to see if they are able to reduce the overfitting. Bias-variance Tradeoff[Image source](http://scott.fortmann-roe.com/docs/BiasVariance.html)>At its root, dealing with bias and variance is really about dealing with **over- and under-fitting**. Bias is reduced and variance is increased in relation to model complexity. As more and more parameters are added to a model, the complexity of the model rises and variance becomes our primary concern while bias steadily falls. (Understanding the Bias-Variance Tradeoff, Fortmann-Roe) From Raschka (paraphrased):>Variance measures the consistency (or variability) of the model prediction. If we retrain the model on different subsets of the training set and observe different results, we say it is subject to high variance.>Bias measures how far off the predictions are from the correct values. This will be error that isn't due to differences in the training datasets.[Bias and variance from Raschka's Evaluation Lecture Notes](https://sebastianraschka.com/pdf/lecture-notes/stat479fs18/08_eval-intro_notes.pdf) Simple Usually Triumphs Over the Complex It's a balancing act though. You'll need a minimum level of complexity to capture the relationships in the data.[Image source](http://scott.fortmann-roe.com/docs/BiasVariance.html) Potential Options- Include fewer features. - Shrinkage methods. - Data over models. Low Variance, Forward, and Backward Selection [Low Variance](https://scikit-learn.org/stable/modules/feature_selection.html)>You can automatically omit features with zero-variance (i.e., constants).
###Code
from sklearn.feature_selection import VarianceThreshold
# use a small toy matrix here so we don't overwrite the housing feature matrix X defined earlier
X_toy = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 1, 1], [0, 1, 0], [0, 1, 1]])
print(X_toy)
sel = VarianceThreshold(threshold=(.8 * (1 - .8)))
sel.fit_transform(X_toy)
###Output
_____no_output_____
###Markdown
[Forward or Backward Selection](https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.SequentialFeatureSelector.html#sklearn.feature_selection.SequentialFeatureSelector) From scikit-learn:>Forward-SFS is a greedy procedure that iteratively finds the best new feature to add to the set of selected features. Concretely, we initially start with zero feature and find the one feature that maximizes a cross-validated score when an estimator is trained on this single feature. Once that first feature is selected, we repeat the procedure by adding a new feature to the set of selected features. The procedure stops when the desired number of selected features is reached, as determined by the n_features_to_select parameter.>Backward-SFS follows the same idea but works in the opposite direction: instead of starting with no feature and greedily adding features, we start with all the features and greedily remove features from the set. The direction parameter controls whether forward or backward SFS is used.In general, forward and backward selection do not yield equivalent results. Also, one may be much faster than the other depending on the requested number of selected features: if we have 10 features and ask for 7 selected features, forward selection would need to perform 7 iterations while backward selection would only need to perform 3.>SFS differs from RFE and SelectFromModel in that it does not require the underlying model to expose a coef_ or feature_importances_ attribute. It may however be slower considering that more models need to be evaluated, compared to the other approaches. For example in backward selection, the iteration going from m features to m - 1 features using k-fold cross-validation requires fitting m * k models, while RFE would require only a single fit, and SelectFromModel always just does a single fit and requires no iterations.[See a *Comparative Study of Techniques for Large-Scale Feature Selection* for more discussion.](https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.24.4369&rep=rep1&type=pdf) Basic Idea- Start with all the features. - Determine the feature that provides the least added benefit. - Remove the above feature. - Continue until you reach the desired number of features or hit a threshold. Rationale> Automatically select the most relevant subset of features. > Really only necessary if your models don't support regularization.> Can help with the *Curse of Dimensionality* since it'll generally select a more parsimonious model with dense features.From Machine Learning with Python, SBS (backward) showed a model with 3 features would have achieved 100% accuracy on the validation data set. See pages 137-139.[Image source - Raschka's GitHub; Python for Machine Learning 3rd Edition, Figure 4.8](https://github.com/rasbt/python-machine-learning-book-3rd-edition/blob/master/ch04/images/04_08.png)- I've never used these in practice - there are other ways to guard against overfitting and selecting a more parsimonious model. - Can add a lot of computational overhead. [See an example from scikit-learn for code example.](https://scikit-learn.org/stable/auto_examples/feature_selection/plot_select_from_model_diabetes.html#sphx-glr-auto-examples-feature-selection-plot-select-from-model-diabetes-py) Regularization>Helps solve overfitting (high variance) - having too many parameters (i.e., too complex). >Also helps with multicollinearity (which we saw) and filtering out noise.
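Before getting into the specific penalties, here is a minimal sketch of the sequential selection idea described above, using scikit-learn's `SequentialFeatureSelector` on the processed training data. Selecting 4 features with 3-fold cross-validation is an arbitrary illustrative choice, and the fit can take a little while.
###Code
from sklearn.feature_selection import SequentialFeatureSelector
# greedy forward selection over the processed (imputed, scaled, dummied) columns
X_train_processed = processing_pipeline.fit_transform(X_training)
sfs = SequentialFeatureSelector(LinearRegression(), n_features_to_select=4,
                                direction='forward', cv=3)
sfs.fit(X_train_processed, y_training)
sfs.get_support() # boolean mask of the selected processed columns
###Output
_____no_output_____
###Markdown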
Types of regularization:- $L1$ (lasso - least absolute shrinkage and selection operator): penalizing the sum of $\lvert \beta_{j} \rvert$ - $L2$ (ridge): penalizing the sum of the squared $\beta_{j}^2$'s Shrinkage with Ridge Regression ($l_2$ Regularization) We can modify our loss function to penalize complexity Shrinking parameter values penalizes increased complexity (i.e., parameters with meaningful weights).>Shrinks, but does not eliminate. Coefficients will be non-zero. Will that help when there are many, many features? Reduces the influence of parameters that carry less weight Recall $\hat\beta = (X^TX)^{-1}X^{T}y$ Goal: minimize $\frac{1}{N}\sum{\epsilon_{i}^2}$ We can force parameters to shrink towards zero, effectively reducing their influence, by adding a penalty to the loss function:$\arg\min\frac{1}{N}\sum{\epsilon_{i}^2} + \lambda\sum{\beta_{j}^2}$- What happens with $\lambda=0$? New closed form: $\hat\beta = (X^TX+\lambda{I})^{-1}X^{T}y$ > Ridge has a unique solution, similar to OLS. You can modify the closed form implementation from last class as an independent proof. scikit-learn's implementation uses various optimization methods to solve the loss optimization problem, so it won't be 100% comparable to OLS with $\alpha=0$, it'll be close though. Introduces $\lambda$ - our first (official) hyperparameter! $\lambda$ controls the amount of the penalty; it is bounded between 0 and $\infty$.>When $\lambda=0$, ridge will provide the same coefficients as OLS, since the $\lambda{I}$ will become zero.
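As a small numerical check of that closed form, here is a sketch on tiny made-up data (not the housing data):
###Code
def ridge_closed_form(X_mat, y_vec, lam):
    """Closed-form ridge estimate: (X'X + lambda*I)^-1 X'y."""
    return np.linalg.solve(X_mat.T @ X_mat + lam * np.eye(X_mat.shape[1]), X_mat.T @ y_vec)

# tiny synthetic example: the true coefficients are [2, -1, 0.5]
rng = np.random.default_rng(0)
X_demo = rng.normal(size=(100, 3))
y_demo = X_demo @ np.array([2.0, -1.0, 0.5]) + rng.normal(scale=0.1, size=100)
print(ridge_closed_form(X_demo, y_demo, lam=0.0)) # matches the OLS solution
print(ridge_closed_form(X_demo, y_demo, lam=100.0)) # shrunk toward zero
###Output
_____no_output_____
###Markdown
Back to the housing pipeline - fitting scikit-learn's `Ridge` with `alpha=0` to compare against the ordinary least squares coefficients: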
###Code
from sklearn.linear_model import Ridge
modeling_pipeline_ridge = Pipeline([('data_processing', processing_pipeline), ('ridge', Ridge(alpha=0))])
modeling_pipeline_ridge.fit(X_training, y_training)
modeling_pipeline_ridge['ridge'].coef_
###Output
_____no_output_____
###Markdown
> Compared to the below from the original model:
###Code
modeling_pipeline['lm'].coef_
###Output
_____no_output_____
###Markdown
Now to evaluate different $\lambda$ values>We'll need to evaluate a grid of $\lambda$ values to determine the best one to use.
###Code
from collections import defaultdict
alphas = [0, 1, 2, 5, 10, 50]
ridge_results = defaultdict(dict)
for alph in alphas:
modeling_pipeline_ridge = Pipeline([('data_processing', processing_pipeline), ('ridge', Ridge(alpha=alph))])
modeling_pipeline_ridge.fit(X_training, y_training)
ridge_results['coefficients'][alph] = modeling_pipeline_ridge['ridge'].coef_
ridge_results['training score'][alph] = modeling_pipeline_ridge.score(X_training, y_training)
ridge_results['test score'][alph] = modeling_pipeline_ridge.score(X_test, y_test)
print('Done')
coefficients_ridge = pd.DataFrame.from_dict(ridge_results['coefficients'])
coefficients_ridge = coefficients_ridge.reset_index()
coefficients_ridge = coefficients_ridge.rename(columns={'index':'coefficient_nbr'})
coefficients_ridge = coefficients_ridge.melt(id_vars='coefficient_nbr', var_name='alpha', value_name='coefficient')
(
coefficients_ridge.pivot_table(index='alpha', columns='coefficient_nbr', values='coefficient')
.plot(figsize=(8,4),legend=False)
)
plt.title('Ridge Coefficients', loc='left')
plt.xlabel('Alpha (Regularization Amount)')
plt.ylabel('Coefficient')
plt.show()
###Output
_____no_output_____
###Markdown
Changes in $R^2$
###Code
ridge_training_r2 = pd.Series(ridge_results['training score'])
ridge_test_r2 = pd.Series(ridge_results['test score'])
ridge_training_r2.plot()
ridge_test_r2.plot()
plt.title('$R^2$ for Ridge Regression')
plt.legend(['Training','Test'])
plt.xlabel('Alpha (Regularization Level)')
plt.ylabel('Percent of Variance Explained')
plt.ylim(0.4, 1)
plt.show()
###Output
_____no_output_____
###Markdown
Another Option is Lasso - Requires Gradient Descent (GD) OLS and Ridge regression have unique solutions (even though scikit-learn uses optimization). In order to talk about some of the other variants, we need to talk about an optimization technique called gradient descent. Gradient descent is an optimization technique that allows us to "learn" what the coefficients should be by iteration and continuous improvement. Traditional statisticians don't love this technique since it's not too far away from guessing a bunch of times until you can't guess much better. [Image source](https://www.google.com/url?sa=i&url=https%3A%2F%2Fwww.datasciencecentral.com%2Fprofiles%2Fblogs%2Falternatives-to-the-gradient-descent-algorithm&psig=AOvVaw1ki8gWYTrWRy-NKpu7RFgo&ust=1631036623607000&source=images&cd=vfe&ved=0CAsQjRxqFwoTCJiSq6nz6vICFQAAAAAdAAAAABAD) Gradient descent essentially looks for local minima of loss functions that are differentiable. While least-squares does have a closed-form solution, you can also approximate it using gradient descent. Gradient descent will reappear with other algorithms. GD requires a loss function, which for OLS regression is the sum of squared errors:$$J(w)=\frac{1}{2}\sum(y^{(i)} - \hat{y}^{(i)})^2$$ This will also be used in logistic regression and neural networks. Updating weights All of the weights are updated simultaneously.1. Initialize weights to 0 or small random numbers. 2. For each training example, $x^{i}$: a. Compute the output value, $\hat{y}$. b. Update the weights.
###Code
def gradientDescent(x, y, theta, alpha, m, numIterations):
thetaHistory = list()
xTrans = x.transpose()
costList = list()
for i in range(0, numIterations):
# data x feature weights = y_hat
hypothesis = np.dot(x, theta)
# how far we are off
loss = hypothesis - y
# mse
cost = np.sum(loss ** 2) / (2 * m)
costList.append(cost)
# avg gradient per example
gradient = np.dot(xTrans, loss) / m
# update
theta = theta - alpha * gradient
thetaHistory.append(theta)
return thetaHistory, costList
###Output
_____no_output_____
###Markdown
Create training data
###Code
data_pipeline = Pipeline([('data_processing', processing_pipeline)])
data_pipeline.fit(X_training)
gs_training_data = data_pipeline.fit_transform(X_training)
###Output
_____no_output_____
###Markdown
Run the model
###Code
import datetime
start_ts = datetime.datetime.now()
betaHistory, costList = gradientDescent(gs_training_data,y_training,
theta=np.zeros(gs_training_data.shape[1]),
alpha=0.01,
m=gs_training_data.shape[0], numIterations=5000)
end_ts = datetime.datetime.now()
print(f'Completed in {end_ts-start_ts}')
plt.plot(costList)
plt.title(f'Final cost: {costList[-1]:,.2f}', loc='left')
plt.show()
###Output
Completed in 0:00:02.319182
###Markdown
Show changes in $\beta$ throughout the iterations
###Code
from collections import defaultdict
thetas = defaultdict(list)
for i in range(len(betaHistory)):
for j in range(len(betaHistory[i])):
thetas[j].append(betaHistory[i][j])
thetasD = pd.DataFrame.from_dict(thetas)
thetasD.plot(legend=False)
plt.title('Beta Estimates')
plt.ylabel('Coefficient')
plt.xlabel('Iteration')
plt.show()
###Output
_____no_output_____
###Markdown
Predictions
###Code
gs_betas = betaHistory[4999]
gs_predictions = np.dot(gs_training_data, gs_betas)
plt.plot(y_training, gs_predictions, 'bo', alpha=0.4)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.title('Gradient Descent Regression Fit on Training Data')
plt.show()
###Output
_____no_output_____
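###Markdown
One practical point worth seeing before moving on: the learning rate matters a great deal. A small sketch reusing the `gradientDescent` function above on a tiny made-up 1-D dataset - with a step size that is too large, the cost grows instead of shrinking:
###Code
# synthetic example (not the housing data): the true slope is 2
gd_x = np.array([[1.0], [2.0], [3.0]])
gd_y = np.array([2.0, 4.0, 6.0])
_, costs_ok = gradientDescent(gd_x, gd_y, theta=np.zeros(1), alpha=0.1,
                              m=gd_x.shape[0], numIterations=25)
_, costs_bad = gradientDescent(gd_x, gd_y, theta=np.zeros(1), alpha=1.0,
                               m=gd_x.shape[0], numIterations=25)
print(f'alpha=0.1 -> final cost {costs_ok[-1]:.6f}') # shrinking toward zero
print(f'alpha=1.0 -> final cost {costs_bad[-1]:.3e}') # blowing up
###Output
_____no_output_____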
###Markdown
Where things can go wrong:- The learning rate (alpha generally) is really important. If you pick a rate that is too large, you may hop over the minimum and the models will either be very poor or never converge. - Other thresholds may take some trial and error. - You can get stuck at a local minimum and never find the global minimum. - You need to know when to stop. It'll keep adjusting coefficients until it reaches an iteration limit or a derivative threshold. Too long and overfitting could occur. Lasso Regression ($l_1$ Regularization) No closed form solution - will need to use optimization techniques regardless. $$J(w)_{lasso}=\sum{(y^{(i)}-\hat{y}^{(i)})^2}+\lambda \lVert w \rVert_1$$$$L1: \lambda \lVert w \rVert_1 = \lambda \sum{\lvert w_j \rvert}$$ Could modify the gradient descent with the above, but we'll let scikit-learn handle it. Differences from Ridge[Introduction to Statistical Learning, Figure 3.11](https://www.statlearning.com)>Estimation picture for the lasso (left) and ridge regression (right). Shown are contours of the error and constraint functions. The solid blue areas are the constraint regions $\lvert \beta_1 \rvert + \lvert \beta_2 \rvert \leq t$ and $\beta_{1}^2 + \beta_{2}^2 \leq t^2$, respectively, while the red ellipses are the contours of the least squares error function. Explanation from Raschka (Python Machine Learning 3rd Edition, Chapter 4, pages 129-131):> We can think of regularization as adding a penalty term to the cost function to encourage smaller weights; in other words, we penalize large weights. Thus, by increasing the regularization strength via the regularization parameter, we shrink the weights toward zero and decrease the dependence of our model on the training data.> The shaded regions represent the regularization "budget" - the combination of the weights cannot exceed those limits. As the regularization term increases, so does the area of that shaded region.[See Elements of Statistical Learning Section 3.4 for a more thorough discussion.](https://web.stanford.edu/~hastie/ElemStatLearn/) TL;DR - Lasso can create sparse models, Ridge cannot. Ridge will have non-zero estimates for its $\beta$ values, and lasso can result in some $\beta$ values equal to zero (i.e., sparse).- Lasso should provide better protection against overfitting than Ridge and OLS. - Can also be a technique by itself for feature selection.
###Code
from sklearn.linear_model import Lasso
from collections import defaultdict
alphas = [1, 2, 5, 10, 50]
lasso_results = defaultdict(dict)
for alph in alphas:
modeling_pipeline_lasso = Pipeline([('data_processing', processing_pipeline), ('lasso', Lasso(alpha=alph))])
modeling_pipeline_lasso.fit(X_training, y_training)
lasso_results['coefficients'][alph] = modeling_pipeline_lasso['lasso'].coef_
lasso_results['training score'][alph] = modeling_pipeline_lasso.score(X_training, y_training)
lasso_results['test score'][alph] = modeling_pipeline_lasso.score(X_test, y_test)
coefficients_lasso = pd.DataFrame.from_dict(lasso_results['coefficients'])
coefficients_lasso = coefficients_lasso.reset_index()
coefficients_lasso = coefficients_lasso.rename(columns={'index':'coefficient_nbr'})
coefficients_lasso = coefficients_lasso.melt(id_vars='coefficient_nbr', var_name='alpha', value_name='coefficient')
coefficients_lasso.pivot_table(index='alpha', columns='coefficient_nbr', values='coefficient').plot(figsize=(8,4))
plt.title('Lasso Coefficients', loc='left')
plt.xlabel('Alpha (Regularization Amount)')
plt.ylabel('Coefficient')
plt.legend('')
plt.show()
coefficients_ridge.query('coefficient_nbr == 9')
coefficients_lasso.query('coefficient_nbr == 9')
###Output
_____no_output_____ |
VNN/notebooks/network_experiments/sensorless/test3.ipynb | ###Markdown
S(X,X,X)
###Code
model_fun = lambda: get_scalar_model(dataset_shapes, hidden_layer_units=shapes['S'], activation='relu', output_activation=None, \
kernel_initializer='random_normal', bias_initializer='random_normal', \
optimizer=keras.optimizers.Adam(), loss=keras.losses.MeanSquaredError(), metrics=[keras.metrics.MeanSquaredError()])
validate_model_multiple(model_fun, datasets_generator_fun, epochs=epochs, num_tries=num_tries, \
loss_name="mean_squared_error", measure_name="val_mean_squared_error",
print_data=True)
###Output
Average elapsed k-fold validation time: 443.94009 sec
Last measures: [0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094586968422, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.09090987592935562, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198]
Loss history average: [0.0859451 0.09083878 0.0908445 0.09084531 0.09084549]
Measure history average: [0.09090947 0.09090947 0.09090948 0.09090948 0.09090948]
Measure history worst: [0.09090969 0.09090964 0.09090971 0.09090971 0.09090972]
Measure history best: [0.09090943 0.09090943 0.09090943 0.09090943 0.09090943]
###Markdown
V1(X):U(2)
###Code
model_fun = lambda: get_vector_model(dataset_shapes, fractal_depth=1, hidden_layer_units=shapes['V2'], inner_hidden_layer_units=(2,), \
activation='relu', output_activation=None, \
weight_type="unique", weight_initializer='random_normal', \
optimizer=keras.optimizers.Adam(), loss=keras.losses.MeanSquaredError(), metrics=[keras.metrics.MeanSquaredError()])
validate_model_multiple(model_fun, datasets_generator_fun, epochs=epochs, num_tries=num_tries, \
loss_name="mean_squared_error", measure_name="val_mean_squared_error",
print_data=True)
###Output
Average elapsed k-fold validation time: 502.31294 sec
Last measures: [0.09090720862150192, 0.0909094288945198, 0.0909094288945198, 0.0909094586968422, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094512462616, 0.0909094288945198, 0.0909094288945198, 0.09090599417686462, 0.0909094288945198, 0.0909094512462616, 0.0909094288945198, 0.0909094288945198, 0.09090134501457214, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198]
Loss history average: [0.08916436 0.08785033 0.0879908 0.08823442 0.08868072]
Measure history average: [0.09090899 0.09090937 0.09090939 0.09090941 0.09090942]
Measure history worst: [0.09090944 0.09090943 0.09090944 0.09090943 0.09090943]
Measure history best: [0.09090781 0.0909092 0.09090927 0.09090929 0.09090934]
###Markdown
V1(X):S(2)
###Code
model_fun = lambda: get_vector_model(dataset_shapes, fractal_depth=1, hidden_layer_units=shapes['V2'], inner_hidden_layer_units=(2,), \
activation='relu', output_activation=None, \
weight_type="shared", weight_initializer='random_normal', \
optimizer=keras.optimizers.Adam(), loss=keras.losses.MeanSquaredError(), metrics=[keras.metrics.MeanSquaredError()])
validate_model_multiple(model_fun, datasets_generator_fun, epochs=epochs, num_tries=num_tries, \
loss_name="mean_squared_error", measure_name="val_mean_squared_error",
print_data=True)
###Output
Average elapsed k-fold validation time: 472.77850 sec
Last measures: [0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.09090794622898102, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.09090530127286911, 0.0909094363451004, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.09090809524059296, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198]
Loss history average: [0.08955504 0.0890771 0.08916495 0.08933082 0.08932989]
Measure history average: [0.0909092 0.09090942 0.09090943 0.09090943 0.09090943]
Measure history worst: [0.09090943 0.09090943 0.09090943 0.09090943 0.09090943]
Measure history best: [0.09090861 0.09090939 0.09090943 0.09090943 0.09090943]
###Markdown
V1(X):U(3)
###Code
model_fun = lambda: get_vector_model(dataset_shapes, fractal_depth=1, hidden_layer_units=shapes['V3'], inner_hidden_layer_units=(3,), \
activation='relu', output_activation=None, \
weight_type="unique", weight_initializer='random_normal', \
optimizer=keras.optimizers.Adam(), loss=keras.losses.MeanSquaredError(), metrics=[keras.metrics.MeanSquaredError()])
validate_model_multiple(model_fun, datasets_generator_fun, epochs=epochs, num_tries=num_tries, \
loss_name="mean_squared_error", measure_name="val_mean_squared_error",
print_data=True)
###Output
Average elapsed k-fold validation time: 259.89548 sec
Last measures: [0.090904101729393, 0.0909094288945198, 0.0909094512462616, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.09090012311935425, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094586968422, 0.0909094288945198, 0.09090796858072281, 0.0909094288945198, 0.0909094288945198, 0.0909094512462616, 0.0909094288945198]
Loss history average: [0.0899984 0.08931655 0.08935949 0.08949251 0.08962395]
Measure history average: [0.09090896 0.09090933 0.09090942 0.09090912 0.09090934]
Measure history worst: [0.09090947 0.09090944 0.09090944 0.09090944 0.09090943]
Measure history best: [0.09090783 0.09090898 0.09090932 0.09090757 0.09090883]
###Markdown
V1(X):S(3)
###Code
model_fun = lambda: get_vector_model(dataset_shapes, fractal_depth=1, hidden_layer_units=shapes['V3'], inner_hidden_layer_units=(3,), \
activation='relu', output_activation=None, \
weight_type="shared", weight_initializer='random_normal', \
optimizer=keras.optimizers.Adam(), loss=keras.losses.MeanSquaredError(), metrics=[keras.metrics.MeanSquaredError()])
validate_model_multiple(model_fun, datasets_generator_fun, epochs=epochs, num_tries=num_tries, \
loss_name="mean_squared_error", measure_name="val_mean_squared_error",
print_data=True)
###Output
Average elapsed k-fold validation time: 311.62306 sec
Last measures: [0.0909094288945198, 0.0909152403473854, 0.0909094288945198, 0.09090948104858398, 0.09090189635753632, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909087210893631, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.09091003239154816, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198]
Loss history average: [0.08938964 0.08878635 0.08888237 0.08911528 0.08929572]
Measure history average: [0.09090979 0.09090969 0.09090973 0.09090978 0.09090957]
Measure history worst: [0.09091066 0.09091071 0.09091106 0.09091137 0.09091014]
Measure history best: [0.09090943 0.09090929 0.09090943 0.09090943 0.09090943]
###Markdown
V1(X):U(4)
###Code
model_fun = lambda: get_vector_model(dataset_shapes, fractal_depth=1, hidden_layer_units=shapes['V4'], inner_hidden_layer_units=(4,), \
activation='relu', output_activation=None, \
weight_type="unique", weight_initializer='random_normal', \
optimizer=keras.optimizers.Adam(), loss=keras.losses.MeanSquaredError(), metrics=[keras.metrics.MeanSquaredError()])
validate_model_multiple(model_fun, datasets_generator_fun, epochs=epochs, num_tries=num_tries, \
loss_name="mean_squared_error", measure_name="val_mean_squared_error",
print_data=True)
###Output
Average elapsed k-fold validation time: 260.31439 sec
Last measures: [0.09090554714202881, 0.0909094288945198, 0.09090948849916458, 0.09090947359800339, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.09125572443008423, 0.09091045707464218, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.09090398252010345, 0.0909094288945198, 0.0909094363451004, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094363451004, 0.09090985357761383, 0.0909094288945198, 0.09090333431959152, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198]
Loss history average: [0.08938623 0.08813142 0.08821785 0.08824628 0.08838546]
Measure history average: [0.09092853 0.09092386 0.09092256 0.09092175 0.09092106]
Measure history worst: [0.0910269 0.09099605 0.09098806 0.09098308 0.09097889]
Measure history best: [0.09090825 0.09090925 0.09090934 0.09090946 0.09090946]
###Markdown
V1(X):S(4)
###Code
model_fun = lambda: get_vector_model(dataset_shapes, fractal_depth=1, hidden_layer_units=shapes['V4'], inner_hidden_layer_units=(4,), \
activation='relu', output_activation=None, \
weight_type="shared", weight_initializer='random_normal', \
optimizer=keras.optimizers.Adam(), loss=keras.losses.MeanSquaredError(), metrics=[keras.metrics.MeanSquaredError()])
validate_model_multiple(model_fun, datasets_generator_fun, epochs=epochs, num_tries=num_tries, \
loss_name="mean_squared_error", measure_name="val_mean_squared_error",
print_data=True)
###Output
Average elapsed k-fold validation time: 308.20358 sec
Last measures: [0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094586968422, 0.0909094288945198, 0.0909094363451004, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094363451004, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.090909443795681, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198]
Loss history average: [0.08926986 0.08909271 0.08942631 0.08960705 0.08950872]
Measure history average: [0.09090944 0.09090944 0.09090946 0.09090947 0.09090954]
Measure history worst: [0.09090945 0.09090944 0.09090961 0.09090968 0.09090969]
Measure history best: [0.09090943 0.09090943 0.09090943 0.09090943 0.09090943]
###Markdown
V1(X):U(5)
###Code
model_fun = lambda: get_vector_model(dataset_shapes, fractal_depth=1, hidden_layer_units=shapes['V5'], inner_hidden_layer_units=(5,), \
activation='relu', output_activation=None, \
weight_type="unique", weight_initializer='random_normal', \
optimizer=keras.optimizers.Adam(), loss=keras.losses.MeanSquaredError(), metrics=[keras.metrics.MeanSquaredError()])
validate_model_multiple(model_fun, datasets_generator_fun, epochs=epochs, num_tries=num_tries, \
loss_name="mean_squared_error", measure_name="val_mean_squared_error",
print_data=True)
###Output
Average elapsed k-fold validation time: 261.20305 sec
Last measures: [0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.09126327931880951, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094363451004, 0.0909094288945198, 0.09090985357761383, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.09088828414678574, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.09090162813663483, 0.0909094288945198, 0.0909094288945198, 0.09090948849916458, 0.0909094288945198]
Loss history average: [0.0900649 0.08952187 0.08949224 0.08921378 0.08955544]
Measure history average: [0.09092028 0.09092188 0.0909219 0.09403648 0.09093333]
Measure history worst: [0.0909802 0.09098621 0.09098509 0.09098496 0.09098478]
Measure history best: [0.0909052 0.09090745 0.0909084 0.09090919 0.09090944]
###Markdown
V1(X):S(5)
###Code
model_fun = lambda: get_vector_model(dataset_shapes, fractal_depth=1, hidden_layer_units=shapes['V5'], inner_hidden_layer_units=(5,), \
activation='relu', output_activation=None, \
weight_type="shared", weight_initializer='random_normal', \
optimizer=keras.optimizers.Adam(), loss=keras.losses.MeanSquaredError(), metrics=[keras.metrics.MeanSquaredError()])
validate_model_multiple(model_fun, datasets_generator_fun, epochs=epochs, num_tries=num_tries, \
loss_name="mean_squared_error", measure_name="val_mean_squared_error",
print_data=True)
###Output
Average elapsed k-fold validation time: 309.48448 sec
Last measures: [0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.09090474247932434, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909099206328392, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.09090874344110489, 0.14179177582263947, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.0909094288945198, 0.09063983708620071, 0.0909094288945198, 0.0909094288945198]
Loss history average: [0.08787237 0.08848708 0.08937286 0.08940356 0.08957561]
Measure history average: [0.09689058 0.09362292 0.09344053 0.09587962 0.09563675]
Measure history worst: [0.10923585 0.1072164 0.10612001 0.10369376 0.1010859 ]
Measure history best: [0.09085551 0.090883 0.09088518 0.09089922 0.09089862]
|
example/sample_usage.ipynb | ###Markdown
Example Worksheet This worksheet was created to show function usability and that the test cases pass.
###Code
import matplotlib.pyplot as plt
from colourblind8.colourblind8 import Colourblind8
import numpy as np
###Output
_____no_output_____
###Markdown
Testing the function `plot_lines()`
###Code
#Here is the test data set
x=[1,2,4,5]
y_list=[[1,2,3.3,2.2],[2,3,3.5,4],[3,3.6,4.3,4],[4,4.2,5,5.6],[5,5.5,5.2,6.5],[6,7.2,6.1,6.9],[7,8,7.4,8],[8,9,7.8,8.2],[9,9.3,9.6,9.2]]
###Output
_____no_output_____
###Markdown
Testing with the colour palette `deutera`
###Code
cb = Colourblind8()
cb.plot_lines(x=x,
y=y_list,
alpha =1.0,
labels =['a','b','c','d','e','f','g','h','i'],
palette = 'deutera',
title = "Deutera Line Example",
x_lab = "X label" ,
y_lab = "Y label" ,
legend_title = "Legend" );
###Output
_____no_output_____
###Markdown
Testing with the colour palette `prota`
###Code
cb = Colourblind8()
cb.plot_lines(x=x,
y=y_list,
alpha =1.0,
labels =['a','b','c','d','e','f','g','h','i'],
palette = 'prota',
title = "Prota Line Example",
x_lab = "X label" , y_lab = "Y label" ,
legend_title = "Legend" );
###Output
_____no_output_____
###Markdown
Testing with the colour palette `trita`
###Code
cb = Colourblind8()
cb.plot_lines(x=x,
y=y_list,
alpha =1.0,
labels =['a','b','c','d','e','f','g','h','i'],
palette = 'trita', title = "Trita Line Example",
x_lab = "X label" ,
y_lab = "Y label" ,
legend_title = "Legend" );
###Output
_____no_output_____
###Markdown
Are they passing our tests?
###Code
def test_input(x,y ,alpha, labels, palette, title, x_lab, y_lab, legend_title):
'''tests input parameters are correct types and in correct range '''
assert type(x) == list
assert type(y) == list
assert type(labels) == list
assert type(palette) == str
assert type(title) == str
assert type(x_lab) == str
assert type(y_lab) == str
assert type(legend_title) == str
assert type(alpha) == float
assert alpha <= 1.0
assert alpha >= 0.0
assert len(y) == len(labels)
test_input(x=x,
y=y_list,
alpha =1.0,
labels =['a','b','c','d','e','f','g','h','i'],
palette = 'trita', title = "Trita Line Example",
x_lab = "X label" ,
y_lab = "Y label" ,
legend_title = "Legend")
def test_num_lines():
'''A function that checks that the functions returns the correct number of
lines given an input.
'''
line_plot =cb.plot_lines(x=x,
y=y_list,
alpha =1.0,
labels =['a','b','c','d','e','f','g','h','i'],
palette = 'trita', title = "Trita Line Example",
x_lab = "X label" ,
y_lab = "Y label" ,
legend_title = "Legend" )
num_lines = line_plot.get_lines()
assert len(num_lines) == 9
def test_labels():
'''A function that checks that the functions returns the correct labels'''
line_plot = cb.plot_lines(x=x,
y=y_list,
alpha =1.0,
labels =['a','b','c','d','e','f','g','h','i'],
palette = 'trita', title = "Trita Line Example",
x_lab = "X label" ,
y_lab = "Y label" ,
legend_title = "Legend" )
assert line_plot.get_xlabel() == "X label"
assert line_plot.get_ylabel() == "Y label"
assert line_plot.get_title() == "Trita Line Example"
def test_legend():
'''A function that checks that the legend assignment inside the function works'''
line_plot = cb.plot_lines(x=x,
y=y_list,
alpha =1.0,
labels =['a','b','c','d','e','f','g','h','i'],
palette = 'trita', title = "Trita Line Example",
x_lab = "X label" ,
y_lab = "Y label" ,
legend_title = "Legend")
class_legend = str(type(line_plot.get_legend()))
assert class_legend == "<class 'matplotlib.legend.Legend'>"
test_num_lines()
test_labels()
test_legend()
###Output
_____no_output_____
###Markdown
YES! Testing the function `plot_scatter()`
###Code
#Here is the test data set
N = 10
x = [1,2,3,4,5,6,7,8,9,10]
y_1 = np.random.rand(N)
y_2 = np.random.rand(N)
y_3 = np.random.rand(N)
y_4 = np.random.rand(N)
y_5 = np.random.rand(N)
y_6 = np.random.rand(N)
y_7 = np.random.rand(N)
y_8 = np.random.rand(N)
y_9 = np.random.rand(N)
y_list= []
for i in range(9):
y = np.random.rand(N)
y_list.append(y)
###Output
_____no_output_____
###Markdown
Testing with the colour palette `deutera`
###Code
cb = Colourblind8()
cb.plot_scatter(x=x,
y=y_list,
alpha =1.0,
labels =['a','b','c','d','e','f','g','h','i'],
palette = 'deutera',
title = "Deutera scatterplot Example",
x_lab = "X label" ,
y_lab = "Y label" ,
legend_title = "Legend" );
###Output
_____no_output_____
###Markdown
Testing with the colour palette `prota`
###Code
cb = Colourblind8()
cb.plot_scatter(x=x,
y=y_list,
alpha =1.0,
labels =['a','b','c','d','e','f','g','h','i'],
palette = 'prota',
title = "Prota scatterplot Example",
x_lab = "X label" ,
y_lab = "Y label" ,
legend_title = "Legend" );
###Output
_____no_output_____
###Markdown
Testing with the colour palette `trita`
###Code
cb = Colourblind8()
cb.plot_scatter(x=x,
y=y_list,
alpha =1.0,
labels =['a','b','c','d','e','f','g','h','i'],
palette = 'trita',
title = "Trita scatterplot Example",
x_lab = "X label" ,
y_lab = "Y label" ,
legend_title = "Legend" );
###Output
_____no_output_____
###Markdown
Are they passing our tests?
###Code
def test_input(x,y ,alpha, labels, palette, title, x_lab, y_lab, legend_title):
'''tests input parameters are correct types and in correct range '''
assert type(x) == list
assert type(y) == list
assert type(labels) == list
assert type(palette) == str
assert type(title) == str
assert type(x_lab) == str
assert type(y_lab) == str
assert type(legend_title) == str
assert type(alpha) == float
assert alpha <= 1.0
assert alpha >= 0.0
assert len(y) == len(labels)
test_input(x=x,y=y_list,
alpha =1.0,
labels =['a','b','c','d','e','f','g','h','i'],
palette = 'trita',
title = "Trita scatterplot Example",
x_lab = "X label" ,
y_lab = "Y label" ,
legend_title = "Legend" )
def test_num_geoms():
'''A function that checks that the functions returns the correct number of
geom objects given an input.
'''
line_plot = cb.plot_scatter(x=x,
y=y_list,
alpha =1.0,
labels =['a','b','c','d','e','f','g','h','i'],
palette = 'trita',
title = "Trita scatterplot Example",
x_lab = "X label" ,
y_lab = "Y label" ,
legend_title = "Legend")
num_lines = line_plot.get_children()
assert len(num_lines) == 20
def test_labels():
'''A function that checks that the functions returns the correct labels'''
line_plot = cb.plot_scatter(x=x,y=y_list,
alpha =1.0,
labels =['a','b','c','d','e','f','g','h','i'],
palette = 'trita',
title = "Trita scatterplot Example",
x_lab = "X label" ,
y_lab = "Y label" ,
legend_title = "Legend")
assert line_plot.get_xlabel() == 'X label'
assert line_plot.get_ylabel() == 'Y label'
assert line_plot.get_title() == 'Trita scatterplot Example'
def test_legend():
'''A function that checks that the legend assignment inside the function works'''
line_plot = cb.plot_scatter(x=x,y=y_list,
alpha =1.0,
labels =['a','b','c','d','e','f','g','h','i'],
palette = 'trita',
title = "Trita scatterplot Example",
x_lab = "X label" ,
y_lab = "Y label" ,
legend_title = "Legend")
class_legend = str(type(line_plot.get_legend()))
assert class_legend == "<class 'matplotlib.legend.Legend'>"
test_num_geoms()
test_labels()
test_legend()
###Output
_____no_output_____
###Markdown
YES! Testing the function `plot_histogram()`
###Code
# Test dataset
cb = Colourblind8()
N = 100
x = np.random.rand(N)
y = np.random.rand(N)
z = np.random.rand(N)
list_y = [x,y,z]
###Output
_____no_output_____
###Markdown
Testing with the colour palette `deutera`
###Code
cb.plot_histogram(y = list_y,
palette='deutera',
x_lab=' X Label',
title = 'Deutera Histogram Example', alpha = 0.5, bins =10,
labels=['c', 'b', "c"],
legend_title="legend");
###Output
_____no_output_____
###Markdown
Testing with the colour palette `prota`
###Code
cb.plot_histogram(y = list_y,
palette='prota',
x_lab='X Label',
title = 'Prota Histogram Example', alpha = 0.5, bins =10,
labels=['c', 'b', "c"],
legend_title="legend");
###Output
_____no_output_____
###Markdown
Testing with the colour palette `trita`
###Code
cb.plot_histogram(y = list_y,
palette='trita',
x_lab='X Label',
title = 'Trita Histogram Example', alpha = 0.5, bins =10,
labels=['c', 'b', "c"],
legend_title="legend");
###Output
_____no_output_____
###Markdown
Are they passing our tests?
###Code
def test_input(y,alpha,bins, labels, palette, title, x_lab, legend_title):
'''tests input parameters are correct types and in correct range '''
assert type(y) == list
assert type(labels) == list
assert type(bins) == int
assert type(palette) == str
assert type(title) == str
assert type(x_lab) == str
assert type(legend_title) == str
assert type(alpha) == float
assert alpha <= 1.0
assert alpha >= 0.0
assert len(y) == len(labels)
assert bins > 0
test_input(y = list_y,
palette='trita',
x_lab='X Label',
title = 'Trita Histogram Example', alpha = 0.5, bins =10,
labels=['c', 'b', "c"],
legend_title="legend")
def test_num_geoms():
'''A function that checks that the functions returns the correct number of
geom objects given an input.
'''
hist_plot = cb.plot_histogram(y = list_y,
palette='trita',
x_lab='X Label',
title = 'Trita Histogram Example', alpha = 0.5, bins =10,
labels=['c', 'b', "c"],
legend_title="legend")
num_geoms = hist_plot.get_children()
assert len(num_geoms) == 41
def test_labels():
'''A function that checks that the functions returns the correct labels'''
hist_plot = cb.plot_histogram(y = list_y,
palette='trita',
x_lab='X Label',
title = 'Trita Histogram Example', alpha = 0.5, bins =10,
labels=['c', 'b', "c"],
legend_title="legend")
assert hist_plot.get_xlabel() == 'X Label'
assert hist_plot.get_ylabel() == 'Frequency'
assert hist_plot.get_title() == 'Trita Histogram Example'
def test_legend():
'''A function that checks that the legend assignment inside the function works'''
hist_plot = cb.plot_histogram(y = list_y,
palette='trita',
x_lab='X Label',
title = 'Trita Histogram Example', alpha = 0.5, bins =10,
labels=['c', 'b', "c"],
legend_title="legend")
class_legend = str(type(hist_plot.get_legend()))
assert class_legend == "<class 'matplotlib.legend.Legend'>"
test_num_geoms()
test_labels()
test_legend()
###Output
_____no_output_____ |
tutorials/speaker_tasks/ASR_with_SpeakerDiarization.ipynb | ###Markdown
Automatic Speech Recognition with Speaker Diarization
###Code
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# If you're using Google Colab and not running locally, run this cell.
## Install dependencies
!pip install wget
!apt-get install sox libsndfile1 ffmpeg
!pip install unidecode
# ## Install NeMo
BRANCH = 'r1.7.0'
!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]
## Install TorchAudio
!pip install torchaudio -f https://download.pytorch.org/whl/torch_stable.html
###Output
_____no_output_____
###Markdown
Introduction Speaker diarization lets us figure out "who spoke when" in the transcription. Without speaker diarization, we cannot distinguish the speakers in the transcript generated from automatic speech recognition (ASR). Nowadays, ASR combined with speaker diarization has shown immense use in many tasks, ranging from analyzing meeting transcription to media indexing. In this tutorial, we demonstrate how we can get ASR transcriptions combined with speaker labels. Since we don't include a detailed process of getting ASR results or diarization results, please refer to the following links for a more in-depth description. If you need a detailed understanding of transcribing words with ASR, refer to this [ASR Tutorial](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/asr/ASR_with_NeMo.ipynb) tutorial. For detailed parameter setting and execution of speaker diarization, refer to this [Diarization Inference](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb) tutorial. An example script that runs ASR and speaker diarization together can be found at [ASR with Diarization](https://github.com/NVIDIA/NeMo/blob/main/examples/speaker_tasks/diarization/offline_diarization_with_asr.py). Speaker diarization in ASR pipeline Speaker diarization results in the ASR pipeline should align well with ASR output. Thus, we use ASR output to create Voice Activity Detection (VAD) timestamps to obtain segments we want to diarize. The segments we obtain from the VAD timestamps are further segmented into sub-segments in the speaker diarization step. Finally, after obtaining the speaker labels from speaker diarization, we match the decoded words with speaker labels to generate a transcript with speaker labels. ASR → VAD timestamps and decoded words → speaker diarization → speaker label matching Import libraries Let's first import nemo asr and other libraries for visualization purposes.
###Code
import nemo.collections.asr as nemo_asr
import numpy as np
from IPython.display import Audio, display
import librosa
import os
import wget
import matplotlib.pyplot as plt
import nemo
import glob
import pprint
pp = pprint.PrettyPrinter(indent=4)
###Output
_____no_output_____
###Markdown
We demonstrate this tutorial using a merged AN4 audioclip. The merged audioclip contains the speech of two speakers (male and female) reading dates in different formats. Run the following script to download the audioclip and play it.
###Code
ROOT = os.getcwd()
data_dir = os.path.join(ROOT,'data')
os.makedirs(data_dir, exist_ok=True)
an4_audio_url = "https://nemo-public.s3.us-east-2.amazonaws.com/an4_diarize_test.wav"
if not os.path.exists(os.path.join(data_dir,'an4_diarize_test.wav')):
AUDIO_FILENAME = wget.download(an4_audio_url, data_dir)
else:
AUDIO_FILENAME = os.path.join(data_dir,'an4_diarize_test.wav')
audio_file_list = glob.glob(f"{data_dir}/*.wav")
print("Input audio file list: \n", audio_file_list)
signal, sample_rate = librosa.load(AUDIO_FILENAME, sr=None)
display(Audio(signal,rate=sample_rate))
###Output
_____no_output_____
###Markdown
`display_waveform()` and `get_color()` functions are defined for displaying the waveform with diarization results.
###Code
def display_waveform(signal,text='Audio',overlay_color=[]):
fig,ax = plt.subplots(1,1)
fig.set_figwidth(20)
fig.set_figheight(2)
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c='k')
if len(overlay_color):
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c=overlay_color)
fig.suptitle(text, fontsize=16)
plt.xlabel('time (secs)', fontsize=18)
plt.ylabel('signal strength', fontsize=14);
plt.axis([0,len(signal),-0.5,+0.5])
time_axis,_ = plt.xticks();
plt.xticks(time_axis[:-1],time_axis[:-1]/sample_rate);
COLORS="b g c m y".split()
def get_color(signal,speech_labels,sample_rate=16000):
c=np.array(['k']*len(signal))
for time_stamp in speech_labels:
start,end,label=time_stamp.split()
start,end = int(float(start)*16000),int(float(end)*16000),
if label == "speech":
code = 'red'
else:
code = COLORS[int(label.split('_')[-1])]
c[start:end]=code
return c
###Output
_____no_output_____
###Markdown
Using the above function, we can display the waveform of the example audio clip.
###Code
display_waveform(signal)
###Output
_____no_output_____
###Markdown
Parameter setting for ASR and diarization First, we need to set up the following parameters for ASR and diarization. We start our demonstration by first transcribing the audio recording using our pretrained ASR model `QuartzNet15x5Base-En` and use the CTC output probabilities to get timestamps for the spoken words. We then use these timestamps to get speaker label information using the speaker diarizer model.
###Code
from omegaconf import OmegaConf
import shutil
CONFIG_URL = "https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/speaker_tasks/diarization/conf/offline_diarization_with_asr.yaml"
if not os.path.exists(os.path.join(data_dir,'offline_diarization_with_asr.yaml')):
CONFIG = wget.download(CONFIG_URL, data_dir)
else:
CONFIG = os.path.join(data_dir,'offline_diarization_with_asr.yaml')
cfg = OmegaConf.load(CONFIG)
print(OmegaConf.to_yaml(cfg))
###Output
_____no_output_____
###Markdown
Speaker Diarization scripts commonly expect the following arguments:1. manifest_filepath : Path to manifest file containing json lines of format: `{"audio_filepath": "/path/to/audio_file", "offset": 0, "duration": null, "label": "infer", "text": "-", "num_speakers": null, "rttm_filepath": "/path/to/rttm/file", "uem_filepath"="/path/to/uem/filepath"}`2. out_dir : directory where outputs and intermediate files are stored. 3. oracle_vad: If this is true then we extract speech activity labels from rttm files; if False then either vad.model_path or an external manifest path containing speech activity labels has to be passed. Mandatory fields are `audio_filepath`, `offset`, `duration`, `label` and `text`. For the rest, if you would like to evaluate with a known number of speakers pass the value, else `null`. If you would like to score the system with known rttms then that should be passed as well, else `null`. The uem file is used to score only part of your audio for evaluation purposes, hence pass it if you would like to evaluate on it, else `null`.**Note:** we expect audio and corresponding RTTM to have the **same base name** and the name should be **unique**. For example: if the audio file name is **test_an4**.wav, then, if provided, we expect the corresponding rttm file name to be **test_an4**.rttm (note the matching **test_an4** base name) Let's create a manifest file with the an4 audio and rttm available. If you have more than one file you may also use the script `NeMo/scripts/speaker_tasks/pathsfiles_to_manifest.py` to generate a manifest file from a list of audio files. In addition, you can optionally include rttm files to evaluate the diarization results.
###Code
# Create a manifest file for input with below format.
# {"audio_filepath": "/path/to/audio_file", "offset": 0, "duration": null, "label": "infer", "text": "-",
# "num_speakers": null, "rttm_filepath": "/path/to/rttm/file", "uem_filepath"="/path/to/uem/filepath"}
import json
meta = {
'audio_filepath': AUDIO_FILENAME,
'offset': 0,
'duration':None,
'label': 'infer',
'text': '-',
'num_speakers': 2,
'rttm_filepath': None,
'uem_filepath' : None
}
with open(os.path.join(data_dir,'input_manifest.json'),'w') as fp:
json.dump(meta,fp)
fp.write('\n')
cfg.diarizer.manifest_filepath = os.path.join(data_dir,'input_manifest.json')
!cat {cfg.diarizer.manifest_filepath}
###Output
_____no_output_____
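###Markdown
As a quick sanity check, we can verify that every line in the manifest we just wrote is valid JSON and contains the mandatory fields listed above; the short sketch below is only for inspection and is not required by the diarizer.
###Code
import json
# Verification sketch: each manifest line should parse as JSON and contain the mandatory fields.
mandatory_fields = {"audio_filepath", "offset", "duration", "label", "text"}
with open(cfg.diarizer.manifest_filepath) as f:
    for line_number, line in enumerate(f, start=1):
        entry = json.loads(line)
        missing = mandatory_fields - set(entry)
        print(f"line {line_number}: missing fields -> {missing or 'none'}")
###Output
_____no_output_____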
###Markdown
Let's set the parameters required for diarization. In this tutorial, we obtain voice activity labels from ASR, which is set through parameter `cfg.diarizer.asr.parameters.asr_based_vad`.
###Code
pretrained_speaker_model='titanet_large'
cfg.diarizer.manifest_filepath = cfg.diarizer.manifest_filepath
cfg.diarizer.out_dir = data_dir #Directory to store intermediate files and prediction outputs
cfg.diarizer.speaker_embeddings.model_path = pretrained_speaker_model
cfg.diarizer.speaker_embeddings.parameters.window_length_in_sec = 1.5
cfg.diarizer.speaker_embeddings.parameters.shift_length_in_sec = 0.75
cfg.diarizer.clustering.parameters.oracle_num_speakers=True
# Using VAD generated from ASR timestamps
cfg.diarizer.asr.model_path = 'QuartzNet15x5Base-En'
cfg.diarizer.oracle_vad = False # ----> Not using oracle VAD
cfg.diarizer.asr.parameters.asr_based_vad = True
cfg.diarizer.asr.parameters.threshold=100 # ASR based VAD threshold: If 100, all silences under 1 sec are ignored.
cfg.diarizer.asr.parameters.decoder_delay_in_sec=0.2 # Decoder delay is compensated for 0.2 sec
###Output
_____no_output_____
###Markdown
Run ASR and get word timestampsBefore we run speaker diarization, we should run ASR and get the ASR output to generate decoded words and timestamps for those words. Let's import `ASR_TIMESTAMPS` class and create `asr_ts_decoder` instance that returns an ASR model. Using this ASR model, the following two variables are obtained from `asr_ts_decoder.run_ASR()` function. - word_hyp Dict[str, List[str]]: contains the sequence of words.- word_ts_hyp Dict[str, List[int]]: contains frame level index of the start and the end of each word.
###Code
from nemo.collections.asr.parts.utils.decoder_timestamps_utils import ASR_TIMESTAMPS
asr_ts_decoder = ASR_TIMESTAMPS(**cfg.diarizer)
asr_model = asr_ts_decoder.set_asr_model()
word_hyp, word_ts_hyp = asr_ts_decoder.run_ASR(asr_model)
print("Decoded word output dictionary: \n", word_hyp['an4_diarize_test'])
print("Word-level timestamps dictionary: \n", word_ts_hyp['an4_diarize_test'])
###Output
_____no_output_____
###Markdown
Let's create an instance `asr_diar_offline` from ASR_DIAR_OFFLINE class, which matches diarization results with ASR outputs. We pass ``cfg.diarizer`` to setup the parameters for both ASR and diarization. We also set `word_ts_anchor_offset` variable that determines the anchor position of each word. Here, we use the default value from `asr_ts_decoder` instance.
###Code
from nemo.collections.asr.parts.utils.diarization_utils import ASR_DIAR_OFFLINE
asr_diar_offline = ASR_DIAR_OFFLINE(**cfg.diarizer)
asr_diar_offline.word_ts_anchor_offset = asr_ts_decoder.word_ts_anchor_offset
###Output
_____no_output_____
###Markdown
The `asr_diar_offline` instance is now ready. As a next step, we run diarization. Run diarization with the extracted word timestamps Now that all the components for diarization are ready, let's run diarization by calling the `run_diarization()` function. `run_diarization()` will return two different variables: `diar_hyp` and `diar_score`. `diar_hyp` is the diarization inference result, which is written in `[start time] [end time] [speaker]` format. `diar_score` contains `None` since we did not provide `rttm_filepath` in the input manifest file.
###Code
diar_hyp, diar_score = asr_diar_offline.run_diarization(cfg, word_ts_hyp)
print("Diarization hypothesis output: \n", diar_hyp['an4_diarize_test'])
###Output
_____no_output_____
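###Markdown
Each entry of `diar_hyp` is a plain string in the `[start time] [end time] [speaker]` format described above; as a small illustrative sketch (not part of the NeMo API), we can split those strings into typed tuples and print a per-segment summary.
###Code
# Sketch: parse the "[start time] [end time] [speaker]" strings into (start, end, speaker) tuples.
def parse_diar_entries(entries):
    parsed = []
    for entry in entries:
        start, end, speaker = entry.split()
        parsed.append((float(start), float(end), speaker))
    return parsed

for start, end, speaker in parse_diar_entries(diar_hyp['an4_diarize_test']):
    print(f"{speaker}: {start:6.2f}s - {end:6.2f}s ({end - start:.2f}s)")
###Output
_____no_output_____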
###Markdown
`run_diarization()` function also creates `an4_diarize_test.rttm` file. Let's check what is written in this `rttm` file.
###Code
def read_file(path_to_file):
with open(path_to_file) as f:
contents = f.read().splitlines()
return contents
predicted_speaker_label_rttm_path = f"{data_dir}/pred_rttms/an4_diarize_test.rttm"
pred_rttm = read_file(predicted_speaker_label_rttm_path)
pp.pprint(pred_rttm)
from nemo.collections.asr.parts.utils.speaker_utils import rttm_to_labels
pred_labels = rttm_to_labels(predicted_speaker_label_rttm_path)
color = get_color(signal, pred_labels)
display_waveform(signal,'Audio with Speaker Labels', color)
display(Audio(signal,rate=16000))
###Output
_____no_output_____
###Markdown
Check the speaker-labeled ASR transcription outputNow we've done all the processes for running ASR and diarization, let's match the diarization result with the ASR result and get the final output. `get_transcript_with_speaker_labels()` function in `asr_diar_offline` matches diarization output `diar_hyp` with `word_hyp` using the timestamp information from `word_ts_hyp`.
###Code
asr_diar_offline.get_transcript_with_speaker_labels(diar_hyp, word_hyp, word_ts_hyp)
###Output
_____no_output_____
###Markdown
After running `get_transcript_with_speaker_labels()` function, the transcription output will be located in `./pred_rttms` folder, which shows **start time to end time of the utterance, speaker ID, and words spoken** during the notified time.
###Code
transcription_path_to_file = f"{data_dir}/pred_rttms/an4_diarize_test.txt"
transcript = read_file(transcription_path_to_file)
pp.pprint(transcript)
###Output
_____no_output_____
###Markdown
Another output is transcription output in JSON format, which is saved in `./pred_rttms/an4_diarize_test.json`. In the JSON format output, we include information such as **transcription, estimated number of speakers (variable named `speaker_count`), start and end time of each word and most importantly, speaker label for each word.**
###Code
transcription_path_to_file = f"{data_dir}/pred_rttms/an4_diarize_test.json"
json_contents = read_file(transcription_path_to_file)
pp.pprint(json_contents)
###Output
_____no_output_____
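###Markdown
Since the file is JSON, it can also be loaded as a Python dict instead of raw text lines; the key names used below (`speaker_count`, `words`) are assumptions for illustration, so print the top-level keys first and adjust to the structure your NeMo version actually writes.
###Code
import json
# Sketch: load the JSON output as a dict and peek at a few word-level entries.
# The "speaker_count" and "words" keys are assumptions; confirm them via the printed key list.
with open(transcription_path_to_file) as f:
    result = json.load(f)
print("Top-level keys:", list(result.keys()))
print("Estimated number of speakers:", result.get("speaker_count"))
for word_entry in result.get("words", [])[:5]:
    print(word_entry)
###Output
_____no_output_____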
###Markdown
Optional Features for ASR with Speaker Diarization Beam search decoderBeam-search decoder can be applied to CTC based ASR models. To use this feature, [pyctcdecode](https://github.com/kensho-technologies/pyctcdecode) should be installed. [pyctcdecode](https://github.com/kensho-technologies/pyctcdecode) supports word timestamp generation and can be applied to speaker diarization. pyctcdecode also requires [KenLM](https://github.com/kpu/kenlm) and KenLM is recommended to be installed using PyPI. Install pyctcdecode in your environment with the following commands:
###Code
!pip install pyctcdecode
!pip install https://github.com/kpu/kenlm/archive/master.zip
###Output
_____no_output_____
###Markdown
You can download publicly available language models (`.arpa` files) at [KALDI Tedlium Language Models](https://kaldi-asr.org/models/m5). Download [4-gram Big ARPA](https://kaldi-asr.org/models/5/4gram_big.arpa.gz) and provide the model path. Let's download the language model file to `data_dir` folder.
###Code
import gzip
import shutil
def gunzip(file_path,output_path):
with gzip.open(file_path,"rb") as f_in, open(output_path,"wb") as f_out:
shutil.copyfileobj(f_in, f_out)
f_in.close()
f_out.close()
ARPA_URL = 'https://kaldi-asr.org/models/5/4gram_big.arpa.gz'
f = wget.download(ARPA_URL, data_dir)
gunzip(f,f.replace(".gz",""))
###Output
_____no_output_____
###Markdown
Provide the downloaded arpa language model file to `cfg.diarizer`.
###Code
arpa_model_path = os.path.join(data_dir, '4gram_big.arpa')
cfg.diarizer.asr.ctc_decoder_parameters.pretrained_language_model = arpa_model_path
###Output
_____no_output_____
###Markdown
Create a new `asr_ts_decoder` instance with the updated `cfg.diarizer`. The decoder script will launch pyctcdecode for decoding words and timestamps.
###Code
import importlib
import nemo.collections.asr.parts.utils.decoder_timestamps_utils as decoder_timestamps_utils
importlib.reload(decoder_timestamps_utils) # This module should be reloaded after you install pyctcdecode.
asr_ts_decoder = ASR_TIMESTAMPS(**cfg.diarizer)
asr_model = asr_ts_decoder.set_asr_model()
word_hyp, word_ts_hyp = asr_ts_decoder.run_ASR(asr_model)
print("Decoded word output dictionary: \n", word_hyp['an4_diarize_test'])
print("Word-level timestamps dictionary: \n", word_ts_hyp['an4_diarize_test'])
###Output
_____no_output_____
###Markdown
Realign Words with a Language Model (Experimental) Diarization result with ASR transcript can be enhanced by applying a language model. The mapping between speaker labels and words can be realigned by employing language models. The realigning process calculates the probability of the words around the boundary between two hypothetical sentences spoken by different speakers. k-th word: `but` hyp_former: "since i think like tuesday but he's coming back to albuquerque" hyp_latter: "since i think like tuesday but he's coming back to albuquerque"The joint probabilities of words in the sentence are computed for these two hypotheses. In this example, `hyp_former` is likely to get a higher score and thus word `but` will be assigned to the second speaker.To use this feature, python package [arpa](https://pypi.org/project/arpa/) should be installed.
###Code
!pip install arpa
###Output
_____no_output_____
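###Markdown
To make the realignment idea above concrete, here is a minimal sketch (not NeMo's internal code) that scores the two possible placements of the boundary word `but` with the ARPA language model downloaded earlier and keeps the split with the higher total log-probability; loading a large ARPA file with the pure-Python `arpa` package is slow and memory hungry, so treat this purely as an illustration.
###Code
import arpa  # requires the `arpa` package installed above

# Sketch of the realignment idea: the boundary word "but" can end the first speaker's
# sentence or start the second speaker's sentence; score both splits with the LM.
lm = arpa.loadf(arpa_model_path)[0]  # 4gram_big.arpa downloaded in the beam search section

def score_split(sentences):
    try:
        return sum(lm.log_s(sentence) for sentence in sentences)
    except KeyError as oov_word:  # a word missing from the LM vocabulary
        print("Out-of-vocabulary word:", oov_word)
        return float("-inf")

split_a = ["since i think like tuesday but", "he's coming back to albuquerque"]  # "but" -> first speaker
split_b = ["since i think like tuesday", "but he's coming back to albuquerque"]  # "but" -> second speaker
score_a, score_b = score_split(split_a), score_split(split_b)
print(f"log P(split A) = {score_a:.2f}")
print(f"log P(split B) = {score_b:.2f}")
print("'but' is assigned to the", "first" if score_a > score_b else "second", "speaker")
###Output
_____no_output_____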
###Markdown
`diarizer.asr.realigning_lm_parameters.logprob_diff_threshold` can be modified to optimize the diarization performance (default value is 1.2). This is a threshold value for the gap between two log-probabilities of two hypotheses. Thus, the lower the threshold, the more changes are expected to be seen in the output transcript. `arpa` package also uses KenLM language models as in pyctcdecode. You can download publicly available [4-gram Big ARPA](https://kaldi-asr.org/models/5/4gram_big.arpa.gz) model and provide the model path to hydra configuration as follows.
###Code
arpa_model_path = os.path.join(data_dir, '4gram_big.arpa')
cfg.diarizer.asr.realigning_lm_parameters.arpa_language_model = arpa_model_path
cfg.diarizer.asr.realigning_lm_parameters.logprob_diff_threshold = 1.2
import importlib
import nemo.collections.asr.parts.utils.diarization_utils as diarization_utils
importlib.reload(diarization_utils) # This module should be reloaded after you install arpa.
# Create a new instance with realigning language model
asr_diar_offline = ASR_DIAR_OFFLINE(**cfg.diarizer)
asr_diar_offline.word_ts_anchor_offset = asr_ts_decoder.word_ts_anchor_offset
###Output
_____no_output_____
###Markdown
Now that the language model for realigning is set up, you can run `get_transcript_with_speaker_labels()` to get the results with realigning.
###Code
asr_diar_offline.get_transcript_with_speaker_labels(diar_hyp, word_hyp, word_ts_hyp)
transcription_path_to_file = f"{data_dir}/pred_rttms/an4_diarize_test.txt"
transcript = read_file(transcription_path_to_file)
pp.pprint(transcript)
###Output
_____no_output_____
###Markdown
Automatic Speech Recognition with Speaker Diarization
###Code
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# If you're using Google Colab and not running locally, run this cell.
## Install dependencies
!pip install wget
!apt-get install sox libsndfile1 ffmpeg
!pip install unidecode
# ## Install NeMo
BRANCH = 'main'
!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]
## Install TorchAudio
!pip install torchaudio -f https://download.pytorch.org/whl/torch_stable.html
###Output
_____no_output_____
###Markdown
Introduction Speaker diarization lets us figure out "who spoke when" in the transcription. Without speaker diarization, we cannot distinguish the speakers in the transcript generated from automatic speech recognition (ASR). Nowadays, ASR combined with speaker diarization has shown immense use in many tasks, ranging from analyzing meeting transcription to media indexing. In this tutorial, we demonstrate how we can get ASR transcriptions combined with speaker labels. Since we don't include the detailed process of getting the ASR or diarization results, please refer to the following links for a more in-depth description.If you need a detailed understanding of transcribing words with ASR, refer to this [ASR Tutorial](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/asr/ASR_with_NeMo.ipynb) tutorial.For detailed parameter setting and execution of speaker diarization, refer to this [Diarization Inference](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb) tutorial.An example script that runs ASR and speaker diarization together can be found at [ASR with Diarization](https://github.com/NVIDIA/NeMo/blob/main/examples/speaker_tasks/diarization/asr_with_diarization.py). Speaker diarization in ASR pipelineSpeaker diarization results in the ASR pipeline should align well with the ASR output. Thus, we use the ASR output to create Voice Activity Detection (VAD) timestamps to obtain the segments we want to diarize. The segments we obtain from the VAD timestamps are further segmented into sub-segments in the speaker diarization step. Finally, after obtaining the speaker labels from speaker diarization, we match the decoded words with speaker labels to generate a transcript with speaker labels. ASR → VAD timestamps and decoded words → speaker diarization → speaker label matching Import librariesLet's first import nemo asr and other libraries for visualization purposes.
###Code
import nemo.collections.asr as nemo_asr
import numpy as np
from IPython.display import Audio, display
import librosa
import os
import wget
import matplotlib.pyplot as plt
import nemo
from nemo.collections.asr.parts.utils.diarization_utils import ASR_DIAR_OFFLINE
import glob
import pprint
pp = pprint.PrettyPrinter(indent=4)
def read_file(path_to_file):
with open(path_to_file) as f:
contents = f.read().splitlines()
return contents
###Output
_____no_output_____
###Markdown
We demonstrate this tutorial using a merged AN4 audioclip. The merged audioclip contains the speech of two speakers (male and female) reading dates in different formats. Run the following script to download the audioclip and play it.
###Code
ROOT = os.getcwd()
data_dir = os.path.join(ROOT,'data')
os.makedirs(data_dir, exist_ok=True)
an4_audio_url = "https://nemo-public.s3.us-east-2.amazonaws.com/an4_diarize_test.wav"
if not os.path.exists(os.path.join(data_dir,'an4_diarize_test.wav')):
AUDIO_FILENAME = wget.download(an4_audio_url, data_dir)
else:
AUDIO_FILENAME = os.path.join(data_dir,'an4_diarize_test.wav')
audio_file_list = glob.glob(f"{data_dir}/*.wav")
print("Input audio file list: \n", audio_file_list)
signal, sample_rate = librosa.load(AUDIO_FILENAME, sr=None)
display(Audio(signal,rate=sample_rate))
###Output
_____no_output_____
###Markdown
`display_waveform()` and `get_color()` functions are defined for displaying the waveform with diarization results.
###Code
def display_waveform(signal,text='Audio',overlay_color=[]):
fig,ax = plt.subplots(1,1)
fig.set_figwidth(20)
fig.set_figheight(2)
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c='k')
if len(overlay_color):
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c=overlay_color)
fig.suptitle(text, fontsize=16)
plt.xlabel('time (secs)', fontsize=18)
plt.ylabel('signal strength', fontsize=14);
plt.axis([0,len(signal),-0.5,+0.5])
time_axis,_ = plt.xticks();
plt.xticks(time_axis[:-1],time_axis[:-1]/sample_rate);
COLORS="b g c m y".split()
def get_color(signal,speech_labels,sample_rate=16000):
c=np.array(['k']*len(signal))
for time_stamp in speech_labels:
start,end,label=time_stamp.split()
        start, end = int(float(start)*sample_rate), int(float(end)*sample_rate)
if label == "speech":
code = 'red'
else:
code = COLORS[int(label.split('_')[-1])]
c[start:end]=code
return c
###Output
_____no_output_____
###Markdown
Using the above function, we can display the waveform of the example audio clip.
###Code
display_waveform(signal)
###Output
_____no_output_____
###Markdown
Parameter setting for ASR and diarizationFirst, we need to set up the following parameters for ASR and diarization. We start our demonstration by transcribing the audio recording with our pretrained ASR model `QuartzNet15x5Base-En` and using the CTC output probabilities to get timestamps for the spoken words. We then use these timestamps to get speaker label information using the speaker diarizer model.
###Code
CONFIG_URL = "https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/speaker_tasks/diarization/conf/speaker_diarization.yaml"
params = {
"round_float": 2,
"window_length_in_sec": 1.0,
"shift_length_in_sec": 0.25,
"fix_word_ts_with_VAD": False,
"print_transcript": False,
"word_gap_in_sec": 0.01,
"max_word_ts_length_in_sec": 0.6,
"minimum": True,
"threshold": 300,
"diar_config_url": CONFIG_URL,
"ASR_model_name": 'QuartzNet15x5Base-En',
}
###Output
_____no_output_____
###Markdown
Let's create an instance from ASR_DIAR_OFFLINE class. We pass the ``params`` variable to setup the parameters for both ASR and diarization.
###Code
asr_diar_offline = ASR_DIAR_OFFLINE(params)
asr_model = asr_diar_offline.set_asr_model(params['ASR_model_name'])
###Output
_____no_output_____
###Markdown
We will create folders that we need for storing VAD stamps and ASR/diarization results.Under the folder named ``asr_with_diar``, the following folders will be created.- ``oracle_vad``- ``json_result``- ``transcript_with_speaker_labels``
###Code
asr_diar_offline.create_directories()
print("Folders are created as below.")
print("VAD file path: \n", asr_diar_offline.oracle_vad_dir)
print("JSON result path: \n", asr_diar_offline.json_result_dir)
print("Transcript result path: \n", asr_diar_offline.trans_with_spks_dir)
###Output
_____no_output_____
###Markdown
Run ASR and get word timestampsBefore we run speaker diarization, we should run ASR and get the ASR output to generate decoded words and timestamps for those words. The following two variables are obtained from `run_ASR()` function. - words List[str]: contains the sequence of words.- word_ts List[int]: contains frame level index of the start and the end of each word.
###Code
word_list, word_ts_list = asr_diar_offline.run_ASR(audio_file_list, asr_model)
print("Decoded word output: \n", word_list[0])
print("Word-level timestamps \n", word_ts_list[0])
###Output
_____no_output_____
###Markdown
Run diarization with extracted word timestampsWe need to convert the ASR-based VAD output (*.rttm format) to a VAD manifest (*.json format) file. The following function converts the rttm files into a manifest file and returns the path to the manifest file. Now that all the components for diarization are ready, let's run diarization by calling the `run_diarization()` function.
###Code
oracle_manifest = 'asr_based_vad'
# If we know the number of speakers, we can assign "2".
num_speakers = None
speaker_embedding_model = 'ecapa_tdnn'
diar_labels = asr_diar_offline.run_diarization(audio_file_list,
word_ts_list,
oracle_manifest=oracle_manifest,
oracle_num_speakers=num_speakers,
pretrained_speaker_model=speaker_embedding_model)
###Output
_____no_output_____
###Markdown
The `run_diarization()` function creates the `./asr_with_diar/oracle_vad/pred_rttms/an4_diarize_test.rttm` file. Let's see what is written in this `rttm` file.
###Code
predicted_speaker_label_rttm_path = f"{ROOT}/asr_with_diar/oracle_vad/pred_rttms/an4_diarize_test.rttm"
pred_rttm = read_file(predicted_speaker_label_rttm_path)
pp.pprint(pred_rttm)
###Output
_____no_output_____
###Markdown
`run_diarization()` also returns a variable named `diar_labels` which contains the estimated speaker label information with timestamps from the predicted rttm file.
###Code
print("Diarization Labels:")
pp.pprint(diar_labels[0])
color = get_color(signal, diar_labels[0])
display_waveform(signal,'Audio with Speaker Labels', color)
display(Audio(signal,rate=16000))
###Output
_____no_output_____
###Markdown
Check the transcription outputNow that we've done all the processes for running ASR and diarization, let's match the diarization result with the ASR result and get the final output. The `write_json_and_transcript()` function matches the diarization output `diar_labels` with `word_list` using the timestamp information `word_ts_list`.
###Code
asr_output_dict = asr_diar_offline.write_json_and_transcript(audio_file_list, diar_labels, word_list, word_ts_list)
###Output
_____no_output_____
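###Markdown
Conceptually, `write_json_and_transcript()` assigns each decoded word to the diarization segment that covers it; the toy sketch below illustrates that matching step with made-up timestamps (in seconds) and is not the notebook's actual data or NeMo's implementation.
###Code
# Toy illustration of word-to-speaker matching: a word goes to the segment containing its midpoint.
toy_diar_labels = ["0.00 2.50 speaker_0", "2.50 5.00 speaker_1"]   # "start end speaker" strings
toy_words       = ["eleven", "twenty", "seven", "october"]
toy_word_ts     = [(0.4, 0.9), (1.1, 1.6), (2.7, 3.2), (3.4, 4.1)]  # hypothetical (start, end) in seconds

segments = []
for entry in toy_diar_labels:
    seg_start, seg_end, speaker = entry.split()
    segments.append((float(seg_start), float(seg_end), speaker))

for word, (w_start, w_end) in zip(toy_words, toy_word_ts):
    midpoint = (w_start + w_end) / 2
    speaker = next((spk for s, e, spk in segments if s <= midpoint < e), "unknown")
    print(f"{word:>10s} -> {speaker}")
###Output
_____no_output_____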
###Markdown
After running `write_json_and_transcript()` function, the transcription output will be located in `./asr_with_diar/transcript_with_speaker_labels` folder, which shows **start time to end time of the utterance, speaker ID, and words spoken** during the notified time.
###Code
transcription_path_to_file = f"{ROOT}/asr_with_diar/transcript_with_speaker_labels/an4_diarize_test.txt"
transcript = read_file(transcription_path_to_file)
pp.pprint(transcript)
###Output
_____no_output_____
###Markdown
Another output is transcription output in JSON format, which is saved in `./asr_with_diar/json_result`. In the JSON format output, we include information such as **transcription, estimated number of speakers (variable named `speaker_count`), start and end time of each word and most importantly, speaker label for each word.**
###Code
transcription_path_to_file = f"{ROOT}/asr_with_diar/json_result/an4_diarize_test.json"
json_contents = read_file(transcription_path_to_file)
pp.pprint(json_contents)
###Output
_____no_output_____
###Markdown
Automatic Speech Recognition with Speaker Diarization
###Code
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# If you're using Google Colab and not running locally, run this cell.
## Install dependencies
!pip install wget
!apt-get install sox libsndfile1 ffmpeg
!pip install unidecode
# ## Install NeMo
BRANCH = 'main'
!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]
## Install TorchAudio
!pip install torchaudio -f https://download.pytorch.org/whl/torch_stable.html
###Output
_____no_output_____
###Markdown
Introduction Speaker diarization lets us figure out "who spoke when" in the transcription. Without speaker diarization, we cannot distinguish the speakers in the transcript generated from automatic speech recognition (ASR). Nowadays, ASR combined with speaker diarization has shown immense use in many tasks, ranging from analyzing meeting transcription to media indexing. In this tutorial, we demonstrate how we can get ASR transcriptions combined with speaker labels. Since we don't include the detailed process of getting the ASR or diarization results, please refer to the following links for a more in-depth description.If you need a detailed understanding of transcribing words with ASR, refer to this [ASR Tutorial](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/asr/ASR_with_NeMo.ipynb) tutorial.For detailed parameter setting and execution of speaker diarization, refer to this [Diarization Inference](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb) tutorial.An example script that runs ASR and speaker diarization together can be found at [ASR with Diarization](https://github.com/NVIDIA/NeMo/blob/main/examples/speaker_tasks/diarization/asr_with_diarization.py). Speaker diarization in ASR pipelineSpeaker diarization results in the ASR pipeline should align well with the ASR output. Thus, we use the ASR output to create Voice Activity Detection (VAD) timestamps to obtain the segments we want to diarize. The segments we obtain from the VAD timestamps are further segmented into sub-segments in the speaker diarization step. Finally, after obtaining the speaker labels from speaker diarization, we match the decoded words with speaker labels to generate a transcript with speaker labels. ASR → VAD timestamps and decoded words → speaker diarization → speaker label matching Import librariesLet's first import nemo asr and other libraries for visualization purposes.
###Code
import nemo.collections.asr as nemo_asr
import numpy as np
from IPython.display import Audio, display
import librosa
import os
import wget
import matplotlib.pyplot as plt
import nemo
import glob
import pprint
pp = pprint.PrettyPrinter(indent=4)
###Output
_____no_output_____
###Markdown
We demonstrate this tutorial using a merged AN4 audioclip. The merged audioclip contains the speech of two speakers (male and female) reading dates in different formats. Run the following script to download the audioclip and play it.
###Code
ROOT = os.getcwd()
data_dir = os.path.join(ROOT,'data')
os.makedirs(data_dir, exist_ok=True)
an4_audio_url = "https://nemo-public.s3.us-east-2.amazonaws.com/an4_diarize_test.wav"
if not os.path.exists(os.path.join(data_dir,'an4_diarize_test.wav')):
AUDIO_FILENAME = wget.download(an4_audio_url, data_dir)
else:
AUDIO_FILENAME = os.path.join(data_dir,'an4_diarize_test.wav')
audio_file_list = glob.glob(f"{data_dir}/*.wav")
print("Input audio file list: \n", audio_file_list)
signal, sample_rate = librosa.load(AUDIO_FILENAME, sr=None)
display(Audio(signal,rate=sample_rate))
###Output
_____no_output_____
###Markdown
`display_waveform()` and `get_color()` functions are defined for displaying the waveform with diarization results.
###Code
def display_waveform(signal,text='Audio',overlay_color=[]):
fig,ax = plt.subplots(1,1)
fig.set_figwidth(20)
fig.set_figheight(2)
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c='k')
if len(overlay_color):
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c=overlay_color)
fig.suptitle(text, fontsize=16)
plt.xlabel('time (secs)', fontsize=18)
plt.ylabel('signal strength', fontsize=14);
plt.axis([0,len(signal),-0.5,+0.5])
time_axis,_ = plt.xticks();
plt.xticks(time_axis[:-1],time_axis[:-1]/sample_rate);
COLORS="b g c m y".split()
def get_color(signal,speech_labels,sample_rate=16000):
c=np.array(['k']*len(signal))
for time_stamp in speech_labels:
start,end,label=time_stamp.split()
        start, end = int(float(start)*sample_rate), int(float(end)*sample_rate)
if label == "speech":
code = 'red'
else:
code = COLORS[int(label.split('_')[-1])]
c[start:end]=code
return c
###Output
_____no_output_____
###Markdown
Using the above function, we can display the waveform of the example audio clip.
###Code
display_waveform(signal)
###Output
_____no_output_____
###Markdown
Parameter setting for ASR and diarizationFirst, we need to set up the following parameters for ASR and diarization. We start our demonstration by transcribing the audio recording with our pretrained ASR model `QuartzNet15x5Base-En` and using the CTC output probabilities to get timestamps for the spoken words. We then use these timestamps to get speaker label information using the speaker diarizer model.
###Code
from omegaconf import OmegaConf
import shutil
CONFIG_URL = "https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/speaker_tasks/diarization/conf/offline_diarization_with_asr.yaml"
if not os.path.exists(os.path.join(data_dir,'offline_diarization_with_asr.yaml')):
CONFIG = wget.download(CONFIG_URL, data_dir)
else:
CONFIG = os.path.join(data_dir,'offline_diarization_with_asr.yaml')
cfg = OmegaConf.load(CONFIG)
print(OmegaConf.to_yaml(cfg))
###Output
_____no_output_____
###Markdown
Speaker Diarization scripts commonly expect the following arguments:1. manifest_filepath : Path to manifest file containing json lines of format: `{"audio_filepath": "/path/to/audio_file", "offset": 0, "duration": null, "label": "infer", "text": "-", "num_speakers": null, "rttm_filepath": "/path/to/rttm/file", "uem_filepath": "/path/to/uem/filepath"}`2. out_dir : directory where outputs and intermediate files are stored. 3. oracle_vad: If this is true then we extract speech activity labels from rttm files; if False then either 4. vad.model_path or an external_manifestpath containing speech activity labels has to be passed. Mandatory fields are `audio_filepath`, `offset`, `duration`, `label` and `text`. For the rest, if you would like to evaluate with a known number of speakers, pass the value, else `null`. If you would like to score the system with known rttms then that should be passed as well, else `null`. The uem file is used to score only part of your audio for evaluation purposes, hence pass it if you would like to evaluate on it, else `null`.**Note:** we expect the audio and the corresponding RTTM to have the **same base name** and the name should be **unique**. For example: if the audio file name is **test_an4**.wav, we expect the corresponding rttm file (if provided) to be named **test_an4**.rttm (note the matching **test_an4** base name). Let's create a manifest file with the an4 audio and rttm available. If you have more than one file you may also use the script `NeMo/scripts/speaker_tasks/pathsfiles_to_manifest.py` to generate a manifest file from a list of audio files. In addition, you can optionally include rttm files to evaluate the diarization results.
###Code
# Create a manifest file for input with below format.
# {"audio_filepath": "/path/to/audio_file", "offset": 0, "duration": null, "label": "infer", "text": "-",
# "num_speakers": null, "rttm_filepath": "/path/to/rttm/file", "uem_filepath"="/path/to/uem/filepath"}
import json
meta = {
'audio_filepath': AUDIO_FILENAME,
'offset': 0,
'duration':None,
'label': 'infer',
'text': '-',
'num_speakers': 2,
'rttm_filepath': None,
'uem_filepath' : None
}
with open(os.path.join(data_dir,'input_manifest.json'),'w') as fp:
json.dump(meta,fp)
fp.write('\n')
cfg.diarizer.manifest_filepath = os.path.join(data_dir,'input_manifest.json')
!cat {cfg.diarizer.manifest_filepath}
###Output
_____no_output_____
###Markdown
Let's set the parameters required for diarization. In this tutorial, we obtain voice activity labels from ASR, which is set through parameter `cfg.diarizer.asr.parameters.asr_based_vad`.
###Code
pretrained_speaker_model='titanet_large'
cfg.diarizer.manifest_filepath = cfg.diarizer.manifest_filepath
cfg.diarizer.out_dir = data_dir #Directory to store intermediate files and prediction outputs
cfg.diarizer.speaker_embeddings.model_path = pretrained_speaker_model
cfg.diarizer.speaker_embeddings.parameters.window_length_in_sec = 1.5
cfg.diarizer.speaker_embeddings.parameters.shift_length_in_sec = 0.75
cfg.diarizer.clustering.parameters.oracle_num_speakers=True
# Using VAD generated from ASR timestamps
cfg.diarizer.asr.model_path = 'QuartzNet15x5Base-En'
cfg.diarizer.oracle_vad = False # ----> Not using oracle VAD
cfg.diarizer.asr.parameters.asr_based_vad = True
cfg.diarizer.asr.parameters.threshold=100 # ASR based VAD threshold: If 100, all silences under 1 sec are ignored.
cfg.diarizer.asr.parameters.decoder_delay_in_sec=0.2 # Decoder delay is compensated for 0.2 sec
###Output
_____no_output_____
###Markdown
Run ASR and get word timestampsBefore we run speaker diarization, we should run ASR and get the ASR output to generate decoded words and timestamps for those words. Let's import `ASR_TIMESTAMPS` class and create `asr_ts_decoder` instance that returns an ASR model. Using this ASR model, the following two variables are obtained from `asr_ts_decoder.run_ASR()` function. - word_hyp Dict[str, List[str]]: contains the sequence of words.- word_ts_hyp Dict[str, List[int]]: contains frame level index of the start and the end of each word.
###Code
from nemo.collections.asr.parts.utils.decoder_timestamps_utils import ASR_TIMESTAMPS
asr_ts_decoder = ASR_TIMESTAMPS(**cfg.diarizer)
asr_model = asr_ts_decoder.set_asr_model()
word_hyp, word_ts_hyp = asr_ts_decoder.run_ASR(asr_model)
print("Decoded word output dictionary: \n", word_hyp['an4_diarize_test'])
print("Word-level timestamps dictionary: \n", word_ts_hyp['an4_diarize_test'])
###Output
_____no_output_____
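###Markdown
Assuming the two lists above are parallel (one timestamp entry per decoded word), a tiny sketch like the following pairs each word with its timestamp entry for easier reading.
###Code
# Sketch: pair each decoded word with its timestamp entry from run_ASR().
uid = 'an4_diarize_test'
for word, timestamp in zip(word_hyp[uid], word_ts_hyp[uid]):
    print(f"{word:>12s}  {timestamp}")
###Output
_____no_output_____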
###Markdown
Let's create an instance `asr_diar_offline` from ASR_DIAR_OFFLINE class, which matches diarization results with ASR outputs. We pass ``cfg.diarizer`` to setup the parameters for both ASR and diarization. We also set `word_ts_anchor_offset` variable that determines the anchor position of each word. Here, we use the default value from `asr_ts_decoder` instance.
###Code
from nemo.collections.asr.parts.utils.diarization_utils import ASR_DIAR_OFFLINE
asr_diar_offline = ASR_DIAR_OFFLINE(**cfg.diarizer)
asr_diar_offline.word_ts_anchor_offset = asr_ts_decoder.word_ts_anchor_offset
###Output
_____no_output_____
###Markdown
The `asr_diar_offline` instance is now ready. As a next step, we run diarization. Run diarization with the extracted word timestamps Now that all the components for diarization are ready, let's run diarization by calling the `run_diarization()` function. `run_diarization()` will return two different variables: `diar_hyp` and `diar_score`. `diar_hyp` is the diarization inference result, which is written in `[start time] [end time] [speaker]` format. `diar_score` contains `None` since we did not provide `rttm_filepath` in the input manifest file.
###Code
diar_hyp, diar_score = asr_diar_offline.run_diarization(cfg, word_ts_hyp)
print("Diarization hypothesis output: \n", diar_hyp['an4_diarize_test'])
###Output
_____no_output_____
###Markdown
`run_diarization()` function also creates `an4_diarize_test.rttm` file. Let's check what is written in this `rttm` file.
###Code
def read_file(path_to_file):
with open(path_to_file) as f:
contents = f.read().splitlines()
return contents
predicted_speaker_label_rttm_path = f"{data_dir}/pred_rttms/an4_diarize_test.rttm"
pred_rttm = read_file(predicted_speaker_label_rttm_path)
pp.pprint(pred_rttm)
from nemo.collections.asr.parts.utils.speaker_utils import rttm_to_labels
pred_labels = rttm_to_labels(predicted_speaker_label_rttm_path)
color = get_color(signal, pred_labels)
display_waveform(signal,'Audio with Speaker Labels', color)
display(Audio(signal,rate=16000))
###Output
_____no_output_____
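###Markdown
As a small add-on (not part of the tutorial's required steps), the `start end speaker` label strings returned by `rttm_to_labels` can be summed into the total speaking time per speaker.
###Code
from collections import defaultdict

# Sketch: accumulate total speaking time per speaker from the "start end speaker" label strings.
speaking_time = defaultdict(float)
for label in pred_labels:
    start, end, speaker = label.split()
    speaking_time[speaker] += float(end) - float(start)
for speaker, seconds in speaking_time.items():
    print(f"{speaker}: {seconds:.2f}s of speech")
###Output
_____no_output_____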
###Markdown
Check the speaker-labeled ASR transcription outputNow that we've done all the processes for running ASR and diarization, let's match the diarization result with the ASR result and get the final output. The `get_transcript_with_speaker_labels()` function in `asr_diar_offline` matches the diarization output `diar_hyp` with `word_hyp` using the timestamp information from `word_ts_hyp`.
###Code
asr_diar_offline.get_transcript_with_speaker_labels(diar_hyp, word_hyp, word_ts_hyp)
###Output
_____no_output_____
###Markdown
After running `get_transcript_with_speaker_labels()` function, the transcription output will be located in `./pred_rttms` folder, which shows **start time to end time of the utterance, speaker ID, and words spoken** during the notified time.
###Code
transcription_path_to_file = f"{data_dir}/pred_rttms/an4_diarize_test.txt"
transcript = read_file(transcription_path_to_file)
pp.pprint(transcript)
###Output
_____no_output_____
###Markdown
Another output is transcription output in JSON format, which is saved in `./pred_rttms/an4_diarize_test.json`. In the JSON format output, we include information such as **transcription, estimated number of speakers (variable named `speaker_count`), start and end time of each word and most importantly, speaker label for each word.**
###Code
transcription_path_to_file = f"{data_dir}/pred_rttms/an4_diarize_test.json"
json_contents = read_file(transcription_path_to_file)
pp.pprint(json_contents)
###Output
_____no_output_____
###Markdown
Optional Features for ASR with Speaker Diarization Beam search decoderBeam-search decoder can be applied to CTC based ASR models. To use this feature, [pyctcdecode](https://github.com/kensho-technologies/pyctcdecode) should be installed. [pyctcdecode](https://github.com/kensho-technologies/pyctcdecode) supports word timestamp generation and can be applied to speaker diarization. pyctcdecode also requires [KenLM](https://github.com/kpu/kenlm) and KenLM is recommended to be installed using PyPI. Install pyctcdecode in your environment with the following commands:
###Code
!pip install pyctcdecode
!pip install https://github.com/kpu/kenlm/archive/master.zip
###Output
_____no_output_____
###Markdown
You can download publicly available language models (`.arpa` files) at [KALDI Tedlium Language Models](https://kaldi-asr.org/models/m5). Download [4-gram Big ARPA](https://kaldi-asr.org/models/5/4gram_big.arpa.gz) and provide the model path. Let's download the language model file to `data_dir` folder.
###Code
import gzip
import shutil
def gunzip(file_path,output_path):
with gzip.open(file_path,"rb") as f_in, open(output_path,"wb") as f_out:
shutil.copyfileobj(f_in, f_out)
f_in.close()
f_out.close()
ARPA_URL = 'https://kaldi-asr.org/models/5/4gram_big.arpa.gz'
f = wget.download(ARPA_URL, data_dir)
gunzip(f,f.replace(".gz",""))
###Output
_____no_output_____
###Markdown
Provide the downloaded arpa language model file to `cfg.diarizer`.
###Code
arpa_model_path = os.path.join(data_dir, '4gram_big.arpa')
cfg.diarizer.asr.ctc_decoder_parameters.pretrained_language_model = arpa_model_path
###Output
_____no_output_____
###Markdown
Create a new `asr_ts_decoder` instance with the updated `cfg.diarizer`. The decoder script will launch pyctcdecode for decoding words and timestamps.
###Code
asr_ts_decoder = ASR_TIMESTAMPS(**cfg.diarizer)
asr_model = asr_ts_decoder.set_asr_model()
word_hyp, word_ts_hyp = asr_ts_decoder.run_ASR(asr_model)
print("Decoded word output dictionary: \n", word_hyp['an4_diarize_test'])
print("Word-level timestamps dictionary: \n", word_ts_hyp['an4_diarize_test'])
###Output
_____no_output_____
###Markdown
Realign Words with a Language Model (Experimental) Diarization result with ASR transcript can be enhanced by applying a language model. The mapping between speaker labels and words can be realigned by employing language models. The realigning process calculates the probability of the words around the boundary between two hypothetical sentences spoken by different speakers. k-th word: `but` hyp_former: "since i think like tuesday but he's coming back to albuquerque" hyp_latter: "since i think like tuesday but he's coming back to albuquerque"The joint probabilities of words in the sentence are computed for these two hypotheses. In this example, `hyp_former` is likely to get a higher score and thus word `but` will be assigned to the second speaker.To use this feature, the python package [arpa](https://pypi.org/project/arpa/) should be installed.
###Code
!pip install arpa
###Output
_____no_output_____
###Markdown
`diarizer.asr.realigning_lm_parameters.logprob_diff_threshold` can be modified to optimize the diarization performance (default value is 1.2). This is a threshold value for the gap between two log-probabilities of two hypotheses. Thus, the lower the threshold, the more changes are expected to be seen in the output transcript. `arpa` package also uses KenLM language models as in pyctcdecode. You can download publicly available [4-gram Big ARPA](https://kaldi-asr.org/models/5/4gram_big.arpa.gz) model and provide the model path to hydra configuration as follows.
###Code
arpa_model_path = os.path.join(data_dir, '4gram_big.arpa')
cfg.diarizer.asr.realigning_lm_parameters.arpa_language_model = arpa_model_path
cfg.diarizer.asr.realigning_lm_parameters.logprob_diff_threshold = 1.2
# Create a new instance with realigning language model
asr_diar_offline = ASR_DIAR_OFFLINE(**cfg.diarizer)
asr_diar_offline.word_ts_anchor_offset = asr_ts_decoder.word_ts_anchor_offset
###Output
_____no_output_____
###Markdown
Now that the language model for realigning is set up, you can run `get_transcript_with_speaker_labels()` to get the results with realigning.
###Code
asr_diar_offline.get_transcript_with_speaker_labels(diar_hyp, word_hyp, word_ts_hyp)
transcription_path_to_file = f"{data_dir}/pred_rttms/an4_diarize_test.txt"
transcript = read_file(transcription_path_to_file)
pp.pprint(transcript)
###Output
_____no_output_____
###Markdown
Automatic Speech Recognition with Speaker Diarization
###Code
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# If you're using Google Colab and not running locally, run this cell.
## Install dependencies
!pip install wget
!apt-get install sox libsndfile1 ffmpeg
!pip install unidecode
# ## Install NeMo
BRANCH = 'main'
!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]
## Install TorchAudio
!pip install torchaudio -f https://download.pytorch.org/whl/torch_stable.html
###Output
_____no_output_____
###Markdown
Introduction Speaker diarization lets us figure out "who spoke when" in the transcription. Without speaker diarization, we cannot distinguish the speakers in the transcript generated from automatic speech recognition (ASR). Nowadays, ASR combined with speaker diarization has shown immense use in many tasks, ranging from analyzing meeting transcription to media indexing. In this tutorial, we demonstrate how we can get ASR transcriptions combined with speaker labels. Since we don't include the detailed process of getting the ASR or diarization results, please refer to the following links for a more in-depth description.If you need a detailed understanding of transcribing words with ASR, refer to this [ASR Tutorial](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/asr/ASR_with_NeMo.ipynb) tutorial.For detailed parameter setting and execution of speaker diarization, refer to this [Diarization Inference](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb) tutorial.An example script that runs ASR and speaker diarization together can be found at [ASR with Diarization](https://github.com/NVIDIA/NeMo/blob/main/examples/speaker_tasks/diarization/asr_with_diarization.py). Speaker diarization in ASR pipelineSpeaker diarization results in the ASR pipeline should align well with the ASR output. Thus, we use the ASR output to create Voice Activity Detection (VAD) timestamps to obtain the segments we want to diarize. The segments we obtain from the VAD timestamps are further segmented into sub-segments in the speaker diarization step. Finally, after obtaining the speaker labels from speaker diarization, we match the decoded words with speaker labels to generate a transcript with speaker labels. ASR → VAD timestamps and decoded words → speaker diarization → speaker label matching Import librariesLet's first import nemo asr and other libraries for visualization purposes.
###Code
import nemo.collections.asr as nemo_asr
import numpy as np
from IPython.display import Audio, display
import librosa
import os
import wget
import matplotlib.pyplot as plt
import nemo
from nemo.collections.asr.parts.utils.diarization_utils import ASR_DIAR_OFFLINE
import glob
import pprint
pp = pprint.PrettyPrinter(indent=4)
def read_file(path_to_file):
with open(path_to_file) as f:
contents = f.read().splitlines()
return contents
###Output
_____no_output_____
###Markdown
We demonstrate this tutorial using a merged AN4 audio clip that has two speakers (male and female) speaking dates in different formats. If the audio does not exist, we download it, and then listen to it.
###Code
ROOT = os.getcwd()
data_dir = os.path.join(ROOT,'data')
os.makedirs(data_dir, exist_ok=True)
an4_audio_url = "https://nemo-public.s3.us-east-2.amazonaws.com/an4_diarize_test.wav"
if not os.path.exists(os.path.join(data_dir,'an4_diarize_test.wav')):
AUDIO_FILENAME = wget.download(an4_audio_url, data_dir)
else:
AUDIO_FILENAME = os.path.join(data_dir,'an4_diarize_test.wav')
audio_file_list = glob.glob(f"{data_dir}/*.wav")
print("Input audio file list: \n", audio_file_list)
signal, sample_rate = librosa.load(AUDIO_FILENAME, sr=None)
display(Audio(signal,rate=sample_rate))
###Output
_____no_output_____
###Markdown
`display_waveform()` and `get_color()` functions are defined for displaying the waveform with diarization results.
###Code
def display_waveform(signal,text='Audio',overlay_color=[]):
fig,ax = plt.subplots(1,1)
fig.set_figwidth(20)
fig.set_figheight(2)
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c='k')
if len(overlay_color):
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c=overlay_color)
fig.suptitle(text, fontsize=16)
plt.xlabel('time (secs)', fontsize=18)
plt.ylabel('signal strength', fontsize=14);
plt.axis([0,len(signal),-0.5,+0.5])
time_axis,_ = plt.xticks();
plt.xticks(time_axis[:-1],time_axis[:-1]/sample_rate);
COLORS="b g c m y".split()
def get_color(signal,speech_labels,sample_rate=16000):
c=np.array(['k']*len(signal))
for time_stamp in speech_labels:
start,end,label=time_stamp.split()
        start, end = int(float(start)*sample_rate), int(float(end)*sample_rate)
if label == "speech":
code = 'red'
else:
code = COLORS[int(label.split('_')[-1])]
c[start:end]=code
return c
###Output
_____no_output_____
###Markdown
Using the above function, we can display the waveform of the example audio clip.
###Code
display_waveform(signal)
###Output
_____no_output_____
###Markdown
Parameter setting for ASR and diarizationFirst, we need to set up the following parameters for ASR and diarization. We start our demonstration by transcribing the audio recording with our pretrained ASR model `QuartzNet15x5Base-En` and using the CTC output probabilities to get timestamps for the spoken words. We then use these timestamps to get speaker label information using the speaker diarizer model.
###Code
CONFIG_URL = "https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/speaker_tasks/diarization/conf/speaker_diarization.yaml"
params = {
"time_stride": 0.02, # This should not be changed if you are using QuartzNet15x5Base.
"offset": -0.18, # This should not be changed if you are using QuartzNet15x5Base.
"round_float": 2,
"window_length_in_sec": 1.5,
"shift_length_in_sec": 0.25,
"print_transcript": False,
"threshold": 50, # minimun width to consider non-speech activity
"external_oracle_vad": False,
"diar_config_url": CONFIG_URL,
"ASR_model_name": 'QuartzNet15x5Base-En',
}
###Output
_____no_output_____
###Markdown
Let's create an instance from ASR_DIAR_OFFLINE class. We pass the ``params`` variable to setup the parameters for both ASR and diarization.
###Code
asr_diar_offline = ASR_DIAR_OFFLINE(params)
asr_model = asr_diar_offline.set_asr_model(params['ASR_model_name'])
###Output
_____no_output_____
###Markdown
We will create folders that we need for storing VAD stamps and ASR/diarization results.Under the folder named ``asr_with_diar``, the following folders will be created.- ``oracle_vad``- ``json_result``- ``transcript_with_speaker_labels``
###Code
asr_diar_offline.create_directories()
print("Folders are created as below.")
print("VAD file path: \n", asr_diar_offline.oracle_vad_dir)
print("JSON result path: \n", asr_diar_offline.json_result_dir)
print("Transcript result path: \n", asr_diar_offline.trans_with_spks_dir)
###Output
_____no_output_____
###Markdown
Run ASR and get word timestampsBefore we run speaker diarization, we should run ASR and get the ASR output to generate VAD timestamps and decoded words. The `run_ASR()` function extracts the word sequence, logit values for each frame, and timestamps for each token (character). These three types of results are included in the ``ASR_output`` variable.
###Code
ASR_output = asr_diar_offline.run_ASR(asr_model, audio_file_list)
print("Decoded word output: \n", ASR_output[0][0])
print("Logit values for each frame: \n",ASR_output[0][1].shape, ASR_output[0][1])
print("Framelevel timestamps for each token: \n", ASR_output[0][2])
###Output
_____no_output_____
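###Markdown
As a rough check (an illustration, not a required step), the logit matrix has one row per ASR frame, so multiplying its length by `params['time_stride']` (0.02 s per frame for QuartzNet15x5) should roughly recover the duration of the audio clip.
###Code
# Sketch: relate the number of ASR frames in the logit matrix to the audio duration.
logits = ASR_output[0][1]
n_frames = logits.shape[0]
print(f"{n_frames} frames -> about {n_frames * params['time_stride']:.2f} seconds of audio")
###Output
_____no_output_____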
###Markdown
The following three variables are obtained from `get_speech_labels_list()` function.- words List[str]: contains the sequence of words.- spaces List[int]: contains frame level index of the end of the last word and the start time of the next word. - word_ts List[int]: contains frame level index of the start and the end of each word.
###Code
words, spaces, word_ts = asr_diar_offline.get_speech_labels_list(ASR_output, audio_file_list)
print("Transcribed words: \n", words[0])
print("Spaces between words: \n", spaces[0])
print("Timestamps for the words: \n", word_ts[0])
###Output
_____no_output_____
###Markdown
Then we multiply by `params['time_stride']=0.02` to get timestamps in seconds. Run diarization with extracted VAD timestampsWe need to convert the ASR-based VAD output (*.rttm format) to a VAD manifest (*.json format) file. The following function converts the rttm files into a manifest file and returns the path to the manifest file.
###Code
vad_manifest_path = asr_diar_offline.write_VAD_rttm(asr_diar_offline.oracle_vad_dir, audio_file_list)
print("VAD manifest file path: \n", vad_manifest_path)
print("VAD Manifest file content:")
pp.pprint(read_file(vad_manifest_path))
###Output
_____no_output_____
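###Markdown
As noted above, the frame-level `[start, end]` indices in `word_ts` become timestamps in seconds when multiplied by `params['time_stride']`; the sketch below shows this conversion for the example clip, assuming each entry is a `[start, end]` pair as described earlier.
###Code
# Sketch: convert frame-level word boundaries to seconds using the ASR frame stride.
time_stride = params['time_stride']  # 0.02 s per frame for QuartzNet15x5
for word, (start_frame, end_frame) in zip(words[0], word_ts[0]):
    print(f"{word:>12s}  {start_frame * time_stride:6.2f}s - {end_frame * time_stride:6.2f}s")
###Output
_____no_output_____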
###Markdown
Now that all the components for diarization are ready, let's run diarization by calling the `run_diarization()` function.
###Code
num_speakers = None # If we know the number of speakers, we can assign "2".
pretrained_speaker_model = 'ecapa_tdnn'
asr_diar_offline.run_diarization(audio_file_list, vad_manifest_path, num_speakers, pretrained_speaker_model)
###Output
_____no_output_____
###Markdown
The `run_diarization()` function will create the `./asr_with_diar/oracle_vad/pred_rttms/an4_diarize_test.rttm` file. Let's see what is written in this `rttm` file.
###Code
predicted_speaker_label_rttm_path = f"{ROOT}/asr_with_diar/oracle_vad/pred_rttms/an4_diarize_test.rttm"
pred_rttm = read_file(predicted_speaker_label_rttm_path)
pp.pprint(pred_rttm)
###Output
_____no_output_____
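###Markdown
Each RTTM line is a space-separated record; assuming the standard RTTM column layout (type, file, channel, start, duration, and the speaker name in the eighth field), the lines read above can be parsed into simple tuples as sketched below.
###Code
# Sketch: parse RTTM lines into (start, end, speaker); field positions assume the standard layout
# SPEAKER <file> <channel> <start> <duration> <NA> <NA> <speaker> <NA> <NA>
for line in pred_rttm:
    fields = line.split()
    if not fields or fields[0] != "SPEAKER":
        continue
    start, duration, speaker = float(fields[3]), float(fields[4]), fields[7]
    print(f"{speaker}: {start:.2f}s - {start + duration:.2f}s")
###Output
_____no_output_____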
###Markdown
Let's check out the diarization output. `get_diarization_labels()` function extracts the estimated speaker label information with timestamps from the predicted rttm file.
###Code
diar_labels = asr_diar_offline.get_diarization_labels(audio_file_list)
print("Diarization Labels:")
pp.pprint(diar_labels[0])
color = get_color(signal, diar_labels[0])
display_waveform(signal,'Audio with Speaker Labels',color)
display(Audio(signal,rate=16000))
###Output
_____no_output_____
###Markdown
Check the transcription outputNow that we've done all the processes for running ASR and diarization, let's match the diarization result with the ASR result and get the final output. The `write_json_and_transcript()` function matches the diarization output `diar_labels` with `words` using the timestamp information `word_ts`.
###Code
asr_output_dict = asr_diar_offline.write_json_and_transcript(audio_file_list, diar_labels, words, word_ts)
###Output
_____no_output_____
###Markdown
After running `write_json_and_transcript()` function, the transcription output will be located in `./asr_with_diar/transcript_with_speaker_labels` folder, which shows **start time to end time of the utterance, speaker ID, and words spoken** during the notified time.
###Code
transcription_path_to_file = f"{ROOT}/asr_with_diar/transcript_with_speaker_labels/an4_diarize_test.txt"
transcript = read_file(transcription_path_to_file)
pp.pprint(transcript)
###Output
_____no_output_____
###Markdown
Another output is transcription output in JSON format, which is saved in `./asr_with_diar/json_result`. In the JSON format output, we include information such as **transcription, estimated number of speakers (variable named `speaker_count`), start and end time of each word and most importantly, speaker label for each word.**
###Code
transcription_path_to_file = f"{ROOT}/asr_with_diar/json_result/an4_diarize_test.json"
json_contents = read_file(transcription_path_to_file)
pp.pprint(json_contents)
###Output
_____no_output_____
###Markdown
Automatic Speech Recognition with Speaker Diarization
###Code
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# If you're using Google Colab and not running locally, run this cell.
## Install dependencies
!pip install wget
!apt-get install sox libsndfile1 ffmpeg
!pip install unidecode
# ## Install NeMo
BRANCH = 'main'
!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]
## Install TorchAudio
!pip install torchaudio -f https://download.pytorch.org/whl/torch_stable.html
###Output
_____no_output_____
###Markdown
Introduction Speaker diarization lets us figure out "who spoke when" in the transcription. Without speaker diarization, we cannot distinguish the speakers in the transcript generated from automatic speech recognition (ASR). Nowadays, ASR combined with speaker diarization has shown immense use in many tasks, ranging from analyzing meeting transcription to media indexing. In this tutorial, we demonstrate how we can get ASR transcriptions combined with speaker labels. Since we don't include a detailed process of getting ASR results or diarization results, please refer to the following links for more in-depth description.If you need detailed understanding of transcribing words with ASR, refer to this [ASR Tutorial](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/asr/ASR_with_NeMo.ipynb) tutorial.For detailed parameter setting and execution of speaker diarization, refer to this [Diarization Inference](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb) tutorial.An example script that runs ASR and speaker diarization together can be found at [ASR with Diarization](https://github.com/NVIDIA/NeMo/blob/main/examples/speaker_tasks/diarization/offline_diarization_with_asr.py). Speaker diarization in ASR pipelineSpeaker diarization results in ASR pipeline should align well with ASR output. Thus, we use ASR output to create Voice Activity Detection (VAD) timestamps to obtain segments we want to diarize. The segments we obtain from the VAD timestamps are further segmented into sub-segments in the speaker diarization step. Finally, after obtaining the speaker labels from speaker diarization, we match the decoded words with speaker labels to generate a transcript with speaker labels. ASR → VAD timestamps and decoded words → speaker diarization → speaker label matching Import librariesLet's first import nemo asr and other libraries for visualization purposes.
###Code
import nemo.collections.asr as nemo_asr
import numpy as np
from IPython.display import Audio, display
import librosa
import os
import wget
import matplotlib.pyplot as plt
import nemo
import glob
import pprint
pp = pprint.PrettyPrinter(indent=4)
###Output
_____no_output_____
###Markdown
We demonstrate this tutorial using a merged AN4 audioclip. The merged audioclip contains the speech of two speakers (male and female) reading dates in different formats. Run the following script to download the audioclip and play it.
###Code
ROOT = os.getcwd()
data_dir = os.path.join(ROOT,'data')
os.makedirs(data_dir, exist_ok=True)
an4_audio_url = "https://nemo-public.s3.us-east-2.amazonaws.com/an4_diarize_test.wav"
if not os.path.exists(os.path.join(data_dir,'an4_diarize_test.wav')):
AUDIO_FILENAME = wget.download(an4_audio_url, data_dir)
else:
AUDIO_FILENAME = os.path.join(data_dir,'an4_diarize_test.wav')
audio_file_list = glob.glob(f"{data_dir}/*.wav")
print("Input audio file list: \n", audio_file_list)
signal, sample_rate = librosa.load(AUDIO_FILENAME, sr=None)
display(Audio(signal,rate=sample_rate))
###Output
_____no_output_____
###Markdown
`display_waveform()` and `get_color()` functions are defined for displaying the waveform with diarization results.
###Code
def display_waveform(signal,text='Audio',overlay_color=[]):
fig,ax = plt.subplots(1,1)
fig.set_figwidth(20)
fig.set_figheight(2)
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c='k')
if len(overlay_color):
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c=overlay_color)
fig.suptitle(text, fontsize=16)
plt.xlabel('time (secs)', fontsize=18)
plt.ylabel('signal strength', fontsize=14);
plt.axis([0,len(signal),-0.5,+0.5])
time_axis,_ = plt.xticks();
plt.xticks(time_axis[:-1],time_axis[:-1]/sample_rate);
COLORS="b g c m y".split()
def get_color(signal,speech_labels,sample_rate=16000):
c=np.array(['k']*len(signal))
for time_stamp in speech_labels:
start,end,label=time_stamp.split()
start,end = int(float(start)*sample_rate),int(float(end)*sample_rate)  # convert seconds to sample indices using the sample_rate argument
if label == "speech":
code = 'red'
else:
code = COLORS[int(label.split('_')[-1])]
c[start:end]=code
return c
###Output
_____no_output_____
###Markdown
Using the above function, we can display the waveform of the example audio clip.
###Code
display_waveform(signal)
###Output
_____no_output_____
###Markdown
Parameter setting for ASR and diarization First, we need to set up the following parameters for ASR and diarization. We start our demonstration by first transcribing the audio recording using our pretrained ASR model `QuartzNet15x5Base-En` and use the CTC output probabilities to get timestamps for the spoken words. We then use these timestamps to get speaker label information using the speaker diarizer model.
###Code
from omegaconf import OmegaConf
import shutil
CONFIG_URL = "https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/speaker_tasks/diarization/conf/offline_diarization_with_asr.yaml"
if not os.path.exists(os.path.join(data_dir,'offline_diarization_with_asr.yaml')):
CONFIG = wget.download(CONFIG_URL, data_dir)
else:
CONFIG = os.path.join(data_dir,'offline_diarization_with_asr.yaml')
cfg = OmegaConf.load(CONFIG)
print(OmegaConf.to_yaml(cfg))
###Output
_____no_output_____
###Markdown
Speaker diarization scripts commonly expect the following arguments: 1. manifest_filepath: path to a manifest file containing json lines of the format `{"audio_filepath": "/path/to/audio_file", "offset": 0, "duration": null, "label": "infer", "text": "-", "num_speakers": null, "rttm_filepath": "/path/to/rttm/file", "uem_filepath": "/path/to/uem/filepath"}` 2. out_dir: directory where outputs and intermediate files are stored. 3. oracle_vad: if this is True, we extract speech activity labels from rttm files; if False, then either 4. vad.model_path or an external manifest path containing speech activity labels has to be passed. Mandatory fields are `audio_filepath`, `offset`, `duration`, `label` and `text`. For the rest, if you would like to evaluate with a known number of speakers, pass the value, else `null`. If you would like to score the system with known rttms, that should be passed as well, else `null`. The uem file is used to score only part of your audio for evaluation purposes, so pass it if you would like to evaluate on it, else `null`. **Note:** we expect the audio and the corresponding RTTM to have the **same base name** and the name should be **unique**. For example: if the audio file name is **test_an4**.wav, we expect the corresponding rttm file (if provided) to be named **test_an4**.rttm (note the matching **test_an4** base name). Let's create a manifest file with the an4 audio and rttm available. If you have more than one file, you may also use the script `NeMo/scripts/speaker_tasks/pathsfiles_to_manifest.py` to generate a manifest file from a list of audio files. In addition, you can optionally include rttm files to evaluate the diarization results.
###Code
# Create a manifest file for input with below format.
# {"audio_filepath": "/path/to/audio_file", "offset": 0, "duration": null, "label": "infer", "text": "-",
# "num_speakers": null, "rttm_filepath": "/path/to/rttm/file", "uem_filepath"="/path/to/uem/filepath"}
import json
meta = {
'audio_filepath': AUDIO_FILENAME,
'offset': 0,
'duration':None,
'label': 'infer',
'text': '-',
'num_speakers': 2,
'rttm_filepath': None,
'uem_filepath' : None
}
with open(os.path.join(data_dir,'input_manifest.json'),'w') as fp:
json.dump(meta,fp)
fp.write('\n')
cfg.diarizer.manifest_filepath = os.path.join(data_dir,'input_manifest.json')
!cat {cfg.diarizer.manifest_filepath}
###Output
_____no_output_____
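###Markdown
 If you have several audio files, you can also build the manifest directly in Python instead of using the helper script mentioned above. The sketch below is illustrative only: it writes one json line per wav file in `audio_file_list` to a hypothetical `multi_input_manifest.json`, reusing the generic field values from the entry above.
###Code
import json

multi_manifest_path = os.path.join(data_dir, 'multi_input_manifest.json')  # hypothetical file name for this sketch
defaults = {'offset': 0, 'duration': None, 'label': 'infer', 'text': '-', 'num_speakers': None, 'rttm_filepath': None, 'uem_filepath': None}
# one json line per audio file, following the same schema as the single-file entry above
lines = [json.dumps(dict(defaults, audio_filepath=wav)) for wav in audio_file_list]
with open(multi_manifest_path, 'w') as fp:
    fp.write('\n'.join(lines) + '\n')
print(open(multi_manifest_path).read())
###Output
_____no_output_____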
###Markdown
Let's set the parameters required for diarization. In this tutorial, we obtain voice activity labels from ASR, which is set through parameter `cfg.diarizer.asr.parameters.asr_based_vad`.
###Code
pretrained_speaker_model='titanet_large'
cfg.diarizer.manifest_filepath = cfg.diarizer.manifest_filepath
cfg.diarizer.out_dir = data_dir #Directory to store intermediate files and prediction outputs
cfg.diarizer.speaker_embeddings.model_path = pretrained_speaker_model
cfg.diarizer.speaker_embeddings.parameters.window_length_in_sec = 1.5
cfg.diarizer.speaker_embeddings.parameters.shift_length_in_sec = 0.75
cfg.diarizer.clustering.parameters.oracle_num_speakers=True
# Using VAD generated from ASR timestamps
cfg.diarizer.asr.model_path = 'QuartzNet15x5Base-En'
cfg.diarizer.oracle_vad = False # ----> Not using oracle VAD
cfg.diarizer.asr.parameters.asr_based_vad = True
cfg.diarizer.asr.parameters.threshold=100 # ASR based VAD threshold: If 100, all silences under 1 sec are ignored.
cfg.diarizer.asr.parameters.decoder_delay_in_sec=0.2 # Decoder delay is compensated for 0.2 sec
###Output
_____no_output_____
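###Markdown
 Before moving on, it can help to double-check the diarizer settings we just modified; `OmegaConf.to_yaml` (already used above) prints the resolved `diarizer` section of the config.
###Code
# print only the diarizer portion of the loaded config to verify the overrides above
print(OmegaConf.to_yaml(cfg.diarizer))
###Output
_____no_output_____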
###Markdown
Run ASR and get word timestamps Before we run speaker diarization, we should run ASR and get the ASR output to generate decoded words and timestamps for those words. Let's import the `ASR_TIMESTAMPS` class and create an `asr_ts_decoder` instance that returns an ASR model. Using this ASR model, the following two variables are obtained from the `asr_ts_decoder.run_ASR()` function. - word_hyp Dict[str, List[str]]: contains the sequence of words. - word_ts_hyp Dict[str, List[int]]: contains the frame-level index of the start and the end of each word.
###Code
from nemo.collections.asr.parts.utils.decoder_timestamps_utils import ASR_TIMESTAMPS
asr_ts_decoder = ASR_TIMESTAMPS(**cfg.diarizer)
asr_model = asr_ts_decoder.set_asr_model()
word_hyp, word_ts_hyp = asr_ts_decoder.run_ASR(asr_model)
print("Decoded word output dictionary: \n", word_hyp['an4_diarize_test'])
print("Word-level timestamps dictionary: \n", word_ts_hyp['an4_diarize_test'])
###Output
_____no_output_____
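###Markdown
 As a quick sanity check, the sketch below pairs each decoded word with its timestamp entry and converts the frame-level indices to seconds; the 0.02 s frame duration is an assumption based on the QuartzNet CTC output stride used elsewhere in this tutorial, so adjust it (or drop the conversion) if your NeMo version already returns seconds.
###Code
uniq_id = 'an4_diarize_test'
frame_dur = 0.02  # assumed frame duration in seconds (QuartzNet CTC output stride)
# pair each word with its (start, end) timestamp converted to seconds
word_ts_sec = [(w, ts[0] * frame_dur, ts[1] * frame_dur) for w, ts in zip(word_hyp[uniq_id], word_ts_hyp[uniq_id])]
print("Words with (start, end) in seconds: \n", word_ts_sec)
###Output
_____no_output_____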
###Markdown
Let's create an instance `asr_diar_offline` of the ASR_DIAR_OFFLINE class, which matches diarization results with ASR outputs. We pass ``cfg.diarizer`` to set up the parameters for both ASR and diarization. We also set the `word_ts_anchor_offset` variable, which determines the anchor position of each word. Here, we use the default value from the `asr_ts_decoder` instance.
###Code
from nemo.collections.asr.parts.utils.diarization_utils import ASR_DIAR_OFFLINE
asr_diar_offline = ASR_DIAR_OFFLINE(**cfg.diarizer)
asr_diar_offline.word_ts_anchor_offset = asr_ts_decoder.word_ts_anchor_offset
###Output
_____no_output_____
###Markdown
The `asr_diar_offline` instance is now ready. As a next step, we run diarization. Run diarization with the extracted word timestamps Now that all the components for diarization are ready, let's run diarization by calling the `run_diarization()` function. `run_diarization()` will return two different variables: `diar_hyp` and `diar_score`. `diar_hyp` is the diarization inference result, which is written in `[start time] [end time] [speaker]` format. `diar_score` contains `None` since we did not provide `rttm_filepath` in the input manifest file.
###Code
diar_hyp, diar_score = asr_diar_offline.run_diarization(cfg, word_ts_hyp)
print("Diarization hypothesis output: \n", diar_hyp['an4_diarize_test'])
###Output
_____no_output_____
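###Markdown
 Each entry of `diar_hyp` is a plain `start end speaker` string, so it can easily be turned into tuples for programmatic use; a minimal sketch:
###Code
# parse the "[start time] [end time] [speaker]" strings into (start_sec, end_sec, speaker) tuples
segments = [entry.split() for entry in diar_hyp['an4_diarize_test']]
segments = [(float(start), float(end), speaker) for start, end, speaker in segments]
print("Parsed diarization segments: \n", segments)
###Output
_____no_output_____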
###Markdown
`run_diarization()` function also creates `an4_diarize_test.rttm` file. Let's check what is written in this `rttm` file.
###Code
def read_file(path_to_file):
with open(path_to_file) as f:
contents = f.read().splitlines()
return contents
predicted_speaker_label_rttm_path = f"{data_dir}/pred_rttms/an4_diarize_test.rttm"
pred_rttm = read_file(predicted_speaker_label_rttm_path)
pp.pprint(pred_rttm)
from nemo.collections.asr.parts.utils.speaker_utils import rttm_to_labels
pred_labels = rttm_to_labels(predicted_speaker_label_rttm_path)
color = get_color(signal, pred_labels)
display_waveform(signal,'Audio with Speaker Labels', color)
display(Audio(signal,rate=16000))
###Output
_____no_output_____
###Markdown
Check the speaker-labeled ASR transcription output Now that we have run both ASR and diarization, let's match the diarization result with the ASR result and get the final output. The `get_transcript_with_speaker_labels()` function in `asr_diar_offline` matches the diarization output `diar_hyp` with `word_hyp` using the timestamp information from `word_ts_hyp`.
###Code
asr_diar_offline.get_transcript_with_speaker_labels(diar_hyp, word_hyp, word_ts_hyp)
###Output
_____no_output_____
###Markdown
After running the `get_transcript_with_speaker_labels()` function, the transcription output will be located in the `./pred_rttms` folder. It shows the **start time and end time of each utterance, the speaker ID, and the words spoken** during that time span.
###Code
transcription_path_to_file = f"{data_dir}/pred_rttms/an4_diarize_test.txt"
transcript = read_file(transcription_path_to_file)
pp.pprint(transcript)
###Output
_____no_output_____
###Markdown
Another output is the transcription in JSON format, which is saved in `./pred_rttms/an4_diarize_test.json`. The JSON output includes information such as the **transcription, the estimated number of speakers (variable named `speaker_count`), the start and end time of each word and, most importantly, the speaker label for each word.**
###Code
transcription_path_to_file = f"{data_dir}/pred_rttms/an4_diarize_test.json"
json_contents = read_file(transcription_path_to_file)
pp.pprint(json_contents)
###Output
_____no_output_____
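###Markdown
 To work with the JSON output programmatically rather than printing raw lines, you can parse it as in the sketch below; it assumes the file holds a single JSON object and falls back to json-lines parsing otherwise.
###Code
import json

with open(transcription_path_to_file) as f:
    raw = f.read()
try:
    result = json.loads(raw)  # assumes the file holds a single JSON object
except json.JSONDecodeError:
    result = [json.loads(line) for line in raw.splitlines() if line.strip()]  # json-lines fallback
if isinstance(result, dict):
    print("Top-level keys:", list(result.keys()))
else:
    print("Number of parsed json lines:", len(result))
###Output
_____no_output_____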
###Markdown
Optional Features for ASR with Speaker Diarization Beam search decoder A beam-search decoder can be applied to CTC-based ASR models. To use this feature, [pyctcdecode](https://github.com/kensho-technologies/pyctcdecode) should be installed. [pyctcdecode](https://github.com/kensho-technologies/pyctcdecode) supports word timestamp generation and can be applied to speaker diarization. pyctcdecode also requires [KenLM](https://github.com/kpu/kenlm), which is recommended to be installed via PyPI. Install pyctcdecode in your environment with the following commands:
###Code
!pip install pyctcdecode
!pip install https://github.com/kpu/kenlm/archive/master.zip
###Output
_____no_output_____
###Markdown
You can download publicly available language models (`.arpa` files) at [KALDI Tedlium Language Models](https://kaldi-asr.org/models/m5). Download [4-gram Big ARPA](https://kaldi-asr.org/models/5/4gram_big.arpa.gz) and provide the model path. Let's download the language model file to `data_dir` folder.
###Code
import gzip
import shutil
def gunzip(file_path,output_path):
with gzip.open(file_path,"rb") as f_in, open(output_path,"wb") as f_out:
shutil.copyfileobj(f_in, f_out)
f_in.close()
f_out.close()
ARPA_URL = 'https://kaldi-asr.org/models/5/4gram_big.arpa.gz'
f = wget.download(ARPA_URL, data_dir)
gunzip(f,f.replace(".gz",""))
###Output
_____no_output_____
###Markdown
Provide the downloaded arpa language model file to `cfg.diarizer`.
###Code
arpa_model_path = os.path.join(data_dir, '4gram_big.arpa')
cfg.diarizer.asr.ctc_decoder_parameters.pretrained_language_model = arpa_model_path
###Output
_____no_output_____
###Markdown
Create a new `asr_ts_decoder` instance with the updated `cfg.diarizer`. The decoder script will launch pyctcdecode for decoding words and timestamps.
###Code
import importlib
import nemo.collections.asr.parts.utils.decoder_timestamps_utils as decoder_timestamps_utils
importlib.reload(decoder_timestamps_utils) # This module should be reloaded after you install pyctcdecode.
asr_ts_decoder = ASR_TIMESTAMPS(**cfg.diarizer)
asr_model = asr_ts_decoder.set_asr_model()
word_hyp, word_ts_hyp = asr_ts_decoder.run_ASR(asr_model)
print("Decoded word output dictionary: \n", word_hyp['an4_diarize_test'])
print("Word-level timestamps dictionary: \n", word_ts_hyp['an4_diarize_test'])
###Output
_____no_output_____
###Markdown
Realign Words with a Language Model (Experimental) The diarization result with the ASR transcript can be enhanced by applying a language model. The mapping between speaker labels and words can be realigned by employing language models. The realigning process calculates the probability of the words around the boundary between two hypothetical sentences spoken by different speakers. k-th word: `but` hyp_former and hyp_latter: "since i think like tuesday but he's coming back to albuquerque", where the two hypotheses place the speaker change on either side of the word `but`. The joint probabilities of the words in the sentence are computed for these two hypotheses. In this example, `hyp_former` is likely to get a higher score, and thus the word `but` will be assigned to the second speaker. To use this feature, the python package [arpa](https://pypi.org/project/arpa/) should be installed.
###Code
!pip install arpa
###Output
_____no_output_____
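###Markdown
 To make the realigning idea concrete, here is a minimal, illustrative sketch that scores the first speaker's sentence with and without the boundary word `but`, using the `arpa` package and the 4-gram ARPA model downloaded in the beam-search section; the size of this gap is the kind of quantity that `logprob_diff_threshold` (set in the next cells) thresholds. This is only an illustration, not part of the pipeline.
###Code
import arpa

arpa_model_path = os.path.join(data_dir, '4gram_big.arpa')  # downloaded and gunzipped in the beam-search section above
lm = arpa.loadf(arpa_model_path)[0]  # note: loading the full 4-gram model is slow and memory-hungry

hyp_a = "since i think like tuesday"      # first speaker's sentence without the boundary word
hyp_b = "since i think like tuesday but"  # first speaker's sentence including the boundary word
logprob_gap = lm.log_s(hyp_a) - lm.log_s(hyp_b)
print("Log-probability gap between the two hypotheses:", logprob_gap)
###Output
_____no_output_____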
###Markdown
`diarizer.asr.realigning_lm_parameters.logprob_diff_threshold` can be modified to optimize the diarization performance (the default value is 1.2). This is a threshold on the gap between the log-probabilities of the two hypotheses; thus, the lower the threshold, the more changes are expected to be seen in the output transcript. The `arpa` package also uses KenLM language models, as in pyctcdecode. You can download the publicly available [4-gram Big ARPA](https://kaldi-asr.org/models/5/4gram_big.arpa.gz) model and provide the model path to the hydra configuration as follows.
###Code
arpa_model_path = os.path.join(data_dir, '4gram_big.arpa')
cfg.diarizer.asr.realigning_lm_parameters.arpa_language_model = arpa_model_path
cfg.diarizer.asr.realigning_lm_parameters.logprob_diff_threshold = 1.2
import importlib
import nemo.collections.asr.parts.utils.diarization_utils as diarization_utils
importlib.reload(diarization_utils) # This module should be reloaded after you install arpa.
# Create a new instance with realigning language model
asr_diar_offline = ASR_DIAR_OFFLINE(**cfg.diarizer)
asr_diar_offline.word_ts_anchor_offset = asr_ts_decoder.word_ts_anchor_offset
###Output
_____no_output_____
###Markdown
Now that the language model for realigning is set up, you can run `get_transcript_with_speaker_labels()` to get the results with realigning.
###Code
asr_diar_offline.get_transcript_with_speaker_labels(diar_hyp, word_hyp, word_ts_hyp)
transcription_path_to_file = f"{data_dir}/pred_rttms/an4_diarize_test.txt"
transcript = read_file(transcription_path_to_file)
pp.pprint(transcript)
###Output
_____no_output_____
###Markdown
Automatic Speech Recognition with Speaker Diarization
###Code
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# If you're using Google Colab and not running locally, run this cell.
## Install dependencies
!pip install wget
!apt-get install sox libsndfile1 ffmpeg
!pip install unidecode
# ## Install NeMo
BRANCH = 'main'
!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]
## Install TorchAudio
!pip install torchaudio -f https://download.pytorch.org/whl/torch_stable.html
###Output
_____no_output_____
###Markdown
Introduction Speaker diarization lets us figure out "who spoke when" in the transcription. Without speaker diarization, we cannot distinguish the speakers in the transcript generated from automatic speech recognition (ASR). Nowadays, ASR combined with speaker diarization has shown immense use in many tasks, ranging from analyzing meeting transcriptions to media indexing. In this tutorial, we demonstrate how we can get ASR transcriptions combined with speaker labels. Since we don't include a detailed process of getting ASR results or diarization results, please refer to the following links for a more in-depth description. If you need a detailed understanding of transcribing words with ASR, refer to this [ASR Tutorial](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/asr/ASR_with_NeMo.ipynb). For detailed parameter setting and execution of speaker diarization, refer to this [Diarization Inference](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb) tutorial. An example script that runs ASR and speaker diarization together can be found at [ASR with Diarization](https://github.com/NVIDIA/NeMo/blob/main/examples/speaker_tasks/diarization/asr_with_diarization.py). Speaker diarization in the ASR pipeline Speaker diarization results in the ASR pipeline should align well with the ASR output. Thus, we use the ASR output to create Voice Activity Detection (VAD) timestamps and obtain the segments we want to diarize. The segments we obtain from the VAD timestamps are further segmented into sub-segments in the speaker diarization step. Finally, after obtaining the speaker labels from speaker diarization, we match the decoded words with the speaker labels to generate a transcript with speaker labels. ASR → VAD timestamps and decoded words → speaker diarization → speaker label matching Import libraries Let's first import nemo asr and other libraries for visualization purposes.
###Code
import nemo.collections.asr as nemo_asr
import numpy as np
from IPython.display import Audio, display
import librosa
import os
import wget
import matplotlib.pyplot as plt
import nemo
from nemo.collections.asr.parts.utils.diarization_utils import ASR_DIAR_OFFLINE
import glob
import pprint
pp = pprint.PrettyPrinter(indent=4)
def read_file(path_to_file):
with open(path_to_file) as f:
contents = f.read().splitlines()
return contents
###Output
_____no_output_____
###Markdown
We demonstrate this tutorial using a merged an4 audio clip that has two speakers (male and female) speaking dates in different formats. If the audio clip does not exist yet, we download it and then listen to it.
###Code
ROOT = os.getcwd()
data_dir = os.path.join(ROOT,'data')
os.makedirs(data_dir, exist_ok=True)
an4_audio_url = "https://nemo-public.s3.us-east-2.amazonaws.com/an4_diarize_test.wav"
if not os.path.exists(os.path.join(data_dir,'an4_diarize_test.wav')):
AUDIO_FILENAME = wget.download(an4_audio_url, data_dir)
else:
AUDIO_FILENAME = os.path.join(data_dir,'an4_diarize_test.wav')
audio_file_list = glob.glob(f"{data_dir}/*.wav")
print("Input audio file list: \n", audio_file_list)
signal, sample_rate = librosa.load(AUDIO_FILENAME, sr=None)
display(Audio(signal,rate=sample_rate))
###Output
_____no_output_____
###Markdown
`display_waveform()` and `get_color()` functions are defined for displaying the waveform with diarization results.
###Code
def display_waveform(signal,text='Audio',overlay_color=[]):
fig,ax = plt.subplots(1,1)
fig.set_figwidth(20)
fig.set_figheight(2)
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c='k')
if len(overlay_color):
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c=overlay_color)
fig.suptitle(text, fontsize=16)
plt.xlabel('time (secs)', fontsize=18)
plt.ylabel('signal strength', fontsize=14);
plt.axis([0,len(signal),-0.5,+0.5])
time_axis,_ = plt.xticks();
plt.xticks(time_axis[:-1],time_axis[:-1]/sample_rate);
COLORS="b g c m y".split()
def get_color(signal,speech_labels,sample_rate=16000):
c=np.array(['k']*len(signal))
for time_stamp in speech_labels:
start,end,label=time_stamp.split()
start,end = int(float(start)*sample_rate),int(float(end)*sample_rate)  # convert seconds to sample indices using the sample_rate argument
if label == "speech":
code = 'red'
else:
code = COLORS[int(label.split('_')[-1])]
c[start:end]=code
return c
###Output
_____no_output_____
###Markdown
Using the above function, we can display the waveform of the example audio clip.
###Code
display_waveform(signal)
###Output
_____no_output_____
###Markdown
Parameter setting for ASR and diarization First, we need to set up the following parameters for ASR and diarization. We start our demonstration by first transcribing the audio recording using our pretrained ASR model `QuartzNet15x5Base-En` and use the CTC output probabilities to get timestamps for the spoken words. We then use these timestamps to get speaker label information using the speaker diarizer model.
###Code
CONFIG_URL = "https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/speaker_tasks/diarization/conf/speaker_diarization.yaml"
params = {
"time_stride": 0.02, # This should not be changed if you are using QuartzNet15x5Base.
"offset": -0.18, # This should not be changed if you are using QuartzNet15x5Base.
"round_float": 2,
"window_length_in_sec": 1.5,
"shift_length_in_sec": 0.25,
"print_transcript": False,
"threshold": 50, # minimun width to consider non-speech activity
"external_oracle_vad": False,
"diar_config_url": CONFIG_URL,
"ASR_model_name": 'QuartzNet15x5Base-En',
}
###Output
_____no_output_____
###Markdown
Let's create an instance of the ASR_DIAR_OFFLINE class. We pass the ``params`` variable to set up the parameters for both ASR and diarization.
###Code
asr_diar_offline = ASR_DIAR_OFFLINE(params)
asr_model = asr_diar_offline.set_asr_model(params['ASR_model_name'])
###Output
_____no_output_____
###Markdown
We will create the folders that we need for storing VAD stamps and ASR/diarization results. Under the folder named ``asr_with_diar``, the following folders will be created: ``oracle_vad``, ``json_result`` and ``transcript_with_speaker_labels``.
###Code
asr_diar_offline.create_directories()
print("Folders are created as below.")
print("VAD file path: \n", asr_diar_offline.oracle_vad_dir)
print("JSON result path: \n", asr_diar_offline.json_result_dir)
print("Transcript result path: \n", asr_diar_offline.trans_with_spks_dir)
###Output
_____no_output_____
###Markdown
Run ASR and get word timestamps Before we run speaker diarization, we should run ASR and get the ASR output to generate VAD timestamps and decoded words. The `run_ASR()` function extracts the word sequence, logit values for each frame, and timestamps for each token (character). These three types of results are included in the ``ASR_output`` variable.
###Code
ASR_output = asr_diar_offline.run_ASR(asr_model, audio_file_list)
print("Decoded word output: \n", ASR_output[0][0])
print("Logit values for each frame: \n",ASR_output[0][1].shape, ASR_output[0][1])
print("Framelevel timestamps for each token: \n", ASR_output[0][2])
###Output
_____no_output_____
###Markdown
The following three variables are obtained from the `get_speech_labels_list()` function. - words List[str]: contains the sequence of words. - spaces List[int]: contains the frame-level index of the end of the last word and the start time of the next word. - word_ts List[int]: contains the frame-level index of the start and the end of each word.
###Code
words, spaces, word_ts = asr_diar_offline.get_speech_labels_list(ASR_output, audio_file_list)
print("Transcribed words: \n", words[0])
print("Spaces between words: \n", spaces[0])
print("Timestamps for the words: \n", word_ts[0])
###Output
_____no_output_____
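###Markdown
 The timestamps above are frame-level indices; the sketch below converts them to seconds with `params['time_stride']` (0.02 s per frame), which is the same conversion the pipeline applies internally.
###Code
# convert the frame-level [start, end] pairs of the first audio file to seconds
word_ts_in_sec = [[round(t * params['time_stride'], params['round_float']) for t in ts] for ts in word_ts[0]]
print("Timestamps for the words in seconds: \n", word_ts_in_sec)
###Output
_____no_output_____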
###Markdown
Then we multiply by `params['time_stride']=0.02` to get the timestamps in seconds. Run diarization with the extracted VAD timestamps We need to convert the ASR-based VAD output (*.rttm format) into a VAD manifest (*.json format) file. The following function converts the rttm files into a manifest file and returns the path to the manifest file.
###Code
vad_manifest_path = asr_diar_offline.write_VAD_rttm(asr_diar_offline.oracle_vad_dir, audio_file_list)
print("VAD manifest file path: \n", vad_manifest_path)
print("VAD Manifest file content:")
pp.pprint(read_file(vad_manifest_path))
###Output
_____no_output_____
###Markdown
Now that all the components for diarization are ready, let's run diarization by calling the `run_diarization()` function.
###Code
num_speakers = None # If we know the number of speakers, we can assign "2".
pretrained_speaker_model = 'ecapa_tdnn'
asr_diar_offline.run_diarization(audio_file_list, vad_manifest_path, num_speakers, pretrained_speaker_model)
print(nemo.__file__)
###Output
_____no_output_____
###Markdown
The `run_diarization()` function will create the `./asr_with_diar/oracle_vad/pred_rttms/an4_diarize_test.rttm` file. Let's see what is written in this `rttm` file.
###Code
predicted_speaker_label_rttm_path = f"{ROOT}/asr_with_diar/oracle_vad/pred_rttms/an4_diarize_test.rttm"
pred_rttm = read_file(predicted_speaker_label_rttm_path)
pp.pprint(pred_rttm)
###Output
_____no_output_____
###Markdown
Let's check out the diarization output. `get_diarization_labels()` function extracts the estimated speaker label information with timestamps from the predicted rttm file.
###Code
diar_labels = asr_diar_offline.get_diarization_labels(audio_file_list)
print("Diarization Labels:")
pp.pprint(diar_labels[0])
color = get_color(signal, diar_labels[0])
display_waveform(signal,'Audio with Speaker Labels',color)
display(Audio(signal,rate=16000))
###Output
_____no_output_____
###Markdown
Check the transcription output Now that we have run both ASR and diarization, let's match the diarization result with the ASR result and get the final output. The `write_json_and_transcript()` function matches the diarization output `diar_labels` with `words` using the timestamp information `word_ts`.
###Code
asr_output_dict = asr_diar_offline.write_json_and_transcript(audio_file_list, diar_labels, words, word_ts)
###Output
_____no_output_____
###Markdown
After running the `write_json_and_transcript()` function, the transcription output will be located in the `./asr_with_diar/transcript_with_speaker_labels` folder. It shows the **start time and end time of each utterance, the speaker ID, and the words spoken** during that time span.
###Code
transcription_path_to_file = f"{ROOT}/asr_with_diar/transcript_with_speaker_labels/an4_diarize_test.txt"
transcript = read_file(transcription_path_to_file)
pp.pprint(transcript)
###Output
_____no_output_____
###Markdown
Another output is the transcription in JSON format, which is saved in `./asr_with_diar/json_result`. The JSON output includes information such as the **transcription, the estimated number of speakers (variable named `speaker_count`), the start and end time of each word and, most importantly, the speaker label for each word.**
###Code
transcription_path_to_file = f"{ROOT}/asr_with_diar/json_result/an4_diarize_test.json"
json_contents = read_file(transcription_path_to_file)
pp.pprint(json_contents)
###Output
_____no_output_____
###Markdown
Automatic Speech Recognition combined with Speaker Diarization
###Code
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# If you're using Google Colab and not running locally, run this cell.
## Install dependencies
!pip install wget
!apt-get install sox libsndfile1 ffmpeg
!pip install unidecode
# ## Install NeMo
BRANCH = 'main'
!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]
## Install TorchAudio
!pip install torchaudio -f https://download.pytorch.org/whl/torch_stable.html
###Output
_____no_output_____
###Markdown
Introduction In the early years, speaker diarization algorithms were developed for speech recognition on multispeaker audio recordings to enable speaker adaptive processing, but they also gained their own value over time as a stand-alone application that provides speaker-specific meta information for downstream tasks such as audio retrieval. Automatic Speech Recognition output, when combined with speaker labels, has shown immense use in many tasks, ranging from analyzing telephonic conversations to decoding meeting transcriptions. In this tutorial, we demonstrate how one can get ASR transcriptions combined with speaker labels along with voice activity time stamps using NeMo asr collections. For a detailed understanding of transcribing words with ASR, refer to this [ASR tutorial](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/asr/ASR_with_NeMo.ipynb), and for a detailed understanding of diarizing the speakers in an audio recording, refer to this [Diarization inference](https://github.com/NVIDIA/NeMo/blob/main/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb) tutorial. Let's first import nemo asr and other libraries for visualization purposes.
###Code
import nemo.collections.asr as nemo_asr
import numpy as np
from IPython.display import Audio, display
import librosa
import os
import wget
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
We demonstrate this tutorial using a merged an4 audio clip that has two speakers (male and female) speaking dates in different formats. If the data does not already exist, download it and listen to it.
###Code
ROOT = os.getcwd()
data_dir = os.path.join(ROOT,'data')
os.makedirs(data_dir, exist_ok=True)
an4_audio_url = "https://nemo-public.s3.us-east-2.amazonaws.com/an4_diarize_test.wav"
if not os.path.exists(os.path.join(data_dir,'an4_diarize_test.wav')):
AUDIO_FILENAME = wget.download(an4_audio_url, data_dir)
else:
AUDIO_FILENAME = os.path.join(data_dir,'an4_diarize_test.wav')
signal, sample_rate = librosa.load(AUDIO_FILENAME, sr=None)
display(Audio(signal,rate=sample_rate))
def show_figure(signal,text='Audio',overlay_color=[]):
fig,ax = plt.subplots(1,1)
fig.set_figwidth(20)
fig.set_figheight(2)
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c='k')
if len(overlay_color):
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c=overlay_color)
fig.suptitle(text, fontsize=16)
plt.xlabel('time (secs)', fontsize=18)
plt.ylabel('signal strength', fontsize=14);
plt.axis([0,len(signal),-0.5,+0.5])
time_axis,_ = plt.xticks();
plt.xticks(time_axis[:-1],time_axis[:-1]/sample_rate);
###Output
_____no_output_____
###Markdown
Plot the audio
###Code
show_figure(signal)
###Output
_____no_output_____
###Markdown
We start our demonstration by first transcribing the audio using our pretrained model `QuartzNet15x5Base-En` and use the CTC output probabilities to get timestamps for the spoken words. We then use these timestamps to get speaker label information using the speaker diarizer model. Download and load the pretrained QuartzNet ASR model
###Code
#Load model
asr_model = nemo_asr.models.EncDecCTCModel.from_pretrained(model_name='QuartzNet15x5Base-En', strict=False)
###Output
_____no_output_____
###Markdown
Transcribe the audio
###Code
files = [AUDIO_FILENAME]
transcript = asr_model.transcribe(paths2audio_files=files)[0]
print(f'Transcript: "{transcript}"')
###Output
_____no_output_____
###Markdown
Get CTC log probabilities with output labels
###Code
# softmax implementation in NumPy
def softmax(logits):
e = np.exp(logits - np.max(logits))
return e / e.sum(axis=-1).reshape([logits.shape[0], 1])
# let's do inference once again but without decoder
logits = asr_model.transcribe(files, logprobs=True)[0]
probs = softmax(logits)
# 20ms is duration of a timestep at output of the model
time_stride = 0.02
# get model's alphabet
labels = list(asr_model.decoder.vocabulary) + ['blank']
labels[0] = 'space'
###Output
_____no_output_____
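###Markdown
 Before extracting speech and non-speech segments, it can help to inspect the greedy per-frame labels that the next cell operates on; a small sketch using `probs` and `labels` from the cell above:
###Code
# greedy per-frame symbol: index 28 is the CTC blank, index 0 is the word separator ('space')
frame_labels = [labels[int(np.argmax(p))] for p in probs]
print("First 50 frame labels: \n", frame_labels[:50])
###Output
_____no_output_____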
###Markdown
We use CTC labels for voice activity detection. To detect speech and non-speech segments in the audio, we use the blank and space labels in the CTC outputs. Consecutive runs of space or blank labels longer than a threshold are considered non-speech segments.
###Code
blanks = []
state = ''
idx_state = 0
if np.argmax(probs[0]) == 28:
state = 'blank'
for idx in range(1, probs.shape[0]):
current_char_idx = np.argmax(probs[idx])
if state == 'blank' and current_char_idx != 0 and current_char_idx != 28:
blanks.append([idx_state, idx-1])
state = ''
if state == '':
if current_char_idx == 28:
state = 'blank'
idx_state = idx
if state == 'blank':
blanks.append([idx_state, len(probs)-1])
threshold=20 # minimum width to consider non-speech activity
non_speech=list(filter(lambda x:x[1]-x[0]>threshold,blanks))
# get timestamps for space symbols
spaces = []
state = ''
idx_state = 0
if np.argmax(probs[0]) == 0:
state = 'space'
for idx in range(1, probs.shape[0]):
current_char_idx = np.argmax(probs[idx])
if state == 'space' and current_char_idx != 0 and current_char_idx != 28:
spaces.append([idx_state, idx-1])
state = ''
if state == '':
if current_char_idx == 0:
state = 'space'
idx_state = idx
if state == 'space':
spaces.append([idx_state, len(probs)-1])
# calibration offset for timestamps: 180 ms
offset = -0.18
# split the transcript into words
words = transcript.split()
###Output
_____no_output_____
###Markdown
Frame-level stamps for non-speech frames
###Code
print(non_speech)
###Output
_____no_output_____
###Markdown
Write to an rttm-type file for later use in extracting speaker labels
###Code
frame_offset=offset/time_stride
speech_labels=[]
uniq_id = os.path.basename(AUDIO_FILENAME).split('.')[0]
with open(uniq_id+'.rttm','w') as f:
for idx in range(len(non_speech)-1):
start = (non_speech[idx][1]+frame_offset)*time_stride
end = (non_speech[idx+1][0]+frame_offset)*time_stride
f.write("SPEAKER {} 1 {:.3f} {:.3f} <NA> <NA> speech <NA>\n".format(uniq_id,start,end-start))
speech_labels.append("{:.3f} {:.3f} speech".format(start,end))
if non_speech[-1][1] < len(probs):
start = (non_speech[-1][1]+frame_offset)*time_stride
end = (len(probs)+frame_offset)*time_stride
f.write("SPEAKER {} 1 {:.3f} {:.3f} <NA> <NA> speech <NA>\n".format(uniq_id,start,end-start))
speech_labels.append("{:.3f} {:.3f} speech".format(start,end))
###Output
_____no_output_____
###Markdown
Time stamps for speech frames
###Code
print(speech_labels)
COLORS="b g c m y".split()
def get_color(signal,speech_labels,sample_rate=16000):
c=np.array(['k']*len(signal))
for time_stamp in speech_labels:
start,end,label=time_stamp.split()
start,end = int(float(start)*sample_rate),int(float(end)*sample_rate)  # convert seconds to sample indices using the sample_rate argument
if label == "speech":
code = 'red'
else:
code = COLORS[int(label.split('_')[-1])]
c[start:end]=code
return c
###Output
_____no_output_____
###Markdown
With the voice activity time stamps extracted from the CTC outputs, here we show the voice activity signal in **red** and the non-speech background in **black**.
###Code
color=get_color(signal,speech_labels)
show_figure(signal,'an4 audio signal with vad',color)
###Output
_____no_output_____
###Markdown
We use a helper function from speaker utils to convert the voice activity rttm file into a manifest, which we then diarize using the clustering diarizer inference model.
###Code
from nemo.collections.asr.parts.utils.speaker_utils import write_rttm2manifest
output_dir = os.path.join(ROOT, 'oracle_vad')
os.makedirs(output_dir,exist_ok=True)
oracle_manifest = os.path.join(output_dir,'oracle_manifest.json')
write_rttm2manifest(paths2audio_files=files,
paths2rttm_files=[uniq_id+'.rttm'],
manifest_file=oracle_manifest)
!cat {output_dir}/oracle_manifest.json
###Output
_____no_output_____
###Markdown
Set up diarizer model
###Code
from omegaconf import OmegaConf
MODEL_CONFIG = os.path.join(data_dir,'speaker_diarization.yaml')
if not os.path.exists(MODEL_CONFIG):
config_url = "https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/speaker_tasks/diarization/conf/speaker_diarization.yaml"
MODEL_CONFIG = wget.download(config_url,data_dir)
config = OmegaConf.load(MODEL_CONFIG)
pretrained_speaker_model='speakerdiarization_speakernet'
config.diarizer.paths2audio_files = files
config.diarizer.out_dir = output_dir #Directory to store intermediate files and prediction outputs
config.diarizer.speaker_embeddings.model_path = pretrained_speaker_model
# Ignoring vad we just need to pass the manifest file we created
config.diarizer.speaker_embeddings.oracle_vad_manifest = oracle_manifest
config.diarizer.oracle_num_speakers = 2
###Output
_____no_output_____
###Markdown
Diarize the audio at provided time stamps
###Code
from nemo.collections.asr.models import ClusteringDiarizer
oracle_model = ClusteringDiarizer(cfg=config);
oracle_model.diarize();
from nemo.collections.asr.parts.utils.speaker_utils import rttm_to_labels
pred_rttm=os.path.join(output_dir,'pred_rttms',uniq_id+'.rttm')
labels=rttm_to_labels(pred_rttm)
print("speaker labels with time stamps\n",labels)
###Output
_____no_output_____
###Markdown
Now let us see the audio plot color coded per speaker
###Code
color=get_color(signal,labels)
show_figure(signal,'audio with speaker labels',color)
display(Audio(signal,rate=16000))
###Output
_____no_output_____
###Markdown
Finally transcribe audio with time stamps and speaker label information
###Code
pos_prev = 0
idx=0
start_point,end_point,speaker=labels[idx].split()
print("{} [{:.2f} - {:.2f} sec]".format(speaker,float(start_point),float(end_point)),end=" ")
for j, spot in enumerate(spaces):
pos_end = offset + (spot[0]+spot[1])/2*time_stride
if pos_prev < float(end_point):
print(words[j],end=" ")
else:
print()
idx+=1
start_point,end_point,speaker=labels[idx].split()
print("{} [{:.2f} - {:.2f} sec]".format(speaker,float(start_point),float(end_point)),end=" ")
print(words[j],end=" ")
pos_prev = pos_end
print(words[j+1],end=" ")
###Output
_____no_output_____
###Markdown
Automatic Speech Recognition with Speaker Diarization
###Code
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# If you're using Google Colab and not running locally, run this cell.
## Install dependencies
!pip install wget
!apt-get install sox libsndfile1 ffmpeg
!pip install unidecode
# ## Install NeMo
BRANCH = 'main'
!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]
## Install TorchAudio
!pip install torchaudio -f https://download.pytorch.org/whl/torch_stable.html
###Output
_____no_output_____
###Markdown
Introduction Speaker diarization lets us figure out "who spoke when" in a transcription. Without speaker diarization, we cannot distinguish the speakers in the transcript generated from automatic speech recognition (ASR). Nowadays, ASR combined with speaker diarization has shown immense use in many tasks, ranging from analyzing meeting transcriptions to media indexing. In this tutorial, we demonstrate how we can get ASR transcriptions combined with speaker labels. Since we don't cover the detailed process of obtaining the ASR or diarization results here, please refer to the following links for a more in-depth description. If you need a detailed understanding of transcribing words with ASR, refer to this [ASR Tutorial](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/asr/ASR_with_NeMo.ipynb). For detailed parameter settings and execution of speaker diarization, refer to this [Diarization Inference](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb) tutorial. An example script that runs ASR and speaker diarization together can be found at [ASR with Diarization](https://github.com/NVIDIA/NeMo/blob/main/examples/speaker_tasks/diarization/asr_with_diarization.py). Speaker diarization in the ASR pipeline: speaker diarization results should align well with the ASR output. Thus, we use the ASR output to create Voice Activity Detection (VAD) timestamps and obtain the segments we want to diarize. The segments we obtain from the VAD timestamps are further segmented into sub-segments in the speaker diarization step. Finally, after obtaining the speaker labels from speaker diarization, we match the decoded words with the speaker labels to generate a transcript with speaker labels. ASR → VAD timestamps and decoded words → speaker diarization → speaker label matching. Import libraries: let's first import nemo asr and other libraries for visualization purposes.
###Code
import nemo.collections.asr as nemo_asr
import numpy as np
from IPython.display import Audio, display
import librosa
import os
import wget
import matplotlib.pyplot as plt
import nemo
from nemo.collections.asr.parts.utils.diarization_utils import ASR_DIAR_OFFLINE
import glob
import pprint
pp = pprint.PrettyPrinter(indent=4)
def read_file(path_to_file):
with open(path_to_file) as f:
contents = f.read().splitlines()
return contents
###Output
_____no_output_____
###Markdown
We demonstrate this tutorial using a merged an4 audio clip that has two speakers (male and female) speaking dates in different formats. If the audio does not already exist locally, we download it and then listen to it.
###Code
ROOT = os.getcwd()
data_dir = os.path.join(ROOT,'data')
os.makedirs(data_dir, exist_ok=True)
an4_audio_url = "https://nemo-public.s3.us-east-2.amazonaws.com/an4_diarize_test.wav"
if not os.path.exists(os.path.join(data_dir,'an4_diarize_test.wav')):
AUDIO_FILENAME = wget.download(an4_audio_url, data_dir)
else:
AUDIO_FILENAME = os.path.join(data_dir,'an4_diarize_test.wav')
audio_file_list = glob.glob(f"{data_dir}/*.wav")
print("Input audio file list: \n", audio_file_list)
signal, sample_rate = librosa.load(AUDIO_FILENAME, sr=None)
display(Audio(signal,rate=sample_rate))
###Output
_____no_output_____
###Markdown
`display_waveform()` and `get_color()` functions are defined for displaying the waveform with diarization results.
###Code
def display_waveform(signal,text='Audio',overlay_color=[]):
fig,ax = plt.subplots(1,1)
fig.set_figwidth(20)
fig.set_figheight(2)
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c='k')
if len(overlay_color):
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c=overlay_color)
fig.suptitle(text, fontsize=16)
plt.xlabel('time (secs)', fontsize=18)
plt.ylabel('signal strength', fontsize=14);
plt.axis([0,len(signal),-0.5,+0.5])
time_axis,_ = plt.xticks();
plt.xticks(time_axis[:-1],time_axis[:-1]/sample_rate);
COLORS="b g c m y".split()
def get_color(signal,speech_labels,sample_rate=16000):
c=np.array(['k']*len(signal))
for time_stamp in speech_labels:
start,end,label=time_stamp.split()
        start, end = int(float(start) * sample_rate), int(float(end) * sample_rate)
if label == "speech":
code = 'red'
else:
code = COLORS[int(label.split('_')[-1])]
c[start:end]=code
return c
###Output
_____no_output_____
###Markdown
Using the above function, we can display the waveform of the example audio clip.
###Code
display_waveform(signal)
###Output
_____no_output_____
###Markdown
Parameter setting for ASR and diarization. First, we need to set up the following parameters for ASR and diarization. We start our demonstration by transcribing the audio recording using our pretrained ASR model `QuartzNet15x5Base-En` and using the CTC output probabilities to get timestamps for the spoken words. We then use these timestamps to get speaker label information using the speaker diarizer model.
###Code
CONFIG_URL = "https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/speaker_tasks/diarization/conf/speaker_diarization.yaml"
params = {
"time_stride": 0.02, # This should not be changed if you are using QuartzNet15x5Base.
"offset": -0.18, # This should not be changed if you are using QuartzNet15x5Base.
"round_float": 2,
"window_length_in_sec": 1.5,
"shift_length_in_sec": 0.25,
"print_transcript": False,
"threshold": 50, # minimun width to consider non-speech activity
"external_oracle_vad": False,
"diar_config_url": CONFIG_URL,
"ASR_model_name": 'QuartzNet15x5Base-En',
}
###Output
_____no_output_____
###Markdown
Let's create an instance from ASR_DIAR_OFFLINE class. We pass the ``params`` variable to setup the parameters for both ASR and diarization.
###Code
asr_diar_offline = ASR_DIAR_OFFLINE(params)
asr_model = asr_diar_offline.set_asr_model(params['ASR_model_name'])
###Output
_____no_output_____
###Markdown
We will create the folders needed for storing VAD timestamps and ASR/diarization results. Under the folder named ``asr_with_diar``, the following folders will be created: ``oracle_vad``, ``json_result``, and ``transcript_with_speaker_labels``.
###Code
asr_diar_offline.create_directories()
print("Folders are created as below.")
print("VAD file path: \n", asr_diar_offline.oracle_vad_dir)
print("JSON result path: \n", asr_diar_offline.json_result_dir)
print("Transcript result path: \n", asr_diar_offline.trans_with_spks_dir)
###Output
_____no_output_____
###Markdown
Run ASR and get word timestamps. Before we run speaker diarization, we should run ASR and use the ASR output to generate VAD timestamps and decoded words. The `run_ASR()` function extracts the word sequence, the logit values for each frame, and the timestamps for each token (character). These three types of results are stored in the ``ASR_output`` variable.
###Code
ASR_output = asr_diar_offline.run_ASR(asr_model, audio_file_list)
print("Decoded word output: \n", ASR_output[0][0])
print("Logit values for each frame: \n",ASR_output[0][1].shape, ASR_output[0][1])
print("Framelevel timestamps for each token: \n", ASR_output[0][2])
###Output
_____no_output_____
###Markdown
The following three variables are obtained from the `get_speech_labels_list()` function.- words List[str]: contains the sequence of words.- spaces List[int]: contains the frame-level index of the end of the last word and the start of the next word. - word_ts List[int]: contains the frame-level index of the start and the end of each word.
###Code
words, spaces, word_ts = asr_diar_offline.get_speech_labels_list(ASR_output, audio_file_list)
print("Transcribed words: \n", words[0])
print("Spaces between words: \n", spaces[0])
print("Timestamps for the words: \n", word_ts[0])
###Output
_____no_output_____
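###Markdown
As a quick sanity check (a minimal sketch, not part of the original pipeline), we can convert the frame-level indices above into seconds using the `time_stride` and `offset` parameters defined earlier. The layout of each `word_ts[0]` entry is assumed here to be a `[start_frame, end_frame]` pair.
###Code
# Minimal sketch (assumption: each entry of word_ts[0] is a [start, end] frame pair).
# A CTC frame index maps to seconds as: offset + frame_index * time_stride.
def frame_to_sec(frame_idx, time_stride=params['time_stride'], offset=params['offset']):
    return round(offset + frame_idx * time_stride, params['round_float'])

first_word = words[0][0]
start_frame, end_frame = word_ts[0][0]
print(f"'{first_word}' spans roughly {frame_to_sec(start_frame)} - {frame_to_sec(end_frame)} sec")
###Output
_____no_output_____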
###Markdown
Then we multiply by `params['time_stride']=0.02` to get the timestamp in seconds. Run diarization with the extracted VAD timestamps: we need to convert the ASR-based VAD output (*.rttm format) to a VAD manifest (*.json format) file. The following function converts the rttm files into a manifest file and returns the path of the manifest file.
###Code
vad_manifest_path = asr_diar_offline.write_VAD_rttm(asr_diar_offline.oracle_vad_dir, audio_file_list)
print("VAD manifest file path: \n", vad_manifest_path)
print("VAD Manifest file content:")
pp.pprint(read_file(vad_manifest_path))
###Output
_____no_output_____
###Markdown
Now that all the components for diarization are ready, let's run diarization by calling the `run_diarization()` function.
###Code
num_speakers = None # If we know the number of speakers, we can assign "2".
pretrained_speaker_model = 'speakerdiarization_speakernet'
asr_diar_offline.run_diarization(audio_file_list, vad_manifest_path, num_speakers, pretrained_speaker_model)
print(nemo.__file__)
###Output
_____no_output_____
###Markdown
The `run_diarization()` function will create the `./asr_with_diar/oracle_vad/pred_rttms/an4_diarize_test.rttm` file. Let's see what is written in this `rttm` file.
###Code
predicted_speaker_label_rttm_path = f"{ROOT}/asr_with_diar/oracle_vad/pred_rttms/an4_diarize_test.rttm"
pred_rttm = read_file(predicted_speaker_label_rttm_path)
pp.pprint(pred_rttm)
###Output
_____no_output_____
###Markdown
Let's check out the diarization output. `get_diarization_labels()` function extracts the estimated speaker label information with timestamps from the predicted rttm file.
###Code
diar_labels = asr_diar_offline.get_diarization_labels(audio_file_list)
print("Diarization Labels:")
pp.pprint(diar_labels[0])
color = get_color(signal, diar_labels[0])
display_waveform(signal,'Audio with Speaker Labels',color)
display(Audio(signal,rate=16000))
###Output
_____no_output_____
###Markdown
Check the transcription output. Now that we've run ASR and diarization, let's match the diarization result with the ASR result and get the final output. The `write_json_and_transcript()` function matches the diarization output `diar_labels` with `words` using the timestamp information in `word_ts`.
###Code
asr_output_dict = asr_diar_offline.write_json_and_transcript(audio_file_list, diar_labels, words, word_ts)
###Output
_____no_output_____
###Markdown
After running `write_json_and_transcript()` function, the transcription output will be located in `./asr_with_diar/transcript_with_speaker_labels` folder, which shows **start time to end time of the utterance, speaker ID, and words spoken** during the notified time.
###Code
transcription_path_to_file = f"{ROOT}/asr_with_diar/transcript_with_speaker_labels/an4_diarize_test.txt"
transcript = read_file(transcription_path_to_file)
pp.pprint(transcript)
###Output
_____no_output_____
###Markdown
Another output is transcription output in JSON format, which is saved in `./asr_with_diar/json_result`. In the JSON format output, we include information such as **transcription, estimated number of speakers (variable named `speaker_count`), start and end time of each word and most importantly, speaker label for each word.**
###Code
transcription_path_to_file = f"{ROOT}/asr_with_diar/json_result/an4_diarize_test.json"
json_contents = read_file(transcription_path_to_file)
pp.pprint(json_contents)
###Output
_____no_output_____
###Markdown
Automatic Speech Recognition with Speaker Diarization
###Code
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# If you're using Google Colab and not running locally, run this cell.
## Install dependencies
!pip install wget
!apt-get install sox libsndfile1 ffmpeg
!pip install unidecode
# ## Install NeMo
BRANCH = 'main'
!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]
## Install TorchAudio
!pip install torchaudio -f https://download.pytorch.org/whl/torch_stable.html
###Output
_____no_output_____
###Markdown
Introduction Speaker diarization lets us figure out "who spoke when" in a transcription. Without speaker diarization, we cannot distinguish the speakers in the transcript generated from automatic speech recognition (ASR). Nowadays, ASR combined with speaker diarization has shown immense use in many tasks, ranging from analyzing meeting transcriptions to media indexing. In this tutorial, we demonstrate how we can get ASR transcriptions combined with speaker labels. Since we don't cover the detailed process of obtaining the ASR or diarization results here, please refer to the following links for a more in-depth description. If you need a detailed understanding of transcribing words with ASR, refer to this [ASR Tutorial](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/asr/ASR_with_NeMo.ipynb). For detailed parameter settings and execution of speaker diarization, refer to this [Diarization Inference](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb) tutorial. An example script that runs ASR and speaker diarization together can be found at [ASR with Diarization](https://github.com/NVIDIA/NeMo/blob/main/examples/speaker_tasks/diarization/asr_with_diarization.py). Speaker diarization in the ASR pipeline: speaker diarization results should align well with the ASR output. Thus, we use the ASR output to create Voice Activity Detection (VAD) timestamps and obtain the segments we want to diarize. The segments we obtain from the VAD timestamps are further segmented into sub-segments in the speaker diarization step. Finally, after obtaining the speaker labels from speaker diarization, we match the decoded words with the speaker labels to generate a transcript with speaker labels. ASR → VAD timestamps and decoded words → speaker diarization → speaker label matching. Import libraries: let's first import nemo asr and other libraries for visualization purposes.
###Code
import nemo.collections.asr as nemo_asr
import numpy as np
from IPython.display import Audio, display
import librosa
import os
import wget
import matplotlib.pyplot as plt
import nemo
from nemo.collections.asr.parts.utils.diarization_utils import ASR_DIAR_OFFLINE
import glob
import pprint
pp = pprint.PrettyPrinter(indent=4)
###Output
_____no_output_____
###Markdown
We demonstrate this tutorial using a merged AN4 audioclip. The merged audioclip contains the speech of two speakers (male and female) reading dates in different formats. Run the following script to download the audioclip and play it.
###Code
ROOT = os.getcwd()
data_dir = os.path.join(ROOT,'data')
os.makedirs(data_dir, exist_ok=True)
an4_audio_url = "https://nemo-public.s3.us-east-2.amazonaws.com/an4_diarize_test.wav"
if not os.path.exists(os.path.join(data_dir,'an4_diarize_test.wav')):
AUDIO_FILENAME = wget.download(an4_audio_url, data_dir)
else:
AUDIO_FILENAME = os.path.join(data_dir,'an4_diarize_test.wav')
audio_file_list = glob.glob(f"{data_dir}/*.wav")
print("Input audio file list: \n", audio_file_list)
signal, sample_rate = librosa.load(AUDIO_FILENAME, sr=None)
display(Audio(signal,rate=sample_rate))
###Output
_____no_output_____
###Markdown
`display_waveform()` and `get_color()` functions are defined for displaying the waveform with diarization results.
###Code
def display_waveform(signal,text='Audio',overlay_color=[]):
fig,ax = plt.subplots(1,1)
fig.set_figwidth(20)
fig.set_figheight(2)
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c='k')
if len(overlay_color):
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c=overlay_color)
fig.suptitle(text, fontsize=16)
plt.xlabel('time (secs)', fontsize=18)
plt.ylabel('signal strength', fontsize=14);
plt.axis([0,len(signal),-0.5,+0.5])
time_axis,_ = plt.xticks();
plt.xticks(time_axis[:-1],time_axis[:-1]/sample_rate);
COLORS="b g c m y".split()
def get_color(signal,speech_labels,sample_rate=16000):
c=np.array(['k']*len(signal))
for time_stamp in speech_labels:
start,end,label=time_stamp.split()
        start, end = int(float(start) * sample_rate), int(float(end) * sample_rate)
if label == "speech":
code = 'red'
else:
code = COLORS[int(label.split('_')[-1])]
c[start:end]=code
return c
###Output
_____no_output_____
###Markdown
Using the above function, we can display the waveform of the example audio clip.
###Code
display_waveform(signal)
###Output
_____no_output_____
###Markdown
Parameter setting for ASR and diarization. First, we need to set up the following parameters for ASR and diarization. We start our demonstration by transcribing the audio recording using our pretrained ASR model `QuartzNet15x5Base-En` and using the CTC output probabilities to get timestamps for the spoken words. We then use these timestamps to get speaker label information using the speaker diarizer model.
###Code
from omegaconf import OmegaConf
import shutil
CONFIG_URL = "https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/speaker_tasks/diarization/conf/offline_diarization_with_asr.yaml"
if not os.path.exists(os.path.join(data_dir,'offline_diarization_with_asr.yaml')):
CONFIG = wget.download(CONFIG_URL, data_dir)
else:
CONFIG = os.path.join(data_dir,'offline_diarization_with_asr.yaml')
cfg = OmegaConf.load(CONFIG)
print(OmegaConf.to_yaml(cfg))
###Output
_____no_output_____
###Markdown
Speaker Diarization scripts commonly expect the following arguments: 1. manifest_filepath: path to a manifest file containing JSON lines of the format {"audio_filepath": "/path/to/audio_file", "offset": 0, "duration": null, "label": "infer", "text": "-", "num_speakers": null, "rttm_filepath": "/path/to/rttm/file", "uem_filepath"="/path/to/uem/filepath"}. 2. out_dir: directory where outputs and intermediate files are stored. 3. oracle_vad: if this is true, we extract speech activity labels from the RTTM files; if false, then either 4. vad.model_path or an external manifest path containing speech activity labels has to be passed. Mandatory fields are audio_filepath, offset, duration, label and text. For the rest, if you would like to evaluate with a known number of speakers, pass the value, else `null`. If you would like to score the system with known RTTMs, pass them as well, else `null`. The uem file is used to score only part of your audio for evaluation purposes, so pass it if you would like to evaluate on it, else `null`.**Note:** we expect the audio and corresponding RTTM to have the **same base name**, and the name should be **unique**. For example: if the audio file name is **test_an4**.wav, we expect the corresponding rttm file (if provided) to be named **test_an4**.rttm (note the matching **test_an4** base name). Let's create a manifest with the an4 audio and rttm available. If you have more than one file, you may also use the script `NeMo/scripts/speaker_tasks/rttm_to_manifest.py` to generate a manifest file from a list of audio files and, optionally, rttm files.
###Code
# Create a manifest for input with below format.
# {"audio_filepath": "/path/to/audio_file", "offset": 0, "duration": null, "label": "infer", "text": "-",
# "num_speakers": null, "rttm_filepath": "/path/to/rttm/file", "uem_filepath"="/path/to/uem/filepath"}
import json
meta = {
'audio_filepath': AUDIO_FILENAME,
'offset': 0,
'duration':None,
'label': 'infer',
'text': '-',
'num_speakers': 2,
'rttm_filepath': None,
'uem_filepath' : None
}
with open(os.path.join(data_dir,'input_manifest.json'),'w') as fp:
json.dump(meta,fp)
fp.write('\n')
cfg.diarizer.manifest_filepath = os.path.join(data_dir,'input_manifest.json')
!cat {cfg.diarizer.manifest_filepath}
###Output
_____no_output_____
###Markdown
Set the parameters required for diarization. Here we get voice activity labels from ASR, which is enabled through the parameter `cfg.diarizer.asr.parameters.asr_based_vad`.
###Code
pretrained_speaker_model='ecapa_tdnn'
cfg.diarizer.manifest_filepath = cfg.diarizer.manifest_filepath
cfg.diarizer.out_dir = data_dir #Directory to store intermediate files and prediction outputs
cfg.diarizer.speaker_embeddings.model_path = pretrained_speaker_model
cfg.diarizer.speaker_embeddings.parameters.window_length_in_sec = 1.5
cfg.diarizer.speaker_embeddings.parameters.shift_length_in_sec = 0.75
cfg.diarizer.clustering.parameters.oracle_num_speakers=True
# USE VAD generated from ASR timestamps
cfg.diarizer.asr.model_path = 'QuartzNet15x5Base-En'
cfg.diarizer.oracle_vad = False # ----> Not using oracle VAD; speech activity comes from ASR timestamps
cfg.diarizer.asr.parameters.asr_based_vad = True
cfg.diarizer.asr.parameters.threshold=300
###Output
_____no_output_____
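###Markdown
Optionally, you can persist the modified configuration so the exact settings used in this run can be reused outside the notebook. This is a small optional sketch; the output filename below is just an example, not something the tutorial requires.
###Code
# Optional sketch: save the modified diarizer configuration for later reuse.
# The filename is arbitrary (an example only).
modified_config_path = os.path.join(data_dir, 'speaker_diarization_modified.yaml')
OmegaConf.save(config=config, f=modified_config_path)
print("Saved modified config to:", modified_config_path)
###Output
_____no_output_____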
###Markdown
Let's create an instance from ASR_DIAR_OFFLINE class. We pass the ``params`` variable to setup the parameters for both ASR and diarization.
###Code
from nemo.collections.asr.parts.utils.speaker_utils import audio_rttm_map
asr_diar_offline = ASR_DIAR_OFFLINE(**cfg.diarizer.asr.parameters)
asr_diar_offline.root_path = cfg.diarizer.out_dir
AUDIO_RTTM_MAP = audio_rttm_map(cfg.diarizer.manifest_filepath)
asr_diar_offline.AUDIO_RTTM_MAP = AUDIO_RTTM_MAP
asr_model = asr_diar_offline.set_asr_model(cfg.diarizer.asr.model_path)
###Output
_____no_output_____
###Markdown
Run ASR and get word timestamps. Before we run speaker diarization, we should run ASR and get the ASR output to generate decoded words and timestamps for those words. The following two variables are obtained from the `run_ASR()` function. - word_list List[str]: contains the sequence of words.- word_ts_list List[int]: contains the frame-level index of the start and the end of each word.
###Code
word_list, word_ts_list = asr_diar_offline.run_ASR(asr_model)
print("Decoded word output: \n", word_list[0])
print("Word-level timestamps \n", word_ts_list[0])
###Output
_____no_output_____
###Markdown
Run diarization with the extracted word timestamps. The ASR-based VAD output (*.rttm format) needs to be converted to a VAD manifest (*.json format) file; the following function handles this conversion and returns the path of the manifest file. Now that all the components for diarization are ready, let's run diarization by calling the `run_diarization()` function.
###Code
score = asr_diar_offline.run_diarization(cfg, word_ts_list)
###Output
_____no_output_____
###Markdown
`run_diarization()` function creates `an4_diarize_test.rttm` file. Let's see what is written in this `rttm` file.
###Code
def read_file(path_to_file):
with open(path_to_file) as f:
contents = f.read().splitlines()
return contents
predicted_speaker_label_rttm_path = f"{data_dir}/pred_rttms/an4_diarize_test.rttm"
pred_rttm = read_file(predicted_speaker_label_rttm_path)
pp.pprint(pred_rttm)
from nemo.collections.asr.parts.utils.speaker_utils import rttm_to_labels
pred_labels = rttm_to_labels(predicted_speaker_label_rttm_path)
color = get_color(signal, pred_labels)
display_waveform(signal,'Audio with Speaker Labels', color)
display(Audio(signal,rate=16000))
###Output
_____no_output_____
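###Markdown
As a small follow-up (a minimal sketch), we can count how many distinct speakers appear in the predicted labels, assuming each label is a `start end speaker` string as used for plotting above.
###Code
# Minimal sketch: count distinct speakers in the "start end speaker" label strings.
speakers = sorted({label.split()[-1] for label in pred_labels})
print(f"Predicted {len(speakers)} speakers:", speakers)
###Output
_____no_output_____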
###Markdown
Check the transcription output. Now that we've run ASR and diarization, let's match the diarization result with the ASR result and get the final output. The `write_json_and_transcript()` function matches the diarization output with `word_list` using the timestamp information in `word_ts_list`.
###Code
asr_output_dict = asr_diar_offline.write_json_and_transcript(word_list, word_ts_list)
###Output
_____no_output_____
###Markdown
After running `write_json_and_transcript()` function, the transcription output will be located in `./pred_rttms` folder, which shows **start time to end time of the utterance, speaker ID, and words spoken** during the notified time.
###Code
transcription_path_to_file = f"{data_dir}/pred_rttms/an4_diarize_test.txt"
transcript = read_file(transcription_path_to_file)
pp.pprint(transcript)
###Output
_____no_output_____
###Markdown
Another output is transcription output in JSON format, which is saved in `./pred_rttms/an4_diarize_test.json`. In the JSON format output, we include information such as **transcription, estimated number of speakers (variable named `speaker_count`), start and end time of each word and most importantly, speaker label for each word.**
###Code
transcription_path_to_file = f"{data_dir}/pred_rttms/an4_diarize_test.json"
json_contents = read_file(transcription_path_to_file)
pp.pprint(json_contents)
###Output
_____no_output_____
###Markdown
Automatic Speech Recognition with Speaker Diarization
###Code
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# If you're using Google Colab and not running locally, run this cell.
## Install dependencies
!pip install wget
!apt-get install sox libsndfile1 ffmpeg
!pip install unidecode
# ## Install NeMo
BRANCH = 'main'
!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]
## Install TorchAudio
!pip install torchaudio -f https://download.pytorch.org/whl/torch_stable.html
###Output
_____no_output_____
###Markdown
Introduction Speaker diarization lets us figure out "who spoke when" in the transcription. Without speaker diarization, we cannot distinguish the speakers in the transcript generated from automatic speech recognition (ASR). Nowadays, ASR combined with speaker diarization has shown immense use in many tasks, ranging from analyzing meeting transcription to media indexing. In this tutorial, we demonstrate how we can get ASR transcriptions combined with speaker labels. Since we don't include a detailed process of getting ASR results or diarization results, please refer to the following links for more in-depth description.If you need detailed understanding of transcribing words with ASR, refer to this [ASR Tutorial](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/asr/ASR_with_NeMo.ipynb) tutorial.For detailed parameter setting and execution of speaker diarization, refer to this [Diarization Inference](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb) tutorial.An example script that runs ASR and speaker diarization together can be found at [ASR with Diarization](https://github.com/NVIDIA/NeMo/blob/main/examples/speaker_tasks/diarization/offline_diarization_with_asr.py). Speaker diarization in ASR pipelineSpeaker diarization results in ASR pipeline should align well with ASR output. Thus, we use ASR output to create Voice Activity Detection (VAD) timestamps to obtain segments we want to diarize. The segments we obtain from the VAD timestamps are further segmented into sub-segments in the speaker diarization step. Finally, after obtaining the speaker labels from speaker diarization, we match the decoded words with speaker labels to generate a transcript with speaker labels. ASR → VAD timestamps and decoded words → speaker diarization → speaker label matching Import librariesLet's first import nemo asr and other libraries for visualization purposes.
###Code
import nemo.collections.asr as nemo_asr
import numpy as np
from IPython.display import Audio, display
import librosa
import os
import wget
import matplotlib.pyplot as plt
import nemo
import glob
import pprint
pp = pprint.PrettyPrinter(indent=4)
###Output
_____no_output_____
###Markdown
We demonstrate this tutorial using a merged AN4 audioclip. The merged audioclip contains the speech of two speakers (male and female) reading dates in different formats. Run the following script to download the audioclip and play it.
###Code
ROOT = os.getcwd()
data_dir = os.path.join(ROOT,'data')
os.makedirs(data_dir, exist_ok=True)
an4_audio_url = "https://nemo-public.s3.us-east-2.amazonaws.com/an4_diarize_test.wav"
if not os.path.exists(os.path.join(data_dir,'an4_diarize_test.wav')):
AUDIO_FILENAME = wget.download(an4_audio_url, data_dir)
else:
AUDIO_FILENAME = os.path.join(data_dir,'an4_diarize_test.wav')
audio_file_list = glob.glob(f"{data_dir}/*.wav")
print("Input audio file list: \n", audio_file_list)
signal, sample_rate = librosa.load(AUDIO_FILENAME, sr=None)
display(Audio(signal,rate=sample_rate))
###Output
_____no_output_____
###Markdown
`display_waveform()` and `get_color()` functions are defined for displaying the waveform with diarization results.
###Code
def display_waveform(signal,text='Audio',overlay_color=[]):
fig,ax = plt.subplots(1,1)
fig.set_figwidth(20)
fig.set_figheight(2)
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c='k')
if len(overlay_color):
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c=overlay_color)
fig.suptitle(text, fontsize=16)
plt.xlabel('time (secs)', fontsize=18)
plt.ylabel('signal strength', fontsize=14);
plt.axis([0,len(signal),-0.5,+0.5])
time_axis,_ = plt.xticks();
plt.xticks(time_axis[:-1],time_axis[:-1]/sample_rate);
COLORS="b g c m y".split()
def get_color(signal,speech_labels,sample_rate=16000):
c=np.array(['k']*len(signal))
for time_stamp in speech_labels:
start,end,label=time_stamp.split()
        start, end = int(float(start) * sample_rate), int(float(end) * sample_rate)
if label == "speech":
code = 'red'
else:
code = COLORS[int(label.split('_')[-1])]
c[start:end]=code
return c
###Output
_____no_output_____
###Markdown
Using the above function, we can display the waveform of the example audio clip.
###Code
display_waveform(signal)
###Output
_____no_output_____
###Markdown
Parameter setting for ASR and diarizationFirst, we need to setup the following parameters for ASR and diarization. We start our demonstration by first transcribing the audio recording using our pretrained ASR model `QuartzNet15x5Base-En` and use the CTC output probabilities to get timestamps for the spoken words. We then use these timestamps to get speaker label information using the speaker diarizer model.
###Code
from omegaconf import OmegaConf
import shutil
CONFIG_URL = "https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/speaker_tasks/diarization/conf/offline_diarization_with_asr.yaml"
if not os.path.exists(os.path.join(data_dir,'offline_diarization_with_asr.yaml')):
CONFIG = wget.download(CONFIG_URL, data_dir)
else:
CONFIG = os.path.join(data_dir,'offline_diarization_with_asr.yaml')
cfg = OmegaConf.load(CONFIG)
print(OmegaConf.to_yaml(cfg))
###Output
_____no_output_____
###Markdown
Speaker Diarization scripts commonly expects following arguments:1. manifest_filepath : Path to manifest file containing json lines of format: `{"audio_filepath": "/path/to/audio_file", "offset": 0, "duration": null, "label": "infer", "text": "-", "num_speakers": null, "rttm_filepath": "/path/to/rttm/file", "uem_filepath"="/path/to/uem/filepath"}`2. out_dir : directory where outputs and intermediate files are stored. 3. oracle_vad: If this is true then we extract speech activity labels from rttm files, if False then either 4. vad.model_path or external_manifestpath containing speech activity labels has to be passed. Mandatory fields are `audio_filepath`, `offset`, `duration`, `label` and `text`. For the rest if you would like to evaluate with a known number of speakers pass the value else `null`. If you would like to score the system with known rttms then that should be passed as well, else `null`. uem file is used to score only part of your audio for evaluation purposes, hence pass if you would like to evaluate on it else `null`.**Note:** we expect audio and corresponding RTTM to have **same base name** and the name should be **unique**. For example: if audio file name is **test_an4**.wav, if provided we expect corresponding rttm file name to be **test_an4**.rttm (note the matching **test_an4** base name) Lets create a manifest file with the an4 audio and rttm available. If you have more than one file you may also use the script `NeMo/scripts/speaker_tasks/pathfiles_to_diarize_manifest.py` to generate a manifest file from a list of audio files. In addition, you can optionally include rttm files to evaluate the diarization results.
###Code
# Create a manifest file for input with below format.
# {"audio_filepath": "/path/to/audio_file", "offset": 0, "duration": null, "label": "infer", "text": "-",
# "num_speakers": null, "rttm_filepath": "/path/to/rttm/file", "uem_filepath"="/path/to/uem/filepath"}
import json
meta = {
'audio_filepath': AUDIO_FILENAME,
'offset': 0,
'duration':None,
'label': 'infer',
'text': '-',
'num_speakers': 2,
'rttm_filepath': None,
'uem_filepath' : None
}
with open(os.path.join(data_dir,'input_manifest.json'),'w') as fp:
json.dump(meta,fp)
fp.write('\n')
cfg.diarizer.manifest_filepath = os.path.join(data_dir,'input_manifest.json')
!cat {cfg.diarizer.manifest_filepath}
###Output
_____no_output_____
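###Markdown
As an optional sanity check (a minimal sketch, not required by the pipeline), we can read the manifest back and confirm that the mandatory fields described above are present.
###Code
# Minimal sketch: read the manifest back and print the mandatory fields per entry.
with open(cfg.diarizer.manifest_filepath) as f:
    for line in f:
        entry = json.loads(line)
        print({k: entry.get(k) for k in ('audio_filepath', 'offset', 'duration', 'label', 'text')})
###Output
_____no_output_____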
###Markdown
Let's set the parameters required for diarization. In this tutorial, we obtain voice activity labels from ASR, which is set through parameter `cfg.diarizer.asr.parameters.asr_based_vad`.
###Code
pretrained_speaker_model='titanet_large'
cfg.diarizer.manifest_filepath = cfg.diarizer.manifest_filepath
cfg.diarizer.out_dir = data_dir #Directory to store intermediate files and prediction outputs
cfg.diarizer.speaker_embeddings.model_path = pretrained_speaker_model
cfg.diarizer.speaker_embeddings.parameters.window_length_in_sec = 1.5
cfg.diarizer.speaker_embeddings.parameters.shift_length_in_sec = 0.75
cfg.diarizer.clustering.parameters.oracle_num_speakers=True
# Using VAD generated from ASR timestamps
cfg.diarizer.asr.model_path = 'QuartzNet15x5Base-En'
cfg.diarizer.oracle_vad = False # ----> Not using oracle VAD
cfg.diarizer.asr.parameters.asr_based_vad = True
cfg.diarizer.asr.parameters.threshold=100 # ASR based VAD threshold: If 100, all silences under 1 sec are ignored.
cfg.diarizer.asr.parameters.decoder_delay_in_sec=0.2 # Decoder delay is compensated for 0.2 sec
###Output
_____no_output_____
###Markdown
Run ASR and get word timestampsBefore we run speaker diarization, we should run ASR and get the ASR output to generate decoded words and timestamps for those words. Let's import `ASR_TIMESTAMPS` class and create `asr_ts_decoder` instance that returns an ASR model. Using this ASR model, the following two variables are obtained from `asr_ts_decoder.run_ASR()` function. - word_hyp Dict[str, List[str]]: contains the sequence of words.- word_ts_hyp Dict[str, List[int]]: contains frame level index of the start and the end of each word.
###Code
from nemo.collections.asr.parts.utils.decoder_timestamps_utils import ASR_TIMESTAMPS
asr_ts_decoder = ASR_TIMESTAMPS(**cfg.diarizer)
asr_model = asr_ts_decoder.set_asr_model()
word_hyp, word_ts_hyp = asr_ts_decoder.run_ASR(asr_model)
print("Decoded word output dictionary: \n", word_hyp['an4_diarize_test'])
print("Word-level timestamps dictionary: \n", word_ts_hyp['an4_diarize_test'])
###Output
_____no_output_____
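###Markdown
To make the relationship between the two dictionaries concrete, here is a minimal sketch that pairs each decoded word with its corresponding timestamp entry, using the same `'an4_diarize_test'` key as above.
###Code
# Minimal sketch: pair each decoded word with its [start, end] timestamp entry.
for word, ts in zip(word_hyp['an4_diarize_test'], word_ts_hyp['an4_diarize_test']):
    print(f"{word}: {ts}")
###Output
_____no_output_____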
###Markdown
Let's create an instance `asr_diar_offline` from ASR_DIAR_OFFLINE class, which matches diarization results with ASR outputs. We pass ``cfg.diarizer`` to setup the parameters for both ASR and diarization. We also set `word_ts_anchor_offset` variable that determines the anchor position of each word. Here, we use the default value from `asr_ts_decoder` instance.
###Code
from nemo.collections.asr.parts.utils.diarization_utils import ASR_DIAR_OFFLINE
asr_diar_offline = ASR_DIAR_OFFLINE(**cfg.diarizer)
asr_diar_offline.word_ts_anchor_offset = asr_ts_decoder.word_ts_anchor_offset
###Output
_____no_output_____
###Markdown
The `asr_diar_offline` instance is now ready. As a next step, we run diarization. Run diarization with the extracted word timestamps: now that all the components for diarization are ready, let's run diarization by calling the `run_diarization()` function. `run_diarization()` returns two variables: `diar_hyp` and `diar_score`. `diar_hyp` is the diarization inference result, written in `[start time] [end time] [speaker]` format. `diar_score` contains `None` since we did not provide `rttm_filepath` in the input manifest file.
###Code
diar_hyp, diar_score = asr_diar_offline.run_diarization(cfg, word_ts_hyp)
print("Diarization hypothesis output: \n", diar_hyp['an4_diarize_test'])
###Output
_____no_output_____
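###Markdown
Since each hypothesis entry follows the `[start time] [end time] [speaker]` format described above, a single line can be parsed as in the following minimal sketch (assuming each entry of `diar_hyp['an4_diarize_test']` is one `start end speaker` string).
###Code
# Minimal sketch: parse the first diarization hypothesis entry into typed fields.
first_segment = diar_hyp['an4_diarize_test'][0]
start_s, end_s, speaker = first_segment.split()
print(f"{speaker} speaks from {float(start_s):.2f} sec to {float(end_s):.2f} sec")
###Output
_____no_output_____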
###Markdown
`run_diarization()` function also creates `an4_diarize_test.rttm` file. Let's check what is written in this `rttm` file.
###Code
def read_file(path_to_file):
with open(path_to_file) as f:
contents = f.read().splitlines()
return contents
predicted_speaker_label_rttm_path = f"{data_dir}/pred_rttms/an4_diarize_test.rttm"
pred_rttm = read_file(predicted_speaker_label_rttm_path)
pp.pprint(pred_rttm)
from nemo.collections.asr.parts.utils.speaker_utils import rttm_to_labels
pred_labels = rttm_to_labels(predicted_speaker_label_rttm_path)
color = get_color(signal, pred_labels)
display_waveform(signal,'Audio with Speaker Labels', color)
display(Audio(signal,rate=16000))
###Output
_____no_output_____
###Markdown
Check the speaker-labeled ASR transcription output. Now that we've run ASR and diarization, let's match the diarization result with the ASR result and get the final output. The `get_transcript_with_speaker_labels()` function in `asr_diar_offline` matches the diarization output `diar_hyp` with `word_hyp` using the timestamp information from `word_ts_hyp`.
###Code
asr_diar_offline.get_transcript_with_speaker_labels(diar_hyp, word_hyp, word_ts_hyp)
###Output
_____no_output_____
###Markdown
After running `get_transcript_with_speaker_labels()` function, the transcription output will be located in `./pred_rttms` folder, which shows **start time to end time of the utterance, speaker ID, and words spoken** during the notified time.
###Code
transcription_path_to_file = f"{data_dir}/pred_rttms/an4_diarize_test.txt"
transcript = read_file(transcription_path_to_file)
pp.pprint(transcript)
###Output
_____no_output_____
###Markdown
Another output is transcription output in JSON format, which is saved in `./pred_rttms/an4_diarize_test.json`. In the JSON format output, we include information such as **transcription, estimated number of speakers (variable named `speaker_count`), start and end time of each word and most importantly, speaker label for each word.**
###Code
transcription_path_to_file = f"{data_dir}/pred_rttms/an4_diarize_test.json"
json_contents = read_file(transcription_path_to_file)
pp.pprint(json_contents)
###Output
_____no_output_____
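###Markdown
If you want to post-process the JSON result programmatically, you can load it with the standard `json` module. The sketch below assumes the file holds a single JSON object for this session and only relies on the `speaker_count` field mentioned above; treat other field names as version-dependent.
###Code
# Minimal sketch (assumptions: the file is one JSON object and contains
# a 'speaker_count' field, per the description above).
import json
with open(transcription_path_to_file) as f:
    session_result = json.load(f)
print("Estimated speaker_count:", session_result.get('speaker_count'))
###Output
_____no_output_____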
###Markdown
Optional Features for ASR with Speaker Diarization Beam search decoderBeam-search decoder can be applied to CTC based ASR models. To use this feature, [pyctcdecode](https://github.com/kensho-technologies/pyctcdecode) should be installed. [pyctcdecode](https://github.com/kensho-technologies/pyctcdecode) supports word timestamp generation and can be applied to speaker diarization. pyctcdecode also requires [KenLM](https://github.com/kpu/kenlm) and KenLM is recommended to be installed using PyPI. Install pyctcdecode in your environment with the following commands:
###Code
!pip install pyctcdecode
!pip install https://github.com/kpu/kenlm/archive/master.zip
###Output
_____no_output_____
###Markdown
You can download publicly available language models (`.arpa` files) at [KALDI Tedlium Language Models](https://kaldi-asr.org/models/m5). Download [4-gram Big ARPA](https://kaldi-asr.org/models/5/4gram_big.arpa.gz) and provide the model path. Let's download the language model file to `data_dir` folder.
###Code
import gzip
import shutil
def gunzip(file_path,output_path):
with gzip.open(file_path,"rb") as f_in, open(output_path,"wb") as f_out:
shutil.copyfileobj(f_in, f_out)
f_in.close()
f_out.close()
ARPA_URL = 'https://kaldi-asr.org/models/5/4gram_big.arpa.gz'
f = wget.download(ARPA_URL, data_dir)
gunzip(f,f.replace(".gz",""))
###Output
_____no_output_____
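###Markdown
Before wiring the language model into the config, you can optionally verify that the decompressed `.arpa` file is in place (a minimal sketch).
###Code
# Minimal sketch: confirm the decompressed ARPA file exists and report its size.
arpa_path = os.path.join(data_dir, '4gram_big.arpa')
if os.path.exists(arpa_path):
    print(arpa_path, "size (MB):", round(os.path.getsize(arpa_path) / 1e6, 1))
else:
    print("ARPA file not found at", arpa_path)
###Output
_____no_output_____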
###Markdown
Provide the downloaded arpa language model file to `cfg.diarizer`.
###Code
arpa_model_path = os.path.join(data_dir, '4gram_big.arpa')
cfg.diarizer.asr.ctc_decoder_parameters.pretrained_language_model = arpa_model_path
###Output
_____no_output_____
###Markdown
Create a new `asr_ts_decoder` instance with the updated `cfg.diarizer`. The decoder script will launch pyctcdecode for decoding words and timestamps.
###Code
import importlib
import nemo.collections.asr.parts.utils.decoder_timestamps_utils as decoder_timestamps_utils
importlib.reload(decoder_timestamps_utils) # This module should be reloaded after you install pyctcdecode.
asr_ts_decoder = ASR_TIMESTAMPS(**cfg.diarizer)
asr_model = asr_ts_decoder.set_asr_model()
word_hyp, word_ts_hyp = asr_ts_decoder.run_ASR(asr_model)
print("Decoded word output dictionary: \n", word_hyp['an4_diarize_test'])
print("Word-level timestamps dictionary: \n", word_ts_hyp['an4_diarize_test'])
###Output
_____no_output_____
###Markdown
Realign Words with a Language Model (Experimental) The diarization result with the ASR transcript can be enhanced by applying a language model. The mapping between speaker labels and words can be realigned by employing language models. The realigning process calculates the probability of the words around the boundary between two hypothetical sentences spoken by different speakers. For example, consider the k-th word `but` (the `|` marks the hypothesized speaker change): hyp_former: "since i think like tuesday | but he's coming back to albuquerque"; hyp_latter: "since i think like tuesday but | he's coming back to albuquerque". The joint probabilities of the words in each sentence are computed for these two hypotheses. In this example, `hyp_former` is likely to get a higher score and thus the word `but` will be assigned to the second speaker. To use this feature, the python package [arpa](https://pypi.org/project/arpa/) should be installed.
###Code
!pip install arpa
###Output
_____no_output_____
###Markdown
`diarizer.asr.realigning_lm_parameters.logprob_diff_threshold` can be modified to optimize the diarization performance (default value is 1.2). This is a threshold value for the gap between two log-probabilities of two hypotheses. Thus, the lower the threshold, the more changes are expected to be seen in the output transcript. `arpa` package also uses KenLM language models as in pyctcdecode. You can download publicly available [4-gram Big ARPA](https://kaldi-asr.org/models/5/4gram_big.arpa.gz) model and provide the model path to hydra configuration as follows.
###Code
arpa_model_path = os.path.join(data_dir, '4gram_big.arpa')
cfg.diarizer.asr.realigning_lm_parameters.arpa_language_model = arpa_model_path
cfg.diarizer.asr.realigning_lm_parameters.logprob_diff_threshold = 1.2
import importlib
import nemo.collections.asr.parts.utils.diarization_utils as diarization_utils
importlib.reload(diarization_utils) # This module should be reloaded after you install arpa.
# Create a new instance with realigning language model
asr_diar_offline = ASR_DIAR_OFFLINE(**cfg.diarizer)
asr_diar_offline.word_ts_anchor_offset = asr_ts_decoder.word_ts_anchor_offset
###Output
_____no_output_____
###Markdown
Now that the language model for realigning is set up, you can run `get_transcript_with_speaker_labels()` to get the results with realigning.
###Code
asr_diar_offline.get_transcript_with_speaker_labels(diar_hyp, word_hyp, word_ts_hyp)
transcription_path_to_file = f"{data_dir}/pred_rttms/an4_diarize_test.txt"
transcript = read_file(transcription_path_to_file)
pp.pprint(transcript)
###Output
_____no_output_____
###Markdown
Automatic Speech Recognition with Speaker Diarization
###Code
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# If you're using Google Colab and not running locally, run this cell.
## Install dependencies
!pip install wget
!apt-get install sox libsndfile1 ffmpeg
!pip install unidecode
# ## Install NeMo
BRANCH = 'main'
!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]
## Install TorchAudio
!pip install torchaudio -f https://download.pytorch.org/whl/torch_stable.html
###Output
_____no_output_____
###Markdown
Introduction Speaker diarization lets us figure out "who spoke when" in the transcription. Without speaker diarization, we cannot distinguish the speakers in the transcript generated from automatic speech recognition (ASR). Nowadays, ASR combined with speaker diarization has shown immense use in many tasks, ranging from analyzing meeting transcription to media indexing. In this tutorial, we demonstrate how we can get ASR transcriptions combined with speaker labels. Since we don't include a detailed process of getting ASR results or diarization results, please refer to the following links for more in-depth description.If you need detailed understanding of transcribing words with ASR, refer to this [ASR Tutorial](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/asr/ASR_with_NeMo.ipynb) tutorial.For detailed parameter setting and execution of speaker diarization, refer to this [Diarization Inference](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb) tutorial.An example script that runs ASR and speaker diarization together can be found at [ASR with Diarization](https://github.com/NVIDIA/NeMo/blob/main/examples/speaker_tasks/diarization/offline_diarization_with_asr.py). Speaker diarization in ASR pipelineSpeaker diarization results in ASR pipeline should align well with ASR output. Thus, we use ASR output to create Voice Activity Detection (VAD) timestamps to obtain segments we want to diarize. The segments we obtain from the VAD timestamps are further segmented into sub-segments in the speaker diarization step. Finally, after obtaining the speaker labels from speaker diarization, we match the decoded words with speaker labels to generate a transcript with speaker labels. ASR → VAD timestamps and decoded words → speaker diarization → speaker label matching Import librariesLet's first import nemo asr and other libraries for visualization purposes.
###Code
import nemo.collections.asr as nemo_asr
import numpy as np
from IPython.display import Audio, display
import librosa
import os
import wget
import matplotlib.pyplot as plt
import nemo
import glob
import pprint
pp = pprint.PrettyPrinter(indent=4)
###Output
_____no_output_____
###Markdown
We demonstrate this tutorial using a merged AN4 audioclip. The merged audioclip contains the speech of two speakers (male and female) reading dates in different formats. Run the following script to download the audioclip and play it.
###Code
ROOT = os.getcwd()
data_dir = os.path.join(ROOT,'data')
os.makedirs(data_dir, exist_ok=True)
an4_audio_url = "https://nemo-public.s3.us-east-2.amazonaws.com/an4_diarize_test.wav"
if not os.path.exists(os.path.join(data_dir,'an4_diarize_test.wav')):
AUDIO_FILENAME = wget.download(an4_audio_url, data_dir)
else:
AUDIO_FILENAME = os.path.join(data_dir,'an4_diarize_test.wav')
audio_file_list = glob.glob(f"{data_dir}/*.wav")
print("Input audio file list: \n", audio_file_list)
signal, sample_rate = librosa.load(AUDIO_FILENAME, sr=None)
display(Audio(signal,rate=sample_rate))
###Output
_____no_output_____
###Markdown
`display_waveform()` and `get_color()` functions are defined for displaying the waveform with diarization results.
###Code
def display_waveform(signal,text='Audio',overlay_color=[]):
fig,ax = plt.subplots(1,1)
fig.set_figwidth(20)
fig.set_figheight(2)
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c='k')
if len(overlay_color):
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c=overlay_color)
fig.suptitle(text, fontsize=16)
plt.xlabel('time (secs)', fontsize=18)
plt.ylabel('signal strength', fontsize=14);
plt.axis([0,len(signal),-0.5,+0.5])
time_axis,_ = plt.xticks();
plt.xticks(time_axis[:-1],time_axis[:-1]/sample_rate);
COLORS="b g c m y".split()
def get_color(signal,speech_labels,sample_rate=16000):
c=np.array(['k']*len(signal))
for time_stamp in speech_labels:
start,end,label=time_stamp.split()
        start, end = int(float(start) * sample_rate), int(float(end) * sample_rate)
if label == "speech":
code = 'red'
else:
code = COLORS[int(label.split('_')[-1])]
c[start:end]=code
return c
###Output
_____no_output_____
###Markdown
Using the above function, we can display the waveform of the example audio clip.
###Code
display_waveform(signal)
###Output
_____no_output_____
###Markdown
Parameter setting for ASR and diarizationFirst, we need to setup the following parameters for ASR and diarization. We start our demonstration by first transcribing the audio recording using our pretrained ASR model `QuartzNet15x5Base-En` and use the CTC output probabilities to get timestamps for the spoken words. We then use these timestamps to get speaker label information using the speaker diarizer model.
###Code
from omegaconf import OmegaConf
import shutil
CONFIG_URL = "https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/speaker_tasks/diarization/conf/offline_diarization_with_asr.yaml"
if not os.path.exists(os.path.join(data_dir,'offline_diarization_with_asr.yaml')):
CONFIG = wget.download(CONFIG_URL, data_dir)
else:
CONFIG = os.path.join(data_dir,'offline_diarization_with_asr.yaml')
cfg = OmegaConf.load(CONFIG)
print(OmegaConf.to_yaml(cfg))
###Output
_____no_output_____
###Markdown
Speaker Diarization scripts commonly expects following arguments:1. manifest_filepath : Path to manifest file containing json lines of format: `{"audio_filepath": "/path/to/audio_file", "offset": 0, "duration": null, "label": "infer", "text": "-", "num_speakers": null, "rttm_filepath": "/path/to/rttm/file", "uem_filepath"="/path/to/uem/filepath"}`2. out_dir : directory where outputs and intermediate files are stored. 3. oracle_vad: If this is true then we extract speech activity labels from rttm files, if False then either 4. vad.model_path or external_manifestpath containing speech activity labels has to be passed. Mandatory fields are `audio_filepath`, `offset`, `duration`, `label` and `text`. For the rest if you would like to evaluate with a known number of speakers pass the value else `null`. If you would like to score the system with known rttms then that should be passed as well, else `null`. uem file is used to score only part of your audio for evaluation purposes, hence pass if you would like to evaluate on it else `null`.**Note:** we expect audio and corresponding RTTM to have **same base name** and the name should be **unique**. For example: if audio file name is **test_an4**.wav, if provided we expect corresponding rttm file name to be **test_an4**.rttm (note the matching **test_an4** base name) Lets create a manifest file with the an4 audio and rttm available. If you have more than one file you may also use the script `NeMo/scripts/speaker_tasks/pathfiles_to_diarize_manifest.py` to generate a manifest file from a list of audio files. In addition, you can optionally include rttm files to evaluate the diarization results.
###Code
# Create a manifest file for input with below format.
# {"audio_filepath": "/path/to/audio_file", "offset": 0, "duration": null, "label": "infer", "text": "-",
# "num_speakers": null, "rttm_filepath": "/path/to/rttm/file", "uem_filepath"="/path/to/uem/filepath"}
import json
meta = {
'audio_filepath': AUDIO_FILENAME,
'offset': 0,
'duration':None,
'label': 'infer',
'text': '-',
'num_speakers': 2,
'rttm_filepath': None,
'uem_filepath' : None
}
with open(os.path.join(data_dir,'input_manifest.json'),'w') as fp:
json.dump(meta,fp)
fp.write('\n')
cfg.diarizer.manifest_filepath = os.path.join(data_dir,'input_manifest.json')
!cat {cfg.diarizer.manifest_filepath}
###Output
_____no_output_____
###Markdown
Let's set the parameters required for diarization. In this tutorial, we obtain voice activity labels from ASR, which is set through parameter `cfg.diarizer.asr.parameters.asr_based_vad`.
###Code
pretrained_speaker_model='titanet_large'
cfg.diarizer.manifest_filepath = cfg.diarizer.manifest_filepath
cfg.diarizer.out_dir = data_dir #Directory to store intermediate files and prediction outputs
cfg.diarizer.speaker_embeddings.model_path = pretrained_speaker_model
cfg.diarizer.speaker_embeddings.parameters.window_length_in_sec = 1.5
cfg.diarizer.speaker_embeddings.parameters.shift_length_in_sec = 0.75
cfg.diarizer.clustering.parameters.oracle_num_speakers=True
# Using VAD generated from ASR timestamps
cfg.diarizer.asr.model_path = 'QuartzNet15x5Base-En'
cfg.diarizer.oracle_vad = False # ----> Not using oracle VAD
cfg.diarizer.asr.parameters.asr_based_vad = True
cfg.diarizer.asr.parameters.threshold=100 # ASR based VAD threshold: If 100, all silences under 1 sec are ignored.
cfg.diarizer.asr.parameters.decoder_delay_in_sec=0.2 # Decoder delay is compensated for 0.2 sec
###Output
_____no_output_____
###Markdown
Run ASR and get word timestampsBefore we run speaker diarization, we should run ASR and get the ASR output to generate decoded words and timestamps for those words. Let's import `ASR_TIMESTAMPS` class and create `asr_ts_decoder` instance that returns an ASR model. Using this ASR model, the following two variables are obtained from `asr_ts_decoder.run_ASR()` function. - word_hyp Dict[str, List[str]]: contains the sequence of words.- word_ts_hyp Dict[str, List[int]]: contains frame level index of the start and the end of each word.
###Code
from nemo.collections.asr.parts.utils.decoder_timestamps_utils import ASR_TIMESTAMPS
asr_ts_decoder = ASR_TIMESTAMPS(**cfg.diarizer)
asr_model = asr_ts_decoder.set_asr_model()
word_hyp, word_ts_hyp = asr_ts_decoder.run_ASR(asr_model)
print("Decoded word output dictionary: \n", word_hyp['an4_diarize_test'])
print("Word-level timestamps dictionary: \n", word_ts_hyp['an4_diarize_test'])
###Output
_____no_output_____
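###Markdown
As a quick consistency check (a minimal sketch), the number of decoded words should match the number of word-level timestamp entries returned above.
###Code
# Minimal sketch: the two lists returned by run_ASR() should have the same length.
n_words = len(word_hyp['an4_diarize_test'])
n_stamps = len(word_ts_hyp['an4_diarize_test'])
print(f"{n_words} words, {n_stamps} timestamp entries, lengths match: {n_words == n_stamps}")
###Output
_____no_output_____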
###Markdown
Let's create an instance `asr_diar_offline` from ASR_DIAR_OFFLINE class, which matches diarization results with ASR outputs. We pass ``cfg.diarizer`` to setup the parameters for both ASR and diarization. We also set `word_ts_anchor_offset` variable that determines the anchor position of each word. Here, we use the default value from `asr_ts_decoder` instance.
###Code
from nemo.collections.asr.parts.utils.diarization_utils import ASR_DIAR_OFFLINE
asr_diar_offline = ASR_DIAR_OFFLINE(**cfg.diarizer)
asr_diar_offline.word_ts_anchor_offset = asr_ts_decoder.word_ts_anchor_offset
###Output
_____no_output_____
###Markdown
The `asr_diar_offline` instance is now ready. As a next step, we run diarization. Run diarization with the extracted word timestamps: now that all the components for diarization are ready, let's run diarization by calling the `run_diarization()` function. `run_diarization()` returns two variables: `diar_hyp` and `diar_score`. `diar_hyp` is the diarization inference result, written in `[start time] [end time] [speaker]` format. `diar_score` contains `None` since we did not provide `rttm_filepath` in the input manifest file.
###Code
diar_hyp, diar_score = asr_diar_offline.run_diarization(cfg, word_ts_hyp)
print("Diarization hypothesis output: \n", diar_hyp['an4_diarize_test'])
###Output
_____no_output_____
###Markdown
`run_diarization()` function also creates `an4_diarize_test.rttm` file. Let's check what is written in this `rttm` file.
###Code
def read_file(path_to_file):
with open(path_to_file) as f:
contents = f.read().splitlines()
return contents
predicted_speaker_label_rttm_path = f"{data_dir}/pred_rttms/an4_diarize_test.rttm"
pred_rttm = read_file(predicted_speaker_label_rttm_path)
pp.pprint(pred_rttm)
from nemo.collections.asr.parts.utils.speaker_utils import rttm_to_labels
pred_labels = rttm_to_labels(predicted_speaker_label_rttm_path)
color = get_color(signal, pred_labels)
display_waveform(signal,'Audio with Speaker Labels', color)
display(Audio(signal,rate=16000))
###Output
_____no_output_____
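###Markdown
Using the `start end speaker` label strings produced by `rttm_to_labels()` above, we can also summarize how long each predicted speaker talks (a minimal sketch).
###Code
# Minimal sketch: total speaking time per predicted speaker from the label strings.
from collections import defaultdict
speaking_time = defaultdict(float)
for label in pred_labels:
    start, end, speaker = label.split()
    speaking_time[speaker] += float(end) - float(start)
for speaker, seconds in speaking_time.items():
    print(f"{speaker}: {seconds:.2f} sec")
###Output
_____no_output_____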
###Markdown
Check the speaker-labeled ASR transcription output. Now that we've run ASR and diarization, let's match the diarization result with the ASR result and get the final output. The `get_transcript_with_speaker_labels()` function in `asr_diar_offline` matches the diarization output `diar_hyp` with `word_hyp` using the timestamp information from `word_ts_hyp`.
###Code
asr_diar_offline.get_transcript_with_speaker_labels(diar_hyp, word_hyp, word_ts_hyp)
###Output
_____no_output_____
###Markdown
After running `get_transcript_with_speaker_labels()` function, the transcription output will be located in `./pred_rttms` folder, which shows **start time to end time of the utterance, speaker ID, and words spoken** during the notified time.
###Code
transcription_path_to_file = f"{data_dir}/pred_rttms/an4_diarize_test.txt"
transcript = read_file(transcription_path_to_file)
pp.pprint(transcript)
###Output
_____no_output_____
###Markdown
Another output is the transcription in JSON format, which is saved in `./pred_rttms/an4_diarize_test.json`. The JSON output includes information such as **the transcription, the estimated number of speakers (a variable named `speaker_count`), the start and end time of each word and, most importantly, the speaker label for each word.**
###Code
transcription_path_to_file = f"{data_dir}/pred_rttms/an4_diarize_test.json"
json_contents = read_file(transcription_path_to_file)
pp.pprint(json_contents)
###Output
_____no_output_____
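###Markdown
If you want to post-process this output programmatically, you can load it with the `json` module instead of reading raw lines (a small sketch; it assumes the file holds a single JSON object, and the exact field names may differ between NeMo versions):
###Code
import json

with open(transcription_path_to_file) as f:
    json_result = json.load(f)
print(type(json_result))
print(list(json_result.keys()) if isinstance(json_result, dict) else len(json_result))
###Output
_____no_output_____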
###Markdown
Optional Features for ASR with Speaker Diarization Beam search decoderA beam-search decoder can be applied to CTC-based ASR models. To use this feature, [pyctcdecode](https://github.com/kensho-technologies/pyctcdecode) should be installed. [pyctcdecode](https://github.com/kensho-technologies/pyctcdecode) supports word-timestamp generation and can therefore be applied to speaker diarization. pyctcdecode also requires [KenLM](https://github.com/kpu/kenlm), which is recommended to be installed from PyPI. Install pyctcdecode in your environment with the following commands:
###Code
!pip install pyctcdecode
!pip install https://github.com/kpu/kenlm/archive/master.zip
###Output
_____no_output_____
###Markdown
You can download publicly available language models (`.arpa` files) at [KALDI Tedlium Language Models](https://kaldi-asr.org/models/m5). Download [4-gram Big ARPA](https://kaldi-asr.org/models/5/4gram_big.arpa.gz) and provide the model path. Let's download the language model file to the `data_dir` folder.
###Code
import gzip
import shutil
def gunzip(file_path,output_path):
with gzip.open(file_path,"rb") as f_in, open(output_path,"wb") as f_out:
shutil.copyfileobj(f_in, f_out)
f_in.close()
f_out.close()
ARPA_URL = 'https://kaldi-asr.org/models/5/4gram_big.arpa.gz'
f = wget.download(ARPA_URL, data_dir)
gunzip(f,f.replace(".gz",""))
###Output
_____no_output_____
###Markdown
Provide the downloaded arpa language model file to `cfg.diarizer`.
###Code
arpa_model_path = os.path.join(data_dir, '4gram_big.arpa')
cfg.diarizer.asr.ctc_decoder_parameters.pretrained_language_model = arpa_model_path
###Output
_____no_output_____
###Markdown
Create a new `asr_ts_decoder` instance with the updated `cfg.diarizer`. The decoder will now launch pyctcdecode for decoding words and their timestamps.
###Code
import importlib
import nemo.collections.asr.parts.utils.decoder_timestamps_utils as decoder_timestamps_utils
importlib.reload(decoder_timestamps_utils) # This module should be reloaded after you install pyctcdecode.
asr_ts_decoder = ASR_TIMESTAMPS(**cfg.diarizer)
asr_model = asr_ts_decoder.set_asr_model()
word_hyp, word_ts_hyp = asr_ts_decoder.run_ASR(asr_model)
print("Decoded word output dictionary: \n", word_hyp['an4_diarize_test'])
print("Word-level timestamps dictionary: \n", word_ts_hyp['an4_diarize_test'])
###Output
_____no_output_____
###Markdown
Realign Words with a Language Model (Experimental) The diarization result combined with the ASR transcript can be enhanced by applying a language model: the mapping between speaker labels and words is realigned by scoring the words around the boundary between two hypothetical sentences spoken by different speakers. For example, consider the k-th word `but`: hyp_former places the sentence boundary before `but` ("since i think like tuesday" | "but he's coming back to albuquerque"), while hyp_latter places it after `but` ("since i think like tuesday but" | "he's coming back to albuquerque"). The joint probabilities of the words in the two sentences are computed for both hypotheses. In this example, `hyp_former` is likely to get a higher score, and thus the word `but` will be assigned to the second speaker.To use this feature, the python package [arpa](https://pypi.org/project/arpa/) should be installed.
###Code
!pip install arpa
###Output
_____no_output_____
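###Markdown
To get a feel for the realignment idea described above, you can score the two boundary hypotheses with the downloaded ARPA model yourself (a minimal sketch, assuming the `arpa` package's `loadf`/`log_s` API; loading the 4-gram model is slow, and every word must be in the LM vocabulary):
###Code
import arpa

lm = arpa.loadf(os.path.join(data_dir, '4gram_big.arpa'))[0]
# boundary before "but": the word goes to the second speaker
hyp_former = lm.log_s("since i think like tuesday") + lm.log_s("but he's coming back to albuquerque")
# boundary after "but": the word stays with the first speaker
hyp_latter = lm.log_s("since i think like tuesday but") + lm.log_s("he's coming back to albuquerque")
print(hyp_former, hyp_latter)
###Output
_____no_output_____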
###Markdown
`diarizer.asr.realigning_lm_parameters.logprob_diff_threshold` can be modified to optimize the diarization performance (the default value is 1.2). It is a threshold on the gap between the log-probabilities of the two hypotheses; the lower the threshold, the more changes you can expect in the output transcript. The `arpa` package uses KenLM language models, as pyctcdecode does. You can download the publicly available [4-gram Big ARPA](https://kaldi-asr.org/models/5/4gram_big.arpa.gz) model and provide the model path to the hydra configuration as follows.
###Code
arpa_model_path = os.path.join(data_dir, '4gram_big.arpa')
cfg.diarizer.asr.realigning_lm_parameters.arpa_language_model = arpa_model_path
cfg.diarizer.asr.realigning_lm_parameters.logprob_diff_threshold = 1.2
import importlib
import nemo.collections.asr.parts.utils.diarization_utils as diarization_utils
importlib.reload(diarization_utils) # This module should be reloaded after you install arpa.
# Create a new instance with realigning language model
asr_diar_offline = ASR_DIAR_OFFLINE(**cfg.diarizer)
asr_diar_offline.word_ts_anchor_offset = asr_ts_decoder.word_ts_anchor_offset
###Output
_____no_output_____
###Markdown
Now that the language model for realigning is set up, you can run `get_transcript_with_speaker_labels()` to get the results with realigning.
###Code
asr_diar_offline.get_transcript_with_speaker_labels(diar_hyp, word_hyp, word_ts_hyp)
transcription_path_to_file = f"{data_dir}/pred_rttms/an4_diarize_test.txt"
transcript = read_file(transcription_path_to_file)
pp.pprint(transcript)
###Output
_____no_output_____
###Markdown
Automatic Speech Recognition with Speaker Diarization
###Code
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# If you're using Google Colab and not running locally, run this cell.
## Install dependencies
!pip install wget
!apt-get install sox libsndfile1 ffmpeg
!pip install unidecode
# ## Install NeMo
BRANCH = 'main'
!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]
## Install TorchAudio
!pip install torchaudio -f https://download.pytorch.org/whl/torch_stable.html
###Output
_____no_output_____
###Markdown
Introduction Speaker diarization lets us figure out "who spoke when" in a transcription. Without speaker diarization, we cannot distinguish the speakers in the transcript generated by automatic speech recognition (ASR). Nowadays, ASR combined with speaker diarization has shown immense use in many tasks, ranging from analyzing meeting transcriptions to media indexing. In this tutorial, we demonstrate how to get ASR transcriptions combined with speaker labels. Since we don't cover the detailed process of obtaining the ASR or diarization results themselves, please refer to the following links for a more in-depth description.If you need a detailed understanding of transcribing words with ASR, refer to the [ASR Tutorial](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/asr/ASR_with_NeMo.ipynb).For detailed parameter settings and execution of speaker diarization, refer to the [Diarization Inference](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb) tutorial.An example script that runs ASR and speaker diarization together can be found at [ASR with Diarization](https://github.com/NVIDIA/NeMo/blob/main/examples/speaker_tasks/diarization/asr_with_diarization.py). Speaker diarization in ASR pipelineThe speaker diarization result in an ASR pipeline should align well with the ASR output. Thus, we use the ASR output to create Voice Activity Detection (VAD) timestamps and obtain the segments we want to diarize. The segments obtained from the VAD timestamps are further divided into sub-segments in the speaker diarization step. Finally, after obtaining the speaker labels from speaker diarization, we match the decoded words with the speaker labels to generate a transcript with speaker labels. ASR → VAD timestamps and decoded words → speaker diarization → speaker label matching Import librariesLet's first import nemo asr and other libraries for visualization purposes.
###Code
import nemo.collections.asr as nemo_asr
import numpy as np
from IPython.display import Audio, display
import librosa
import os
import wget
import matplotlib.pyplot as plt
import nemo
from nemo.collections.asr.parts.utils.diarization_utils import ASR_DIAR_OFFLINE
import glob
import pprint
pp = pprint.PrettyPrinter(indent=4)
###Output
_____no_output_____
###Markdown
We demonstrate this tutorial using a merged AN4 audioclip. The merged audioclip contains the speech of two speakers (male and female) reading dates in different formats. Run the following script to download the audioclip and play it.
###Code
ROOT = os.getcwd()
data_dir = os.path.join(ROOT,'data')
os.makedirs(data_dir, exist_ok=True)
an4_audio_url = "https://nemo-public.s3.us-east-2.amazonaws.com/an4_diarize_test.wav"
if not os.path.exists(os.path.join(data_dir,'an4_diarize_test.wav')):
AUDIO_FILENAME = wget.download(an4_audio_url, data_dir)
else:
AUDIO_FILENAME = os.path.join(data_dir,'an4_diarize_test.wav')
audio_file_list = glob.glob(f"{data_dir}/*.wav")
print("Input audio file list: \n", audio_file_list)
signal, sample_rate = librosa.load(AUDIO_FILENAME, sr=None)
display(Audio(signal,rate=sample_rate))
###Output
_____no_output_____
###Markdown
`display_waveform()` and `get_color()` functions are defined for displaying the waveform with diarization results.
###Code
def display_waveform(signal,text='Audio',overlay_color=[]):
fig,ax = plt.subplots(1,1)
fig.set_figwidth(20)
fig.set_figheight(2)
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c='k')
if len(overlay_color):
plt.scatter(np.arange(len(signal)),signal,s=1,marker='o',c=overlay_color)
fig.suptitle(text, fontsize=16)
plt.xlabel('time (secs)', fontsize=18)
plt.ylabel('signal strength', fontsize=14);
plt.axis([0,len(signal),-0.5,+0.5])
time_axis,_ = plt.xticks();
plt.xticks(time_axis[:-1],time_axis[:-1]/sample_rate);
COLORS="b g c m y".split()
def get_color(signal,speech_labels,sample_rate=16000):
c=np.array(['k']*len(signal))
for time_stamp in speech_labels:
start,end,label=time_stamp.split()
start,end = int(float(start)*16000),int(float(end)*16000),
if label == "speech":
code = 'red'
else:
code = COLORS[int(label.split('_')[-1])]
c[start:end]=code
return c
###Output
_____no_output_____
###Markdown
Using the above function, we can display the waveform of the example audio clip.
###Code
display_waveform(signal)
###Output
_____no_output_____
###Markdown
Parameter setting for ASR and diarizationFirst, we need to set up the following parameters for ASR and diarization. We start our demonstration by transcribing the audio recording using our pretrained ASR model `QuartzNet15x5Base-En` and using the CTC output probabilities to get timestamps for the spoken words. We then use these timestamps to get speaker label information from the speaker diarizer model.
###Code
from omegaconf import OmegaConf
import shutil
CONFIG_URL = "https://raw.githubusercontent.com/NVIDIA/NeMo/modify_speaker_input/examples/speaker_tasks/diarization/conf/offline_diarization_with_asr.yaml"
if not os.path.exists(os.path.join(data_dir,'offline_diarization_with_asr.yaml')):
CONFIG = wget.download(CONFIG_URL, data_dir)
else:
CONFIG = os.path.join(data_dir,'offline_diarization_with_asr.yaml')
cfg = OmegaConf.load(CONFIG)
print(OmegaConf.to_yaml(cfg))
###Output
_____no_output_____
###Markdown
Speaker Diarization scripts commonly expect the following arguments:1. manifest_filepath : Path to a manifest file containing json lines of the format: {"audio_filepath": "/path/to/audio_file", "offset": 0, "duration": null, "label": "infer", "text": "-", "num_speakers": null, "rttm_filepath": "/path/to/rttm/file", "uem_filepath": "/path/to/uem/filepath"}2. out_dir : directory where outputs and intermediate files are stored. 3. oracle_vad: If this is true, we extract speech activity labels from rttm files; if False, then either 4. vad.model_path or an external manifest path containing speech activity labels has to be passed. Mandatory fields are audio_filepath, offset, duration, label and text. For the rest, if you would like to evaluate with a known number of speakers, pass the value, else `null`. If you would like to score the system against known rttms, pass those as well, else `null`. The uem file is used to score only part of your audio for evaluation purposes, so pass it if you would like to evaluate on it, else `null`.**Note:** we expect the audio and the corresponding RTTM to have the **same base name**, and the name should be **unique**. For example: if the audio file name is **test_an4**.wav, we expect the corresponding rttm file name to be **test_an4**.rttm (note the matching **test_an4** base name). Let's create a manifest with the an4 audio and rttm available. If you have more than one file, you may also use the script `NeMo/scripts/speaker_tasks/rttm_to_manifest.py` to generate a manifest file from a list of audio files and, optionally, rttm files.
###Code
# Create a manifest for input with below format.
# {"audio_filepath": "/path/to/audio_file", "offset": 0, "duration": null, "label": "infer", "text": "-",
# "num_speakers": null, "rttm_filepath": "/path/to/rttm/file", "uem_filepath"="/path/to/uem/filepath"}
import json
meta = {
'audio_filepath': AUDIO_FILENAME,
'offset': 0,
'duration':None,
'label': 'infer',
'text': '-',
'num_speakers': 2,
'rttm_filepath': None,
'uem_filepath' : None
}
with open(os.path.join(data_dir,'input_manifest.json'),'w') as fp:
json.dump(meta,fp)
fp.write('\n')
cfg.diarizer.manifest_filepath = os.path.join(data_dir,'input_manifest.json')
!cat {cfg.diarizer.manifest_filepath}
###Output
_____no_output_____
###Markdown
Set the parameters required for diarization. Here we get voice activity labels from ASR, which is enabled through the parameter `cfg.diarizer.asr.parameters.asr_based_vad`.
###Code
pretrained_speaker_model='ecapa_tdnn'
cfg.diarizer.manifest_filepath = cfg.diarizer.manifest_filepath
cfg.diarizer.out_dir = data_dir #Directory to store intermediate files and prediction outputs
cfg.diarizer.speaker_embeddings.model_path = pretrained_speaker_model
cfg.diarizer.speaker_embeddings.parameters.window_length_in_sec = 1.5
cfg.diarizer.speaker_embeddings.parameters.shift_length_in_sec = 0.75
cfg.diarizer.clustering.parameters.oracle_num_speakers=True
# USE VAD generated from ASR timestamps
cfg.diarizer.asr.model_path = 'QuartzNet15x5Base-En'
cfg.diarizer.oracle_vad = False # ----> ORACLE VAD
cfg.diarizer.asr.parameters.asr_based_vad = True
cfg.diarizer.asr.parameters.threshold=300
###Output
_____no_output_____
###Markdown
Let's create an instance of the ASR_DIAR_OFFLINE class. We pass the ASR parameters from the config to set up both ASR and diarization.
###Code
from nemo.collections.asr.parts.utils.speaker_utils import audio_rttm_map
asr_diar_offline = ASR_DIAR_OFFLINE(**cfg.diarizer.asr.parameters)
asr_diar_offline.root_path = cfg.diarizer.out_dir
AUDIO_RTTM_MAP = audio_rttm_map(cfg.diarizer.manifest_filepath)
asr_diar_offline.AUDIO_RTTM_MAP = AUDIO_RTTM_MAP
asr_model = asr_diar_offline.set_asr_model(cfg.diarizer.asr.model_path)
###Output
_____no_output_____
###Markdown
Run ASR and get word timestampsBefore we run speaker diarization, we should run ASR and use its output to generate the decoded words and timestamps for those words. The following two variables are obtained from the `run_ASR()` function: - words List[str]: contains the sequence of decoded words.- word_ts List[int]: contains the frame-level indices of the start and the end of each word.
###Code
word_list, word_ts_list = asr_diar_offline.run_ASR(asr_model)
print("Decoded word output: \n", word_list[0])
print("Word-level timestamps \n", word_ts_list[0])
###Output
_____no_output_____
###Markdown
Run diarization with extracted word timestampsThe ASR-based VAD output (*.rttm format) needs to be converted into a VAD manifest (*.json format) file before diarization; this conversion of the rttm files into a manifest file is handled for us as part of the diarization step. Now that all the components for diarization are ready, let's run diarization by calling the `run_diarization()` function.
###Code
score = asr_diar_offline.run_diarization(cfg, word_ts_list)
###Output
_____no_output_____
###Markdown
The `run_diarization()` function creates an `an4_diarize_test.rttm` file. Let's see what is written in this `rttm` file.
###Code
def read_file(path_to_file):
with open(path_to_file) as f:
contents = f.read().splitlines()
return contents
predicted_speaker_label_rttm_path = f"{data_dir}/pred_rttms/an4_diarize_test.rttm"
pred_rttm = read_file(predicted_speaker_label_rttm_path)
pp.pprint(pred_rttm)
from nemo.collections.asr.parts.utils.speaker_utils import rttm_to_labels
pred_labels = rttm_to_labels(predicted_speaker_label_rttm_path)
color = get_color(signal, pred_labels)
display_waveform(signal,'Audio with Speaker Labels', color)
display(Audio(signal,rate=16000))
###Output
_____no_output_____
###Markdown
Check the transcription outputNow that we have run both ASR and diarization, let's match the diarization result with the ASR result and get the final output. The `write_json_and_transcript()` function matches the diarization output `diar_labels` with `word_list` using the timestamp information in `word_ts_list`.
###Code
asr_output_dict = asr_diar_offline.write_json_and_transcript(word_list, word_ts_list)
###Output
_____no_output_____
###Markdown
After running the `write_json_and_transcript()` function, the transcription output will be located in the `./pred_rttms` folder; it shows the **start time and end time of each utterance, the speaker ID, and the words spoken** during that time.
###Code
transcription_path_to_file = f"{data_dir}/pred_rttms/an4_diarize_test.txt"
transcript = read_file(transcription_path_to_file)
pp.pprint(transcript)
###Output
_____no_output_____
###Markdown
Another output is the transcription in JSON format, which is saved in `./pred_rttms/an4_diarize_test.json`. The JSON output includes information such as **the transcription, the estimated number of speakers (a variable named `speaker_count`), the start and end time of each word and, most importantly, the speaker label for each word.**
###Code
transcription_path_to_file = f"{data_dir}/pred_rttms/an4_diarize_test.json"
json_contents = read_file(transcription_path_to_file)
pp.pprint(json_contents)
###Output
_____no_output_____ |
Cluster/kmeans-kmedoids/kmeans-kmeroids-iris.ipynb | ###Markdown
todo - add cm- add xgboost to custom.json- add cleaning- add scaling- add PCA, t-SNE- add other datasets (iris, moon, blobs)
###Code
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_iris
from sklearn.datasets import make_blobs
from sklearn.datasets import make_moons
iris = load_iris()
data_X, data_y = iris.data,iris.target
plt.scatter(data_X[:, 0], data_X[:, 1], s=50);
%load_ext autoreload
%autoreload 2
packages = !conda list
packages
!pwd
###Output
/docker/photon_experiments
###Markdown
Output registry
###Code
from __future__ import print_function
import sys, os
old__file__ = !pwd
__file__ = !cd ../photon ;pwd
#__file__ = !pwd
__file__ = __file__[0]
__file__
sys.path.append(__file__)
print(sys.path)
os.chdir(old__file__[0])
!pwd
old__file__[0]
import seaborn as sns; sns.set() # for plot styling
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from photonai.base import Hyperpipe, PipelineElement, Preprocessing, OutputSettings
from photonai.optimization import FloatRange, Categorical, IntegerRange
from photonai.base.photon_elements import PhotonRegistry
#from photonai.base.registry.registry import PhotonRegistry
#import pixiedust
def results_to_df(results):
ll = []
for obj in results:
ll.append([obj.operation,
obj.value,
obj.metric_name])
_results=pd.DataFrame(ll).pivot(index=2, columns=0, values=1)
_results.columns=['Mean','STD']
return(_results)
__file__ = "exp1.log"
base_folder = os.path.dirname(os.path.abspath(''))
custom_elements_folder = os.path.join(base_folder, 'custom_elements')
custom_elements_folder
registry = PhotonRegistry(custom_elements_folder=custom_elements_folder)
registry.activate()
registry.PHOTON_REGISTRIES,PhotonRegistry.PHOTON_REGISTRIES
registry.activate()
registry.list_available_elements()
# take off last name
###Output
PhotonCore
ARDRegression sklearn.linear_model.ARDRegression Estimator
AdaBoostClassifier sklearn.ensemble.AdaBoostClassifier Estimator
AdaBoostRegressor sklearn.ensemble.AdaBoostRegressor Estimator
BaggingClassifier sklearn.ensemble.BaggingClassifier Estimator
BaggingRegressor sklearn.ensemble.BaggingRegressor Estimator
BayesianGaussianMixture sklearn.mixture.BayesianGaussianMixture Estimator
BayesianRidge sklearn.linear_model.BayesianRidge Estimator
BernoulliNB sklearn.naive_bayes.BernoulliNB Estimator
BernoulliRBM sklearn.neural_network.BernoulliRBM Estimator
Binarizer sklearn.preprocessing.Binarizer Transformer
CCA sklearn.cross_decomposition.CCA Transformer
ConfounderRemoval photonai.modelwrapper.ConfounderRemoval.ConfounderRemoval Transformer
DecisionTreeClassifier sklearn.tree.DecisionTreeClassifier Estimator
DecisionTreeRegressor sklearn.tree.DecisionTreeRegressor Estimator
DictionaryLearning sklearn.decomposition.DictionaryLearning Transformer
DummyClassifier sklearn.dummy.DummyClassifier Estimator
DummyRegressor sklearn.dummy.DummyRegressor Estimator
ElasticNet sklearn.linear_model.ElasticNet Estimator
ExtraDecisionTreeClassifier sklearn.tree.ExtraDecisionTreeClassifier Estimator
ExtraDecisionTreeRegressor sklearn.tree.ExtraDecisionTreeRegressor Estimator
ExtraTreesClassifier sklearn.ensemble.ExtraTreesClassifier Estimator
ExtraTreesRegressor sklearn.ensemble.ExtraTreesRegressor Estimator
FClassifSelectPercentile photonai.modelwrapper.FeatureSelection.FClassifSelectPercentile Transformer
FRegressionFilterPValue photonai.modelwrapper.FeatureSelection.FRegressionFilterPValue Transformer
FRegressionSelectPercentile photonai.modelwrapper.FeatureSelection.FRegressionSelectPercentile Transformer
FactorAnalysis sklearn.decomposition.FactorAnalysis Transformer
FastICA sklearn.decomposition.FastICA Transformer
FeatureEncoder photonai.modelwrapper.OrdinalEncoder.FeatureEncoder Transformer
FunctionTransformer sklearn.preprocessing.FunctionTransformer Transformer
GaussianMixture sklearn.mixture.GaussianMixture Estimator
GaussianNB sklearn.naive_bayes.GaussianNB Estimator
GaussianProcessClassifier sklearn.gaussian_process.GaussianProcessClassifier Estimator
GaussianProcessRegressor sklearn.gaussian_process.GaussianProcessRegressor Estimator
GenericUnivariateSelect sklearn.feature_selection.GenericUnivariateSelect Transformer
GradientBoostingClassifier sklearn.ensemble.GradientBoostingClassifier Estimator
GradientBoostingRegressor sklearn.ensemble.GradientBoostingRegressor Estimator
HuberRegressor sklearn.linear_model.HuberRegressor Estimator
ImbalancedDataTransformer photonai.modelwrapper.imbalanced_data_transformer.ImbalancedDataTransformer Transformer
IncrementalPCA sklearn.decomposition.IncrementalPCA Transformer
KNeighborsClassifier sklearn.neighbors.KNeighborsClassifier Estimator
KNeighborsRegressor sklearn.neighbors.KNeighborsRegressor Estimator
KerasBaseClassifier photonai.modelwrapper.keras_base_models.KerasBaseClassifier Estimator
KerasBaseRegression photonai.modelwrapper.keras_base_models.KerasBaseRegression Estimator
KerasDnnClassifier photonai.modelwrapper.keras_dnn_classifier.KerasDnnClassifier Estimator
KerasDnnRegressor photonai.modelwrapper.keras_dnn_regressor.KerasDnnRegressor Estimator
KernelCenterer sklearn.preprocessing.KernelCenterer Transformer
KernelPCA sklearn.decomposition.KernelPCA Transformer
KernelRidge sklearn.kernel_ridge.KernelRidge Estimator
LabelEncoder photonai.modelwrapper.LabelEncoder.LabelEncoder Transformer
Lars sklearn.linear_model.Lars Estimator
Lasso sklearn.linear_model.Lasso Estimator
LassoFeatureSelection photonai.modelwrapper.FeatureSelection.LassoFeatureSelection Transformer
LassoLars sklearn.linear_model.LassoLars Estimator
LatentDirichletAllocation sklearn.decomposition.LatentDirichletAllocation Transformer
LinearRegression sklearn.linear_model.LinearRegression Estimator
LinearSVC sklearn.svm.LinearSVC Estimator
LinearSVR sklearn.svm.LinearSVR Estimator
LogisticRegression sklearn.linear_model.LogisticRegression Estimator
MLPClassifier sklearn.neural_network.MLPClassifier Estimator
MLPRegressor sklearn.neural_network.MLPRegressor Estimator
MaxAbsScaler sklearn.preprocessing.MaxAbsScaler Transformer
MinMaxScaler sklearn.preprocessing.MinMaxScaler Transformer
MiniBatchDictionaryLearning sklearn.decomposition.MiniBatchDictionaryLearning Transformer
MiniBatchSparsePCA sklearn.decomposition.MiniBatchSparsePCA Transformer
MultinomialNB sklearn.naive_bayes.MultinomialNB Estimator
NMF sklearn.decomposition.NMF Transformer
NearestCentroid sklearn.neighbors.NearestCentroid Estimator
Normalizer sklearn.preprocessing.Normalizer Transformer
NuSVC sklearn.svm.NuSVC Estimator
NuSVR sklearn.svm.NuSVR Estimator
OneClassSVM sklearn.svm.OneClassSVM Estimator
PCA sklearn.decomposition.PCA Transformer
PLSCanonical sklearn.cross_decomposition.PLSCanonical Transformer
PLSRegression sklearn.cross_decomposition.PLSRegression Transformer
PLSSVD sklearn.cross_decomposition.PLSSVD Transformer
PassiveAggressiveClassifier sklearn.linear_model.PassiveAggressiveClassifier Estimator
PassiveAggressiveRegressor sklearn.linear_model.PassiveAggressiveRegressor Estimator
Perceptron sklearn.linear_model.Perceptron Estimator
PhotonMLPClassifier photonai.modelwrapper.PhotonMLPClassifier.PhotonMLPClassifier Estimator
PhotonOneClassSVM photonai.modelwrapper.PhotonOneClassSVM.PhotonOneClassSVM Estimator
PhotonTestXPredictor photonai.test.processing_tests.results_tests.XPredictor Estimator
PhotonVotingClassifier photonai.modelwrapper.Voting.PhotonVotingClassifier Estimator
PhotonVotingRegressor photonai.modelwrapper.Voting.PhotonVotingRegressor Estimator
PolynomialFeatures sklearn.preprocessing.PolynomialFeatures Transformer
PowerTransformer sklearn.preprocessing.PowerTransformer Transformer
QuantileTransformer sklearn.preprocessing.QuantileTransformer Transformer
RANSACRegressor sklearn.linear_model.RANSACRegressor Estimator
RFE sklearn.feature_selection.RFE Transformer
RFECV sklearn.feature_selection.RFECV Transformer
RadiusNeighborsClassifier sklearn.neighbors.RadiusNeighborsClassifier Estimator
RadiusNeighborsRegressor sklearn.neighbors.RadiusNeighborsRegressor Estimator
RandomForestClassifier sklearn.ensemble.RandomForestClassifier Estimator
RandomForestRegressor sklearn.ensemble.RandomForestRegressor Estimator
RandomTreesEmbedding sklearn.ensemble.RandomTreesEmbedding Transformer
RangeRestrictor photonai.modelwrapper.RangeRestrictor.RangeRestrictor Estimator
Ridge sklearn.linear_model.Ridge Estimator
RidgeClassifier sklearn.linear_model.RidgeClassifier Estimator
RobustScaler sklearn.preprocessing.RobustScaler Transformer
SGDClassifier sklearn.linear_model.SGDClassifier Estimator
SGDRegressor sklearn.linear_model.SGDRegressor Estimator
SVC sklearn.svm.SVC Estimator
SVR sklearn.svm.SVR Estimator
SamplePairingClassification photonai.modelwrapper.SamplePairing.SamplePairingClassification Transformer
SamplePairingRegression photonai.modelwrapper.SamplePairing.SamplePairingRegression Transformer
SelectFdr sklearn.feature_selection.SelectFdr Transformer
SelectFpr sklearn.feature_selection.SelectFpr Transformer
SelectFromModel sklearn.feature_selection.SelectFromModel Transformer
SelectFwe sklearn.feature_selection.SelectFwe Transformer
SelectKBest sklearn.feature_selection.SelectKBest Transformer
SelectPercentile sklearn.feature_selection.SelectPercentile Transformer
SimpleImputer sklearn.impute.SimpleImputer Transformer
SourceSplitter photonai.modelwrapper.source_splitter.SourceSplitter Transformer
SparseCoder sklearn.decomposition.SparseCoder Transformer
SparsePCA sklearn.decomposition.SparsePCA Transformer
StandardScaler sklearn.preprocessing.StandardScaler Transformer
TheilSenRegressor sklearn.linear_model.TheilSenRegressor Estimator
TruncatedSVD sklearn.decomposition.TruncatedSVD Transformer
VarianceThreshold sklearn.feature_selection.VarianceThreshold Transformer
dict_learning sklearn.decomposition.dict_learning Transformer
dict_learning_online sklearn.decomposition.dict_learning_online Transformer
fastica sklearn.decomposition.fastica Transformer
sparse_encode sklearn.decomposition.sparse_encode Transformer
PhotonCluster
KMeans sklearn.cluster.KMeans Estimator
KMedoids sklearn_extra.cluster.KMedoids Estimator
PhotonNeuro
BrainAtlas photonai.neuro.brain_atlas.BrainAtlas Transformer
BrainMask photonai.neuro.brain_atlas.BrainMask Transformer
PatchImages photonai.neuro.nifti_transformations.PatchImages Transformer
ResampleImages photonai.neuro.nifti_transformations.ResampleImages Transformer
SmoothImages photonai.neuro.nifti_transformations.SmoothImages Transformer
###Markdown
KMedoids iris
###Code
registry.info("KMedoids")
#import pixiedust
#%%pixie_debugger
"""
Example script for KMedoids hopt
"""
X, y = data_X, data_y
# DESIGN YOUR PIPELINE
settings = OutputSettings(project_folder='./tmp/')
my_pipe = Hyperpipe('batching',
optimizer='sk_opt',
# optimizer_params={'n_configurations': 25},
metrics=['ARI', 'MI', 'HCV', 'FM'],
best_config_metric='ARI',
outer_cv=KFold(n_splits=5),
inner_cv=KFold(n_splits=10),
verbosity=0,
output_settings=settings)
my_pipe += PipelineElement('KMedoids', hyperparameters={
'n_clusters': IntegerRange(2, 8),
},random_state=777)
# NOW TRAIN YOUR PIPELINE
my_pipe.fit(X, y)
debug = True
lab= my_pipe.predict(X)
colors = ['red','green','blue','purple']
fig = plt.figure(figsize=(8,8))
plt.scatter(X[:, 0], X[:, 1], s=50, c=lab
,cmap=matplotlib.colors.ListedColormap(colors) );
pd.DataFrame(my_pipe.best_config.items(),columns=['n_clusters', 'k'])
train=results_to_df(my_pipe.results.metrics_train)
train
test = results_to_df(my_pipe.results.metrics_test)
test
test-train
###Output
_____no_output_____
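###Markdown
Since the true iris labels are available, a quick cross-tabulation of species versus predicted cluster (a small added sketch) shows how the KMedoids clusters map onto the classes:
###Code
import pandas as pd  # already imported above; repeated so this cell is self-contained

pd.crosstab(pd.Series(y, name='true_label'), pd.Series(lab, name='cluster'))
###Output
_____no_output_____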
###Markdown
Show kmeans iris
###Code
registry.info("KMeans")
#import pixiedust
#%%pixie_debugger
"""
Example script for kmeans hopt
"""
X, y = data_X, data_y
# DESIGN YOUR PIPELINE
settings = OutputSettings(project_folder='./tmp/')
my_pipe = Hyperpipe('batching',
optimizer='sk_opt',
# optimizer_params={'n_configurations': 25},
metrics=['ARI', 'MI', 'HCV', 'FM'],
best_config_metric='ARI',
outer_cv=KFold(n_splits=5),
inner_cv=KFold(n_splits=10),
verbosity=0,
output_settings=settings)
my_pipe += PipelineElement('KMeans', hyperparameters={
'n_clusters': IntegerRange(2, 8),
},random_state=777)
# NOW TRAIN YOUR PIPELINE
my_pipe.fit(X, y)
debug = True
lab= my_pipe.predict(X)
colors = ['red','green','blue','purple']
fig = plt.figure(figsize=(8,8))
plt.scatter(X[:, 0], X[:, 1], s=50, c=lab
,cmap=matplotlib.colors.ListedColormap(colors) );
pd.DataFrame(my_pipe.best_config.items(),columns=['n_clusters', 'k'])
train=results_to_df(my_pipe.results.metrics_train)
train
test = results_to_df(my_pipe.results.metrics_test)
test
test-train
###Output
_____no_output_____ |
Research2Production/Python/08 Long Short-Term Memory.ipynb | ###Markdown
 Recurrent Neural NetworksRecurrent neural networks (RNN) are an extremely powerful tool in deep learning. Unlike traditional feedforward neural networks, RNNs have memory: information fed into them persists, and the network can draw on this state to make inferences about sequences, loosely mirroring how humans process information over time. Long Short-term MemoryLong Short-term Memory (LSTM) is a type of recurrent neural network. Instead of a single layer, LSTM cells generally have four, three of which belong to "gates" -- ways to optionally let information through. The three gates are commonly referred to as the forget, input, and output gates. The forget gate layer is where the model decides what information to keep from prior states. At the input gate layer, the model decides which values to update. Finally, the output gate layer is where the final output of the cell state is decided. Essentially, an LSTM separately decides what to remember and the rate at which it should update. Financial ApplicationsLSTM models have produced strong results when applied to time-series prediction. One of the central challenges with conventional time-series models is that, despite trying to account for trends or other non-stationary elements, it is almost impossible to truly predict an outlier like a recession, flash crash, or liquidity crisis. With their long memory, LSTM models can capture these difficult trends in the data without the degree of overfitting a conventional model would need in order to fit the same data.For a very basic application, we're going to use an LSTM model to predict the price movement of SPY, a non-stationary time series.
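To make the gate description above concrete, here is a minimal NumPy sketch of a single LSTM time step (an illustration of the forget/input/output gates under random weights, not the actual Keras implementation used below):
###Code
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def lstm_step(x_t, h_prev, c_prev, W, U, b):
    # W, U, b stack the weights of the four internal layers: forget, input, candidate, output
    z = W @ x_t + U @ h_prev + b
    f, i, g, o = np.split(z, 4)
    f = sigmoid(f)        # forget gate: what to keep from the previous cell state
    i = sigmoid(i)        # input gate: which values to update
    g = np.tanh(g)        # candidate values
    o = sigmoid(o)        # output gate: what to expose from the new cell state
    c_t = f * c_prev + i * g
    h_t = o * np.tanh(c_t)
    return h_t, c_t

# tiny example with random weights: input size 3, hidden size 2
rng = np.random.default_rng(0)
n_in, n_hid = 3, 2
W = rng.normal(size=(4 * n_hid, n_in))
U = rng.normal(size=(4 * n_hid, n_hid))
b = np.zeros(4 * n_hid)
h, c = lstm_step(rng.normal(size=n_in), np.zeros(n_hid), np.zeros(n_hid), W, U, b)
print(h, c)
###Output
_____no_output_____
###Markdown
With that picture in mind, let's build the actual Keras model for SPY.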
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
# Import keras modules
from keras.layers import LSTM
from keras.layers import Dense
from keras.layers import Dropout
from keras.models import Sequential
qb = QuantBook()
symbol = qb.AddEquity("SPY").Symbol
# Fetch history
history = qb.History([symbol], 1280, Resolution.Daily)
# Fetch price
total_price = history.loc[symbol].close
training_price = history.loc[symbol].close[:1260]
test_price = history.loc[symbol].close[1260:]
# Transform price
price_array = np.array(training_price).reshape((len(training_price), 1))
# Scale data onto [0,1]
scaler = MinMaxScaler(feature_range = (0, 1))
# Transform our data
spy_training_scaled = scaler.fit_transform(price_array)
# Build feauture and label sets (using number of steps 60, batch size 1200, and hidden size 1)
features_set = []
labels = []
for i in range(60, 1260):
features_set.append(spy_training_scaled[i-60:i, 0])
labels.append(spy_training_scaled[i, 0])
features_set, labels = np.array(features_set), np.array(labels)
features_set = np.reshape(features_set, (features_set.shape[0], features_set.shape[1], 1))
features_set.shape
# Build a Sequential keras model
model = Sequential()
# Add our first LSTM layer - 50 nodes
model.add(LSTM(units = 50, return_sequences=True, input_shape=(features_set.shape[1], 1)))
# Add Dropout layer to avoid overfitting
model.add(Dropout(0.2))
# Add additional layers
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50))
model.add(Dropout(0.2))
model.add(Dense(units = 1))
# Compile the model
model.compile(optimizer = 'adam', loss = 'mean_squared_error', metrics=['mae', 'acc'])
# Fit the model to our data, running 50 training epochs
model.fit(features_set, labels, epochs = 50, batch_size = 32)
# Get and transform inputs for testing our predictions
test_inputs = total_price[-80:].values
test_inputs = test_inputs.reshape(-1,1)
test_inputs = scaler.transform(test_inputs)
# Get test features
test_features = []
for i in range(60, 80):
test_features.append(test_inputs[i-60:i, 0])
test_features = np.array(test_features)
test_features = np.reshape(test_features, (test_features.shape[0], test_features.shape[1], 1))
# Make predictions
predictions = model.predict(test_features)
# Transform predictions back to original data-scale
predictions = scaler.inverse_transform(predictions)
# Plot our results!
plt.figure(figsize=(10,6))
plt.plot(test_price.values, color='blue', label='Actual')
plt.plot(predictions , color='red', label='Prediction')
plt.title('Price vs Predicted Price ')
plt.legend()
plt.show()
###Output
_____no_output_____ |
Workshops/04. spark.ipynb | ###Markdown
Workshop 2Learning pysparkGetting familiar with Spark's functions --- Installation1. install docker2. docker pull jupyter/all-spark-notebook3. docker run -d --name notebook -p 10000:8888 -e JUPYTER_ENABLE_LAB=yes -v ~/Development/DockerWorkspace:/home/jovyan/work jupyter/all-spark-notebook --- Importing pyspark
###Code
from pyspark import SparkContext
from pyspark.sql import SparkSession
sc = SparkContext()
spark = SparkSession(sc)
# run this cell only once
###Output
_____no_output_____
###Markdown
--- Checking version
###Code
sc.version
###Output
_____no_output_____
###Markdown
--- Spark's RDD
###Code
a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
rdd1 = sc.parallelize(a)
print("\n", rdd1)
###Output
ParallelCollectionRDD[162] at parallelize at PythonRDD.scala:195
###Markdown
--- first(), collect(), count(), take(), max()
###Code
print("\n", rdd1.first())
print("\n", rdd1.collect())
print("\n", rdd1.count())
print("\n", rdd1.take(2))
print("\n", rdd1.max())
###Output
0
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
10
[0, 1]
9
###Markdown
--- reduce()
###Code
def r(x, y):
return y
print("\n", rdd1.reduce(r))
###Output
9
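###Markdown
Note that `reduce()` expects a commutative and associative binary function, so `r` above (which simply returns its second argument) is not a typical use. A sum is the usual example (a small added cell):
###Code
print("\n", rdd1.reduce(lambda x, y: x + y))
###Output
_____no_output_____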
###Markdown
--- map(), filter()
###Code
rdd2 = rdd1.map(lambda x: x * 2 - 10)
print("\n", rdd2.collect())
rdd3 = rdd1.filter(lambda x: x % 2 == 0)
print("\n", rdd3.collect())
###Output
[-10, -8, -6, -4, -2, 0, 2, 4, 6, 8]
[0, 2, 4, 6, 8]
###Markdown
--- flatMap()
###Code
rdd1 = sc.parallelize([(1, [0, 1, 2, 3]), (4, [6, 2, 1, 4, 3, 6]), (2, [0, 3])])
rdd1 = rdd1.flatMap(lambda x: x[1][:3])
print("\n", rdd1.collect())
###Output
[0, 1, 2, 6, 2, 1, 0, 3]
###Markdown
--- union(), intersection(), distinct()
###Code
rdd1 = sc.parallelize([0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 9])
rdd2 = sc.parallelize([4, 4, 5, 5, 13, 13, 14, 14])
rdd3 = rdd1.union(rdd2)
print("\n", rdd3.collect())
rdd4 = rdd1.intersection(rdd2)
print("\n", rdd4.collect())
print("\n", rdd2.distinct().collect())
###Output
[0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 4, 4, 5, 5, 13, 13, 14, 14]
[4, 5]
[13, 14, 4, 5]
###Markdown
--- sortByKey(), reduceByKey(), groupByKey()
###Code
rdd1 = sc.parallelize([(2, "Pink Orange"), (1, "Green Apple"), (4, "Yellow Banana"), (2, "Red Orange"), (2, "Yellow Orange")])
rdd2 = rdd1.sortByKey(ascending=True)
print("\n", rdd2.collect())
rdd3 = rdd1.reduceByKey(lambda v1, v2: v1 + " " + v2)
print("\n", rdd3.collect())
rdd4 = rdd1.groupByKey()
print("\n", rdd4.collect())
print("\n", rdd4.map(lambda x: (x[0], list(x[1]))).collect())
###Output
[(1, 'Green Apple'), (2, 'Pink Orange'), (2, 'Red Orange'), (2, 'Yellow Orange'), (4, 'Yellow Banana')]
[(1, 'Green Apple'), (2, 'Pink Orange Red Orange Yellow Orange'), (4, 'Yellow Banana')]
[(1, <pyspark.resultiterable.ResultIterable object at 0x7f1d3514d350>), (2, <pyspark.resultiterable.ResultIterable object at 0x7f1d3514d2d0>), (4, <pyspark.resultiterable.ResultIterable object at 0x7f1d3514d450>)]
[(1, ['Green Apple']), (2, ['Pink Orange', 'Red Orange', 'Yellow Orange']), (4, ['Yellow Banana'])]
###Markdown
--- Multiple functions, then collect()
###Code
rdd1 = sc.parallelize(list(range(100)))
rdd1 = rdd1.map(lambda x: x * 2 - 10).filter(lambda x: x % 3).distinct()
print("\n", rdd1.count())
print("\n", rdd1.collect())
###Output
100
[-6, 0, 6, 12, 18, 24, 30, 36, 42, 48, 54, 60, 66, 72, 78, 84, 90, 96, 102, 108, 114, 120, 126, 132, 138, 144, 150, 156, 162, 168, 174, 180, 186, -10, -4, 2, 8, 14, 20, 26, 32, 38, 44, 50, 56, 62, 68, 74, 80, 86, 92, 98, 104, 110, 116, 122, 128, 134, 140, 146, 152, 158, 164, 170, 176, 182, 188, -8, -2, 4, 10, 16, 22, 28, 34, 40, 46, 52, 58, 64, 70, 76, 82, 88, 94, 100, 106, 112, 118, 124, 130, 136, 142, 148, 154, 160, 166, 172, 178, 184]
###Markdown
--- Reading from file
###Code
rdd1 = sc.textFile("text_test.txt")
print("\n", rdd1.collect())
print("\n", rdd1.map(lambda x: x[:2]).flatMap(lambda x: x).reduce(lambda x, y: x + '.' + y))
###Output
h.e.s.e.l.a.b.y
###Markdown
--- Reading from dataframe
###Code
columns = ["firstname", "middlename", "lastname", "year", "gender", "salary"]
data = [
('James', '', 'Smith', '1991-04-01', 'M', 3000),
('Michael', 'Rose', '', '2000-05-19', 'M', 4000),
('Robert', '', 'Williams', '1978-09-05', 'M', 4000),
('Maria', 'Anne', 'Jones', '1967-12-01', 'F', 4000),
('Jen', 'Mary', 'Brown', '1980-02-17', 'F', -1),
]
df = spark.createDataFrame(data=data, schema=columns)
df.show()
###Output
+---------+----------+--------+----------+------+------+
|firstname|middlename|lastname| year|gender|salary|
+---------+----------+--------+----------+------+------+
| James| | Smith|1991-04-01| M| 3000|
| Michael| Rose| |2000-05-19| M| 4000|
| Robert| |Williams|1978-09-05| M| 4000|
| Maria| Anne| Jones|1967-12-01| F| 4000|
| Jen| Mary| Brown|1980-02-17| F| -1|
+---------+----------+--------+----------+------+------+
###Markdown
--- Reading from csv
###Code
df = spark.read.option("header", True).csv("cities.csv")
df.show()
###Output
+-----+-------+-------+-----+-------+-------+-------+-----+------------------+--------+
| LatD| "LatM"| "LatS"| "NS"| "LonD"| "LonM"| "LonS"| "EW"| "City"| "State"|
+-----+-------+-------+-----+-------+-------+-------+-----+------------------+--------+
| 41| 5| 59| "N"| 80| 39| 0| "W"| "Youngstown"| OH|
| 42| 52| 48| "N"| 97| 23| 23| "W"| "Yankton"| SD|
| 46| 35| 59| "N"| 120| 30| 36| "W"| "Yakima"| WA|
| 42| 16| 12| "N"| 71| 48| 0| "W"| "Worcester"| MA|
| 43| 37| 48| "N"| 89| 46| 11| "W"| "Wisconsin Dells"| WI|
| 36| 5| 59| "N"| 80| 15| 0| "W"| "Winston-Salem"| NC|
| 49| 52| 48| "N"| 97| 9| 0| "W"| "Winnipeg"| MB|
| 39| 11| 23| "N"| 78| 9| 36| "W"| "Winchester"| VA|
| 34| 14| 24| "N"| 77| 55| 11| "W"| "Wilmington"| NC|
| 39| 45| 0| "N"| 75| 33| 0| "W"| "Wilmington"| DE|
| 48| 9| 0| "N"| 103| 37| 12| "W"| "Williston"| ND|
| 41| 15| 0| "N"| 77| 0| 0| "W"| "Williamsport"| PA|
| 37| 40| 48| "N"| 82| 16| 47| "W"| "Williamson"| WV|
| 33| 54| 0| "N"| 98| 29| 23| "W"| "Wichita Falls"| TX|
| 37| 41| 23| "N"| 97| 20| 23| "W"| "Wichita"| KS|
| 40| 4| 11| "N"| 80| 43| 12| "W"| "Wheeling"| WV|
| 26| 43| 11| "N"| 80| 3| 0| "W"| "West Palm Beach"| FL|
| 47| 25| 11| "N"| 120| 19| 11| "W"| "Wenatchee"| WA|
| 41| 25| 11| "N"| 122| 23| 23| "W"| "Weed"| CA|
| 31| 13| 11| "N"| 82| 20| 59| "W"| "Waycross"| GA|
+-----+-------+-------+-----+-------+-------+-------+-----+------------------+--------+
only showing top 20 rows
###Markdown
--- Running SQL queries
###Code
df.createOrReplaceTempView("CITY_DATA")
df2 = spark.sql('SELECT * from CITY_DATA')
df2.show()
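# A slightly more involved query (added sketch): the columns were read as strings
# (no schema inference), so cast before comparing numerically.
df3 = spark.sql("SELECT COUNT(*) AS cities_at_or_above_45N FROM CITY_DATA WHERE CAST(LatD AS INT) >= 45")
df3.show()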
###Output
+-----+-------+-------+-----+-------+-------+-------+-----+------------------+--------+
| LatD| "LatM"| "LatS"| "NS"| "LonD"| "LonM"| "LonS"| "EW"| "City"| "State"|
+-----+-------+-------+-----+-------+-------+-------+-----+------------------+--------+
| 41| 5| 59| "N"| 80| 39| 0| "W"| "Youngstown"| OH|
| 42| 52| 48| "N"| 97| 23| 23| "W"| "Yankton"| SD|
| 46| 35| 59| "N"| 120| 30| 36| "W"| "Yakima"| WA|
| 42| 16| 12| "N"| 71| 48| 0| "W"| "Worcester"| MA|
| 43| 37| 48| "N"| 89| 46| 11| "W"| "Wisconsin Dells"| WI|
| 36| 5| 59| "N"| 80| 15| 0| "W"| "Winston-Salem"| NC|
| 49| 52| 48| "N"| 97| 9| 0| "W"| "Winnipeg"| MB|
| 39| 11| 23| "N"| 78| 9| 36| "W"| "Winchester"| VA|
| 34| 14| 24| "N"| 77| 55| 11| "W"| "Wilmington"| NC|
| 39| 45| 0| "N"| 75| 33| 0| "W"| "Wilmington"| DE|
| 48| 9| 0| "N"| 103| 37| 12| "W"| "Williston"| ND|
| 41| 15| 0| "N"| 77| 0| 0| "W"| "Williamsport"| PA|
| 37| 40| 48| "N"| 82| 16| 47| "W"| "Williamson"| WV|
| 33| 54| 0| "N"| 98| 29| 23| "W"| "Wichita Falls"| TX|
| 37| 41| 23| "N"| 97| 20| 23| "W"| "Wichita"| KS|
| 40| 4| 11| "N"| 80| 43| 12| "W"| "Wheeling"| WV|
| 26| 43| 11| "N"| 80| 3| 0| "W"| "West Palm Beach"| FL|
| 47| 25| 11| "N"| 120| 19| 11| "W"| "Wenatchee"| WA|
| 41| 25| 11| "N"| 122| 23| 23| "W"| "Weed"| CA|
| 31| 13| 11| "N"| 82| 20| 59| "W"| "Waycross"| GA|
+-----+-------+-------+-----+-------+-------+-------+-----+------------------+--------+
only showing top 20 rows
|
matplotlib/gallery_jupyter/statistics/hist.ipynb | ###Markdown
HistogramsDemonstrates how to plot histograms with matplotlib.
###Code
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import colors
from matplotlib.ticker import PercentFormatter
# Fixing random state for reproducibility
np.random.seed(19680801)
###Output
_____no_output_____
###Markdown
Generate data and plot a simple histogram-----------------------------------------To generate a 1D histogram we only need a single vector of numbers. For a 2Dhistogram we'll need a second vector. We'll generate both below, and showthe histogram for each vector.
###Code
N_points = 100000
n_bins = 20
# Generate a normal distribution, center at x=0 and y=5
x = np.random.randn(N_points)
y = .4 * x + np.random.randn(100000) + 5
fig, axs = plt.subplots(1, 2, sharey=True, tight_layout=True)
# We can set the number of bins with the `bins` kwarg
axs[0].hist(x, bins=n_bins)
axs[1].hist(y, bins=n_bins)
###Output
_____no_output_____
###Markdown
Updating histogram colors-------------------------The histogram method returns (among other things) a ``patches`` object. Thisgives us access to the properties of the objects drawn. Using this, we canedit the histogram to our liking. Let's change the color of each barbased on its y value.
###Code
fig, axs = plt.subplots(1, 2, tight_layout=True)
# N is the count in each bin, bins is the lower-limit of the bin
N, bins, patches = axs[0].hist(x, bins=n_bins)
# We'll color code by height, but you could use any scalar
fracs = N / N.max()
# we need to normalize the data to 0..1 for the full range of the colormap
norm = colors.Normalize(fracs.min(), fracs.max())
# Now, we'll loop through our objects and set the color of each accordingly
for thisfrac, thispatch in zip(fracs, patches):
color = plt.cm.viridis(norm(thisfrac))
thispatch.set_facecolor(color)
# We can also normalize our inputs by the total number of counts
axs[1].hist(x, bins=n_bins, density=True)
# Now we format the y-axis to display percentage
axs[1].yaxis.set_major_formatter(PercentFormatter(xmax=1))
###Output
_____no_output_____
###Markdown
Plot a 2D histogram-------------------To plot a 2D histogram, one only needs two vectors of the same length,corresponding to each axis of the histogram.
###Code
fig, ax = plt.subplots(tight_layout=True)
hist = ax.hist2d(x, y)
###Output
_____no_output_____
###Markdown
Customizing your histogram--------------------------Customizing a 2D histogram is similar to the 1D case, you can controlvisual components such as the bin size or color normalization.
###Code
fig, axs = plt.subplots(3, 1, figsize=(5, 15), sharex=True, sharey=True,
tight_layout=True)
# We can increase the number of bins on each axis
axs[0].hist2d(x, y, bins=40)
# As well as define normalization of the colors
axs[1].hist2d(x, y, bins=40, norm=colors.LogNorm())
# We can also define custom numbers of bins for each axis
axs[2].hist2d(x, y, bins=(80, 10), norm=colors.LogNorm())
plt.show()
###Output
_____no_output_____ |
V10/Jupyter_Notebooks_For_Taxi/2_Taxi_Analysis/Prepared/3_Analysis_rides_Corona.ipynb | ###Markdown
Read all Data
###Code
df = spark.read.parquet(f"/taxi/dataset.parquet")
import pyspark.sql.functions as f
df.show(2)
data = (
df.groupBy("year", "month").count().orderBy("year", "month").withColumn("yyyy-mm", f.concat_ws("-", "year", "month"))
).toPandas()
data
data.plot(
x='yyyy-mm', y='count', figsize=(36, 6),
title='Rides in 2016',
legend=False,
kind='bar',
xlabel='Month',
ylabel='Rides'
)
###Output
_____no_output_____
###Markdown
Stopping Spark
###Code
spark.stop()
###Output
_____no_output_____ |
jupyter/pca/Group_Data_Analysis_PCA_10th_adding multiple params.ipynb | ###Markdown
Group_Data_Analysis_PCA_10th_adding multiple params* Version: '0.0.4'* Date: 2021-05-03* Author: Jea Kwon* Description: PCA analysis with multiple params 3D plot
###Code
from avatarpy import Avatar
import os
import glob
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import cufflinks as cf
from scipy.stats import zscore
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
cf.go_offline(connected=True)
root = r"C:\Users\Jay\Desktop\avatar_young_adult\data\best1_20210503"
avatars = dict(
wt=dict(
young=[],
adult=[],
),
ko=dict(
young=[],
adult=[],
)
)
for path, subdirs, files in os.walk(root):
for name in files:
if name.lower().endswith('.csv'):
csv_path = os.path.join(path, name)
age = os.path.basename(os.path.dirname(path))
genotype = os.path.basename(os.path.dirname(os.path.dirname(path)))
avatars[genotype][age].append(Avatar(csv_path=csv_path, ID=name))
###Output
_____no_output_____
###Markdown
Create walking event data Definition of walking- Moved more than 5 cm in 1 second (20 frames)- For more details, take a look at Group_Data_Analysis_PCA_1st_Trial Event Search function
###Code
def get_event_indices(boo, event_length):
"""Returns list of event indices.
ex) [(start 1, end 1), (start 2, end 2), (start 3, end 3), ..., (start N, end N)]
"""
indices = np.arange(len(boo))
condition = np.nonzero(boo[1:] != boo[:-1])[0] + 1
split_indices = np.split(indices, condition)
true_indices = split_indices[0::2] if boo[0] else split_indices[1::2]
event_indice_pair = [(idx[0]-event_length+1, idx[0]+1) for idx in true_indices]
return event_indice_pair
###Output
_____no_output_____
###Markdown
Features
###Code
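# For every walking event (20 frames) we build one flat feature vector by concatenating:
# raw / axis-aligned / plane-aligned coordinates, velocity, acceleration, angles,
# frame-to-frame angle differences, vector lengths, and the upper-triangle entries of the
# acceleration and angle-difference correlation matrices (X1..X10 below).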
wt_young_event_data = []
for avatar in avatars['wt']['young']:
boo = (avatar.distance['anus'].rolling(20).sum()>5).values # boolean array
event_indices = get_event_indices(boo, 20)
avatar_aoa = avatar.transform.align_on_axis()
avatar_aop = avatar.transform.align_on_plane()
for i, idx in enumerate(event_indices):
raw_coords = avatar.data.loc[avatar.index[idx[0]:idx[1]]]
aoa_coords = avatar_aoa.data.loc[avatar.index[idx[0]:idx[1]]]
aop_coords = avatar_aop.data.loc[avatar.index[idx[0]:idx[1]]]
velocity = avatar.velocity.loc[avatar.index[idx[0]:idx[1]]]
acceleration = avatar.acceleration.loc[avatar.index[idx[0]:idx[1]]]
angle = avatar.angle.loc[avatar.index[idx[0]:idx[1]]]
angle_diff = avatar.angle.diff().loc[avatar.index[idx[0]:idx[1]]]
vector_length = avatar.vector_length.loc[avatar.index[idx[0]:idx[1]]]
acc_corr = acceleration.corr()
mask = np.triu(np.ones_like(acc_corr, dtype=bool), 1)
acc_corr = acc_corr.values.flatten()[mask.flatten()]
ang_corr = angle_diff.corr()
mask = np.triu(np.ones_like(ang_corr, dtype=bool), 1)
ang_corr = ang_corr.values.flatten()[mask.flatten()]
if raw_coords.shape[0]!=20:continue
# elif aoa_coords.shape[0]!=20:continue
# elif aop_coords.shape[0]!=20:continue
X1 = raw_coords.values.flatten()
X2 = aoa_coords.values.flatten()
X3 = aop_coords.values.flatten()
X4 = velocity.values.flatten()
X5 = acceleration.values.flatten()
X6 = angle.values.flatten()
X7 = angle_diff.values.flatten()
X8 = vector_length.values.flatten()
X9 = acc_corr
X10 = ang_corr
X = np.concatenate([X1,X2,X3,X4,X5,X6,X7,X8,X9,X10])
wt_young_event_data.append(X)
wt_young_event_data = np.stack(wt_young_event_data)
wt_adult_event_data = []
for avatar in avatars['wt']['adult']:
boo = (avatar.distance['anus'].rolling(20).sum()>5).values # boolean array
event_indices = get_event_indices(boo, 20)
avatar_aoa = avatar.transform.align_on_axis()
avatar_aop = avatar.transform.align_on_plane()
for i, idx in enumerate(event_indices):
raw_coords = avatar.data.loc[avatar.index[idx[0]:idx[1]]]
aoa_coords = avatar_aoa.data.loc[avatar.index[idx[0]:idx[1]]]
aop_coords = avatar_aop.data.loc[avatar.index[idx[0]:idx[1]]]
velocity = avatar.velocity.loc[avatar.index[idx[0]:idx[1]]]
acceleration = avatar.acceleration.loc[avatar.index[idx[0]:idx[1]]]
angle = avatar.angle.loc[avatar.index[idx[0]:idx[1]]]
angle_diff = avatar.angle.diff().loc[avatar.index[idx[0]:idx[1]]]
vector_length = avatar.vector_length.loc[avatar.index[idx[0]:idx[1]]]
acc_corr = acceleration.corr()
mask = np.triu(np.ones_like(acc_corr, dtype=bool), 1)
acc_corr = acc_corr.values.flatten()[mask.flatten()]
ang_corr = angle_diff.corr()
mask = np.triu(np.ones_like(ang_corr, dtype=bool), 1)
ang_corr = ang_corr.values.flatten()[mask.flatten()]
if raw_coords.shape[0]!=20:continue
# elif aoa_coords.shape[0]!=20:continue
# elif aop_coords.shape[0]!=20:continue
X1 = raw_coords.values.flatten()
X2 = aoa_coords.values.flatten()
X3 = aop_coords.values.flatten()
X4 = velocity.values.flatten()
X5 = acceleration.values.flatten()
X6 = angle.values.flatten()
X7 = angle_diff.values.flatten()
X8 = vector_length.values.flatten()
X9 = acc_corr
X10 = ang_corr
X = np.concatenate([X1,X2,X3,X4,X5,X6,X7,X8,X9,X10])
wt_adult_event_data.append(X)
wt_adult_event_data = np.stack(wt_adult_event_data)
###Output
c:\users\jay\anaconda3\envs\avatar\lib\site-packages\avatarpy\core.py:22: RuntimeWarning:
invalid value encountered in true_divide
###Markdown
In total, 1857 events were acquired from 5 wt young mice over 5 sessions, and 2248 events from 5 wt adult mice over 5 sessions.
###Code
X = np.concatenate([wt_young_event_data, wt_adult_event_data])
X_ = StandardScaler().fit_transform(X)
X_[np.isnan(X_)] = 0
pca = PCA(n_components=3)
pc = pca.fit_transform(X_)
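# Quick check (added sketch): how much of the total variance do the three components retain?
print("explained variance ratio:", pca.explained_variance_ratio_,
      "| total:", pca.explained_variance_ratio_.sum())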
df = pd.DataFrame(pc,columns=['PC1','PC2', 'PC3'])
y = np.concatenate([np.zeros(wt_young_event_data.shape[0]), np.ones(wt_adult_event_data.shape[0])])
lbl = ['young']*wt_young_event_data.shape[0] + ['adult']*wt_adult_event_data.shape[0]
df['class'] = y
df['genotype'] = lbl
import plotly.express as px
fig = px.scatter_3d(df, x='PC1', y='PC2', z='PC3', color='genotype', opacity=0.5,
range_x=[-50, 50], range_y=[-50, 50], range_z=[-50, 50])
fig.update_traces(marker=dict(size=1))
fig.update_layout(scene_aspectmode='cube')
###Output
_____no_output_____ |
site/en-snapshot/tutorials/distribute/multi_worker_with_estimator.ipynb | ###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Multi-worker training with Estimator View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook OverviewNote: While you can use Estimators with `tf.distribute` API, it's recommended to use Keras with `tf.distribute`, see [multi-worker training with Keras](multi_worker_with_keras.ipynb). Estimator training with `tf.distribute.Strategy` has limited support.This tutorial demonstrates how `tf.distribute.Strategy` can be used for distributed multi-worker training with `tf.estimator`. If you write your code using `tf.estimator`, and you're interested in scaling beyond a single machine with high performance, this tutorial is for you.Before getting started, please read the [distribution strategy](../../guide/distributed_training.ipynb) guide. The [multi-GPU training tutorial](./keras.ipynb) is also relevant, because this tutorial uses the same model. SetupFirst, setup TensorFlow and the necessary imports.
###Code
import tensorflow_datasets as tfds
import tensorflow as tf
tfds.disable_progress_bar()
import os, json
###Output
_____no_output_____
###Markdown
Input functionThis tutorial uses the MNIST dataset from [TensorFlow Datasets](https://www.tensorflow.org/datasets). The code here is similar to the [multi-GPU training tutorial](./keras.ipynb) with one key difference: when using Estimator for multi-worker training, it is necessary to shard the dataset by the number of workers to ensure model convergence. The input data is sharded by worker index, so that each worker processes `1/num_workers` distinct portions of the dataset.
###Code
BUFFER_SIZE = 10000
BATCH_SIZE = 64
def input_fn(mode, input_context=None):
datasets, info = tfds.load(name='mnist',
with_info=True,
as_supervised=True)
mnist_dataset = (datasets['train'] if mode == tf.estimator.ModeKeys.TRAIN else
datasets['test'])
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255
return image, label
if input_context:
mnist_dataset = mnist_dataset.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
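    # Alternative approach (a sketch, not used here): instead of sharding, keep the full
    # dataset on every worker and shuffle it with a distinct, worker-specific seed, e.g.
    #   mnist_dataset = mnist_dataset.shuffle(BUFFER_SIZE, seed=input_context.input_pipeline_id)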
return mnist_dataset.map(scale).cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
###Output
_____no_output_____
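###Markdown
To see the sharding behaviour outside of an Estimator run, you can build an `InputContext` by hand and pull a single batch (a small illustrative check; the worker count `2` and index `0` are just example values):
###Code
ctx = tf.distribute.InputContext(num_input_pipelines=2, input_pipeline_id=0)
ds = input_fn(tf.estimator.ModeKeys.TRAIN, ctx)
for images, labels in ds.take(1):
  print(images.shape, labels.shape)
###Output
_____no_output_____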
###Markdown
Another reasonable approach to achieve convergence would be to shuffle the dataset with distinct seeds at each worker. Multi-worker configurationOne of the key differences in this tutorial (compared to the [multi-GPU training tutorial](./keras.ipynb)) is the multi-worker setup. The `TF_CONFIG` environment variable is the standard way to specify the cluster configuration to each worker that is part of the cluster.There are two components of `TF_CONFIG`: `cluster` and `task`. `cluster` provides information about the entire cluster, namely the workers and parameter servers in the cluster. `task` provides information about the current task. The first component `cluster` is the same for all workers and parameter servers in the cluster, and the second component `task` is different on each worker and parameter server and specifies its own `type` and `index`. In this example, the task `type` is `worker` and the task `index` is `0`.For illustration purposes, this tutorial shows how to set a `TF_CONFIG` with 2 workers on `localhost`. In practice, you would create multiple workers on an external IP address and port, and set `TF_CONFIG` on each worker appropriately, i.e. modify the task `index`.Warning: *Do not execute the following code in Colab.* TensorFlow's runtime will attempt to create a gRPC server at the specified IP address and port, which will likely fail.```os.environ['TF_CONFIG'] = json.dumps({ 'cluster': { 'worker': ["localhost:12345", "localhost:23456"] }, 'task': {'type': 'worker', 'index': 0}})``` Define the modelWrite the layers, the optimizer, and the loss function for training. This tutorial defines the model with Keras layers, similar to the [multi-GPU training tutorial](./keras.ipynb).
###Code
LEARNING_RATE = 1e-4
def model_fn(features, labels, mode):
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(10)
])
logits = model(features, training=False)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {'logits': logits}
    return tf.estimator.EstimatorSpec(mode, predictions=predictions)
optimizer = tf.compat.v1.train.GradientDescentOptimizer(
learning_rate=LEARNING_RATE)
loss = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)(labels, logits)
loss = tf.reduce_sum(loss) * (1. / BATCH_SIZE)
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode, loss=loss)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=optimizer.minimize(
loss, tf.compat.v1.train.get_or_create_global_step()))
###Output
_____no_output_____
###Markdown
Note: Although the learning rate is fixed in this example, in general it may be necessary to adjust the learning rate based on the global batch size. MultiWorkerMirroredStrategyTo train the model, use an instance of `tf.distribute.experimental.MultiWorkerMirroredStrategy`. `MultiWorkerMirroredStrategy` creates copies of all variables in the model's layers on each device across all workers. It uses `CollectiveOps`, a TensorFlow op for collective communication, to aggregate gradients and keep the variables in sync. The [`tf.distribute.Strategy` guide](../../guide/distributed_training.ipynb) has more details about this strategy.
###Code
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
###Output
_____no_output_____
###Markdown
Train and evaluate the modelNext, specify the distribution strategy in the `RunConfig` for the estimator, and train and evaluate by invoking `tf.estimator.train_and_evaluate`. This tutorial distributes only the training by specifying the strategy via `train_distribute`. It is also possible to distribute the evaluation via `eval_distribute`.
###Code
config = tf.estimator.RunConfig(train_distribute=strategy)
classifier = tf.estimator.Estimator(
model_fn=model_fn, model_dir='/tmp/multiworker', config=config)
tf.estimator.train_and_evaluate(
classifier,
train_spec=tf.estimator.TrainSpec(input_fn=input_fn),
eval_spec=tf.estimator.EvalSpec(input_fn=input_fn)
)
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Multi-worker training with Estimator View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook OverviewNote: While you can use Estimators with `tf.distribute` API, it's recommended to use Keras with `tf.distribute`, see [multi-worker training with Keras](multi_worker_with_keras.ipynb). Estimator training with `tf.distribute.Strategy` has limited support.This tutorial demonstrates how `tf.distribute.Strategy` can be used for distributed multi-worker training with `tf.estimator`. If you write your code using `tf.estimator`, and you're interested in scaling beyond a single machine with high performance, this tutorial is for you.Before getting started, please read the [distribution strategy](../../guide/distributed_training.ipynb) guide. The [multi-GPU training tutorial](./keras.ipynb) is also relevant, because this tutorial uses the same model. SetupFirst, setup TensorFlow and the necessary imports.
###Code
import tensorflow_datasets as tfds
import tensorflow as tf
tfds.disable_progress_bar()
import os, json
###Output
_____no_output_____
###Markdown
Input functionThis tutorial uses the MNIST dataset from [TensorFlow Datasets](https://www.tensorflow.org/datasets). The code here is similar to the [multi-GPU training tutorial](./keras.ipynb) with one key difference: when using Estimator for multi-worker training, it is necessary to shard the dataset by the number of workers to ensure model convergence. The input data is sharded by worker index, so that each worker processes `1/num_workers` distinct portions of the dataset.
###Code
BUFFER_SIZE = 10000
BATCH_SIZE = 64
def input_fn(mode, input_context=None):
datasets, info = tfds.load(name='mnist',
with_info=True,
as_supervised=True)
mnist_dataset = (datasets['train'] if mode == tf.estimator.ModeKeys.TRAIN else
datasets['test'])
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255
return image, label
if input_context:
mnist_dataset = mnist_dataset.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
return mnist_dataset.map(scale).cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
###Output
_____no_output_____
###Markdown
Another reasonable approach to achieve convergence would be to shuffle the dataset with distinct seeds at each worker. Multi-worker configurationOne of the key differences in this tutorial (compared to the [multi-GPU training tutorial](./keras.ipynb)) is the multi-worker setup. The `TF_CONFIG` environment variable is the standard way to specify the cluster configuration to each worker that is part of the cluster.There are two components of `TF_CONFIG`: `cluster` and `task`. `cluster` provides information about the entire cluster, namely the workers and parameter servers in the cluster. `task` provides information about the current task. The first component `cluster` is the same for all workers and parameter servers in the cluster, and the second component `task` is different on each worker and parameter server and specifies its own `type` and `index`. In this example, the task `type` is `worker` and the task `index` is `0`.For illustration purposes, this tutorial shows how to set a `TF_CONFIG` with 2 workers on `localhost`. In practice, you would create multiple workers on an external IP address and port, and set `TF_CONFIG` on each worker appropriately, i.e. modify the task `index`.Warning: *Do not execute the following code in Colab.* TensorFlow's runtime will attempt to create a gRPC server at the specified IP address and port, which will likely fail.```os.environ['TF_CONFIG'] = json.dumps({ 'cluster': { 'worker': ["localhost:12345", "localhost:23456"] }, 'task': {'type': 'worker', 'index': 0}})``` Define the modelWrite the layers, the optimizer, and the loss function for training. This tutorial defines the model with Keras layers, similar to the [multi-GPU training tutorial](./keras.ipynb).
###Code
LEARNING_RATE = 1e-4
def model_fn(features, labels, mode):
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(10)
])
logits = model(features, training=False)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {'logits': logits}
    return tf.estimator.EstimatorSpec(mode, predictions=predictions)
optimizer = tf.compat.v1.train.GradientDescentOptimizer(
learning_rate=LEARNING_RATE)
loss = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)(labels, logits)
loss = tf.reduce_sum(loss) * (1. / BATCH_SIZE)
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode, loss=loss)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=optimizer.minimize(
loss, tf.compat.v1.train.get_or_create_global_step()))
###Output
_____no_output_____
###Markdown
Note: Although the learning rate is fixed in this example, in general it may be necessary to adjust the learning rate based on the global batch size. MultiWorkerMirroredStrategyTo train the model, use an instance of `tf.distribute.experimental.MultiWorkerMirroredStrategy`. `MultiWorkerMirroredStrategy` creates copies of all variables in the model's layers on each device across all workers. It uses `CollectiveOps`, a TensorFlow op for collective communication, to aggregate gradients and keep the variables in sync. The [`tf.distribute.Strategy` guide](../../guide/distributed_training.ipynb) has more details about this strategy.
###Code
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
###Output
_____no_output_____
###Markdown
Train and evaluate the modelNext, specify the distribution strategy in the `RunConfig` for the estimator, and train and evaluate by invoking `tf.estimator.train_and_evaluate`. This tutorial distributes only the training by specifying the strategy via `train_distribute`. It is also possible to distribute the evaluation via `eval_distribute`.
###Code
config = tf.estimator.RunConfig(train_distribute=strategy)
classifier = tf.estimator.Estimator(
model_fn=model_fn, model_dir='/tmp/multiworker', config=config)
tf.estimator.train_and_evaluate(
classifier,
train_spec=tf.estimator.TrainSpec(input_fn=input_fn),
eval_spec=tf.estimator.EvalSpec(input_fn=input_fn)
)
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Multi-worker training with Estimator View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook OverviewNote: While you can use Estimators with `tf.distribute` API, it's recommended to use Keras with `tf.distribute`, see [multi-worker training with Keras](multi_worker_with_keras.ipynb). Estimator training with `tf.distribute.Strategy` has limited support.This tutorial demonstrates how `tf.distribute.Strategy` can be used for distributed multi-worker training with `tf.estimator`. If you write your code using `tf.estimator`, and you're interested in scaling beyond a single machine with high performance, this tutorial is for you.Before getting started, please read the [distribution strategy](../../guide/distributed_training.ipynb) guide. The [multi-GPU training tutorial](./keras.ipynb) is also relevant, because this tutorial uses the same model. SetupFirst, setup TensorFlow and the necessary imports.
###Code
import tensorflow_datasets as tfds
import tensorflow as tf
import os, json
###Output
_____no_output_____
###Markdown
Note: Starting from TF 2.4, `MultiWorkerMirroredStrategy` fails with Estimators if run with eager execution enabled (the default). The error in TF 2.4 is `TypeError: cannot pickle '_thread.lock' object`; see [issue 46556](https://github.com/tensorflow/tensorflow/issues/46556) for details. The workaround is to disable eager execution.
###Code
tf.compat.v1.disable_eager_execution()
###Output
_____no_output_____
###Markdown
Input functionThis tutorial uses the MNIST dataset from [TensorFlow Datasets](https://www.tensorflow.org/datasets). The code here is similar to the [multi-GPU training tutorial](./keras.ipynb) with one key difference: when using Estimator for multi-worker training, it is necessary to shard the dataset by the number of workers to ensure model convergence. The input data is sharded by worker index, so that each worker processes `1/num_workers` distinct portions of the dataset.
###Code
BUFFER_SIZE = 10000
BATCH_SIZE = 64
def input_fn(mode, input_context=None):
datasets, info = tfds.load(name='mnist',
with_info=True,
as_supervised=True)
mnist_dataset = (datasets['train'] if mode == tf.estimator.ModeKeys.TRAIN else
datasets['test'])
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255
return image, label
if input_context:
mnist_dataset = mnist_dataset.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
return mnist_dataset.map(scale).cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
###Output
_____no_output_____
###Markdown
Another reasonable approach to achieve convergence would be to shuffle the dataset with distinct seeds at each worker. Multi-worker configurationOne of the key differences in this tutorial (compared to the [multi-GPU training tutorial](./keras.ipynb)) is the multi-worker setup. The `TF_CONFIG` environment variable is the standard way to specify the cluster configuration to each worker that is part of the cluster.There are two components of `TF_CONFIG`: `cluster` and `task`. `cluster` provides information about the entire cluster, namely the workers and parameter servers in the cluster. `task` provides information about the current task. The first component `cluster` is the same for all workers and parameter servers in the cluster, and the second component `task` is different on each worker and parameter server and specifies its own `type` and `index`. In this example, the task `type` is `worker` and the task `index` is `0`.For illustration purposes, this tutorial shows how to set a `TF_CONFIG` with 2 workers on `localhost`. In practice, you would create multiple workers on an external IP address and port, and set `TF_CONFIG` on each worker appropriately, i.e. modify the task `index`.Warning: *Do not execute the following code in Colab.* TensorFlow's runtime will attempt to create a gRPC server at the specified IP address and port, which will likely fail. See the [keras version](multi_worker_with_keras.ipynb) of this tutorial for an example of how you can test run multiple workers on a single machine.```os.environ['TF_CONFIG'] = json.dumps({ 'cluster': { 'worker': ["localhost:12345", "localhost:23456"] }, 'task': {'type': 'worker', 'index': 0}})``` Define the modelWrite the layers, the optimizer, and the loss function for training. This tutorial defines the model with Keras layers, similar to the [multi-GPU training tutorial](./keras.ipynb).
###Code
LEARNING_RATE = 1e-4
def model_fn(features, labels, mode):
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(10)
])
logits = model(features, training=False)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {'logits': logits}
    return tf.estimator.EstimatorSpec(mode, predictions=predictions)
optimizer = tf.compat.v1.train.GradientDescentOptimizer(
learning_rate=LEARNING_RATE)
loss = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)(labels, logits)
loss = tf.reduce_sum(loss) * (1. / BATCH_SIZE)
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode, loss=loss)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=optimizer.minimize(
loss, tf.compat.v1.train.get_or_create_global_step()))
###Output
_____no_output_____
###Markdown
Note: Although the learning rate is fixed in this example, in general it may be necessary to adjust the learning rate based on the global batch size. MultiWorkerMirroredStrategyTo train the model, use an instance of `tf.distribute.experimental.MultiWorkerMirroredStrategy`. `MultiWorkerMirroredStrategy` creates copies of all variables in the model's layers on each device across all workers. It uses `CollectiveOps`, a TensorFlow op for collective communication, to aggregate gradients and keep the variables in sync. The [`tf.distribute.Strategy` guide](../../guide/distributed_training.ipynb) has more details about this strategy.
###Code
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
###Output
_____no_output_____
###Markdown
Train and evaluate the modelNext, specify the distribution strategy in the `RunConfig` for the estimator, and train and evaluate by invoking `tf.estimator.train_and_evaluate`. This tutorial distributes only the training by specifying the strategy via `train_distribute`. It is also possible to distribute the evaluation via `eval_distribute`.
###Code
config = tf.estimator.RunConfig(train_distribute=strategy)
classifier = tf.estimator.Estimator(
model_fn=model_fn, model_dir='/tmp/multiworker', config=config)
tf.estimator.train_and_evaluate(
classifier,
train_spec=tf.estimator.TrainSpec(input_fn=input_fn),
eval_spec=tf.estimator.EvalSpec(input_fn=input_fn)
)
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Multi-worker training with Estimator View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook > Warning: Estimators are not recommended for new code. Estimators run `v1.Session`-style code which is more difficult to write correctly, and can behave unexpectedly, especially when combined with TF 2 code. Estimators do fall under [compatibility guarantees](https://tensorflow.org/guide/versions), but will receive no fixes other than security vulnerabilities. See the [migration guide](https://tensorflow.org/guide/migrate) for details. OverviewNote: While you can use Estimators with `tf.distribute` API, it's recommended to use Keras with `tf.distribute`, see [multi-worker training with Keras](multi_worker_with_keras.ipynb). Estimator training with `tf.distribute.Strategy` has limited support.This tutorial demonstrates how `tf.distribute.Strategy` can be used for distributed multi-worker training with `tf.estimator`. If you write your code using `tf.estimator`, and you're interested in scaling beyond a single machine with high performance, this tutorial is for you.Before getting started, please read the [distribution strategy](../../guide/distributed_training.ipynb) guide. The [multi-GPU training tutorial](./keras.ipynb) is also relevant, because this tutorial uses the same model. SetupFirst, setup TensorFlow and the necessary imports.
###Code
import tensorflow_datasets as tfds
import tensorflow as tf
import os, json
###Output
_____no_output_____
###Markdown
Note: Starting from TF 2.4, `MultiWorkerMirroredStrategy` fails with Estimators if run with eager execution enabled (the default). The error in TF 2.4 is `TypeError: cannot pickle '_thread.lock' object`; see [issue 46556](https://github.com/tensorflow/tensorflow/issues/46556) for details. The workaround is to disable eager execution.
###Code
tf.compat.v1.disable_eager_execution()
###Output
_____no_output_____
###Markdown
Input functionThis tutorial uses the MNIST dataset from [TensorFlow Datasets](https://www.tensorflow.org/datasets). The code here is similar to the [multi-GPU training tutorial](./keras.ipynb) with one key difference: when using Estimator for multi-worker training, it is necessary to shard the dataset by the number of workers to ensure model convergence. The input data is sharded by worker index, so that each worker processes `1/num_workers` distinct portions of the dataset.
###Code
BUFFER_SIZE = 10000
BATCH_SIZE = 64
def input_fn(mode, input_context=None):
datasets, info = tfds.load(name='mnist',
with_info=True,
as_supervised=True)
mnist_dataset = (datasets['train'] if mode == tf.estimator.ModeKeys.TRAIN else
datasets['test'])
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255
return image, label
if input_context:
mnist_dataset = mnist_dataset.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
return mnist_dataset.map(scale).cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
###Output
_____no_output_____
###Markdown
Another reasonable approach to achieve convergence would be to shuffle the dataset with distinct seeds at each worker. Multi-worker configurationOne of the key differences in this tutorial (compared to the [multi-GPU training tutorial](./keras.ipynb)) is the multi-worker setup. The `TF_CONFIG` environment variable is the standard way to specify the cluster configuration to each worker that is part of the cluster.There are two components of `TF_CONFIG`: `cluster` and `task`. `cluster` provides information about the entire cluster, namely the workers and parameter servers in the cluster. `task` provides information about the current task. The first component `cluster` is the same for all workers and parameter servers in the cluster, and the second component `task` is different on each worker and parameter server and specifies its own `type` and `index`. In this example, the task `type` is `worker` and the task `index` is `0`.For illustration purposes, this tutorial shows how to set a `TF_CONFIG` with 2 workers on `localhost`. In practice, you would create multiple workers on an external IP address and port, and set `TF_CONFIG` on each worker appropriately, i.e. modify the task `index`.Warning: *Do not execute the following code in Colab.* TensorFlow's runtime will attempt to create a gRPC server at the specified IP address and port, which will likely fail. See the [keras version](multi_worker_with_keras.ipynb) of this tutorial for an example of how you can test run multiple workers on a single machine.```os.environ['TF_CONFIG'] = json.dumps({ 'cluster': { 'worker': ["localhost:12345", "localhost:23456"] }, 'task': {'type': 'worker', 'index': 0}})``` Define the modelWrite the layers, the optimizer, and the loss function for training. This tutorial defines the model with Keras layers, similar to the [multi-GPU training tutorial](./keras.ipynb).
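Before moving on to the model definition below, here is a minimal sketch (not part of the original tutorial) of the per-worker-seed alternative mentioned at the start of this section; it assumes the worker's `input_context.input_pipeline_id` can serve as the distinct shuffle seed:
```python
# Sketch only: shuffle the full dataset with a worker-specific seed instead of sharding.
def input_fn_with_per_worker_seed(mode, input_context=None):
  datasets, info = tfds.load(name='mnist', with_info=True, as_supervised=True)
  mnist_dataset = (datasets['train'] if mode == tf.estimator.ModeKeys.TRAIN else
                   datasets['test'])

  def scale(image, label):
    image = tf.cast(image, tf.float32)
    image /= 255
    return image, label

  # Use the pipeline id as a distinct per-worker seed (0 when running single-worker).
  seed = input_context.input_pipeline_id if input_context else 0
  return mnist_dataset.map(scale).cache().shuffle(BUFFER_SIZE, seed=seed).batch(BATCH_SIZE)
```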
###Code
LEARNING_RATE = 1e-4
def model_fn(features, labels, mode):
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(10)
])
logits = model(features, training=False)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {'logits': logits}
    return tf.estimator.EstimatorSpec(mode, predictions=predictions)
optimizer = tf.compat.v1.train.GradientDescentOptimizer(
learning_rate=LEARNING_RATE)
loss = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)(labels, logits)
loss = tf.reduce_sum(loss) * (1. / BATCH_SIZE)
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode, loss=loss)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=optimizer.minimize(
loss, tf.compat.v1.train.get_or_create_global_step()))
###Output
_____no_output_____
###Markdown
Note: Although the learning rate is fixed in this example, in general it may be necessary to adjust the learning rate based on the global batch size. MultiWorkerMirroredStrategyTo train the model, use an instance of `tf.distribute.experimental.MultiWorkerMirroredStrategy`. `MultiWorkerMirroredStrategy` creates copies of all variables in the model's layers on each device across all workers. It uses `CollectiveOps`, a TensorFlow op for collective communication, to aggregate gradients and keep the variables in sync. The [`tf.distribute.Strategy` guide](../../guide/distributed_training.ipynb) has more details about this strategy.
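As a rough illustration of the learning-rate note above (a common linear-scaling heuristic, not something this tutorial prescribes):
```python
# Hypothetical heuristic: scale the learning rate with the number of workers,
# since the global batch size grows to BATCH_SIZE * num_workers.
NUM_WORKERS = 2  # assumed cluster size, for illustration only
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
SCALED_LEARNING_RATE = LEARNING_RATE * NUM_WORKERS
```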
###Code
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
###Output
_____no_output_____
###Markdown
Train and evaluate the modelNext, specify the distribution strategy in the `RunConfig` for the estimator, and train and evaluate by invoking `tf.estimator.train_and_evaluate`. This tutorial distributes only the training by specifying the strategy via `train_distribute`. It is also possible to distribute the evaluation via `eval_distribute`.
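For reference, a variant that also distributes evaluation (not run in this tutorial) would pass the strategy to both fields of the `RunConfig`:
```python
# Sketch only: distribute evaluation as well by setting eval_distribute.
config_with_eval = tf.estimator.RunConfig(train_distribute=strategy,
                                           eval_distribute=strategy)
```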
###Code
config = tf.estimator.RunConfig(train_distribute=strategy)
classifier = tf.estimator.Estimator(
model_fn=model_fn, model_dir='/tmp/multiworker', config=config)
tf.estimator.train_and_evaluate(
classifier,
train_spec=tf.estimator.TrainSpec(input_fn=input_fn),
eval_spec=tf.estimator.EvalSpec(input_fn=input_fn)
)
###Output
_____no_output_____ |
Sample-Lab/blazing_text_lab.ipynb | ###Markdown
IntroductionText Classification can be used to solve various use cases such as sentiment analysis, spam detection, and hashtag prediction. This notebook demonstrates the use of SageMaker BlazingText to perform supervised binary and multi-class text classification with single or multiple labels. BlazingText can train the model on more than a billion words in a couple of minutes using a multi-core CPU or a GPU, while achieving performance on par with state-of-the-art deep learning text classification algorithms. BlazingText extends the fastText text classifier to leverage GPU acceleration using custom CUDA kernels. Initialize Your ResourcesSageMaker training jobs need unique names, and as users we need to be able to find our own jobs. So here we'll provide our name once and use it to track our resources throughout the lab.
###Code
YOUR_NAME = 'first-last'
import sagemaker
from sagemaker import get_execution_role
import json
import boto3
sess = sagemaker.Session()
role = get_execution_role()
print(role) # This is the role that SageMaker would use to leverage AWS resources (S3, CloudWatch) on your behalf
bucket = sess.default_bucket() # Replace with your own bucket name if needed
print(bucket)
prefix = '{}/blazingtext/supervised'.format(YOUR_NAME) #Replace with the prefix under which you want to store the data if needed
###Output
_____no_output_____
###Markdown
Data PreparationNow we'll download a dataset from the web on which we want to train the text classification model. BlazingText expects a single preprocessed text file with space-separated tokens, and each line of the file should contain a single sentence and the corresponding label(s) prefixed by "\__label\__". In this example, let us train the text classification model on the [DBPedia Ontology Dataset](https://wiki.dbpedia.org/services-resources/dbpedia-data-set-20142) as done by [Zhang et al](https://arxiv.org/pdf/1509.01626.pdf). The DBpedia ontology dataset is constructed by picking 14 non-overlapping classes from DBpedia 2014. It has 560,000 training samples and 70,000 testing samples. The fields used in this dataset are the title and abstract of each Wikipedia article.
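For example, after preprocessing, a single line of the training file would look roughly like the following (an illustrative line built from one of the inference sentences used later in this notebook; the label name comes from the dataset's `classes.txt`):
```
__label__Company convair was an american aircraft manufacturing company which later expanded into rockets and spacecraft .
```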
###Code
!wget https://github.com/saurabh3949/Text-Classification-Datasets/raw/master/dbpedia_csv.tar.gz
!tar -xzvf dbpedia_csv.tar.gz
###Output
_____no_output_____
###Markdown
Let us inspect the dataset and the classes to get some understanding about how the data and the label is provided in the dataset.
###Code
!head dbpedia_csv/train.csv -n 3
###Output
_____no_output_____
###Markdown
As can be seen from the above output, the CSV has three fields: the label index, the title, and the abstract. We will first print the labels file (`classes.txt`) to see all possible labels, then create an index-to-label mapping, and finally preprocess the dataset for ingestion by BlazingText.
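For orientation, each raw row of `train.csv` follows the pattern below (illustrative values, not an actual record from the dataset):
```
1,"Example Company"," example company is a fictional firm used here only to illustrate the index, title, abstract layout."
```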
###Code
!cat dbpedia_csv/classes.txt
###Output
_____no_output_____
###Markdown
The following code creates the mapping from integer indices to class label which will later be used to retrieve the actual class name during inference.
###Code
index_to_label = {}
with open("dbpedia_csv/classes.txt") as f:
for i,label in enumerate(f.readlines()):
index_to_label[str(i+1)] = label.strip()
print(index_to_label)
###Output
_____no_output_____
###Markdown
Data PreprocessingWe need to preprocess the training data into a **space-separated tokenized text** format that can be consumed by the `BlazingText` algorithm. Also, as mentioned previously, the class label(s) should be prefixed with `__label__` and should appear on the same line as the original sentence. We'll use the `nltk` library to tokenize the input sentences from the DBpedia dataset. Download the nltk tokenizer and other libraries
###Code
from random import shuffle
import multiprocessing
from multiprocessing import Pool
import csv
import nltk
nltk.download('punkt')
def transform_instance(row):
cur_row = []
label = "__label__" + index_to_label[row[0]] #Prefix the index-ed label with __label__
cur_row.append(label)
cur_row.extend(nltk.word_tokenize(row[1].lower()))
cur_row.extend(nltk.word_tokenize(row[2].lower()))
return cur_row
###Output
_____no_output_____
###Markdown
The `transform_instance` will be applied to each data instance in parallel using python's multiprocessing module
###Code
def preprocess(input_file, output_file, keep=1):
all_rows = []
with open(input_file, 'r') as csvinfile:
csv_reader = csv.reader(csvinfile, delimiter=',')
for row in csv_reader:
all_rows.append(row)
shuffle(all_rows)
all_rows = all_rows[:int(keep*len(all_rows))]
pool = Pool(processes=multiprocessing.cpu_count())
transformed_rows = pool.map(transform_instance, all_rows)
pool.close()
pool.join()
with open(output_file, 'w') as csvoutfile:
csv_writer = csv.writer(csvoutfile, delimiter=' ', lineterminator='\n')
csv_writer.writerows(transformed_rows)
%%time
# Preparing the training dataset
# Since preprocessing the whole dataset might take a couple of minutes,
# we keep 20% of the training dataset for this demo.
# Set keep to 1 if you want to use the complete dataset
preprocess('dbpedia_csv/train.csv', 'dbpedia.train', keep=.2)
# Preparing the validation dataset
preprocess('dbpedia_csv/test.csv', 'dbpedia.validation')
###Output
_____no_output_____
###Markdown
The data preprocessing cell might take a minute to run. After the preprocessing is complete, we need to upload the processed files to S3 so that they can be consumed by SageMaker to execute training jobs. We'll use the SageMaker Python SDK to upload these two files to the bucket and prefix location that we set above.
###Code
%%time
train_channel = prefix + '/train'
validation_channel = prefix + '/validation'
sess.upload_data(path='dbpedia.train', bucket=bucket, key_prefix=train_channel)
sess.upload_data(path='dbpedia.validation', bucket=bucket, key_prefix=validation_channel)
s3_train_data = 's3://{}/{}'.format(bucket, train_channel)
s3_validation_data = 's3://{}/{}'.format(bucket, validation_channel)
###Output
_____no_output_____
###Markdown
Next we need to set up an output location in S3, where the model artifact will be stored. These artifacts are the output of the algorithm's training job.
###Code
s3_output_location = 's3://{}/{}/output'.format(bucket, prefix)
###Output
_____no_output_____
###Markdown
TrainingNow that we are done with all the required setup, we are ready to train our text classifier. To begin, let us create a ``sagemaker.estimator.Estimator`` object. This estimator will launch the training job.
###Code
region_name = boto3.Session().region_name
container = sagemaker.amazon.amazon_estimator.get_image_uri(region_name, "blazingtext", "latest")
print('Using SageMaker BlazingText container: {} ({})'.format(container, region_name))
###Output
_____no_output_____
###Markdown
Training the BlazingText model for supervised text classification Similar to the original implementation of [Word2Vec](https://arxiv.org/pdf/1301.3781.pdf), SageMaker BlazingText provides an efficient implementation of the continuous bag-of-words (CBOW) and skip-gram architectures using Negative Sampling, on CPUs and additionally on GPU[s]. The GPU implementation uses highly optimized CUDA kernels. To learn more, please refer to [*BlazingText: Scaling and Accelerating Word2Vec using Multiple GPUs*](https://dl.acm.org/citation.cfm?doid=3146347.3146354). Besides skip-gram and CBOW, SageMaker BlazingText also supports the "Batch Skipgram" mode, which uses efficient mini-batching and matrix-matrix operations ([BLAS Level 3 routines](https://software.intel.com/en-us/mkl-developer-reference-fortran-blas-level-3-routines)). This mode enables distributed word2vec training across multiple CPU nodes, allowing almost linear scale up of word2vec computation to process hundreds of millions of words per second. Please refer to [*Parallelizing Word2Vec in Shared and Distributed Memory*](https://arxiv.org/pdf/1604.04661.pdf) to learn more. BlazingText also supports a *supervised* mode for text classification. It extends the FastText text classifier to leverage GPU acceleration using custom CUDA kernels. The model can be trained on more than a billion words in a couple of minutes using a multi-core CPU or a GPU, while achieving performance on par with the state-of-the-art deep learning text classification algorithms. For more information, please refer to the [algorithm documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/blazingtext.html). To summarize, the following modes are supported by BlazingText on different types instances:| Modes | cbow (supports subwords training) | skipgram (supports subwords training) | batch_skipgram | supervised ||:----------------------: |:----: |:--------: |:--------------: | :--------------: || Single CPU instance | ✔ | ✔ | ✔ | ✔ || Single GPU instance | ✔ | ✔ | | ✔ (Instance with 1 GPU only) || Multiple CPU instances | | | ✔ | | |Now, let's define the SageMaker `Estimator` with resource configurations and hyperparameters to train Text Classification on *DBPedia* dataset, using "supervised" mode on a `c4.4xlarge` instance.
###Code
bt_model = sagemaker.estimator.Estimator(container,
role,
base_job_name = YOUR_NAME,
train_instance_count=1,
train_instance_type='ml.c4.4xlarge',
train_volume_size = 30,
train_max_run = 360000,
input_mode= 'File',
output_path=s3_output_location,
sagemaker_session=sess)
###Output
_____no_output_____
###Markdown
Please refer to the [algorithm documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/blazingtext_hyperparameters.html) for the complete list of hyperparameters.
###Code
bt_model.set_hyperparameters(mode="supervised",
epochs=10,
min_count=2,
learning_rate=0.05,
vector_dim=10,
early_stopping=True,
patience=4,
min_epochs=5,
word_ngrams=2)
###Output
_____no_output_____
###Markdown
Now that the hyperparameters are set up, let us prepare the handshake between our data channels and the algorithm. To do this, we need to create `sagemaker.session.s3_input` objects from our data channels. These objects are then put in a simple dictionary, which the algorithm consumes.
###Code
train_data = sagemaker.session.s3_input(s3_train_data, distribution='FullyReplicated',
content_type='text/plain', s3_data_type='S3Prefix')
validation_data = sagemaker.session.s3_input(s3_validation_data, distribution='FullyReplicated',
content_type='text/plain', s3_data_type='S3Prefix')
data_channels = {'train': train_data, 'validation': validation_data}
###Output
_____no_output_____
###Markdown
We have our `Estimator` object, we have set its hyperparameters, and we have linked our data channels with the algorithm. The only remaining step is to train the model, which the following command does. Training involves a few steps. First, the instance that we requested while creating the `Estimator` object is provisioned and set up with the appropriate libraries. Then, the data from our channels is downloaded onto the instance. Once this is done, the training job begins. The provisioning and data download will take some time, depending on the size of the data, so it might be a few minutes before we start getting training logs. The logs will also print out accuracy on the validation data for every epoch after the training job has executed `min_epochs`. This metric is a proxy for the quality of the model. Once the job has finished, a "Job complete" message will be printed. The trained model can be found in the S3 bucket that was set up as `output_path` in the estimator.
###Code
bt_model.fit(inputs=data_channels, logs=True)
###Output
_____no_output_____
###Markdown
Hosting / InferenceOnce the training is done, we can deploy the trained model as an Amazon SageMaker real-time hosted endpoint. This will allow us to make predictions (or inference) from the model. Note that we don't have to host on the same type of instance that we used to train. Because endpoints typically stay up and running for a long time, it's advisable to choose a cheaper instance for inference.
###Code
text_classifier = bt_model.deploy(initial_instance_count = 1,instance_type = 'ml.m4.xlarge')
###Output
_____no_output_____
###Markdown
Use JSON format for inferenceBlazingText supports `application/json` as the content type for inference. The payload passed to the endpoint should contain the list of sentences under the key "**instances**".
###Code
sentences = ["Convair was an american aircraft manufacturing company which later expanded into rockets and spacecraft.",
"Berwick secondary college is situated in the outer melbourne metropolitan suburb of berwick ."]
# using the same nltk tokenizer that we used during data preparation for training
tokenized_sentences = [' '.join(nltk.word_tokenize(sent)) for sent in sentences]
payload = {"instances" : tokenized_sentences}
response = text_classifier.predict(json.dumps(payload))
predictions = json.loads(response)
print(json.dumps(predictions, indent=2))
###Output
_____no_output_____
###Markdown
By default, the model will return only one prediction, the one with the highest probability. For retrieving the top k predictions, you can set `k` in the configuration as shown below:
###Code
payload = {"instances" : tokenized_sentences,
"configuration": {"k": 2}}
response = text_classifier.predict(json.dumps(payload))
predictions = json.loads(response)
print(json.dumps(predictions, indent=2))
###Output
_____no_output_____
###Markdown
Stop / Close the Endpoint (Optional)Finally, we should delete the endpoint before closing the notebook if we don't need to keep it running to serve real-time predictions.
###Code
# sess.delete_endpoint(text_classifier.endpoint)
###Output
_____no_output_____ |
database/tasks/How to create a QQ-plot/Python, using statsmodels.ipynb | ###Markdown
---author: Elizabeth Czarniak ([email protected])--- We're going to use some fake data here by generating random numbers, but you can replace our fake data with your real data in the code below.
###Code
# Replace this with your data, such as a variable or column in a DataFrame
import numpy as np
values = np.random.normal(0, 1, 50) # 50 random values
###Output
_____no_output_____
###Markdown
If the data is normally distributed, then we expect the QQ plot to show the observed values (blue dots) falling very close to the red line (the quantiles of the normal distribution).
###Code
import statsmodels.api as sm
import matplotlib.pyplot as plt
sm.qqplot(values, line = '45')
plt.show()
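# For contrast (an illustrative addition, not part of the original note): heavily
# skewed data, such as an exponential sample, shows points bending away from the line.
skewed = np.random.exponential(scale=1.0, size=50)
sm.qqplot(skewed, line='45')
plt.show()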
###Output
/opt/conda/lib/python3.9/site-packages/statsmodels/graphics/gofplots.py:993: UserWarning: marker is redundantly defined by the 'marker' keyword argument and the fmt string "bo" (-> marker='o'). The keyword argument will take precedence.
ax.plot(x, y, fmt, **plot_style)
|
tutorials/Image/05_conditional_operations.ipynb | ###Markdown
View source on GitHub Notebook Viewer Run in Google Colab Relational, conditional and Boolean operationsTo perform per-pixel comparisons between images, use relational operators. To extract urbanized areas in an image, this example uses relational operators to threshold spectral indices, combining the thresholds with `And()`: Install Earth Engine API and geemapInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemapdependencies), including earthengine-api, folium, and ipyleaflet.**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.foliumap`](https://github.com/giswqs/geemap/blob/master/geemap/foliumap.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
###Code
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.foliumap as emap
except:
import geemap as emap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
###Output
_____no_output_____
###Markdown
Create an interactive map The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.pyL13) can be added using the `Map.add_basemap()` function.
###Code
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
Map
###Output
_____no_output_____
###Markdown
Add Earth Engine Python script
###Code
# Load a Landsat 8 image.
image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318')
# Create NDVI and NDWI spectral indices.
ndvi = image.normalizedDifference(['B5', 'B4'])
ndwi = image.normalizedDifference(['B3', 'B5'])
# Create a binary layer using logical operations.
bare = ndvi.lt(0.2).And(ndwi.lt(0))
# Mask and display the binary layer.
Map.setCenter(-122.3578, 37.7726, 12)
Map.addLayer(bare.updateMask(bare), {}, 'bare')
Map.addLayerControl()
Map
###Output
_____no_output_____
###Markdown
As illustrated by this example, the output of relational and boolean operators is either True (1) or False (0). To mask the 0's, you can mask the resultant binary image with itself. The binary images that are returned by relational and boolean operators can be used with mathematical operators. This example creates zones of urbanization in a nighttime lights image using relational operators and `image.add()`:
###Code
Map = emap.Map()
# Load a 2012 nightlights image.
nl2012 = ee.Image('NOAA/DMSP-OLS/NIGHTTIME_LIGHTS/F182012')
lights = nl2012.select('stable_lights')
Map.addLayer(lights, {}, 'Nighttime lights')
# Define arbitrary thresholds on the 6-bit stable lights band.
zones = lights.gt(30).add(lights.gt(55)).add(lights.gt(62))
# Display the thresholded image as three distinct zones near Paris.
palette = ['000000', '0000FF', '00FF00', 'FF0000']
Map.setCenter(2.373, 48.8683, 8)
Map.addLayer(zones, {'min': 0, 'max': 3, 'palette': palette}, 'development zones')
Map.addLayerControl()
Map
###Output
_____no_output_____
###Markdown
Note that the code in the previous example is equivalent to using a [ternary operator](http://en.wikipedia.org/wiki/%3F:) implemented by `expression()`:
###Code
Map = emap.Map()
# Create zones using an expression, display.
zonesExp = nl2012.expression(
"(b('stable_lights') > 62) ? 3" +
": (b('stable_lights') > 55) ? 2" +
": (b('stable_lights') > 30) ? 1" +
": 0"
)
Map.addLayer(zonesExp,
{'min': 0, 'max': 3, 'palette': palette},
'development zones (ternary)')
Map.setCenter(2.373, 48.8683, 8)
Map.addLayerControl()
Map
###Output
_____no_output_____
###Markdown
Observe that in the previous expression example, the band of interest is referenced using the`b()` function, rather than a dictionary of variable names. (Learn more about image expressions on [this page](https://developers.google.com/earth-engine/image_mathexpressions). Using either mathematical operators or an expression, the output is the same and should look something like Figure 2.Another way to implement conditional operations on images is with the `image.where()` operator. Consider the need to replace masked pixels with some other data. In the following example, cloudy pixels are replaced by pixels from a cloud-free image using `where()`:
###Code
Map = emap.Map()
# Load a cloudy Landsat 8 image.
image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20130603')
Map.addLayer(image,
{'bands': ['B5', 'B4', 'B3'], 'min': 0, 'max': 0.5},
'original image')
# Load another image to replace the cloudy pixels.
replacement = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20130416')
# Compute a cloud score band.
cloud = ee.Algorithms.Landsat.simpleCloudScore(image).select('cloud')
# Set cloudy pixels to the other image.
replaced = image.where(cloud.gt(10), replacement)
# Display the result.
Map.centerObject(image, 9)
Map.addLayer(replaced,
{'bands': ['B5', 'B4', 'B3'], 'min': 0, 'max': 0.5},
'clouds replaced')
Map.addLayerControl()
Map
###Output
_____no_output_____
###Markdown
View source on GitHub Notebook Viewer Run in Google Colab Relational, conditional and Boolean operationsTo perform per-pixel comparisons between images, use relational operators. To extract urbanized areas in an image, this example uses relational operators to threshold spectral indices, combining the thresholds with `And()`: Install Earth Engine API and geemapInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemapdependencies), including earthengine-api, folium, and ipyleaflet.**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
###Code
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as emap
except:
import geemap as emap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
###Output
_____no_output_____
###Markdown
Create an interactive map The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.pyL13) can be added using the `Map.add_basemap()` function.
###Code
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
Map
###Output
_____no_output_____
###Markdown
Add Earth Engine Python script
###Code
# Load a Landsat 8 image.
image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318')
# Create NDVI and NDWI spectral indices.
ndvi = image.normalizedDifference(['B5', 'B4'])
ndwi = image.normalizedDifference(['B3', 'B5'])
# Create a binary layer using logical operations.
bare = ndvi.lt(0.2).And(ndwi.lt(0))
# Mask and display the binary layer.
Map.setCenter(-122.3578, 37.7726, 12)
Map.addLayer(bare.updateMask(bare), {}, 'bare')
Map.addLayerControl()
Map
###Output
_____no_output_____
###Markdown
As illustrated by this example, the output of relational and boolean operators is either True (1) or False (0). To mask the 0's, you can mask the resultant binary image with itself. The binary images that are returned by relational and boolean operators can be used with mathematical operators. This example creates zones of urbanization in a nighttime lights image using relational operators and `image.add()`:
###Code
Map = emap.Map()
# Load a 2012 nightlights image.
nl2012 = ee.Image('NOAA/DMSP-OLS/NIGHTTIME_LIGHTS/F182012')
lights = nl2012.select('stable_lights')
Map.addLayer(lights, {}, 'Nighttime lights')
# Define arbitrary thresholds on the 6-bit stable lights band.
zones = lights.gt(30).add(lights.gt(55)).add(lights.gt(62))
# Display the thresholded image as three distinct zones near Paris.
palette = ['000000', '0000FF', '00FF00', 'FF0000']
Map.setCenter(2.373, 48.8683, 8)
Map.addLayer(zones, {'min': 0, 'max': 3, 'palette': palette}, 'development zones')
Map.addLayerControl()
Map
###Output
_____no_output_____
###Markdown
Note that the code in the previous example is equivalent to using a [ternary operator](http://en.wikipedia.org/wiki/%3F:) implemented by `expression()`:
###Code
Map = emap.Map()
# Create zones using an expression, display.
zonesExp = nl2012.expression(
"(b('stable_lights') > 62) ? 3" +
": (b('stable_lights') > 55) ? 2" +
": (b('stable_lights') > 30) ? 1" +
": 0"
)
Map.addLayer(zonesExp,
{'min': 0, 'max': 3, 'palette': palette},
'development zones (ternary)')
Map.setCenter(2.373, 48.8683, 8)
Map.addLayerControl()
Map
###Output
_____no_output_____
###Markdown
Observe that in the previous expression example, the band of interest is referenced using the`b()` function, rather than a dictionary of variable names. (Learn more about image expressions on [this page](https://developers.google.com/earth-engine/image_mathexpressions). Using either mathematical operators or an expression, the output is the same and should look something like Figure 2.Another way to implement conditional operations on images is with the `image.where()` operator. Consider the need to replace masked pixels with some other data. In the following example, cloudy pixels are replaced by pixels from a cloud-free image using `where()`:
###Code
Map = emap.Map()
# Load a cloudy Landsat 8 image.
image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20130603')
Map.addLayer(image,
{'bands': ['B5', 'B4', 'B3'], 'min': 0, 'max': 0.5},
'original image')
# Load another image to replace the cloudy pixels.
replacement = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20130416')
# Compute a cloud score band.
cloud = ee.Algorithms.Landsat.simpleCloudScore(image).select('cloud')
# Set cloudy pixels to the other image.
replaced = image.where(cloud.gt(10), replacement)
# Display the result.
Map.centerObject(image, 9)
Map.addLayer(replaced,
{'bands': ['B5', 'B4', 'B3'], 'min': 0, 'max': 0.5},
'clouds replaced')
Map.addLayerControl()
Map
###Output
_____no_output_____
###Markdown
View source on GitHub Notebook Viewer Run in Google Colab Relational, conditional and Boolean operationsTo perform per-pixel comparisons between images, use relational operators. To extract urbanized areas in an image, this example uses relational operators to threshold spectral indices, combining the thresholds with `And()`: Install Earth Engine API and geemapInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemapdependencies), including earthengine-api, folium, and ipyleaflet.**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.foliumap`](https://github.com/giswqs/geemap/blob/master/geemap/foliumap.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
###Code
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.foliumap as emap
except:
import geemap as emap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
###Output
_____no_output_____
###Markdown
Create an interactive map The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.pyL13) can be added using the `Map.add_basemap()` function.
###Code
Map = emap.Map(center=[40, -100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
Map
###Output
_____no_output_____
###Markdown
Add Earth Engine Python script
###Code
# Load a Landsat 8 image.
image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318')
# Create NDVI and NDWI spectral indices.
ndvi = image.normalizedDifference(['B5', 'B4'])
ndwi = image.normalizedDifference(['B3', 'B5'])
# Create a binary layer using logical operations.
bare = ndvi.lt(0.2).And(ndwi.lt(0))
# Mask and display the binary layer.
Map.setCenter(-122.3578, 37.7726, 12)
Map.addLayer(bare.updateMask(bare), {}, 'bare')
Map.addLayerControl()
Map
###Output
_____no_output_____
###Markdown
As illustrated by this example, the output of relational and boolean operators is either True (1) or False (0). To mask the 0's, you can mask the resultant binary image with itself. The binary images that are returned by relational and boolean operators can be used with mathematical operators. This example creates zones of urbanization in a nighttime lights image using relational operators and `image.add()`:
###Code
Map = emap.Map()
# Load a 2012 nightlights image.
nl2012 = ee.Image('NOAA/DMSP-OLS/NIGHTTIME_LIGHTS/F182012')
lights = nl2012.select('stable_lights')
Map.addLayer(lights, {}, 'Nighttime lights')
# Define arbitrary thresholds on the 6-bit stable lights band.
zones = lights.gt(30).add(lights.gt(55)).add(lights.gt(62))
# Display the thresholded image as three distinct zones near Paris.
palette = ['000000', '0000FF', '00FF00', 'FF0000']
Map.setCenter(2.373, 48.8683, 8)
Map.addLayer(zones, {'min': 0, 'max': 3, 'palette': palette}, 'development zones')
Map.addLayerControl()
Map
###Output
_____no_output_____
###Markdown
Note that the code in the previous example is equivalent to using a [ternary operator](http://en.wikipedia.org/wiki/%3F:) implemented by `expression()`:
###Code
Map = emap.Map()
# Create zones using an expression, display.
zonesExp = nl2012.expression(
"(b('stable_lights') > 62) ? 3"
+ ": (b('stable_lights') > 55) ? 2"
+ ": (b('stable_lights') > 30) ? 1"
+ ": 0"
)
Map.addLayer(
zonesExp, {'min': 0, 'max': 3, 'palette': palette}, 'development zones (ternary)'
)
Map.setCenter(2.373, 48.8683, 8)
Map.addLayerControl()
Map
###Output
_____no_output_____
###Markdown
Observe that in the previous expression example, the band of interest is referenced using the `b()` function, rather than a dictionary of variable names. (Learn more about image expressions on [this page](https://developers.google.com/earth-engine/image_math#expressions).) Using either mathematical operators or an expression, the output is the same and should look something like Figure 2. Another way to implement conditional operations on images is with the `image.where()` operator. Consider the need to replace masked pixels with some other data. In the following example, cloudy pixels are replaced by pixels from a cloud-free image using `where()`:
###Code
Map = emap.Map()
# Load a cloudy Landsat 8 image.
image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20130603')
Map.addLayer(
image, {'bands': ['B5', 'B4', 'B3'], 'min': 0, 'max': 0.5}, 'original image'
)
# Load another image to replace the cloudy pixels.
replacement = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20130416')
# Compute a cloud score band.
cloud = ee.Algorithms.Landsat.simpleCloudScore(image).select('cloud')
# Set cloudy pixels to the other image.
replaced = image.where(cloud.gt(10), replacement)
# Display the result.
Map.centerObject(image, 9)
Map.addLayer(
replaced, {'bands': ['B5', 'B4', 'B3'], 'min': 0, 'max': 0.5}, 'clouds replaced'
)
Map.addLayerControl()
Map
###Output
_____no_output_____
###Markdown
View source on GitHub Notebook Viewer Run in Google Colab Relational, conditional and Boolean operations. To perform per-pixel comparisons between images, use relational operators. To extract urbanized areas in an image, this example uses relational operators to threshold spectral indices, combining the thresholds with `And()`: Install Earth Engine API and geemap. Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`. The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet. **Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front end and the back end, enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
###Code
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as emap
except:
import geemap as emap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
###Output
_____no_output_____
###Markdown
Create an interactive map. The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function.
###Code
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
Map
###Output
_____no_output_____
###Markdown
Add Earth Engine Python script
###Code
# Load a Landsat 8 image.
image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318')
# Create NDVI and NDWI spectral indices.
ndvi = image.normalizedDifference(['B5', 'B4'])
ndwi = image.normalizedDifference(['B3', 'B5'])
# Create a binary layer using logical operations.
bare = ndvi.lt(0.2).And(ndwi.lt(0))
# Mask and display the binary layer.
Map.setCenter(-122.3578, 37.7726, 12)
Map.addLayer(bare.updateMask(bare), {}, 'bare')
Map.addLayerControl()
Map
###Output
_____no_output_____
###Markdown
As illustrated by this example, the output of relational and boolean operators is either True (1) or False (0). To mask the 0's, you can mask the resultant binary image with itself. The binary images that are returned by relational and boolean operators can be used with mathematical operators. This example creates zones of urbanization in a nighttime lights image using relational operators and `image.add()`:
###Code
Map = emap.Map()
# Load a 2012 nightlights image.
nl2012 = ee.Image('NOAA/DMSP-OLS/NIGHTTIME_LIGHTS/F182012')
lights = nl2012.select('stable_lights')
Map.addLayer(lights, {}, 'Nighttime lights')
# Define arbitrary thresholds on the 6-bit stable lights band.
zones = lights.gt(30).add(lights.gt(55)).add(lights.gt(62))
# Display the thresholded image as three distinct zones near Paris.
palette = ['000000', '0000FF', '00FF00', 'FF0000']
Map.setCenter(2.373, 48.8683, 8)
Map.addLayer(zones, {'min': 0, 'max': 3, 'palette': palette}, 'development zones')
Map.addLayerControl()
Map
###Output
_____no_output_____
###Markdown
Note that the code in the previous example is equivalent to using a [ternary operator](http://en.wikipedia.org/wiki/%3F:) implemented by `expression()`:
###Code
Map = emap.Map()
# Create zones using an expression, display.
zonesExp = nl2012.expression(
"(b('stable_lights') > 62) ? 3" +
": (b('stable_lights') > 55) ? 2" +
": (b('stable_lights') > 30) ? 1" +
": 0"
)
Map.addLayer(zonesExp,
{'min': 0, 'max': 3, 'palette': palette},
'development zones (ternary)')
Map.setCenter(2.373, 48.8683, 8)
Map.addLayerControl()
Map
###Output
_____no_output_____
###Markdown
Observe that in the previous expression example, the band of interest is referenced using the `b()` function, rather than a dictionary of variable names. (Learn more about image expressions on [this page](https://developers.google.com/earth-engine/image_math#expressions).) Using either mathematical operators or an expression, the output is the same and should look something like Figure 2. Another way to implement conditional operations on images is with the `image.where()` operator. Consider the need to replace masked pixels with some other data. In the following example, cloudy pixels are replaced by pixels from a cloud-free image using `where()`:
###Code
Map = emap.Map()
# Load a cloudy Landsat 8 image.
image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20130603')
Map.addLayer(image,
{'bands': ['B5', 'B4', 'B3'], 'min': 0, 'max': 0.5},
'original image')
# Load another image to replace the cloudy pixels.
replacement = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20130416')
# Compute a cloud score band.
cloud = ee.Algorithms.Landsat.simpleCloudScore(image).select('cloud')
# Set cloudy pixels to the other image.
replaced = image.where(cloud.gt(10), replacement)
# Display the result.
Map.centerObject(image, 9)
Map.addLayer(replaced,
{'bands': ['B5', 'B4', 'B3'], 'min': 0, 'max': 0.5},
'clouds replaced')
Map.addLayerControl()
Map
###Output
_____no_output_____ |
examples/tutorials/advanced/Split Neural Network/SplitNN Introduction.ipynb | ###Markdown
Introduction to Split Neural Network (SplitNN)Traditionally, PySyft has been used to facilitate federated learning. However, we can also leverage the tools included in this framework to implement distributed neural networks. What is a SplitNN?The training of a neural network (NN) is 'split' across one or more hosts. Each model segment is a self-contained NN that feeds into the segment in front. In this example Alice has unlabeled training data and the bottom of the network, whereas Bob has the corresponding labels and the top of the network. The image below shows this training process where Bob has all the labels and there are multiple Alices with X data [[1](https://arxiv.org/abs/1810.06060)]. Once Alice$_1$ has trained, she sends a copy of her trained bottom model to the next Alice. This continues until Alice$_n$ has trained. In this case, both parties can train the model without knowing each other's data or the full details of the model. When Alice is finished training, she passes her segment on to the next person with data. Why use a SplitNN?The SplitNN has been shown to provide a dramatic reduction in the computational burden of training while maintaining higher accuracy when training over a large number of clients [[2](https://arxiv.org/abs/1812.00564)]. In the figure below, the blue line denotes distributed deep learning using SplitNN, the red line indicates federated learning (FL) and the green line indicates Large Batch Stochastic Gradient Descent (LBSGD). Table 1 shows the computational resources consumed when training CIFAR 10 over VGG. These are a fraction of the resources of FL and LBSGD. Table 2 shows the bandwidth usage when training CIFAR 100 over ResNet. Federated learning is less bandwidth intensive with fewer than 100 clients. However, the SplitNN outperforms the other approaches as the number of clients grows [[2](https://arxiv.org/abs/1812.00564)]. Advantages- The accuracy should be identical to a non-split version of the same model, trained locally. - The model is distributed, meaning all segment holders must consent in order to aggregate the model at the end of training.- The scalability of this approach, in terms of both network and computational resources, could make it a valid alternative to FL and LBSGD, particularly on low-power devices.- This could be an effective mechanism for both horizontal and vertical data distributions.- As the computational cost is already quite low, the cost of applying homomorphic encryption is also minimised.- Only activation signals and their gradients are sent/received, meaning that malicious actors cannot use gradients of model parameters to reverse-engineer the original values. Constraints- A new technique with little surrounding literature; a large amount of comparison and evaluation is still to be performed- This approach requires all hosts to remain online during the entire learning process (less feasible for hand-held devices)- Not as established in privacy-preserving toolkits as FL and LBSGD- Activation signals and their corresponding gradients still have the capacity to leak information; however, this is yet to be fully addressed in the literature Tutorial This tutorial demonstrates a basic example of SplitNN which:- Has two participants: Alice and Bob. - Bob has labels - Alice has X values- Has two model segments. 
- Alice has the bottom half - Bob has the top half - Trains on the MNIST dataset. Authors:- Adam J Hall - Twitter: [@AJH4LL](https://twitter.com/AJH4LL) · GitHub: [@H4LL](https://github.com/H4LL)- Théo Ryffel - Twitter: [@theoryffel](https://twitter.com/theoryffel) · GitHub: [@LaRiffle](https://github.com/LaRiffle)
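Before diving into the distributed version below, here is a minimal single-machine sketch of the split forward/backward pass (added for illustration, not part of the original tutorial): the bottom segment produces an activation at the "cut layer", a detached copy is handed to the top segment, and the gradient at the cut layer is passed back so the bottom segment can finish backpropagation. The toy random tensors simply stand in for MNIST batches.
###Code
# Minimal local sketch of split training (no PySyft, toy data)
import torch
from torch import nn, optim
bottom = nn.Sequential(nn.Linear(784, 128), nn.ReLU())         # Alice's segment
top = nn.Sequential(nn.Linear(128, 10), nn.LogSoftmax(dim=1))  # Bob's segment
opts = [optim.SGD(m.parameters(), lr=0.03) for m in (bottom, top)]
x = torch.randn(64, 784)                # stand-in for a batch of flattened images
y = torch.randint(0, 10, (64,))         # stand-in for the labels Bob holds
for opt in opts:
    opt.zero_grad()
a = bottom(x)                           # Alice's forward pass up to the cut layer
a_sent = a.detach().requires_grad_()    # only this tensor would cross the network
pred = top(a_sent)                      # Bob completes the forward pass
loss = nn.NLLLoss()(pred, y)
loss.backward()                         # Bob backpropagates down to the cut layer
a.backward(a_sent.grad)                 # Alice resumes backprop with the returned gradient
for opt in opts:
    opt.step()
###Output
_____no_output_____
###Markdown
The full PySyft version of the same loop, with the two segments living on remote workers, follows.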
###Code
import numpy as np
import torch
import torchvision
import matplotlib.pyplot as plt
from time import time
from torchvision import datasets, transforms
from torch import nn, optim
import syft as sy
import time
hook = sy.TorchHook(torch)
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)),
])
trainset = datasets.MNIST('mnist', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
torch.manual_seed(0)
input_size = 784
hidden_sizes = [128, 640]
output_size = 10
models = [
nn.Sequential(
nn.Linear(input_size, hidden_sizes[0]),
nn.ReLU(),
nn.Linear(hidden_sizes[0], hidden_sizes[1]),
nn.ReLU(),
),
nn.Sequential(
nn.Linear(hidden_sizes[1], output_size),
nn.LogSoftmax(dim=1)
)
]
# Create optimisers for each segment and link to their segment
optimizers = [
optim.SGD(model.parameters(), lr=0.03,)
for model in models
]
# create some workers
alice = sy.VirtualWorker(hook, id="alice")
bob = sy.VirtualWorker(hook, id="bob")
workers = alice, bob
# Send Model Segments to starting locations
model_locations = [alice, bob]
for model, location in zip(models, model_locations):
model.send(location)
def train(x, target, models, optimizers):
# Training Logic
# 1) erase previous gradients (if they exist)
for opt in optimizers:
opt.zero_grad()
# 2) make a prediction
a = models[0](x)
# 3) break the computation graph link, and send the activation signal to the next model
remote_a = a.detach().move(models[1].location).requires_grad_()
    # 4) make a prediction on the next model using the received signal
pred = models[1](remote_a)
# 5) calculate how much we missed
criterion = nn.NLLLoss()
loss = criterion(pred, target)
# 6) figure out which weights caused us to miss
loss.backward()
    # 7) send the gradient of the received activation signal to the model behind
grad_a = remote_a.grad.copy().move(models[0].location)
# 8) backpropagate on bottom model given this gradient
a.backward(grad_a)
# 9) change the weights
for opt in optimizers:
opt.step()
# 10) print our progress
return loss.detach().get()
epochs = 15
for i in range(epochs):
running_loss = 0
for images, labels in trainloader:
images = images.send(alice)
images = images.view(images.shape[0], -1)
labels = labels.send(bob)
loss = train(images, labels, models, optimizers)
running_loss += loss
else:
print("Epoch {} - Training loss: {}".format(i, running_loss/len(trainloader)))
###Output
Epoch 0 - Training loss: 0.5366485714912415
Epoch 1 - Training loss: 0.2597832679748535
Epoch 2 - Training loss: 0.1963215470314026
Epoch 3 - Training loss: 0.160226508975029
Epoch 4 - Training loss: 0.13446640968322754
Epoch 5 - Training loss: 0.11603944003582001
Epoch 6 - Training loss: 0.10239192098379135
Epoch 7 - Training loss: 0.091356061398983
Epoch 8 - Training loss: 0.08140832185745239
Epoch 9 - Training loss: 0.0746765285730362
Epoch 10 - Training loss: 0.0682755559682846
Epoch 11 - Training loss: 0.06309953331947327
Epoch 12 - Training loss: 0.05793224275112152
Epoch 13 - Training loss: 0.05351302772760391
Epoch 14 - Training loss: 0.049453798681497574
|
Week 7 - Python-Pandas-Practice.ipynb | ###Markdown
Python | Pandas DataFrame What is Pandas? pandas is a software library written for the Python programming language for data manipulation and analysis. In particular, it offers data structures and operations for manipulating numerical tables and time series. What is a Pandas DataFrame? A Pandas DataFrame is a two-dimensional, size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns). A data frame is a two-dimensional data structure, i.e., data is aligned in a tabular fashion in rows and columns. A Pandas DataFrame consists of three principal components: the data, the rows, and the columns. A Pandas DataFrame can be created by loading datasets from existing storage; storage can be a SQL database, a CSV file, or an Excel file. A Pandas DataFrame can also be created from lists, a dictionary, a list of dictionaries, etc. A DataFrame can be created in different ways; here are some of them: Creating a dataframe using List:
###Code
# import pandas as pd
import pandas as pd
# list of strings
lyst = ['CSC', '102', 'is', 'the', 'best', 'course', 'ever']
# Calling DataFrame constructor on list
df = pd.DataFrame(lyst)
# Print the output.
df
###Output
_____no_output_____
###Markdown
Creating a dataframe using dict of narray/lists:
###Code
import pandas as pd
# intialise data of lists.
data = {'Name':['Angel', 'Precious', 'Kishi', 'Love'],
'Age':[20, 21, 19, 18]}
# Create DataFrame
df = pd.DataFrame(data)
# Print the output.
df
###Output
_____no_output_____
###Markdown
Column Selection:
###Code
# Import pandas package
import pandas as pd
# Define a dictionary containing employee data
data = {'Name':['Clement', 'Prince', 'Karol', 'Adaobi'],
'Age':['27', '24', '22', '32'],
'Address':['Abuja', 'Kano', 'Minna', 'Lagos'],
'Qualification':['Msc', 'MA', 'MCA', 'Phd']}
# Convert the dictionary into DataFrame
df = pd.DataFrame(data)
# select two columns
df[['Name', 'Qualification']]
###Output
_____no_output_____
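###Markdown
A quick aside (not part of the original lesson): selecting with a single column name returns a one-dimensional Series, while selecting with a list of names (as above) returns a DataFrame, even if the list holds only one name:
###Code
# Series vs. DataFrame when selecting columns
print(type(df['Name']))      # a pandas Series
print(type(df[['Name']]))    # a pandas DataFrame with a single column
###Output
_____no_output_____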
###Markdown
Row Selection: Pandas provides a unique method to retrieve rows from a data frame. The DataFrame.iloc[] method is used to retrieve rows from a Pandas DataFrame.
###Code
import pandas as pd
# Define a dictionary containing employee data
data = {'Name':['Oyinda', 'Maryam', 'Dumebi', 'Bisola'],
'Age':['27', '24', '22', '32'],
'Address':['Asaba', 'Maiduguri', 'Onitsha', 'Kwara'],
'Qualification':['Msc', 'MA', 'MCA', 'Phd']}
# Convert the dictionary into DataFrame
df = pd.DataFrame(data)
# select first row
df.iloc[0]
###Output
_____no_output_____
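###Markdown
As a small extra sketch (not in the original lesson), iloc also accepts slices and lists of positions, so several rows can be pulled out at once from the same df:
###Code
# select the first two rows, then rows 0 and 2
print(df.iloc[0:2])
print(df.iloc[[0, 2]])
###Output
_____no_output_____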
###Markdown
Read from a file:
###Code
# importing pandas package
import pandas as pd
# making data frame from csv file
data = pd.read_csv("bcg.csv")
# print excel
data
###Output
_____no_output_____
###Markdown
Select first row from file
###Code
# importing pandas package
import pandas as pd
# making data frame from csv file
data = pd.read_csv("bcg.csv")
df = data.iloc[0]
# print excel
df
###Output
_____no_output_____
###Markdown
Selecting Row with Title Header
###Code
# importing pandas package
import pandas as pd
# making data frame from csv file
data = pd.read_csv("bcg.csv")
df = data.head(1)
# print excel
df
###Output
_____no_output_____
###Markdown
Looping over rows and columns. A loop is a general term for taking each item of something, one after another. A Pandas DataFrame consists of rows and columns, so in order to loop over a dataframe we have to iterate over it much like a dictionary. In order to iterate over rows, we can use the iterrows() and itertuples() functions (note that iteritems() iterates over columns rather than rows); a small itertuples() sketch is included after the iterrows() example below.
###Code
# importing pandas as pd
import pandas as pd
# dictionary of lists
dict = {'name':["Abdurrahman", "Chukwuemeka", "Somebi", "Michael, Dejo"],
'degree': ["MBA", "BCA", "M.Tech", "MBA"],
'score':[90, 40, 80, 98]}
# creating a dataframe from a dictionary
df = pd.DataFrame(dict)
# iterating over rows using iterrows() function
for i, j in df.iterrows():
print(i, j)
print()
###Output
0 name Abdurrahman
degree MBA
score 90
Name: 0, dtype: object
1 name Chukwuemeka
degree BCA
score 40
Name: 1, dtype: object
2 name Somebi
degree M.Tech
score 80
Name: 2, dtype: object
3 name Michael, Dejo
degree MBA
score 98
Name: 3, dtype: object
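###Markdown
As mentioned above, itertuples() is another way to walk over rows. This small sketch (not part of the original lesson) reuses the df defined in the previous cell and yields each row as a named tuple, which is usually faster than iterrows():
###Code
# iterating over rows using the itertuples() function
for row in df.itertuples():
    print(row.Index, row.name, row.degree, row.score)
###Output
_____no_output_____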
###Markdown
Looping over Columns: In order to loop over columns, we need to create a list of dataframe columns and then iterate through that list to pull out the dataframe columns.
###Code
# importing pandas as pd
import pandas as pd
# dictionary of lists
dict = {'name':["Bimpe", "Kamara", "Ugochi", "David"],
'degree': ["MBA", "BCA", "M.Tech", "MBA"],
'score':[90, 40, 80, 98]}
# creating a dataframe from a dictionary
df = pd.DataFrame(dict)
# creating a list of dataframe columns
columns = list(df)
for i in columns:
# printing the third element of the column
print (df[i][2])
###Output
Ugochi
M.Tech
80
###Markdown
Saving a DataFrame as CSV file
###Code
# importing pandas as pd
import pandas as pd
# dictionary of lists
blades = {'name':["Ebube", "Kamsi", "Oyinkan", "Chima"],
'degree': ["MBA", "BCA", "M.Tech", "MBA"],
'score':[90, 40, 80, 98]}
# creating a dataframe from a dictionary
df = pd.DataFrame(blades)
# saving the dataframe
df.to_csv('blades1.csv')
###Output
_____no_output_____
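###Markdown
One optional note (not part of the original lesson): by default to_csv() also writes the DataFrame's index as an extra first column. Passing index=False saves only the data columns; the filename below is just an illustrative example:
###Code
# saving the dataframe without the index column
df.to_csv('blades1_no_index.csv', index=False)
###Output
_____no_output_____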
###Markdown
Solution to Question II
###Code
# importing pandas as pd
import pandas as pd
data = pd.DataFrame({'Employee Names':['Alegbe Luis', 'Anna Mabuta', 'Karim Kafi', 'Esther Moses', 'Jonah Longe', 'Coins Fagbemi'],
"Years":[5,10,15,8,4,20],
"Assesment Records": [44.5,67.4,23.8,71.1,50.3,63.3],
})
data
points=[]
rewards=[]
n = len(data)
access_rec = data['Assesment Records']
for i in range(n):
if (access_rec[i] < 40.0 ):
point = 1
elif (access_rec[i] > 39.0 and access_rec[i] < 50.0 ):
point = 2
elif (access_rec[i] > 49.0 and access_rec[i] < 60.0):
point = 3
elif (access_rec[i] > 59.0 and access_rec[i] < 70.0):
point = 4
else:
point = 5
reward = (access_rec[i]*point/6)
points.append(point)
rewards.append("%.2f" % reward)
data['Points']=(points)
data['Rewards']=(rewards)
data
# Save to excel(csv)
data.to_csv('bcg_records.csv')
###Output
_____no_output_____
###Markdown
Class Project I Go to www.kaggle.com. Kaggle allows users to find and publish data sets, explore and build models in a web-based data-science environment, work with other data scientists and machine learning engineers, and enter competitions to solve data science challenges. Download the following datasets: 1. Top Apps in Google Play 2. Cryptocurrency Predict Artificial Intelligence V3 3. Programming Languages and File Format Detection. Clue: You can sign in with either a Google, Facebook or LinkedIn account. Task: Display the first 7 rows of each dataset. Select the first 3 columns of each dataset. Display only one row and header of each dataset.
###Code
import pandas as pd
banana = pd.read_csv('Top-Apps-in-Google-Play.csv')
df = banana.head(7)
df
###Output
_____no_output_____
###Markdown
TASK 2 COLUMNS
###Code
import pandas as pd
monkey = pd.read_csv('Top-Apps-in-Google-Play.csv')
df = monkey.head(7)
df[['App Name','App Id','Category']]
###Output
_____no_output_____
###Markdown
TASK 3
###Code
import pandas as pd
monkey = pd.read_csv('Top-Apps-in-Google-Play.csv')
df = monkey.head(1)
df
import pandas as pd
banana = pd.read_csv('dataset.csv.zip')
df = banana.head(7)
df
###Output
_____no_output_____
###Markdown
TASK 2
###Code
import pandas as pd
banana = pd.read_csv('README.md')
df = banana.head(7)
df[[]]
import pandas as pd
mango = pd.read_csv('dataset.csv.zip')
df = mango.head(7)
df
###Output
_____no_output_____
###Markdown
Python | Pandas DataFrame What is Pandas? pandas is a software library written for the Python programming language for data manipulation and analysis. In particular, it offers data structures and operations for manipulating numerical tables and time series. What is a Pandas DataFrame? A Pandas DataFrame is a two-dimensional, size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns). A data frame is a two-dimensional data structure, i.e., data is aligned in a tabular fashion in rows and columns. A Pandas DataFrame consists of three principal components: the data, the rows, and the columns. A Pandas DataFrame can be created by loading datasets from existing storage; storage can be a SQL database, a CSV file, or an Excel file. A Pandas DataFrame can also be created from lists, a dictionary, a list of dictionaries, etc. A DataFrame can be created in different ways; here are some of them: Creating a dataframe using List:
###Code
# import pandas as pd
import pandas as pd
# list of strings
lyst = ['CSC', '102', 'is', 'the', 'best', 'course', 'ever']
# Calling DataFrame constructor on list
df = pd.DataFrame(lyst)
# Print the output.
df
###Output
_____no_output_____
###Markdown
Creating a dataframe using dict of narray/lists:
###Code
import pandas as pd
# intialise data of lists.
data = {'Name':['Angel', 'Precious', 'Kishi', 'Love'],
'Age':[20, 21, 19, 18]}
# Create DataFrame
df = pd.DataFrame(data)
# Print the output.
df
###Output
_____no_output_____
###Markdown
Column Selection:
###Code
# Import pandas package
import pandas as pd
# Define a dictionary containing employee data
data = {'Name':['Clement', 'Prince', 'Karol', 'Adaobi'],
'Age':[27, 24, 22, 32],
'Address':['Abuja', 'Kano', 'Minna', 'Lagos'],
'Qualification':['Msc', 'MA', 'MCA', 'Phd']}
# Convert the dictionary into DataFrame
df = pd.DataFrame(data)
# select two columns
df[['Name', 'Qualification']]
###Output
_____no_output_____
###Markdown
Row Selection: Pandas provides a unique method to retrieve rows from a data frame. The DataFrame.iloc[] method is used to retrieve rows from a Pandas DataFrame.
###Code
import pandas as pd
# Define a dictionary containing employee data
data = {'Name':['Oyinda', 'Maryam', 'Dumebi', 'Bisola'],
'Age':[27, 24, 22, 32],
'Address':['Asaba', 'Maiduguri', 'Onitsha', 'Kwara'],
'Qualification':['Msc', 'MA', 'MCA', 'Phd']}
# Convert the dictionary into DataFrame
df = pd.DataFrame(data)
# select first row
df.iloc[0]
###Output
_____no_output_____
###Markdown
Read from a file:
###Code
# importing pandas package
import pandas as pd
# making data frame from csv file
data = pd.read_csv("bcg.csv")
# print excel
data
###Output
_____no_output_____
###Markdown
Select first row from file
###Code
# importing pandas package
import pandas as pd
# making data frame from csv file
data = pd.read_csv("bcg.csv")
df=data.iloc[0]
# print excel
df
###Output
_____no_output_____
###Markdown
Selecting Row with Title Header
###Code
# importing pandas package
import pandas as pd
# making data frame from csv file
data = pd.read_csv("bcg.csv")
df=data.head(1)
# print excel
df
###Output
_____no_output_____
###Markdown
Looping over rows and columns. A loop is a general term for taking each item of something, one after another. A Pandas DataFrame consists of rows and columns, so in order to loop over a dataframe we have to iterate over it much like a dictionary. In order to iterate over rows, we can use the iterrows() and itertuples() functions (note that iteritems() iterates over columns rather than rows).
###Code
# importing pandas as pd
import pandas as pd
# dictionary of lists
dict = {'name':["Abdurrahman", "Chukwuemeka", "Somebi", "Michael, Dejo"],
'degree': ["MBA", "BCA", "M.Tech", "MBA"],
'score':[90, 40, 80, 98]}
# creating a dataframe from a dictionary
df = pd.DataFrame(dict)
# iterating over rows using iterrows() function
for i, j in df.iterrows():
print(i, j)
print()
###Output
0 name Abdurrahman
degree MBA
score 90
Name: 0, dtype: object
1 name Chukwuemeka
degree BCA
score 40
Name: 1, dtype: object
2 name Somebi
degree M.Tech
score 80
Name: 2, dtype: object
3 name Michael, Dejo
degree MBA
score 98
Name: 3, dtype: object
###Markdown
Looping over Columns: In order to loop over columns, we need to create a list of dataframe columns and then iterate through that list to pull out the dataframe columns.
###Code
# importing pandas as pd
import pandas as pd
# dictionary of lists
dict = {'name':["Bimpe", "Kamara", "Ugochi", "David"],
'degree': ["MBA", "BCA", "M.Tech", "MBA"],
'score':[90, 40, 80, 98]}
# creating a dataframe from a dictionary
df = pd.DataFrame(dict)
# creating a list of dataframe columns
columns = list(df)
for i in columns:
# printing the third element of the column
print (df[i][2])
###Output
Ugochi
M.Tech
80
###Markdown
Saving a DataFrame as CSV file
###Code
# importing pandas as pd
import pandas as pd
# dictionary of lists
blade = {'name':["Ebube", "Kamsi", "Oyinkan", "Chima"],
'degree': ["MBA", "BCA", "M.Tech", "MBA"],
'score':[90, 40, 80, 98]}
# creating a dataframe from a dictionary
df = pd.DataFrame(blade)
# saving the dataframe
df.to_csv('blade.csv')
###Output
_____no_output_____
###Markdown
Solution to Question II
###Code
# importing pandas as pd
import pandas as pd
data = pd.DataFrame({'Employee Names':['Alegbe Luis', 'Anna Mabuta', 'Karim Kafi', 'Esther Moses', 'Jonah Longe', 'Coins Fagbemi'],
"Years":[5,10,15,8,4,20],
"Assesment Records": [44.5,67.4,23.8,71.1,50.3,63.3],
})
data
points=[]
rewards=[]
n = len(data)
access_rec = data['Assesment Records']
for i in range(n):
if (access_rec[i] < 40.0 ):
point = 1
elif (access_rec[i] > 39.0 and access_rec[i] < 50.0 ):
point = 2
elif (access_rec[i] > 49.0 and access_rec[i] < 60.0):
point = 3
elif (access_rec[i] > 59.0 and access_rec[i] < 70.0):
point = 4
else:
point = 5
reward = (access_rec[i]*point/6)
points.append(point)
rewards.append("%.2f" % reward)
data['Points']=(points)
data['Rewards']=(rewards)
data
# Save to excel(csv)
data.to_csv('bcg_records.csv')
###Output
_____no_output_____
###Markdown
Class Project I Go to www.kaggle.com. Kaggle allows users to find and publish data sets, explore and build models in a web-based data-science environment, work with other data scientists and machine learning engineers, and enter competitions to solve data science challenges. Download the following datasets: 1. Top Apps in Google Play 2. Cryptocurrency Predict Artificial Intelligence V3 3. Programming Languages and File Format Detection. Clue: You can sign in with either a Google, Facebook or LinkedIn account. Task: Display the first 7 rows of each dataset. Select the first 3 columns of each dataset. Display only one row and header of each dataset.
###Code
import pandas as pd
data = pd.read_csv("Top-Apps-in-Google-Play.csv")
# display the first 7 rows one at a time
for table in range(7):
    df = data.iloc[table]
    print("Row", table, "of the first 7 rows:\n", df)
import pandas as pd
data = pd.read_csv("Top-Apps-in-Google-Play.csv")
df = pd.DataFrame(data)
columns = list(df)
for i in columns:
print (df[i][2])
import pandas as pd
data = pd.read_csv("Top-Apps-in-Google-Play.csv")
df = pd.DataFrame(data)
column = list(df)
df[['App Name','App Id','Category']]
###Output
_____no_output_____
###Markdown
Class Project II Cadbury Nigeria Plc manufactures and sells branded fast moving consumer goods to the Nigerian market and exports in West Africa. The Company produces intermediate products, such as cocoa butter, liquor, cake and powder. It exports cocoa butter, cake and liquor to international customers, and cocoa powder locally. It operates through three segments: Refreshment Beverages, Confectionery and Intermediate Cocoa Products. The Refreshment Beverages segment includes the manufacture and sale of Bournvita and Hot Chocolate. The Confectionery segment includes the manufacture and sale of Tom Tom and Buttermint. The Intermediate Cocoa Products segment includes the manufacture and sale of cocoa powder, cocoa butter, cocoa liquor and cocoa cake. The Refreshment Beverages' brands include CADBURY BOURNVITA and CADBURY 3-in-1 HOT CHOCOLATE. The Confectionery's brands include TOMTOM CLASSIC, TOMTOM STRAWBERRY and BUTTERMINT. The Intermediate Cocoa Products' brands include COCOA POWDER and COCOA BUTTER.You have been employed as an expert python developer to create a program to document the consumption categories of their products and brands. Using your knowledge of Pandas DataFrames develop the program that saves the list of products (export, segments and brands) in a .csv excel file.Hint: save the filename as cadbury_market.csv.
###Code
import pandas as pd
cadbury_market = {'EXPORT':["Cocoa butter", "Cake", "Liquor"],
'SEGMENTS': ["Refreshment Beverages", "Confectionary Segment", "Intermediate Cocoa Products"],
'BRAND':[("CADBURY BOURNVITA,CADBURY 3-IN-1 HOT CHOCOLATE"),("TOMTOM CLASSIC,TOMTOM STRAWBERRY,BUTTERMINT"),("COCOA POWDER,COCOA BUTTER")]}
df = pd.DataFrame(cadbury_market)
df.to_csv('cadbury_market.csv')
###Output
_____no_output_____ |
regular-expressions-a-gentle-introduction.ipynb | ###Markdown
Regular expressions: A Gentle IntroductionBy [Allison Parrish](http://www.decontextualize.com/)A [regular expression](https://en.wikipedia.org/wiki/Regular_expression) is more than just a phrase that sounds like a euphemism for what happens when your diet includes enough fiber. It's a way of writing what amount to small programs for matching patterns in text that would otherwise be difficult to match with the regular toolbox of string filtering and searching tools. This tutorial will take you through the basics of using regular expressions in Python. But many (if not most) other programming languages also support regular expressions in some form or other ([like JavaScript](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions)), so the skills you'll learn here will apply to other languages as well. "Escape" sequences in stringsBefore we go into too much detail about regular expressions, I want to review with you how escape sequences work in Python strings.Inside of strings that you type into your Python code, there are certain sequences of characters that have a special meaning. These sequences start with a backslash character (`\`) and allow you to insert into your string characters that would otherwise be difficult to type, or that would go against Python syntax. Here's some code illustrating a few common sequences:
###Code
print("1. include \"double quotes\" (inside of a double-quoted string)")
print('2. include \'single quotes\' (inside of a single-quoted string)')
print("3. one\ttab, two\ttabs")
print("4. new\nline")
print("5. include an actual backslash \\ (two backslashes in the string)")
###Output
1. include "double quotes" (inside of a double-quoted string)
2. include 'single quotes' (inside of a single-quoted string)
3. one tab, two tabs
4. new
line
5. include an actual backslash \ (two backslashes in the string)
###Markdown
Regular expressions[So far, we've discussed how to write Python expressions that are able to check whether strings meet very simple criteria](expressions-and-strings.ipynb), such as “does this string begin with a particular character” or “does this string contain another string”? But imagine writing a program that performs the following task: find and print all ZIP codes in a string (i.e., a five-character sequence of digits). Give up? Here’s my attempt, using only the tools we’ve discussed so far:
###Code
input_str = "here's a zip code: 12345. 567 isn't a zip code, but 45678 is. 23456? yet another zip code."
current = ""
zips = []
for ch in input_str:
if ch in '0123456789':
current += ch
else:
current = ""
if len(current) == 5:
zips.append(current)
current = ""
zips
###Output
_____no_output_____
###Markdown
Basically, we have to iterate over each character in the string, check to see if that character is a digit, append to a string variable if so, continue reading characters until we reach a non-digit character, check to see if we found exactly five digit characters, and add it to a list if so. At the end, we print out the list that has all of our results. Problems with this code: it’s messy; it doesn’t overtly communicate what it’s doing; it’s not easily generalized to other, similar tasks (e.g., if we wanted to write a program that printed out phone numbers from a string, the code would likely look completely different).Our ancient UNIX pioneers had this problem, and in pursuit of a solution, thought to themselves, "Let’s make a tiny language that allows us to write specifications for textual patterns, and match those patterns against strings. No one will ever have to write fiddly code that checks strings character-by-character ever again." And thus regular expressions were born.Here's the code for accomplishing the same task with regular expressions, by the way:
###Code
import re
zips = re.findall(r"\d{5}", input_str)
zips
###Output
_____no_output_____
###Markdown
I’ll allow that the `r"\d{5}"` in there is mighty cryptic (though hopefully it won’t be when you’re done reading this page and/or participating in the associated lecture). But the overall structure of the program is much simpler. Fetching our corpusFor this section of class, we'll be using the subject lines of all e-mails in the [EnronSent corpus](http://verbs.colorado.edu/enronsent/), kindly put into the public domain by the United States Federal Energy Regulatory Commission. Download a copy of [this file](https://raw.githubusercontent.com/ledeprogram/courses/master/databases/data/enronsubjects.txt) and place it in the same directory as this notebook. Matching strings with regular expressionsThe most basic operation that regular expressions perform is matching strings: you’re asking the computer whether a particular string matches some description. We're going to be using regular expressions to print only those lines from our `enronsubjects.txt` corpus that match particular sequences. Let's load our corpus into a list of lines first:
###Code
subjects = [x.strip() for x in open("enronsubjects.txt").readlines()]
###Output
_____no_output_____
###Markdown
We can check whether or not a pattern matches a given string in Python with the `re.search()` function. The first parameter to search is the regular expression you're trying to match; the second parameter is the string you're matching against.Here's an example, using a very simple regular expression. The following code prints out only those lines in our Enron corpus that match the (very simple) regular expression `shipping`:
###Code
import re
[line for line in subjects if re.search("shipping", line)]
###Output
_____no_output_____
###Markdown
At its simplest, a regular expression matches a string if that string contains exactly the characters you've specified in the regular expression. So the expression `shipping` matches strings that contain exactly the sequences of `s`, `h`, `i`, `p`, `p`, `i`, `n`, and `g` in a row. If the regular expression matches, `re.search()` evaluates to `True` and the matching line is included in the evaluation of the list comprehension.> BONUS TECH TIP: `re.search()` doesn't actually evaluate to `True` or `False`---it evaluates to either a `Match` object if a match is found, or `None` if no match was found. Those two count as `True` and `False` for the purposes of an `if` statement, though. Metacharacters: character classesThe "shipping" example is pretty boring. (There was hardly any fan fiction in there at all.) Let's go a bit deeper into detail with what you can do with regular expressions. There are certain characters or strings of characters that we can insert into a regular expressions that have special meaning. For example:
###Code
[line for line in subjects if re.search("sh.pping", line)]
###Output
_____no_output_____
###Markdown
In a regular expression, the character `.` means "match any character here." So, using the regular expression `sh.pping`, we get lines that match `shipping` but also `shopping`. The `.` is an example of a regular expression *metacharacter*---a character (or string of characters) that has a special meaning.Here are a few more metacharacters. These metacharacters allow you to say that a character belonging to a particular *class* of characters should be matched in a particular position:| metacharacter | meaning ||---------------|---------|| `.` | match any character || `\w` | match any alphanumeric ("*w*ord") character (lowercase and capital letters, 0 through 9, underscore) || `\s` | match any whitespace character (i.e., space and tab) || `\S` | match any non-whitespace character (the inverse of \s) || `\d` | match any digit (0 through 9) || `\.` | match a literal `.` |Here, for example, is a (clearly imperfect) regular expression to search for all subject lines containing a time of day:
###Code
[line for line in subjects if re.search(r"\d:\d\d\wm", line)]
###Output
_____no_output_____
###Markdown
Here's that regular expression again: `r"\d:\d\d\wm"`. I'm going to show you how to read this, one unit at a time."Hey, regular expression engine. Tell me if you can find this pattern in the current string. First of all, look for any number (`\d`). If you find that, look for a colon right after it (`:`). If you find that, look for another number right after it (`\d`). If you find *that*, look for any alphanumeric character---you know, a letter, a number, an underscore. If you find that, then look for a `m`. Good? If you found all of those things in a row, then the pattern matched." But what about that weirdo `r""`?Python provides another way to include string literals in your program, in addition to the single- and double-quoted strings we've already discussed. The r"" string literal, or "raw" string, includes all characters inside the quotes literally, without interpolating special escape characters. Here's an example:
###Code
print("1. this is\na test")
print(r"2. this is\na test")
print("3. I love \\ backslashes!")
print(r"4. I love \ backslashes!")
###Output
1. this is
a test
2. this is\na test
3. I love \ backslashes!
4. I love \ backslashes!
###Markdown
As you can see, whereas a double- or single-quoted string literal interprets `\n` as a new line character, the raw quoted string includes those characters as they were literally written. More importantly, for our purposes at least, is the fact that, in the raw quoted string, we only need to write one backslash in order to get a literal backslash in our string.Why is this important? Because regular expressions use backslashes all the time, and we don't want Python to try to interpret those backslashes as special characters. (Inside a regular string, we'd have to write a simple regular expression like `\b\w+\b` as `\\b\\w+\\b`---yecch.)So the basic rule of thumb is this: use r"" to quote any regular expressions in your program. All of the examples you'll see below will use this convention. Character classes in-depthYou can define your own character classes by enclosing a list of characters, or range of characters, inside square brackets:| regex | explanation ||-------|-------------|| `[aeiou]` | matches any vowel || `[02468]` | matches any even digit || `[a-z]` | matches any lower-case letter || `[A-Z]` | matches any upper-case character || `[^0-9]` | matches any non-digit (the ^ inverts the class, matches anything not in the list) || `[Ee]` | matches either `E` or `e` |Let's find every subject line where we have four or more vowels in a row:
###Code
[line for line in subjects if re.search(r"[aeiou][aeiou][aeiou][aeiou]", line)]
###Output
_____no_output_____
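###Markdown
A quick extra sketch (not part of the original notebook): an inverted class from the table works the same way. For example, `[^aeiouAEIOU ]` matches any character that is not a vowel or a space, so the following finds subject lines containing six or more such characters in a row:
###Code
[line for line in subjects if re.search(r"[^aeiouAEIOU ]{6,}", line)][:10]
###Output
_____no_output_____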
###Markdown
Metacharacters: anchorsThe next important kind of metacharacter is the *anchor*. An anchor doesn't match a character, but matches a particular place in a string.| anchor | meaning ||--------|---------|| `^` | match at beginning of string || `$` | match at end of string || `\b` | match at word boundary |> Note: `^` in a character class has a different meaning from `^` outside a character class!> Note 2: If you want to search for a literal dollar sign (`$`), you need to put a backslash in front of it, like so: `\$`Now we have enough regular expression knowledge to do some fairly sophisticated matching. As an example, all the subject lines that begin with the string `New York`, regardless of whether or not the initial letters were capitalized:
###Code
[line for line in subjects if re.search(r"^[Nn]ew [Yy]ork", line)]
###Output
_____no_output_____
###Markdown
Every subject line that ends with an ellipsis (there are a lot of these, so I'm only displaying the first 30):
###Code
[line for line in subjects if re.search(r"\.\.\.$", line)][:30]
###Output
_____no_output_____
###Markdown
The first thirty subject lines containing the word "oil":
###Code
[line for line in subjects if re.search(r"\b[Oo]il\b", line)][:30]
###Output
_____no_output_____
###Markdown
Metacharacters: quantifiersAbove we had a regular expression that looked like this: [aeiou][aeiou][aeiou][aeiou] Typing out all of those things is kind of a pain. Fortunately, there’s a way to specify how many times to match a particular character, using quantifiers. These affect the character that immediately precede them:| quantifier | meaning ||------------|---------|| `{n}` | match exactly n times || `{n,m}` | match at least n times, but no more than m times || `{n,}` | match at least n times || `+` | match at least once (same as {1,}) || `*` | match zero or more times || `?` | match one time or zero times |For example, here's an example of a regular expression that finds subjects that contain at least fifteen capital letters in a row:
###Code
[line for line in subjects if re.search(r"[A-Z]{15,}", line)]
###Output
_____no_output_____
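###Markdown
The `{n,m}` form from the table works the same way. As a small added sketch (not in the original notebook), this finds lines containing a standalone run of two to three digits between word boundaries:
###Code
[line for line in subjects if re.search(r"\b\d{2,3}\b", line)][:10]
###Output
_____no_output_____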
###Markdown
Lines that contain five consecutive vowels:
###Code
[line for line in subjects if re.search(r"[aeiou]{5}", line)]
###Output
_____no_output_____
###Markdown
Count the number of lines that are e-mail forwards, regardless of whether the subject line begins with `Fw:`, `FW:`, `Fwd:` or `FWD:`
###Code
len([line for line in subjects if re.search(r"^F[Ww]d?:", line)])
###Output
_____no_output_____
###Markdown
Lines that have the word `news` in them and end in an exclamation point:
###Code
[line for line in subjects if re.search(r"\b[Nn]ews\b.*!$", line)]
###Output
_____no_output_____
###Markdown
Metacharacters: alternationOne final bit of regular expression syntax: alternation.* `(?:x|y)`: match either x or y* `(?:x|y|z)`: match x, y or z* etc.So for example, if you wanted to count every subject line that begins with either `Re:` or `Fwd:`:
###Code
len([line for line in subjects if re.search(r"^(?:Re|Fwd):", line)])
###Output
_____no_output_____
###Markdown
Every subject line that mentions kinds of cats:
###Code
[line for line in subjects if re.search(r"\b(?:[Cc]at|[Kk]itten|[Kk]itty)\b", line)]
###Output
_____no_output_____
###Markdown
Capturing what matchesThe `re.search()` function allows us to check to see *whether or not* a string matches a regular expression. Sometimes we want to find out not just if the string matches, but also to what, exactly, in the string matched. In other words, we want to *capture* whatever it was that matched.The easiest way to do this is with the `re.findall()` function, which takes a regular expression and a string to match it against, and returns a list of all parts of the string that the regular expression matched. Here's an example:
###Code
import re
re.findall(r"\b\w{5}\b", "alpha beta gamma delta epsilon zeta eta theta")
###Output
_____no_output_____
###Markdown
The regular expression above, `\b\w{5}\b`, means "find me strings of five word characters (letters, digits, or underscores) between word boundaries"---in other words, find me five-letter words. The `re.findall()` method returns a list of strings---not just telling us whether or not the string matched, but which parts of the string matched. For the following `re.findall()` examples, we'll be operating on the entire file of subject lines as a single string, instead of using a list comprehension for individual subject lines. Here's how to read in the entire file as one string, instead of as a list of strings:
###Code
all_subjects = open("enronsubjects.txt").read()
###Output
_____no_output_____
###Markdown
Having done that, let's write a regular expression that finds all domain names in the subject lines (displaying just the first thirty because the list is long):
###Code
re.findall(r"\b\w+\.(?:com|net|org)", all_subjects)[:30]
###Output
_____no_output_____
###Markdown
Every time the string `New York` is found, along with the word that comes directly afterward:
###Code
re.findall(r"New York \b\w+\b", all_subjects)
###Output
_____no_output_____
###Markdown
And just to bring things full-circle, everything that looks like a zip code, sorted:
###Code
sorted(re.findall(r"\b\d{5}\b", all_subjects))[:30]
###Output
_____no_output_____
###Markdown
Full example: finding the dollar value of the Enron e-mail subject corpusHere's an example that combines our regular expression prowess with our ability to do smaller manipulations on strings. We want to find all dollar amounts in the subject lines, and then figure out what their sum is.To understand what we're working with, let's start by writing a list comprehension that finds strings that just have the dollar sign (`$`) in them:
###Code
[line for line in subjects if re.search(r"\$", line)]
###Output
_____no_output_____
###Markdown
Based on this data, we can guess at the steps we'd need to do in order to figure out these values. We're going to ignore anything that doesn't have "k", "million" or "billion" after it as chump change. So what we need to find is: a dollar sign, followed by any series of numbers (or a period), followed potentially by a space (but sometimes not), followed by a "k", "m" or "b" (which will sometimes start the word "million" or "billion" but sometimes not... so we won't bother looking).Here's how I would translate that into a regular expression: \$[0-9.]+ ?(?:[Kk]|[Mm]|[Bb]) We can use `re.findall()` to capture all instances where we found this regular expression in the text. Here's what that would look like:
###Code
re.findall(r"\$[0-9.]+ ?(?:[Kk]|[Mm]|[Bb])", all_subjects)
###Output
_____no_output_____
###Markdown
If we want to actually make a sum, though, we're going to need to do a little massaging.
###Code
total_value = 0
dollar_amounts = re.findall(r"\$[0-9.]+ ?(?:[Kk]|[Mm]|[Bb])", all_subjects)  # same pattern as above, so decimal amounts are kept
for amount in dollar_amounts:
# the last character will be 'k', 'm', or 'b'; "normalize" by making lowercase.
multiplier = amount[-1].lower()
# trim off the beginning $ and ending multiplier value
amount = amount[1:-1]
# remove any remaining whitespace
amount = amount.strip()
# convert to a floating-point number
float_amount = float(amount)
# multiply by an amount, based on what the last character was
if multiplier == 'k':
float_amount = float_amount * 1000
elif multiplier == 'm':
float_amount = float_amount * 1000000
elif multiplier == 'b':
float_amount = float_amount * 1000000000
# add to total value
total_value = total_value + float_amount
total_value
###Output
_____no_output_____
###Markdown
That's over one trillion dollars! Nice work, guys. Finer-grained matches with groupingWe used `re.search()` above to check whether or not a string matches a particular regular expression, in a context like this:
###Code
import re
dickens = [
"it was the best of times",
"it was the worst of times"]
[line for line in dickens if re.search(r"best", line)]
###Output
_____no_output_____
###Markdown
But `re.search()` doesn't actually return `True` or `False`. If the search succeeds, it returns something called a "match object." Let's assign the result of `re.search()` to a variable and see what we can do with it.
###Code
source_string = "this example has been used 423 times"
match = re.search(r"\d\d\d", source_string)
type(match)
###Output
_____no_output_____
###Markdown
It's a value of type `_sre.SRE_Match`. This value has several methods that we can use to access helpful and interesting information about the way the regular expression matched the string. [Read more about the methods of the match object here](https://docs.python.org/2/library/re.html#match-objects). For example, we can see both where the match *started* in the string and where it *ended*, using the `.start()` and `.end()` methods. These methods return the indexes in the string where the regular expression matched.
###Code
match.start()
match.end()
###Output
_____no_output_____
###Markdown
Together, we can use these methods to grab exactly the part of the string that matched the regular expression, by using the start/end values to get a slice:
###Code
source_string[match.start():match.end()]
###Output
_____no_output_____
###Markdown
Because it's so common, there's a shortcut for this operation, which is the match object's `.group()` method:
###Code
match.group()
###Output
_____no_output_____
###Markdown
The `.group()` method of a match object, in other words, returns exactly the part of the string that matched the regular expression.As an example of how to use the match object and its `.group()` method in context, let's revisit the example from above which found every subject line in the Enron corpus that had fifteen or more consecutive capital letters. In that example, we could only display the *entire subject line*. If we wanted to show just the part of the string that matched (i.e., the sequence of fifteen or more capital letters), we could use `.group()`:
###Code
for line in subjects:
match = re.search(r"[A-Z]{15,}", line)
if match:
print(match.group())
###Output
CONGRATULATIONS
CONGRATULATIONS
PLEEEEEEEEEEEEEEEASE
ACCOMPLISHMENTS
ACCOMPLISHMENTS
CONFIDENTIALITY
CONFIDENTIALITY
CONGRATULATIONS
CONGRATULATIONS
ACKNOWLEDGEMENT
ACKNOWLEDGEMENT
CONGRATULATIONS
CONGRATULATIONS
CONGRATULATIONS
CONGRATULATIONS
CONGRATULATIONS
CONGRATULATIONS
CONGRATULATIONS
CONGRATULATIONS
CONGRATULATIONS
CONGRATULATIONS
INTERCONNECTION
INTERCONNECTION
INTERCONNECTION
INTERCONNECTION
INTERCONNECTION
CONGRATULATIONS
WASSSAAAAAAAAAAAAAABI
WASSSAAAAAAAAAAAAAABI
WASSSAAAAAAAAAAAAAABI
WASSSAAAAAAAAAAAAAABI
WASSSAAAAAAAAAAAAAABI
WASSSAAAAAAAAAAAAAABI
WASSSAAAAAAAAAAAAAABI
NOOOOOOOOOOOOOOOO
NOOOOOOOOOOOOOOOO
NOOOOOOOOOOOOOOOO
CONGRATULATIONS
CONGRATULATIONS
CONGRATULATIONS
CONGRATULATIONS
CONFIDENTIALITY
CONFIDENTIALITY
ACCOMPLISHMENTS
ACCOMPLISHMENTS
CONGRATULATIONS
STANDARDIZATION
STANDARDIZATION
STANDARDIZATION
STANDARDIZATION
BRRRRRRRRRRRRRRRRRRRRR
CONGRATULATIONS
CONGRATULATIONS
NETCOTRANSMISSION
NETCOTRANSMISSION
NETCOTRANSMISSION
INTERCONTINENTAL
INTERCONTINENTAL
###Markdown
An important thing to remember about `re.search()` is that it returns `None` if there is no match. For this reason, you always need to check to make sure the object is *not* `None` before you attempt to call the value's `.group()` method. This is the reason that it's difficult to write the above example as a list comprehension---you need to check the result of `re.search()` before you can use it. An attempt to do something like this, for example, will fail:
###Code
[re.search(r"[A-Z]{15,}", line).group() for line in subjects]
###Output
_____no_output_____
###Markdown
Python complains that `NoneType` has no `group()` method. This happens because sometimes the result of `re.search()` is `None`. We could, of course, write a little function to get around this limitation:
###Code
# make a function
def filter_and_group(source, regex):
return [re.search(regex, item).group() for item in source if re.search(regex, item)]
# now call it
filter_and_group(subjects, r"[A-Z]{15,}")
###Output
_____no_output_____
###Markdown
Multiple groups in one regular expressionSo `re.search()` lets us get the parts of a string that match a regular expression, using the `.group()` method of the match object it returns. You can get even finer-grained matches using a feature of regular expressions called *grouping*.Let's start with a toy example. Say you have a list of University courses in the following format:
###Code
courses = [
"CSCI 105: Introductory Programming for Cat-Lovers",
"LING 214: Pronouncing Things Backwards",
"ANTHRO 342: Theory and Practice of Cheesemongery (Graduate Seminar)",
"CSCI 205: Advanced Programming for Cat-Lovers",
"ENGL 112: Speculative Travel Writing"
]
###Output
_____no_output_____
###Markdown
Let's say you want to extract the following items from this data:* A unique list of all departments (e.g., CSCI, LING, ANTHRO, etc.)* A list of all course names* A dictionary with all of the 100-level classes, 200-level classes, and 300-level classesSomehow we need to get *three* items from each line of data: the department, the number, and the course name. You can do this easily with regular expressions using *grouping*. To use grouping, put parentheses (`()`) around the portions of the regular expression that are of interest to you. You can then use the `.groups()` (note the `s`!) function to get the portion of the string that matched the portion of the regular expression inside the parentheses individually. Here's what it looks like, just operating on the first item of the list:
###Code
first_course = courses[0]
match = re.search(r"(\w+) (\d+): (.+)$", first_course)
match.groups()
###Output
_____no_output_____
###Markdown
The regular expression in `re.search()` above roughly translates as the following:* Find me a sequence of one or more alphanumeric characters. Save this sequence as the first group.* Find a space.* Find me a sequence of one or more digits. Save this as the second group.* Find a colon followed by a space.* Find me one or more characters---I don't care which characters---and save the sequence as the third group.* Match the end of the line.Calling the `.groups()` method returns a tuple containing each of the saved items from the grouping. You can use it like so:
###Code
groups = match.groups()
print("Department:", groups[0]) # department
print("Course number:", groups[1]) # course number
print("Course name:", groups[2]) # course name
###Output
Department: CSCI
Course number: 105
Course name: Introductory Programming for Cat-Lovers
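###Markdown
A related feature, shown here only as a quick sketch, is *named* groups: writing `(?P<name>...)` in the pattern lets you ask the match object for groups by name (via `.group("name")` or `.groupdict()`) instead of by position:
###Code
named_match = re.search(r"(?P<dept>\w+) (?P<number>\d+): (?P<name>.+)$", first_course)
named_match.groupdict()
###Output
_____no_output_____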
###Markdown
Now let's iterate over the entire list of courses and put them in the data structure as appropriate:
###Code
departments = set()
course_names = []
course_levels = {}
for item in courses:
# search and create match object
match = re.search(r"(\w+) (\d+): (.+)$", item)
if match: # if there's a match...
groups = match.groups() # get the groups: 0 is department, 1 is course number, 2 is name
departments.add(groups[0]) # add to department set (we wanted a list of *unique* departments)
course_names.append(groups[2]) # add to list of courses
        level = (int(groups[1]) // 100) * 100 # get the course "level" (100, 200, 300) using integer division
        # add the level/course key-value pair to course_levels
        if level not in course_levels:
            course_levels[level] = []
        course_levels[level].append(groups[2])
###Output
_____no_output_____
###Markdown
After you run this cell, you can check out the unique list of departments:
###Code
departments
###Output
_____no_output_____
###Markdown
... the list of course names:
###Code
course_names
###Output
_____no_output_____
###Markdown
... and the dictionary that maps course "levels" to a list of courses at that level:
###Code
course_levels
###Output
_____no_output_____
###Markdown
Grouping with multiple matches in the same string

A problem with `re.search()` is that it only returns the *first* match in a string. What if we want to find *all* of the matches? It turns out that `re.findall()` *also* supports the regular expression grouping syntax. If the regular expression you pass to `re.findall()` includes any grouping parentheses, then the function returns not a list of strings, but a list of tuples, where each tuple has elements corresponding in order to the groups in the regular expression.

As a quick example, here's a test string with number names and digits, and a regular expression to extract all instances of a series of alphanumeric characters, followed by a space, followed by a single digit:
###Code
test = "one 1 two 2 three 3 four 4 five 5"
re.findall(r"(\w+) (\d)", test)
###Output
_____no_output_____
###Markdown
We can use this to extract every phone number from the Enron subjects corpus, separating out the components of the numbers by group:
###Code
re.findall(r"(\d\d\d)-(\d\d\d)-(\d\d\d\d)", all_subjects)
###Output
_____no_output_____
###Markdown
And then we can do a quick little data analysis on the frequency of area codes in these numbers, using the [Counter](https://docs.python.org/2/library/collections.html#counter-objects) object from the `collections` module:
###Code
from collections import Counter
area_codes = [item[0] for item in re.findall(r"(\d\d\d)-(\d\d\d)-(\d\d\d\d)", all_subjects)]
count = Counter(area_codes)
count.most_common(1)
###Output
_____no_output_____
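###Markdown
The same `Counter` object can, of course, give you more than just the single most frequent area code; for example, the top five:
###Code
count.most_common(5)
###Output
_____no_output_____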
###Markdown
Multiple match objects with `re.finditer()`

The `re` library also has a `re.finditer()` function, which returns not a list of matching strings in tuples (like `re.findall()`), but an iterator of *match objects*. This is useful if you need to know not just which text matched, but *where* in the text the match occurs. So, for example, to find the positions in the `all_subjects` corpus where the word "Oregon" occurs, regardless of capitalization:
###Code
[(match.start(), match.end(), match.group()) for match in re.finditer(r"[Oo]regon", all_subjects)]
###Output
_____no_output_____ |
02-CNN/Session2-Practice-HeadPoseAI6.ipynb | ###Markdown
Practice: Head Pose Detector

Hello! Let's dive right into this week's Practice. The goal is to detect where a person is looking, based on a photo of their face. To do this, we will use the dataset that can be found at http://crowley-coutaz.fr/HeadPoseDataSet/HeadPoseImageDatabase.tar.gz

0. Importing libraries
As always, remember to import the libraries you are going to need. We have left you the ones that might be useful for this case. If you do not know what a library is for, it is important that you look it up and understand why it has been imported. That will very likely help you identify the ones you will need in the following steps.
###Code
#-------------------- Libraries --------------------
# we leave you the basic ones; add whatever else you need
import cv2 # OpenCV 2 for capturing frames from the video
import os # For managing paths and directories in the project
import shutil # High level file operations
import numpy as np # Arrays
import keras # High level NN API
from PIL import Image, ImageOps # For image processing
from pathlib import Path # For easily managing paths
from IPython import display # For displaying images inline with the notebook
from sklearn.model_selection import train_test_split # For train-test splitting
from tqdm import tqdm
import re
import requests
import pandas as pd
import glob
###Output
_____no_output_____
###Markdown
1. Downloading the dataset: Head Pose Image Database (Gourier, Hall, & Crowley, 2004)
The next step is to download the dataset from http://crowley-coutaz.fr/HeadPoseDataSet/HeadPoseImageDatabase.tar.gz and structure the data so it is ready to use (decompressing the tar.gz file, creating variables, handling regular expressions, ...). The relevant information about how the dataset is built can be found at http://crowley-coutaz.fr/Head%20Pose%20Image%20Database.html

1.1 Download and handling as a file
###Code
url = 'http://crowley-coutaz.fr/HeadPoseDataSet/HeadPoseImageDatabase.tar.gz'
name = 'HeadPoseImageDatabase.tar.gz'
# Insert the use of the requests library below
###Output
_____no_output_____
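###Markdown
A minimal sketch of one way to do the download with `requests` (assuming you want to save the archive in the current working directory and that it has not been downloaded yet):
###Code
import requests

# stream the archive to disk in chunks so the whole file is not held in memory
response = requests.get(url, stream=True)
response.raise_for_status()
with open(name, 'wb') as f:
    for chunk in response.iter_content(chunk_size=1024 * 1024):
        f.write(chunk)
###Output
_____no_output_____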
###Markdown
1.2 Decompress the tar.gz file, using the Linux system underneath Jupyter Notebook, via the tar command
###Code
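# One possible solution, assuming the archive was downloaded to the working directory:
!tar -xzf HeadPoseImageDatabase.tar.gz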
###Output
_____no_output_____
###Markdown
File format:
Remember that the information link describes how the data is stored: for each image, the tilt and pan angles (tilt, pan) and the face coordinates (x, y), height and width (h, w) are stored in the file.

1.3 Processing the regular expressions in the image titles to obtain the features
We leave you this function so that, given an image path, it can transform the image to a more workable size.
###Code
def img_df(image_path, shape):
image = Image.open(image_path)
image_resized = image.resize(shape, Image.ANTIALIAS)
img_array = np.asarray(image_resized)
return img_array
# Load the data into the dataframe, extracted from each file (file name and contents)
df = pd.DataFrame()
# In the end you should have something like the following
df.columns = ["X", "Y", "H", "W", "T", "P", "Image"]
df.X = df.X.astype(int)
df.Y = df.Y.astype(int)
df.H = df.H.astype(int)
df.W = df.W.astype(int)
df = df.reset_index().drop("index", axis=1)
###Output
_____no_output_____
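###Markdown
A possible starting point for the regular-expression step, purely as a sketch: it assumes each image name ends with the tilt and pan angles encoded as two signed integers (e.g. `...+15-30.jpg`), which is worth double-checking against the dataset description linked above.
###Code
def parse_tilt_pan(filename):
    # two trailing signed integers before the extension, e.g. "...+15-30.jpg"
    match = re.search(r"([+-]\d+)([+-]\d+)\.jpg$", filename)
    if match is None:
        return None
    tilt, pan = match.groups()
    return int(tilt), int(pan)

parse_tilt_pan("personne01146+15-30.jpg")  # hypothetical example name
###Output
_____no_output_____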
###Markdown
1.4 Separation into X (data) and Y (values to predict) and their normalization
###Code
X = np.asarray(list(df["Image"]/255.))
Y = np.array(df[["X", "Y", "H", "W", "T", "P"]])/100.
X.shape
###Output
_____no_output_____
###Markdown
1.5 Split into the final x_train, y_train, x_test and y_test sets
###Code
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=42)
###Output
_____no_output_____
###Markdown
2. Importing the MobileNet neural network without its final layer
The goal here is to import the MobileNet neural network (an architecture that has proven quite efficient for this problem) excluding the final layer. We discard that last layer so we can later create our own and join the two parts together.

2.1 Import the Keras network without the final layer
###Code
# As we said, we recommend MobileNet
from keras.layers import Dense,GlobalAveragePooling2D
from keras.applications import MobileNet
from keras.preprocessing import image
from keras.models import Model
n_classes = 6
base_model = MobileNet(weights='imagenet',include_top=False) #imports the mobilenet model and discards the last 1000 neuron layer.
###Output
_____no_output_____
###Markdown
2.2 Create our final layer
###Code
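# A possible head, as a sketch: global pooling over MobileNet's features,
# a small dense layer, and a linear output with one unit per target value.
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(128, activation='relu')(x)
preds = Dense(n_classes, activation='linear')(x)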
###Output
_____no_output_____
###Markdown
2.3 Join the network and the layer
###Code
# Add the missing code to join the network and the newly generated layer.
# One possible completion (assuming `preds` is the output layer created above):
model = Model(inputs=base_model.input, outputs=preds)
for layer in model.layers[:20]:
layer.trainable=False
for layer in model.layers[20:]:
layer.trainable=True
###Output
_____no_output_____
###Markdown
2.4 Compile (choose the optimizer, the loss function and the error metric)
###Code
# One reasonable choice for this regression task (an assumption; feel free to change it):
model.compile(optimizer='adam',
              loss='mean_squared_error',
              metrics=['mae'])
###Output
_____no_output_____
###Markdown
2.5 Train the network we imported and adapted, using the dataset we prepared
###Code
#-------------------- Training our customized network --------------------
# A nice fit() and then wait. Around 10 epochs should give a decent result
model.fit(X_train, Y_train, validation_data=[X_test, Y_test], epochs=10, verbose=1)
###Output
_____no_output_____
###Markdown
2.6 Visualize a correlation diagram between the predicted values and the expected values (using, for example, the RMSE or R2 provided by sklearn)
###Code
from sklearn.metrics import r2_score
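
# A sketch of the requested check (assuming the model has been trained above):
# predict on the test set, report R2, and scatter-plot predictions vs. true values.
import matplotlib.pyplot as plt

Y_pred = model.predict(X_test)
print("R2 score:", r2_score(Y_test, Y_pred))

plt.figure(figsize=(6, 6))
plt.scatter(Y_test.flatten(), Y_pred.flatten(), s=5, alpha=0.5)
plt.xlabel('true values')
plt.ylabel('predicted values')
plt.title('Predicted vs. true (all 6 targets)')
plt.show()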
###Output
_____no_output_____ |
Final-Report.ipynb | ###Markdown
Communication in Crisis

Executive Summary

Background

Acquire

Data: [Los Angeles Parking Citations](https://www.kaggle.com/cityofLA/los-angeles-parking-citations)

Let's acquire the parking citations data from our file, `parking-citations.csv`.

__Initial findings__
- `Issue time` is quasi-normally distributed. It's interesting to see that the distribution of this everyday activity follows a roughly normal shape.
- Agencies 50+ write the most parking citations.
- Most parking citations are less than $100.00.

Prepare
- Remove spaces and lowercase all column names.
- Cast `Plate Expiry Date` to datetime data type.
- Cast `Issue Date` and `Issue Time` to datetime data types.
- Drop columns missing >= 74.42% of their values.
- Drop duplicate values.
- Transform the Latitude and Longitude columns from the NAD 1983 State Plane California V FIPS 0405 (feet) projection to EPSG:4326, World Geodetic System 1984: used in GPS [Standard] (an illustrative `pyproj` sketch of this reprojection follows below).
- Filter the data on these conditions:
  - Citations issued from 2017-01-01 to 2021-04-12.
  - Street sweeping violations where `Violation Description` == __"NO PARK/STREET CLEAN"__.
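The reprojection step above is handled inside `src.prep_sweep_data()`; the snippet below is only an illustrative sketch of how such a transform can be done with `pyproj`, assuming the source projection corresponds to EPSG:2229 (NAD83 / California zone 5, US feet) and that the `x`/`y` values shown are hypothetical raw State Plane coordinates.
###Code
from pyproj import Transformer

# build a transformer from the State Plane projection (assumed EPSG:2229) to WGS84
transformer = Transformer.from_crs("EPSG:2229", "EPSG:4326", always_xy=True)

# x, y are hypothetical State Plane coordinates (in feet)
x, y = [6439997.9], [1802686.4]
lon, lat = transformer.transform(x, y)
print(lat, lon)
###Output
_____no_output_____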
###Code
# Import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import folium.plugins as plugins
from IPython.display import HTML
import datetime
import calplot
import folium
import math
sns.set()
import src
# Prepare the data using a function stored in prepare.py
df = src.prep_sweep_data()
# Display the first two rows
df.head(2)
# Check the column data types and non-null counts.
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 2036169 entries, 0 to 2036168
Data columns (total 18 columns):
# Column Dtype
--- ------ -----
0 issue_date object
1 issue_time object
2 rp_state_plate object
3 plate_expiry_date object
4 make object
5 body_style object
6 color object
7 location object
8 route object
9 agency int64
10 violation_description object
11 fine_amount float64
12 latitude float64
13 longitude float64
14 day_of_week object
15 issue_year int64
16 issue_hour int64
17 issue_minute int64
dtypes: float64(3), int64(4), object(11)
memory usage: 279.6+ MB
###Markdown
Exploration

---

Parking Enforcement is Enforced Again: Where it all started

City Council Demands a Plan
The **Los Angeles City Council** tasked the **Los Angeles Department of Transportation** (LADOT) with creating a phased plan to resume parking enforcement on October 1st. Delayed parking enforcement added to the city's financial strain during the pandemic, with citation revenue 62% below budget.[1]

A Plan is Formed: How to Collect Revenue, Detailed. Outreach, Vague
On September 30th the city council voted to resume parking enforcement on October 15th. Between October 1st and October 14th, 2020, LADOT was responsible for informing the public [2] using social media and the press.[3]

1. `public-records\city-council-documents\LADOT-transition-plan.pdf`
2. `public-records\city-council-documents\public-outreach-period.pdf`
3. `public-records\LADOT-press-releases\enforcement.pdf`

---

Informing the Public
The Los Angeles Department of Transportation informed the public of street sweeping violations using flyers on windshields, the press, and social media.

Communication Channels: Social Media, Flyers, Newspapers, TV News

Let's take a look at social engagement on Twitter: Tweets from City Officials.

---

Street Sweeping Citations

How much revenue is generated from street sweeper citations daily?
###Code
# Daily street sweeping citation revenue
daily_revenue = df.groupby('issue_date').fine_amount.sum()
daily_revenue.index = pd.to_datetime(daily_revenue.index)
sns.set_context('talk')
# Plot daily revenue from street sweeping citations
daily_revenue.plot(figsize=(14, 7), label='Revenue', color='DodgerBlue')
plt.axhline(daily_revenue.mean(), color='black', label='Average Revenue')
plt.title("Daily Revenue from Street Sweeping Citations")
plt.xlabel('')
plt.ylabel("Revenue (in thousand's)")
plt.xticks(rotation=0, horizontalalignment='center', fontsize=13)
plt.yticks(range(0, 1_000_000, 200_000), ['$0', '$200', '$400', '$600', '$800'])
plt.ylim(0, 1_000_000)
plt.legend(loc=2, framealpha=.8);
###Output
_____no_output_____
###Markdown
> __Anomaly 1__: What happened between July/August of 2019 and January of 2020?
>
> __Anomaly 2__: Between March 2020 and October 2020 a Local Emergency was declared by the Mayor of Los Angeles in response to COVID-19. Street sweeping was halted to help Angelenos shelter in place. _Street sweeping resumed on 10/15/2020_.

Anomaly 2: Declaration of Local Emergency
###Code
sns.set_context('talk')
# Plot daily revenue from street sweeping citations
daily_revenue.plot(figsize=(14, 7), label='Revenue', color='DodgerBlue')
plt.axvspan('2020-03-16', '2020-10-14', color='grey', alpha=.25)
plt.text('2020-03-29', 890_000, 'Declaration of\nLocal Emergency', fontsize=11)
plt.title("Daily Revenue from Street Sweeping Citations")
plt.xlabel('')
plt.ylabel("Revenue (in thousand's)")
plt.xticks(rotation=0, horizontalalignment='center', fontsize=13)
plt.yticks(range(0, 1_000_000, 200_000), ['$0', '$200', '$400', '$600', '$800'])
plt.ylim(0, 1_000_000)
plt.legend(loc=2, framealpha=.8);
sns.set_context('talk')
# Plot daily revenue from street sweeping citations
daily_revenue.plot(figsize=(14, 7), label='Revenue', color='DodgerBlue')
plt.axhline(daily_revenue.mean(), color='black', label='Average Revenue')
plt.axvline(datetime.datetime(2020, 10, 15), color='red', linestyle="--", label='October 15, 2020', alpha=.2)
plt.title("Daily Revenue from Street Sweeping Citations")
plt.xlabel('')
plt.ylabel("Revenue (in thousand's)")
plt.xticks(rotation=0, horizontalalignment='center', fontsize=13)
plt.yticks(range(0, 1_000_000, 200_000), ['$0', '$200', '$400', '$600', '$800'])
plt.ylim(0, 1_000_000)
plt.legend(loc=2, framealpha=.8);
###Output
_____no_output_____
###Markdown
Twitter

Hypothesis Test

General Inquiry
Is the daily citation revenue after 10/15/2020 significantly greater than average?

Z-Score
$H_0$: The daily citation revenue after 10/15/2020 is less than or equal to the average daily revenue.
$H_a$: The daily citation revenue after 10/15/2020 is significantly greater than average.
###Code
confidence_interval = .997
# Directional Test
alpha = (1 - confidence_interval)/2
# Data to calculate z-scores using precovid values to calculate the mean and std
daily_revenue_precovid = df.loc[df.issue_date < '2020-06-01']
daily_revenue_precovid = daily_revenue_precovid.groupby('issue_date').fine_amount.sum()
mean_precovid, std_precovid = daily_revenue_precovid.agg(['mean', 'std']).values
mean, std = daily_revenue.agg(['mean', 'std']).values
# Calculating Z-Scores using precovid mean and std
z_scores_precovid = (daily_revenue - mean_precovid)/std_precovid
z_scores_precovid.index = pd.to_datetime(z_scores_precovid.index)
sig_zscores_pre_covid = z_scores_precovid[z_scores_precovid>3]
# Calculating Z-Scores using entire data
z_scores = (daily_revenue - mean)/std
z_scores.index = pd.to_datetime(z_scores.index)
sig_zscores = z_scores[z_scores>3]
sns.set_context('talk')
plt.figure(figsize=(12, 6))
sns.histplot(data=z_scores_precovid,
bins=50,
label='preCOVID z-scores')
sns.histplot(data=z_scores,
bins=50,
color='orange',
label='z-scores')
plt.title('Daily citation revenue after 10/15/2020 is significantly greater than average', fontsize=16)
plt.xlabel('Standard Deviations')
plt.ylabel('# of Days')
plt.axvline(3, color='Black', linestyle="--", label='3 Standard Deviations')
plt.xticks(np.linspace(-1, 9, 11))
plt.legend(fontsize=13);
a = stats.zscore(daily_revenue)
fig, ax = plt.subplots(figsize=(8, 8))
stats.probplot(a, plot=ax)
plt.xlabel("Quantile of Normal Distribution")
plt.ylabel("z-score");
###Output
_____no_output_____
###Markdown
p-values
###Code
p_values_precovid = z_scores_precovid.apply(stats.norm.cdf)
p_values = z_scores.apply(stats.norm.cdf)
significant_dates_precovid = p_values_precovid[(1-p_values_precovid) < alpha]
significant_dates = p_values[(1-p_values) < alpha]
# The chance of an outcome occuring by random chance
print(f'{alpha:0.3%}')
###Output
0.150%
###Markdown
Cohen's D
###Code
fractions = [.1, .2, .5, .7, .9]
cohen_d = []
for percentage in fractions:
cohen_d_trial = []
for i in range(10000):
sim = daily_revenue.sample(frac=percentage)
sim_mean = sim.mean()
d = (sim_mean - mean) / (std/math.sqrt(int(len(daily_revenue)*percentage)))
cohen_d_trial.append(d)
cohen_d.append(np.mean(cohen_d_trial))
cohen_d
fractions = [.1, .2, .5, .7, .9]
cohen_d_precovid = []
for percentage in fractions:
cohen_d_trial = []
for i in range(10000):
sim = daily_revenue_precovid.sample(frac=percentage)
sim_mean = sim.mean()
d = (sim_mean - mean_precovid) / (std_precovid/math.sqrt(int(len(daily_revenue_precovid)*percentage)))
cohen_d_trial.append(d)
cohen_d_precovid.append(np.mean(cohen_d_trial))
cohen_d_precovid
###Output
_____no_output_____
###Markdown
Significant Dates with less than a 0.15% chance of occurring
- All dates that are considered significant occur after 10/15/2020.
- In the two weeks following 10/15/2020, significant events occurred on __Tuesdays and Wednesdays__.
###Code
dates_precovid = set(list(sig_zscores_pre_covid.index))
dates = set(list(sig_zscores.index))
common_dates = list(dates.intersection(dates_precovid))
common_dates = pd.to_datetime(common_dates).sort_values()
sig_zscores
pd.Series(common_dates.day_name(),
common_dates)
np.random.seed(sum(map(ord, 'calplot')))
all_days = pd.date_range('1/1/2020', '12/22/2020', freq='D')
significant_events = pd.Series(np.ones(len(common_dates)), index=common_dates)
for i in significant_events.index:
print(i)
calplot.calplot(significant_events, figsize=(18, 12), cmap='coolwarm_r');
###Output
_____no_output_____
###Markdown
Reject the null hypothesis that daily citation revenue after 10/15/2020 is less than or equal to the average daily revenue.
- 2020-10-15
- 2020-10-16
- 2020-10-19
- 2020-10-20
- 2020-10-21
- 2020-10-22
- 2020-10-27
- 2020-10-28
- 2020-10-29

Which parts of the city were impacted the most?
###Code
df_outliers = df.loc[df.issue_date.isin(list(common_dates.astype('str')))]
df_outliers.reset_index(drop=True, inplace=True)
print(df_outliers.shape)
df_outliers.head()
# m = folium.Map(location=[34.0522, -118.2437],
# min_zoom=8,
# max_bounds=True)
# mc = plugins.MarkerCluster()
# for index, row in df_outliers.iterrows():
# mc.add_child(
# folium.Marker(location=[str(row['latitude']), str(row['longitude'])],
# popup='Cited {} {} at {}'.format(row['day_of_week'],
# row['issue_date'],
# row['issue_time'][:-3]),
# control_scale=True,
# clustered_marker=True
# )
# )
# m.add_child(mc)
###Output
_____no_output_____
###Markdown
SEE: Simple Evolutionary Exploration

By Katrina Gensterblum

Image from: https://miro.medium.com/

---

Authors
$\text{Katrina Gensterblum}^{1}$, $\text{Dirk Colbry}^{1}$, $\text{Cameron Hurley}^{2}$, $\text{Noah Stolz}^{3}$

$^{1}$ Department of Computational Mathematics, Science and Engineering, Michigan State University
$^{2}$ Department of Computer Science and Engineering, Michigan State University
$^{3}$ School of Science, School of Humanities and Social Sciences, Rensselaer Polytechnic Institute

---

Abstract
As the ability to collect image data increases, images are used more and more within a wide range of disciplines. However, processing this kind of data can be difficult and labor-intensive. One of the most time-consuming image processing techniques to perform is image segmentation. As a result, many image segmentation algorithms have been developed to try and accomplish this task automatically, but even finding the best algorithm for a dataset can be time intensive. Here we provide easy-to-use software that utilizes the power of genetic algorithms to automate the process of image segmentation. The software works to find both the best image segmentation algorithm for an image dataset and the best hyperparameters for that segmentation algorithm.

---

Statement of Need
As technology advances, image data is becoming a common element in a broad scope of research experiments. Studies in everything from self-driving vehicles to plant biology utilize images in some capacity. However, every image analysis problem is different, and processing this kind of data and retrieving specific information can be extremely time-consuming. One of the main image processing techniques used today, and one of the most time-consuming, is image segmentation, which attempts to find entire objects within an image. As a way to try and make this process easier, many image processing algorithms have been developed to try and automatically segment an image. However, there are many different options available, and each algorithm may work best for a different image set. Additionally, many of these algorithms have hyperparameters that need to be tuned in order to get the most accurate results. So even if a researcher already possesses knowledge in image understanding and segmentation, it can be time-consuming to run and validate a customized solution for their problem. Thus, if this process could be automated, a significant amount of researcher time could be recovered.

The purpose of the Simple Evolutionary Exploration, or SEE, software package is to provide an easy-to-use tool that can achieve this automation for image segmentation problems. By utilizing the power of genetic algorithms, the software can not only find the best image segmentation algorithm to use on an image set, but can also find the optimal parameters for that specific algorithm.

---

Installation Instructions
A list of dependencies for SEE can be found in the [README](README.md) file. These dependencies can be installed individually, or by creating a conda environment using the command below:

**With makefile:** `make init`

**Manually:** `conda env create --prefix ./envs --file environment.yml`

---

In order to build automatic documentation for the project, use one of the commands below:

**With makefile:** `make doc`

**Manually:** `pdoc --force --html --output-dir ./docs see`

---

Unit Tests
Testing files for SEE can be found in `.\see\tests\`.
In order to run the tests, run the cell below or use one of the following commands:

**With makefile:** `make test`

**Manually:** `pytest -v see`

If the tests ran successfully, an output message should appear stating that $25$ tests were passed and $11$ warnings occurred.
###Code
!pytest -v see
###Output
_____no_output_____
###Markdown
Final Report
**Group - Dig Data**
**Group Members - Alexandru Pavel, Simone Garzarella**

Index
- [3. Introduction](#introduction)
  - [3.1. Problem Description](#problem-description)
  - [3.2. Hardware Specifications](#hw-specs)
- [4. Dataset Analysis](#data-analysis)
  - [4.1. Historical Stock Prices](#hsp)
  - [4.2. Historical Stocks](#hs)
- [5. Job 1](#job1)
  - [5.1. MapReduce](#mapreduce1)
  - [5.2. Hive](#hive1)
  - [5.3. Spark](#spark1)
- [6. Job 2](#job2)
  - [6.1. MapReduce](#mapreduce2)
  - [6.2. Hive](#hive2)
  - [6.3. Spark](#spark2)
- [7. Job 3](#job3)
  - [7.1. MapReduce](#mapreduce3)
  - [7.2. Hive](#hive3)
  - [7.3. Spark](#spark3)
- [8. Results](#results)
  - [8.1. Job 1](#plot1)
  - [8.2. Job 2](#plot2)
  - [8.3. Job 3](#plot3)
- [9. Conclusions](#conclusions)

Introduction
The "Daily Historical Stock Prices" dataset contains the performance of stocks on the New York exchanges (NYSE and NASDAQ) from 1970 to 2018. Two CSV files make up the dataset:
- historical_stock_prices.csv
- historical_stocks.csv

The first contains the price and volume values, which vary over time, for each ticker. The second contains the data about each ticker, such as its sector and the exchange on which it is listed.

Problem Description
After an initial phase of data analysis and processing, we want to run 3 jobs (described in detail later) with the different technologies covered in the course (Hadoop, Hive and Apache Spark).

Hardware Specifications
The tests were run locally and on a cluster, with machines with these characteristics:
- **Local:** Ubuntu 20.04, i5 2.5GHz CPU, 8GB RAM and 256GB SSD
- **Cluster:** AWS EMR with 1 Master Node and 5 DataNodes. m5.xlarge instances with 16GB RAM, 4 vCPUs and 64GB of storage.

Dataset Analysis
Below, the two dataset files are analyzed to identify any preprocessing to be performed. The process for creating smaller or larger datasets (via sampling) for the subsequent tests is also described.
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
Historical Stock Prices
The fields of this dataset are:
- `ticker`: unique symbol of the stock (https://en.wikipedia.org/wiki/Ticker_symbol)
- `open`: opening price
- `close`: closing price
- `adj_close`: "adjusted" closing price
- `low`: minimum price
- `high`: maximum price
- `volume`: number of transactions
- `date`: date in yyyy-mm-dd format
###Code
stock_prices = pd.read_csv('dataset/historical_stock_prices.csv')
stock_prices
###Output
_____no_output_____
###Markdown
There are ~21 million records in this file
###Code
stock_prices.isna().sum()
###Output
_____no_output_____
###Markdown
There are no null values in any of the columns
###Code
stock_prices.nunique()
###Output
_____no_output_____
###Markdown
In total there are 5685 unique `ticker` values in the dataset
###Code
stock_prices[stock_prices.duplicated(subset=['ticker','date'])].shape
###Output
_____no_output_____
###Markdown
There are no distinct records with duplicate (ticker, date) values.

Creation of datasets of various sizes
Datasets of (approximately) 256/512/1024MB and ~4GB were generated, in addition to the original dataset, which is ~2GB. The generated files (with their exact sizes) are named historical_stock_prices[size].csv:
- historical_stock_prices`256`.csv   (239.75MB)
- historical_stock_prices`512`.csv   (479.51MB)
- historical_stock_prices`1024`.csv  (959.03MB)
- historical_stock_prices.csv        (1909.97MB)
- historical_stock_prices`4096`.csv  (3835.92MB)

The records to include are chosen by random sampling (with a preset seed, for repeatability):

```python
def sample_all_sizes(historical_stock_prices_df):
    for size in [0.125, 0.25, 0.5, 2]:
        sample_n_rows = dataset_row_count * size
        sampled_df = dataset.sample(sample_n_rows)
        filename = 'dataset/historical_stock_prices[SIZE].csv'
        sampled_df.to_csv(filename)
```

Historical Stocks
The dataset with the ticker information is structured as follows:
- `ticker`: symbol of the stock
- `exchange`: NYSE or NASDAQ
- `name`: name of the company
- `sector`: sector of the company
- `industry`: reference industry for the company
###Code
stocks = pd.read_csv("dataset/historical_stocks.csv")
stocks
stocks.nunique()
###Output
_____no_output_____
###Markdown
There are 6460 unique `ticker` values, equal to the number of rows in the dataset. The ticker can be considered a key of this dataset; the company name `name` cannot, since it has repetitions.
###Code
stocks[stocks.duplicated(subset=['name'])].shape
###Output
_____no_output_____
###Markdown
In particular, there are 998 duplicated company names. In the rest of the project this field will not be used to identify records (in particular for job 3).
###Code
stocks['sector'].unique()
###Output
_____no_output_____
###Markdown
Displaying the possible values of `sector`, we can notice the presence of a null value.
###Code
stocks.isna().sum()
###Output
_____no_output_____
###Markdown
The `sector` field has 1440 null values, which are removed during the preprocessing of this dataset.
###Code
stocks_clean = stocks.loc[(stocks['sector'].notna())]
stocks_clean.shape
###Output
_____no_output_____
###Markdown
Il dataset pulito dai valori nulli del campo `sector` ha 5020 record. Verrà salvato come `historical_stocks_clean.csv` ```pythonstocks_clean.to_csv('dataset/historical_stocks_clean.csv')``` Job 1 Deve generare un report contenente, per ciascuna azione:- data prima quotazione (a)- data ultima quotazione (b)- variazione percentuale della quotazione (tra primo e ultimo prezzo di chiusura nel dataset) (c)- prezzo massimo (d) - prezzo minimo (e)Il report deve essere ordinato per valori decrescenti del secondo punto (dalla data di quotazione più recente alla più vecchia). MapReduce Durante la fase di Map dapprima si estraggono i campi (mediante split e parsing) di ticker, closePrice, minPrice, maxPrice e date. Queste righe filtrate vengono mandate al Reducer che farà altre operazioni. ```pythonclass mapper: for row in INPUT: split the current row into fields (ignoring not needed ones) ticker, closePrice, minPrice, maxPrice, date = row write the separated fields to standard output print(ticker, closePrice, minPrice, maxPrice, date)``` Nel Reducer dapprima si definisce una struttura dati dizionario (results) che conterrà, per ogni ticker, un dizionario con i valori richiesti dal job.`results` ha il seguente formato per le sue entry: (ticker): (first_quot_date, last_quot_date, perc_var, min_price, max_price) ```python class reducer: maps each ticker to the required values to calculate for example: {'AAPL': {'min': 1, 'max': 2, ..}, 'AMZN': {'min': 0.5, 'max': 5, ..}} results = {}``` Vengono parsati i valori provenienti dal Mapper, e per ogni ticker se esso non è già presente nel dizionario si inizializzano i suoi valori. ```python for row in INPUT: split the current row into fields ticker, closePrice, minPrice, maxPrice, date = row if the ticker hasn't been seen before, initialize its values in the dictionary if ticker not in results: results[ticker] = { 'first_quot_date': date, 'last_quot_date': date, 'first_quot_price': closePrice, 'last_quot_price': closePrice, 'perc_var': 0, 'min_price': minPrice, 'max_price': maxPrice } continue``` I ticker già contenuti in results verranno aggiornati solo se necessario (ad esempio se si trova un ticker con una data di quotazione antecedente a quella salvata). ```python gets the input ticker's current saved values from the dictionary currTicker = results[ticker] update the saved ticker values with the ones from the input data if date < currTicker['first_quot_date']: currTicker['first_quot_date'] = date currTicker['first_quot_price'] = closePrice if date > currTicker['last_quot_date']: currTicker['last_quot_date'] = date currTicker['last_quot_price'] = closePrice if minPrice < currTicker['min_price']: currTicker['min_price'] = minPrice if maxPrice > currTicker['max_price']: currTicker['max_price'] = maxPrice``` Infine i risultati ottenuti vengono ordinati in senso decrescente sul campo della data dell'ultima quotazione. Vengono poi mandati in output calcolando anche la variazione percentuale del prezzo dell'azione tra la prima e l'ultima data di quotazione. 
```python sort the results from the most to the least recent quotation dates sortedResults = sort(results.items(), key='last_quot_date', reverse=True) result is in the format ('TickerName', {'min': 1, 'max': 2}), a tuple for result in sortedResults: perc_var = calculate_percent_variation(first_quot_price, last_quot_price) print(ticker, first_quot_date, last_quot_date, perc_var, min_price, max_price)``` Hive Per eseguire questo job sono state create prima due tabelle esterne:* Una che, per ogni ticker, etrae il prezzo di chiusura alla prima data disponibile per quel ticker nel database:```SQL create table ticker_to_minDate as select d.ticker as min_ticker, d.close_price as min_close_price from historical_stock_prices(size) d join (select ticker as min_ticker, min(price_date) as min_price_date from historical_stock_prices(size) group by ticker) min_table on (d.ticker = min_table.min_ticker and d.price_date <= min_table.min_price_date);``` * L'altra che, per ogni ticker, etrae il prezzo di chiusura allultima data disponibile per quel ticker nel database:```SQL create table ticker_to_maxDate as select d.ticker as max_ticker, d.close_price as max_close_price from historical_stock_prices(size) d join (select ticker as max_ticker, max(price_date) as max_price_date from historical_stock_prices(size) group by ticker) as max_table on (d.ticker = max_table.max_ticker and d.price_date >= max_table.max_price_date);```* Successivamente esse sono state utilizzate per la query finale, in cui si estrae, per ogni ticker, la data della prima quotazione, la data dell’ultima quotazione, la variazione percentuale della quotazione, il prezzo massimo e quello minimo.```SQL CREATE TABLE job1_hive ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' LINES TERMINATED BY '\n' as select ticker, min(price_date) as first_price_date, max(price_date) as last_price_date, max(( (max_table.max_close_price - min_table.min_close_price) / min_table.min_close_price) * 100) as variation, max(high) as max_price, min(low) as min_price from historical_stock_prices(size) d join ticker_to_maxDate max_table on d.ticker = max_table.max_ticker join ticker_to_minDate min_table on d.ticker = min_table.min_ticker group by ticker order by ticker, last_price_date desc;``` Spark Spark caricherà i dati del dataset `historical_stock_prices` solo nel momento in cui deve effettuare una azione ```pythonhistorical_stock_prices = loadDataFromCsv()``` Viene creato un nuovo RDD con chiave = ticker e valore = (prezzo di chiusura, data) ```pythonticker_date = historical_stock_prices .map(x -> (ticker,(close_price, price_date)))``` Vengono poi creati due RDD per calcolare la data minima e massima della quotazione ```pythonfirst_quot_date = ticker_date.reduceByKey(min_date(a, b))last_quot_date = ticker_date.reduceByKey(max_date(a, b))``` Viene fatto il join tra i due precedenti RDD e poi si mappa per avere (ticker) -> (primo prezzo di chiusura, prima data, ultimo prezzo di chiusura, ultima data). 
Infine si mappa, calcolando la variazione percentuale, per ottenere (ticker) -> (prima data, ultima data, variazione percentuale) ```pythonpercent_variation = first_quot_date .join(last_quot_date) .map((ticker, ((first_close, first_date), (last_close, last_date))) -> (ticker, (first_close, first_date, last_close, last_date)) .map((ticker, (first_close, first_date, last_close, last_date) -> (ticker, (first_date, last_date, percent_variation(first_close, last_close))))) ``` Viene mappato l'RDD iniziale per ottenere, per ogni ticker, il prezzo minimo ed il prezzo massimo ```python min_price = historical_stock_prices .map(x -> (ticker, min_price)) .reduceByKey(min(a,b))max_price = historical_stock_prices .map(x -> (ticker, max_price)) .reduceByKey(min(a,b))``` Vengono joinati insieme i precedenti risultati per ottenere l'RDD finale ordinato per data, nella forma (ticker) -> data prima quotazione, data ultima quotazione, variazione percentuale, valore minimo e valore massimo ```pythonresults = percent_variation .join(min_price) .join(max_price) .map((ticker, ((first_date, last_date, perc_var), min_price, max_price)) -> (ticker, (first_date, last_date, percent_var, min_price, max_price)) .sortByDate()``` Job 2 Generare un report contenente, per ciascun settore e per ciascun anno del periodo 2009-2018: - variazione percentuale della quotazione del settore nell'anno (somma prezzi di chiusura di tutte le azioni del settore, considerando la prima e l'ultima data di ogni azione aggregate) (a)- azione del settore con incremento percentuale maggiore nell'anno (col valore) (b)- azione del settore con maggior volume di transazioni nell'anno (con valore) (c) Il report deve essere ordinato per nome delsettore. MapReduce Nel Mapper si definisce una struttura `ticker_to_sector` (dizionario) che conterrà, per ogni ticker, il valore del suo settore. In seguito il Mapper invierà al Reducer queste informazioni più quelle sui prezzi, volumi e date relativi al periodo 2009-2018. ```pythonclass mapper: will contain (sector, year): (results) pairs ticker_to_sector = {}``` Per poter ottenere le informazioni sui settori dei ticker (che sono nel secondo file del dataset) si deve effettuare un join. Si è scelto di utilizzare la Distributed Cache di Hadoop per leggere il file `historical_stocks` già preprocessato (senza le righe con settore nullo). ```python with open('historical_stocks_clean.csv') as hs_file: for row in hs_file: ticker, sector = row ticker_to_sector[ticker] = sector``` A questo punto si processano i dati in input provenienti da `historical_stock_prices`, controllando se il ticker della riga abbia un settore corrispondente. Se non lo ha la riga verrà ignorata.Il join viene effettuato dal lato del Mapper, semplicemente aggiungendo l'informazione del settore alle righe del ticker corrispondente.Infine i dati vengono mandati uno per uno al Reducer. 
```python for row in INPUT: ticker, closePrice, volume, date = row the ticker had a null sector, ignore it if ticker not in ticker_to_sector: continue if 2009 <= date.year <= 2018: the join adds a column sector sector = ticker_to_sector[ticker] print(sector, ticker, date, closePrice, volume)``` Nel Reducer vengono dapprima definite due strutture dati (dizionari) che serviranno per aggregare i risultati.In particolare esse hanno il seguente formato:- `tickerDataBySectorYear(ticker, sector, year)` - 'first_close_date': 2012-01-01, - 'first_close_value': 50.5, - 'last_close_date': 2012-12-31, - 'last_close_value': 240, - 'total_volume': 300000 - `aggregatedSectorYearData(sector, year)` - 'sum_initial_close': 4000, - 'sum_final_close': 6000, - 'max_perc_var_ticker': 'AAPL', - 'max_perc_var_value': 75, - 'max_total_volume_ticker': 'AAPL', - 'max_total_volume_value': 3000000 ```pythonclass reducer: tickerDataBySectorYear = {} aggregatedSectorYearData = {}``` Per ogni riga proveniente dal Mapper si salvano le informazioni di ogni (ticker, settore e anno) in `tickerDataBySectorYear`. Anche qui se la tripla (ticker, settore e anno) non è mai stata vista essa verrà inizializzata, oppure aggiornata se erano già presenti dei valori. ```pythonfor line in INPUT: sector, ticker, date, closePrice, volume = line save (in memory) the info of each ticker per year and sector (inefficient) if (ticker, sector, date.year) not in tickerDataBySectorYear: newTicker = {'first_close_date': date, 'first_close_value': closePrice, 'last_close_date': date, 'last_close_value': closePrice, 'total_volume': volume} tickerDataBySectorYear[(ticker, sector, date.year)] = newTicker the ticker in that year (with that sector) has been seen, update it else: currTicker = tickerDataBySectorYear[(ticker, sector, date.year)] if date < currTicker['first_close_date']: currTicker['first_close_date'] = date currTicker['first_close_value'] = closePrice if date > currTicker['last_close_date']: currTicker['last_close_date'] = date currTicker['last_close_value'] = closePrice currTicker['total_volume'] += volume``` In modo analogo, iterando su `tickerDataBySectorYear`, si popolerà il dizionario `aggregatedSectorYearData`:- aggregando i valori iniziali e finali di prezzo di chiusura- conservando il ticker con la variazione percentuale massima- conservando il ticker con il maggior volumi di transazioniAnche qui si inizializzerà la entry non presente nel dizionario se necessario. 
```python aggregate the single ticker and year data by sectorfor (ticker, sector, year) in tickerDataBySectorYear: currTicker = tickerDataBySectorYear[(ticker, sector, year)] initialClose = currTicker['first_close_value'] finalClose = currTicker['last_close_value'] volume = currTicker['total_volume'] percVar = calculatePercVar(initialClose, finalClose) create a new dict to save the data if (sector, year) not in aggregatedSectorYearData: newData = {'sum_initial_close': initialClose, 'sum_final_close': finalClose, 'max_perc_var_ticker': ticker, 'max_perc_var_value': percVar, 'max_total_volume_ticker': ticker, 'max_total_volume_value': volume} aggregatedSectorYearData[(sector, year)] = newData update the existing data else: currData = aggregatedSectorYearData[(sector, year)] currData['sum_initial_close'] += initialClose currData['sum_final_close'] += finalClose if percVar > currData['max_perc_var_value']: currData['max_perc_var_ticker'] = ticker currData['max_perc_var_value'] = percVar if volume > currData['max_total_volume_value']: currData['max_total_volume_ticker'] = ticker currData['max_total_volume_value'] = volume``` Si ordinano i risultati ottenuti per nome del settore.Infine, iterando su `aggregatedSectorYearData`, si calcola la variazione percentuale del settore nell'anno e si stampa ogni riga in output. ```pythonsortedResults = sorted(aggregatedSectorYearData.items(), key='sector', reverse=False)for result in sortedResults: sector = result[0][0] year = result[0][1] currResult = aggregatedSectorYearData[(sector, year)] initialCloseSum = currResult['sum_initial_close'] finalCloseSum = currResult['sum_final_close'] currResult['total_perc_var'] = calculatePercVar(initialCloseSum, finalCloseSum) print( sector, year, currResult['total_perc_var'], currResult['max_perc_var_ticker'], currResult['max_perc_var_value'], currResult['max_total_volume_ticker'], currResult['max_total_volume_value'])``` Hive Per questo job sono state create diverse tabelle esterne, per chiarezza divise a seconda del task per cui esse hanno un'utilità:UTILI PER IL TASK A* Per ogni settore estrare l'anno dalle date dei prezzi```SQLcreate table sector_2_date asselect distinct d2.sector, extract(year from d1.price_date)from historical_stock_prices(size) as d1 left join historical_stocks_clean as d2 on d1.ticker = d2.tickerorder by d2.sector, `_c1`;alter table sector_2_date change `_c1` year int;```* Per ogni settore, anno e ticker estrae la data della prima e dell'ultima quotazione dell'anno.```SQLcreate table sector_ticker_min_max asselect d2.sector, sd.year, d1.ticker, min(d1.price_date) as first_date, max(d1.price_date) as last_datefrom historical_stock_prices(size) as d1left join historical_stocks_clean as d2 on d1.ticker = d2.tickerleft join sector_2_date as sd on d2.sector = sd.sector and sd.year = extract(year from d1.price_date)where sd.year >=2009 and sd.year <= 2018group by d2.sector, sd.year, d1.tickerorder by sector, year, d1.ticker;```* Per ogni settore e anno calcola la somma di tutte e quotazioni nella prima data dell'anno per quel settore.```SQLcreate table sector_to_min_quot asselect d2.sector, sm.year, sum(d1.close_price) as first_quotfrom historical_stock_prices(size) as d1left join historical_stocks_clean as d2 on d1.ticker = d2.tickerjoin sector_ticker_min_max as sm on d2.sector = sm.sector and sm.year = extract(year from d1.price_date) and d1.ticker = sm.tickerwhere d1.price_date = sm.first_dategroup by d2.sector, sm.yearorder by d2.sector, sm.year;```* Per ogni settore e anno calcola la somma di 
tutte e quotazioni nell'ultima data dell'anno per quel settore.```SQLcreate table sector_to_max_quot asselect d2.sector, sm.year, sum(d1.close_price) as last_quotfrom historical_stock_prices(size) as d1left join historical_stocks_clean as d2 on d1.ticker = d2.tickerjoin sector_ticker_min_max as sm on d2.sector = sm.sector and sm.year = extract(year from d1.price_date) and d1.ticker = sm.tickerwhere d1.price_date = sm.last_date and d2.sector != "N/A"group by d2.sector, sm.yearorder by d2.sector, sm.year;```UTILI PER IL TASK B* Per ogni settore e anno estrae il ticker con la sua prima quotazione per quel settore e in quell'anno.```SQLcreate table sector_year_to_tickerFirstQuotation asselect d2.sector, sm.year, d1.ticker, close_price as first_quotationfrom historical_stock_prices(size) as d1left join historical_stocks_clean as d2 on d1.ticker = d2.tickerleft join sector_ticker_min_max as sm on d2.sector = sm.sector and d1.ticker = sm.tickerwhere d1.price_date = sm.first_dateorder by d2.sector, sm.year;```* Per ogni settore e anno estrae il ticker con la sua ultima quotazione per quel settore e in quell'anno.```SQLcreate table sector_year_to_tickerLastQuotation asselect d2.sector, sm.year, d1.ticker, close_price as last_quotationfrom historical_stock_prices(size) as d1left join historical_stocks_clean as d2 on d1.ticker = d2.tickerleft join sector_ticker_min_max as sm on d2.sector = sm.sector and d1.ticker = sm.tickerwhere d1.price_date = sm.last_dateorder by d2.sector, sm.year;```* Per ogni settore e anno estrae il ticker con la sua prima e ultima quotazione per quel settore e in quell'anno (Join delle due tabelle precedenti).```SQLcreate table sector_year_to_tickerFirstLastQuotation asselect s1.sector, s1.year, s1.ticker, s1.first_quotation, s2.last_quotationfrom sector_year_to_tickerFirstQuotation as s1left join sector_year_to_tickerLastQuotation as s2 on (s1.sector = s2.sector and s1.year = s2.year and s1.ticker = s2.ticker)order by s1.sector, s1.year;```* Per ogni settore, anno e ticker calcola la variazione percentuale del ticker in quell'anno per quel settore.```SQLcreate table sector_year_to_variation asselect sector, year, ticker, max(((last_quotation - first_quotation)/first_quotation)*100) as variationfrom sector_year_to_tickerFirstLastQuotationgroup by sector, year, ticker;```* Per ogni settore e anno calcola la variazione massima avuta in quell'anno e per quel settore.```SQLcreate table sector_year_to_maxVariation asselect sector, year, max(variation) as max_variationfrom sector_year_to_variationgroup by sector, year;```* Per ogni settore e anno estrae il ticker che ha avuto la variazione percentuale massima in quell'anno e per quel settore, con l'indicazione di tale variazione.```SQLcreate table sector_year_to_maxTicker asselect smax.sector, smax.year, sv.ticker, smax.max_variationfrom sector_year_to_maxVariation as smaxleft join sector_year_to_variation as sv on smax.sector = sv.sector and smax.year = sv.yearwhere max_variation = variation;```UTILI PER IL TASK C* Per ogni settore, anno e ticker calcola la somma dei volumi dei ticker in quell'anno e per quel settore.```SQLcreate table sector_year_ticker_to_volumeSum asselect d2.sector, year(d1.price_date) as price_year, d1.ticker, sum(d1.volume) as volumefrom historical_stock_prices(size) as d1join historical_stocks_clean as d2 on d1.ticker = d2.tickergroup by d2.sector, year(d1.price_date), d1.ticker;```* Per ogni settore e anno estrae la somma di volumi massima in quell'anno e per quel settore.```SQLcreate table 
sector_year_to_maxVolume asselect sector, price_year, max(volume) as maxVolumefrom sector_year_ticker_to_volumeSumgroup by sector, price_yearorder by sector, price_year;```* Per ogni settore e anno estrae il ticker che ha la somma di volumi massima in quell'anno e per quel settore, con indicazione di tale somma.```SQLcreate table sector_year_toMaxVolumeTicker asselect ayt.sector, ayt.price_year, ayt.ticker as v_ticker, ayt.volumefrom sector_year_ticker_to_volumeSum as aytleft join sector_year_to_maxVolume as aym on ayt.sector = aym.sector and ayt.price_year = aym.price_yearwhere volume = maxVolume;```QUERY FINALE* Mette insieme tutte le precedenti tabelle per estrarre, per ogni settore e anno, la variazione percentuale della quotazione del settore nell’anno, l’azione del settore che ha avuto il maggior incremento percentuale nell’anno (con indicazione dell’incremento), l’azione del settore che ha avuto il maggior volume di transazioni nell’anno(con indicazione del volume).```SQLcreate table job2_hive asselect d2.sector, smin.year, min(((smax.last_quot - smin.first_quot)/smin.first_quot)*100) as variation, max(sy.ticker), max(sy.max_variation), min(v_ticker), max(syv.volume)from historical_stock_prices(size) as d1left join historical_stocks_clean as d2 on d1.ticker = d2.tickerleft join sector_to_min_quot as smin on d2.sector = smin.sector and smin.year = extract(year from d1.price_date)left join sector_to_max_quot as smax on d2.sector = smax.sector and smax.year = extract(year from d1.price_date)left join sector_year_to_maxTicker sy on d2.sector = sy.sector and sy.year = extract(year from d1.price_date)left join sector_year_toMaxVolumeTicker as syv on d2.sector = syv.sector and syv.price_year = extract(year from d1.price_date)where smin.year >=2009 and smin.year <= 2018 and smax.year >=2009 and smax.year <= 2018 and d2.sector != "N/A"group by d2.sector, smin.yearorder by d2.sector, smin.year;``` Spark Spark caricherà i dati dei dataset `historical_stock_prices` o `historical_stocks` solo nel momento in cui deve effettuare una azione ```pythonhistorical_stock_prices = loadDataFromCsv()historical_stocks = loadDataFromCsv()``` Filtra i dati per estrarre soltanto quelli relativi al periodo compreso tra 2009 e 2018Mappa per ottenere (ticker) -> (prezzo di chiusura, volume, data) ```pythonhistorical_stock_prices_filtered = historical_stock_prices .filter(2009 <= year <= 2018) .map(x -> (ticker, (close_price, volume, date)))``` Mappa il secondo dataset per ottenere ticker -> settore (gli unici dati che ci interessano) ```pythonhs = historical_stocks .map(x -> (ticker, sector)) ``` Viene effettuato il join tra i due dataset e viene crato un nuovo RDD mappato per avere (settore, year, ticker) -> (prezzo di chiusura, volume, data) ```pythonhsp_sector = historical_stock_prices_filtered.join(hs) .map((ticker, ((close_price, volume, date), sector)) -> ((sector, year, ticker),(close, volume, date)))``` Vengono create due RDD che per ogni settore, anno e ticker restituiscono il prezzo di chiusura alla prima e all'ultima data dell'anno per quel settore ```pythonfirst_quotation_close = hsp_sector .reduceByKey_minDate() .map(((sector, year, ticker),(close, volume, date)) -> ((sector, year, ticker),close))last_quotation_close = hsp_sector .reduceByKey_maxDate() .map(((sector, year, ticker),(close, volume, date)) -> ((sector, year, ticker),close))``` Viene effettuato il join tra i precedenti due RDD e una map per calcolare e aggiungere, per ogni settore, anno e ticker, la variazione percentuale in quell'anno 
per quel settore ```pythonticker_percent_variation = first_quotation_close .join(last_quotation_close) .map(((sector, year, ticker),(first_close, last_close)) -> ((sector, year, ticker),(first_close, last_close, percent_variation(first_close, last_close)) ``` Viene effettuata prima una map per avere per ogni settore e anno il ticker e la variazione percentualeViene poi effettuata una reduce by key per avere il ticker con la variazione percentuale massima per quell'anno e in quel settore con indicazione di tale variazione ```pythonticker_max_percent_var = ticker_percent_variation .map(((sector, year, ticker),(first_close, last_close, percent_var )) -> ((sector, year), (ticker, percent_var))) .reduceByKey(max_value(a, b)) ``` Per ogni settore, anno e ticker calcola la somma dei volumi e poi prende il ticker con il massimo valore di somma di volumi per settore e anno ```pythonticker_max_volume = hsp_sector .map(((sector, year, ticker),(close, volume, date)) -> ((sector, year, ticker), volume)) .reduceByKey(volume_a + volume_b) .map(((sector, year, ticker), max_volume) -> ((sector, year), (ticker, max_volume))) .reduceByKey(max_value(a, b))``` Viene effettuata una map rimuove la variazione precentuale dal valore e il ticker dalla chiavePoi una seconda map per avere per ogni settore e anno la variazione percentuale totale ```pythonsector_year_percent_variation = ticker_percent_variation .map(((sector, year, ticker),(first_close, last_close, percent_var )) -> ((sector, year), (first_close, last_close))) .reduceByKey(sum_tuple(a, b)) .map(((sector, year), (sum_first_close, sum_last_close)) -> ((sector, year), (total_percent_variation = (sum_last_close - sum_first_close) /sum_first_close)*100))``` Aggrega tutti i risultati intermedi e calcola i record finali per ottenere, per ogni settore e anno, la massima variazione della quotazione del settore nell'anno, l'azione con incremento maggiore nel settore e l'azione del settore con il maggior volume, con indicazione di tali valori. ```pythonresults = sector_year_percent_variation .join(ticker_max_percent_var) .map(((sector, year), (total_percent_variation, (max_ticker, max_percent_var)) -> ((sector, year), (total_percent_variation, max_ticker, max_percent_var)))) .join(ticker_max_volume) .map(((sector, year), ((total_percent_variation, max_ticker, max_percent_var), (ticker, max_volume))) -> ((sector, year), (total_percent_variation, max_ticker, max_percent_var, ticker, max_volume))) .sortBySectorYear()``` Job 3 Generare le coppie di aziende che si somigliano (sulla base di una soglia = 1%) in termini divariazione percentuale mensile nell’anno 2017. Mostrare l’andamento mensile delle due aziende nel formato:- 1:{Apple, Intel}: - GEN: Apple +2%, Intel +2,5%, - FEB: Apple +3%, Intel +2,7%, - MAR: Apple +0,5%, Intel +1,2%, ...- 2:{Amazon, IBM}: - GEN: Amazon +1%, IBM +0,5%, - FEB: Amazon +0,7%, IBM +0,5%, - MAR: Amazon +1,4%, IBM +0,7%, ... MapReduce Nel primo Mapper si fa il parsing dei campi necessari per il job, e si conservano solo i record con data relativa al 2017. ```pythonclass first_mapper: for row in INPUT: ticker, closePrice, date = row filter out all the years but 2017 if date.year != 2017: continue print(ticker, closePrice, date)``` Come già detto nella sezione sull'analisi del dataset, la scelta dell'utilizzo del `ticker` come chiave invece del `company_name` è per evitare di dover gestire proprio questi ultimi valori duplicati. 
Sarebbe stato possibile effettuando un preprocessamento, e non sarebbe stato banale aggregare i dati dei ticker con lo stesso nome di azienda.Il Reducer definisce un dizionario `tickerToMonthVar` che conterrà, per ogni ticker, i valori di chiusura iniziali e finali di ogni mese dell'anno. Il formato è indicato nel commento di seguito. ```pythonclass first_reducer: saves the monthly first and last close price for each ticker (along with their dates for comparing) tickerToMonthVar = {'AAPL': { GEN: {'first_close': 15, 'last_close': 20, 'first_date': .., 'last_date': ..}, FEB: {'first_close': 20, 'last_close': 2, ...} ... DIC: {'first_close': 5, 'last_close': 2, ...} ...}tickerToMonthVar = {}``` A questo punto si leggono le righe provenienti dal primo Mapper, inserendo il mese e i corrispondenti valori per ogni ticker. Anche qui, se i dati non sono presenti nel dizionario vanno inizializzati, oppure aggiornati se erano presenti. ```pythonfor row in INPUT: ticker, closePrice, date = row the ticker and month are already in the dict, update them if (ticker in tickerToMonthVar) and (date.month in tickerToMonthVar[ticker]): currTickerMonth = tickerToMonthVar[ticker][date.month] if date < currTickerMonth['first_date']: currTickerMonth['first_close'] = closePrice currTickerMonth['first_date'] = date if date > currTickerMonth['last_date']: currTickerMonth['last_close'] = closePrice currTickerMonth['last_date'] = date insert ticker data in the dict else: currTickerMonth = {'first_close': closePrice, 'last_close': closePrice, 'first_date': date, 'last_date': date} if ticker not in tickerToMonthVar: tickerToMonthVar[ticker] = {date.month: currTickerMonth} else: tickerToMonthVar[ticker][date.month] = currTickerMonth``` Per mandare i dati al prossimo Mapper si è fatta una separazione, per ogni ticker, dei mesi corrispondenti. Si itera su tutti i ticker, e poi su tutti i mesi dei ticker, per stampare la variazione percentuale associata.Ciò introduce una inefficienza, poiché nel secondo Reducer essi verranno di nuovo aggregati. Tuttavia ciò è stato necessario per via del paradigma di programmazione di MapReduce. ```python print the data structure calculating the monthly percent variationfor ticker in tickerToMonthVar: yearData = tickerToMonthVar[ticker] iterate over all months for the ticker for month in yearData: initialClose = yearData[month]['first_close'] finalClose = yearData[month]['last_close'] prints ('AAPL', 3, 25.3) separating each month for the same ticker print(ticker, month, calculatePercVar(initialClose, finalClose))``` Il secondo Mapper restituisce l'identità, non effettuando nessuna operazione. ```pythonclass second_mapper: for line in INPUT: ticker, month, percent_variation = line print(ticker, month, percent_variation)``` Nel secondo Reducer si definiscono due dizionari con le seguenti chiavi e valori:- `tickerToMonthsVar(ticker)` contiene un dizionario, per ogni ticker, coi seguenti valori: - GEN: percent_variation - FEB: percent_variation - ... - DIC: percent_variation- `crossProduct(ticker_1, ticker_2)` contiene un dizionario, per ogni coppia di ticker validi, con: - GEN: (percent_variation_ticker_1, percent_variation_ticker_2) - FEB: (percent_variation_ticker_1, percent_variation_ticker_2) - ... - DIC: (percent_variation_ticker_1, percent_variation_ticker_2) Per semplicità si è adottato un formato più semplice rispetto alle specifiche, che richiedevano di inserire il ticker delle aziende su ogni coppia di variazioni percentuali. 
In the implementation they are only included at the beginning. Moreover, the percent-variation threshold within which two companies are considered similar was set to THRESHOLD = 1%.

```python
class second_reducer:
    THRESHOLD = 1
    # the dict aggregates all months for each ticker
    # tickerToMonthsVar = {'AAPL': {GEN: 13.5, FEB: 12.0, ... , DIC: -5.3}, ...}
    tickerToMonthsVar = {}
    # structure to contain the cross product (without duplicates or inverted pairs)
    # crossProduct = {('AAPL', 'AMZN'): {GEN: (2, 2.6), FEB: (-1, 3.7), ... },
    #                 ('AAPL', 'BTP'): {GEN: (2, -1), FEB: (-1, 3.4), ...}}
    crossProduct = {}
```

A `mergeTickerPair` function was also defined to combine two entries of `tickerToMonthsVar`, obtaining the pairs of percent variations for each month. The function returns only the pairs of companies that resemble each other, checking that the 1% threshold is respected for every month. A choice made only in the MapReduce implementation was to also consider pairs of companies that do not have all 12 months, as long as they have data for the same months. For example, pairs where ticker_1 has only January and ticker_2 only February are discarded, while if both had January they would be kept. In the other implementations (Hive, Spark) only companies with all 12 months are considered, which reduces the number of output records (from about 600 to about 450).

```python
# generates a merged pair to insert in the crossProduct data structure
# it will return None if the pair of tickers is not similar enough (or months not consistent)
def mergeTickerPair(ticker1, ticker2, tickerData):
    result = {}
    ticker1Data = tickerData[ticker1]
    ticker2Data = tickerData[ticker2]
    # the comparison will fail if the months data are not consistent
    if ticker1Data.keys() != ticker2Data.keys():
        return None
    for month in ticker1Data:
        percVar1 = ticker1Data[month]
        percVar2 = ticker2Data[month]
        percent_difference = abs(percVar1 - percVar2)
        if percent_difference <= THRESHOLD:
            result[month] = (percVar1, percVar2)
        else:
            return None
    return result
```

At this point the second Reducer receives the (ticker: month_percent_variation) pairs from the second Mapper. As anticipated, they are aggregated into `tickerToMonthsVar`, which holds all the months of each ticker.

```python
# each month is unique for a given ticker (we assume no duplicates)
for row in sys.stdin:
    ticker, month, percVar = row
    if ticker not in tickerToMonthsVar:
        tickerToMonthsVar[ticker] = {month: percVar}
    else:
        tickerToMonthsVar[ticker][month] = percVar
```

It then iterates over every pair of tickers, performing a Cartesian product. Only the ticker pairs that are similar according to the specification are kept, using the `mergeTickerPair` function, which checks the individual percent-variation values and then merges the two dictionaries. During the iteration, pairs already present in the dictionary (including inverted ones) and pairs of a ticker with itself are skipped.
```python
    for ticker_1 in tickerToMonthsVar:
        for ticker_2 in tickerToMonthsVar:
            if (ticker_1, ticker_2) in crossProduct or (ticker_2, ticker_1) in crossProduct or ticker_1 == ticker_2:
                continue
            else:
                mergedPair = mergeTickerPair(ticker_1, ticker_2, tickerToMonthsVar)
                # the pair was not valid
                if mergedPair is None:
                    continue
                else:
                    crossProduct[(ticker_1, ticker_2)] = mergedPair

    for (ticker_1, ticker_2) in crossProduct:
        pair = (ticker_1, ticker_2)
        print(pair, crossProduct[pair])
```

Hive For the last job several external tables are created and then used to build the final table:

* Filter the database to keep only the 2017 data
```SQL
create table 2017_data as
select ticker, price_date, extract(month from price_date), close_price
from historical_stock_prices(size)
where extract(year from price_date) = 2017
order by ticker, price_date;
alter table 2017_data change `_c2` month int;
```
* For each ticker and month, extract the first and last quotation date of that ticker in that month
```SQL
create table ticker_month_to_max_min_date as
select ticker, month, min(price_date) as min_date, max(price_date) as max_date
from 2017_data
group by ticker, month;
```
* For each ticker and month, extract the first quotation of that ticker in that month
```SQL
create table ticker_to_first_month_quotation as
select d.ticker, d.month, d.close_price
from 2017_data as d
left join ticker_month_to_max_min_date as tm on d.ticker = tm.ticker
where price_date = min_date;
```
* For each ticker and month, extract the last quotation of that ticker in that month
```SQL
create table ticker_to_last_month_quotation as
select d.ticker, d.month, d.close_price
from 2017_data as d
left join ticker_month_to_max_min_date as tm on d.ticker = tm.ticker
where price_date = max_date;
```
* For each ticker and month, extract both the first and the last quotation of the ticker in that month (join of the two previous tables)
```SQL
create table ticker_to_first_last_month_quotation as
select first.ticker, first.month, first.close_price as first_quotation, last.close_price as last_quotation
from ticker_to_first_month_quotation as first
left join ticker_to_last_month_quotation as last
on first.ticker = last.ticker and first.month = last.month
order by ticker, month;
```
* For each ticker and month, compute the percent variation of that ticker in that month
```SQL
create table ticker_month_to_variation as
select ticker, month, (((last_quotation - first_quotation)/first_quotation)*100) as variation
from ticker_to_first_last_month_quotation
order by ticker, month;
```
* For each pair of tickers and for each month, extract the percent variation of the first and of the second ticker for that month
```SQL
create table variations_comparison as
select t1.ticker as ticker_1, t2.ticker as ticker_2, t1.month,
       cast(t1.variation as decimal(10,2)) as variation_1,
       cast(t2.variation as decimal(10,2)) as variation_2
from ticker_month_to_variation as t1, ticker_month_to_variation as t2
where t1.ticker > t2.ticker and t1.month = t2.month and (abs(t1.variation - t2.variation) <= 1)
order by ticker_1, ticker_2, t1.month;
```
* Build the results table by grouping by ticker pair and pivoting the values of the "month" column into new columns, one per month, filled with the corresponding values for that month.
```SQL
create table raw_results as
select ticker_1 as t1, ticker_2 as t2,
max(case when month="1" then "GEN:"||" "||"("||variation_1||"%"||", "||variation_2||"%"||")" else "" end) as gen,
max(case when month="2" then "FEB:"||" "||"("||variation_1||"%"||", "||variation_2||"%"||")" else "" end) as feb,
max(case when month="3" then "MAR:"||" "||"("||variation_1||"%"||", "||variation_2||"%"||")" else "" end) as mar,
max(case when month="4" then "APR:"||" "||"("||variation_1||"%"||", "||variation_2||"%"||")" else "" end) as apr,
max(case when month="5" then "MAG:"||" "||"("||variation_1||"%"||", "||variation_2||"%"||")" else "" end) as mag,
max(case when month="6" then "GIU:"||" "||"("||variation_1||"%"||", "||variation_2||"%"||")" else "" end) as giu,
max(case when month="7" then "LUG:"||" "||"("||variation_1||"%"||", "||variation_2||"%"||")" else "" end) as lug,
max(case when month="8" then "AGO:"||" "||"("||variation_1||"%"||", "||variation_2||"%"||")" else "" end) as ago,
max(case when month="9" then "SET:"||" "||"("||variation_1||"%"||", "||variation_2||"%"||")" else "" end) as sep,
max(case when month="10" then "OTT:"||" "||"("||variation_1||"%"||", "||variation_2||"%"||")" else "" end) as ott,
max(case when month="11" then "NOV:"||" "||"("||variation_1||"%"||", "||variation_2||"%"||")" else "" end) as nov,
max(case when month="12" then "DIC:"||" "||"("||variation_1||"%"||", "||variation_2||"%"||")" else "" end) as dic
from variations_comparison
group by ticker_1, ticker_2;
```
* When showing the results, the final table is filtered so that it only contains the ticker pairs that resemble each other in all twelve months.
```SQL
create table job3_hive as
select * from raw_results
where (gen!="" and feb!="" and mar!="" and apr!="" and mag!="" and giu!="" and lug!="" and ago!="" and sep!="" and ott!="" and nov!="" and dic!="");
```

Spark The data contained in `historical_stock_prices` is loaded from the .csv into an RDD (and partitioned across threads) only when an action is requested.

```python
historical_stock_prices = loadDataFromCsv()
```

Filter the data to keep only the records for the year 2017.

```python
ticker_close_date = historical_stock_prices
    .filter(x -> year(price_date) == 2017)
    .map(x -> (ticker, close_price, price_date))
```

Map the values so that, for each ticker and month, the corresponding original tuple is available.

```python
ticker_month_to_list = ticker_close_date
    .map((ticker, close_price, price_date) -> ((ticker, month(price_date)), (ticker, close_price, price_date)))
```

A reduce by key yields, for each ticker and month, the earliest date available in that month for that ticker and the close price on that date; a map then keeps only that close price.

```python
ticker_month_to_mindate = ticker_month_to_list
    .reduceByKey(min_price_and_date)
    .map(((ticker, month), (ticker, min_close_price, min_price_date)) -> ((ticker, month), min_close_price))
```

A reduce by key yields, for each ticker and month, the latest date available in that month for that ticker and the close price on that date; a map then keeps only that close price.

```python
ticker_month_to_maxdate = ticker_month_to_list
    .reduceByKey(max_price_and_date)
    .map(((ticker, month), (ticker, max_close_price, max_price_date)) -> ((ticker, month), max_close_price))
```

The two previous RDDs are joined, and a map computes, for each ticker and month, the percent variation of that ticker in that month.

```python
ticker_month_variation = ticker_month_to_mindate
    .join(ticker_month_to_maxdate)
    .map(((ticker, month), (min_close_price, max_close_price)) ->
         ((ticker, month), percent_variation(min_close_price, max_close_price)))
```

Group the percent variations of all months for each ticker, filter out the records that do not cover a full year, and sort by month.

```python
ticker_aggregate_months = ticker_month_variation
    .map(((ticker, month),
          perc_variation) -> (ticker, (month, variation)))
    .groupByKey()
    .filter(length(list of (month, variation)) == 12)
    .map((ticker, list of (month, variation)) -> (ticker, sorted list by month))
```

It performs the Cartesian product to enumerate all possible ticker pairs, keeps each unordered pair only once, and filters the pairs that resemble each other within a threshold (1%) of monthly percent variation.

```python
ticker_pairs_threshold = ticker_aggregate_months
    .cartesian(ticker_aggregate_months)
    .filter(ticker_1 < ticker_2 and abs(variation_1 - variation_2) < 1)
    .map(((ticker_1, sorted list by month), (ticker_2, sorted list by month)) ->
         ((ticker_1, ticker_2), merged list of months))
```

Results Below are the results obtained by running the different implementations of the jobs, together with a comparison of the execution times as the size of the input dataset varies (the sizes are described in the first section).
###Code
import pandas as pd
import seaborn as sns
%matplotlib inline
import matplotlib.pyplot as plt
sns.set()
job1_data = pd.read_csv('./benchmarks/job1_all_data.csv')
job2_data = pd.read_csv('./benchmarks/job2_all_data.csv')
job3_data = pd.read_csv('./benchmarks/job3_all_data.csv')
def plot_job_benchmark(job_data, job_n):
plt.figure(figsize=(14,8))
fig = sns.lineplot(x='variable', y='value', hue='tech', data=job_data, marker="o", linewidth = 2.5,
palette=["C0", "C1", "C2", "C0", "C1", "C2"], style='tech',
dashes=["", "", "", (2, 2), (2, 2), (2, 2)])
plt.xlabel("Dataset Size (MB)")
plt.ylabel("Time (Minutes)")
plt.title("Job"+str(job_n)+" Execution Times")
    fig.legend(title='Technology')
    plt.show()
###Output
_____no_output_____
###Markdown
Job 1 Results of the first job
```
Ticker  First Quot.  Last Quot.   Pct. Var.    Min Price  Max Price
A       1999-11-18   2018-08-24   109.636%     7.510      115.879
AA      1970-01-02   2018-08-24   508.325%     3.604      117.194
AABA    1996-04-12   2018-08-24   4910.909%    0.645      125.031
AAC     2018-01-16   2018-08-24   4.856%       7.789      12.960
AAL     2005-09-27   2018-08-24   101.139%     1.450      63.270
AAME    1980-03-17   2018-08-24   -29.870%     0.375      15.800
AAN     1987-01-20   2018-08-24   4683.263%    0.481      51.529
AAOI    2013-09-26   2018-08-24   330.421%     8.079      103.410
AAON    1992-12-16   2018-08-24   41348.203%   0.089      43.299
AAP     2001-11-29   2018-08-24   1084.149%    12.329     201.240
```
Below is the graph comparing execution times as the dataset size varies, both on the cluster (dashed lines) and locally (solid lines).
###Code
plot_job_benchmark(job1_data, 1)
###Output
_____no_output_____
###Markdown
All the technologies show a generally linear trend, both locally and on the cluster. In particular, Spark run locally is computationally more expensive than MapReduce; on the cluster, thanks to the large amount of hardware resources available, it is by a good margin the best-performing technology. MapReduce and Hive show smaller improvements with respect to their local runs.

Job 2 Results of the second job
```
Sector            Year  Tot. Var.  Ticker  Max. Var.   Ticker  Max. Volume
BASIC INDUSTRIES  2009  3.482      GURE    709.722     FCX     9141685400.0
BASIC INDUSTRIES  2010  21.790     BLD     519.802     FCX     6891808600.0
BASIC INDUSTRIES  2011  -58.600    ROAD    188.704     FCX     5150807800.0
BASIC INDUSTRIES  2012  -68.788    PATK    261.860     VALE    4659766700.0
BASIC INDUSTRIES  2013  10.322     XRM     416.927     VALE    4428233700.0
BASIC INDUSTRIES  2014  -71.902    BLD     884.599     VALE    5660183200.0
BASIC INDUSTRIES  2015  -48.101    SUM     35191.629   FCX     7286761300.0
BASIC INDUSTRIES  2016  13.829     TECK    451.790     FCX     10464699500.0
BASIC INDUSTRIES  2017  15.279     OPNT    310.178     VALE    7023267600.0
BASIC INDUSTRIES  2018  -3.079     XRM     213.817     VALE    3710091900.0
```
Below is the graph comparing execution times as the dataset size varies, both on the cluster (dashed lines) and locally (solid lines).
###Code
plot_job_benchmark(job2_data, 2)
###Output
_____no_output_____
###Markdown
This graph gives a better view of how execution times evolve as the input size varies. Hive is by far the most expensive technology, and its cluster times turn out to be more than double those of the MapReduce and Spark counterparts. Locally it keeps extremely high times for every input size, which are hard to reduce because of the Cartesian product the job requires. Locally, MapReduce and Spark behave almost identically; on the cluster both improve significantly, with MapReduce roughly halving its execution times and Spark performing even better.

Job 3 Results of the third job
```
(Ticker1, Ticker2) {Month: (Pct. Var. Month 1, Pct. Var. Month 2), ...}
('OSBCP', 'TCRZ') {'GEN': (1.678, 1.768), 'FEB': (0.389, 0.077), 'MAR': (0.0, 0.735), 'APR': (0.875, 1.124), 'MAG': (-0.095, -0.192), 'GIU': (1.156, 0.578), 'LUG': (0.190, -0.307), 'AGO': (-0.382, 0.269), 'SET': (-0.286, -0.546), 'OTT': (0.673, 0.387), 'NOV': (-0.095, -0.424), 'DIC': (-0.382, -0.276)}
('OSBCP', 'ISF') {'GEN': (1.678, 0.832), 'FEB': (0.389, 0.512), 'MAR': (0.0, -0.117), 'APR': (0.875, 0.698), 'MAG': (-0.095, -0.885), 'GIU': (1.156, 0.310), 'LUG': (0.190, 0.426), 'AGO': (-0.382, -0.385), 'SET': (-0.286, -0.541), 'OTT': (0.673, 0.194), 'NOV': (-0.095, -0.233), 'DIC': (-0.382, 0.038)}
('OXLCO', 'VRIG') {'GEN': (0.078, 0.247), 'FEB': (0.980, 0.159), 'MAR': (-0.583, 0.0), 'APR': (0.352, 0.433), 'MAG': (-0.039, -0.067), 'GIU': (-0.313, -0.118), 'LUG': (0.668, -0.079), 'AGO': (-0.078, -0.015), 'SET': (-0.859, 0.075), 'OTT': (-0.937, -0.051), 'NOV': (-0.828, -0.019), 'DIC': (0.474, 0.003)}
('OXLCO', 'VGSH') {'GEN': (0.078, 0.214), 'FEB': (0.980, 0.049), 'MAR': (-0.583, 0.197), 'APR': (0.352, 0.115), 'MAG': (-0.039, 0.164), 'GIU': (-0.313, -0.016), 'LUG': (0.668, 0.214), 'AGO': (-0.078, 0.164), 'SET': (-0.859, -0.131), 'OTT': (-0.937, -0.098), 'NOV': (-0.828, -0.148), 'DIC': (0.474, -0.198)}
('OXLCO', 'VCSH') {'GEN': (0.078, 0.441), 'FEB': (0.980, 0.364), 'MAR': (-0.583, 0.163), 'APR': (0.352, 0.288), 'MAG': (-0.039, 0.501), 'GIU': (-0.313, 0.087), 'LUG': (0.668, 0.463), 'AGO': (-0.078, 0.261), 'SET': (-0.859, -0.012), 'OTT': (-0.937, 0.062), 'NOV': (-0.828, -0.250), 'DIC': (0.474, -0.276)}
('OXLCO', 'CIU') {'GEN': (0.078, 0.462), 'FEB': (0.980, 0.775), 'MAR': (-0.583, 0.322), 'APR': (0.352, 0.513), 'MAG': (-0.039, 0.704), 'GIU': (-0.313, 0.009), 'LUG': (0.668, 0.813), 'AGO': (-0.078, 0.408), 'SET': (-0.859, -0.054), 'OTT': (-0.937, 0.018), 'NOV': (-0.828, -0.218), 'DIC': (0.474, -0.128)}
('OXLCO', 'ECCB') {'GEN': (0.078, 0.196), 'FEB': (0.980, 0.765), 'MAR': (-0.583, 0.308), 'APR': (0.352, 0.193), 'MAG': (-0.039, 0.613), 'GIU': (-0.313, -0.644), 'LUG': (0.668, 1.006), 'AGO': (-0.078, 0.723), 'SET': (-0.859, -0.643), 'OTT': (-0.937, -0.950), 'NOV': (-0.828, -0.076), 'DIC': (0.474, 0.114)}
('OXLCO', 'FTSM') {'GEN': (0.078, 0.033), 'FEB': (0.980, 0.083), 'MAR': (-0.583, -0.016), 'APR': (0.352, 0.0), 'MAG': (-0.039, -0.016), 'GIU': (-0.313, 0.008), 'LUG': (0.668, 0.016), 'AGO': (-0.078, 0.016), 'SET': (-0.859, 0.0), 'OTT': (-0.937, 0.049), 'NOV': (-0.828, -0.016), 'DIC': (0.474, -0.049)}
('OXLCO', 'HYLS') {'GEN': (0.078, 0.144), 'FEB': (0.980, 1.049), 'MAR': (-0.583, -0.832), 'APR': (0.352, 0.429), 'MAG': (-0.039, 0.386), 'GIU': (-0.313, -0.606), 'LUG': (0.668, 0.567), 'AGO': (-0.078, -0.685), 'SET': (-0.859, -0.406), 'OTT': (-0.937, -0.346), 'NOV': (-0.828, -0.654), 'DIC': (0.474, 0.020)}
('OXLCO', 'HYXE') {'GEN': (0.078, 0.434), 'FEB': (0.980, 1.196), 'MAR': (-0.583, -1.427), 'APR': (0.352, 0.868), 'MAG': (-0.039, 0.428), 'GIU': (-0.313, 0.283), 'LUG': (0.668, 0.767), 'AGO': (-0.078, -0.319), 'SET': (-0.859, -0.286), 'OTT': (-0.937, -0.009), 'NOV': (-0.828, -0.440), 'DIC': (0.474, -0.351)}
```
Below is the graph comparing execution times as the dataset size varies, both on the cluster (dashed lines) and locally (solid lines).
###Code
plot_job_benchmark(job3_data, 3)
###Output
_____no_output_____
###Markdown
SEE: Simple Evolutionary Exploration By Katrina Gensterblum Image from: https://miro.medium.com/ --- Authors$\text{Katrina Gensterblum}^{1}$, $\text{Dirk Colbry}^{1}$, $\text{Cameron Hurley}^{2}$, $\text{Noah Stolz}^{3}$ $^{1}$ Department of Computational Mathematics, Science and Engineering, Michigan State University $^{2}$ Department of Computer Science and Engineering, Michigan State University $^{3}$ School of Science, School of Humanities and Social Sciences, Rensselaer Polytechnic Institute --- AbstractAs the ability to collect image data increases, images are used more and more within a wide range of disciplines. However, processing this kind of data can be difficult and labor-intensive. One of the most time-consuming image processing techniques to perform is image segmentation. As a result, many image segmentation algorithms have been developed to try and accomplish this task automatically, but even finding the best algorithm for a dataset can be time intensive. Here we provide easy-to-use software that utilizes the power of genetic algorithms to automate the process of image segmentation. The software works to find both the best image segmentation algorithm for an image dataset, but also find the best hyperparameters for that segmentation algorithm. ---- Statement of NeedAs technology advances, image data is becoming a common element in a broad scope of research experiments. Studies in everything from self-driving vehicles to plant biology utilize images in some capacity. However, every image analysis problem is different and processing this kind of data and retrieving specific information can be extremely time-consuming. One of the main image processing techniques used today, and one of the most time-consuming, is image segmentation, which attempts to find entire objects within an image. As a way to try and make this process easier, many image processing algorithms have been developed to try and automatically segment an image. However, there are many different options available, and each algorithm may work best for a different image set. Additionally, many of these algorithms have hyperparameters that need to be tuned in order to get the most accurate results. So even if a researcher already possesses knowledge in image understanding and segmentation, it can be time-consuming to run and validate a customized solution for their problem. Thus, if this process could be automated, a significant amount of researcher time could be recovered.The purpose of the Simple Evolutionary Exploration, or SEE, software package is to provide an easy-to-use tool that can achieve this automation for image segmentation problems. By utilizing the power of genetic algorithms, the software can not only find the best image segmentation algorithm to use on an image set, but can also find the optimal parameters for that specific algorithm. ---- Installation InstructionsA list of dependencies for SEE can be found in the [README](README.md) file.These dependencies can be installed individually, or by creating a conda environment using the command below: **With makefile:** `make init` **Manually:** `conda env create --prefix ./envs --file environment.yml` ---In order to build automatic documentation for the project use one of the commands below: **With makefile:** `make doc` **Manually:** `pdoc --force --html --output-dir ./docs see` ---- Unit TestsTesting files for SEE can be found in `.\see\tests\`. 
In order to run the tests, run the cell below or use one of the following commands: **With makefile:** `make test` **Manually:** `pytest -v see` If the tests ran successfully, an output message should appear stating that $25$ tests passed and $11$ warnings occurred.
###Code
!pytest -v see
###Output
_____no_output_____ |
neural_networks/2-autograd.ipynb | ###Markdown
Autograd
###Code
import torch
x = torch.ones(2, 2, requires_grad=True)
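# requires_grad=True tells autograd to record every operation on x so gradients can be computed later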
x
y = x + 2
y
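# y was created by an operation on a tensor that requires grad, so it carries a grad_fn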
y.grad_fn
z = y*y*3
out = z.mean()
print(z)
print(out)
a = torch.rand(3,3)
a = (2*a)+3
print(a)
print(a.requires_grad)
# change requires_grad in place
a.requires_grad_(True)
print(a)
b = (a*a).sum()
print(b)
print(b.grad_fn)
###Output
tensor([[3.1001, 4.3184, 4.5754],
[3.5336, 3.3831, 4.7765],
[3.9711, 3.7767, 3.1712]])
False
tensor([[3.1001, 4.3184, 4.5754],
[3.5336, 3.3831, 4.7765],
[3.9711, 3.7767, 3.1712]], requires_grad=True)
tensor(136.0296, grad_fn=<SumBackward0>)
<SumBackward0 object at 0x7f78ae01e070>
###Markdown
Compute Gradients
###Code
print(out)
out.backward()
# d(out)/dx
x.grad
x = torch.randn(3, requires_grad=True)
print(x)
y = x*2
while y.data.norm() < 1000:
y = y * 2
print(y)
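# y is not a scalar, so backward() will need an explicit gradient (vector) argument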
# vector-Jacobian product
v = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float)
y.backward(v)
print(x.grad)
print(x.requires_grad)
print((x ** 2).requires_grad)
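# inside torch.no_grad(), operations are not tracked, so the result does not require grad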
with torch.no_grad():
print((x ** 2).requires_grad)
###Output
True
True
False
###Markdown
More examples
###Code
import torch
t1 = torch.randn((3,5), requires_grad=True)
t1
a = torch.randn((3,3), requires_grad = True)
w1 = torch.randn((3,3), requires_grad = True)
w2 = torch.randn((3,3), requires_grad = True)
w3 = torch.randn((3,3), requires_grad = True)
w4 = torch.randn((3,3), requires_grad = True)
b = w1*a
c = w2*a
d = w3*b + w4*c
L = (10 - d).sum()
print("The grad fn for a is", a.grad_fn)
print("The grad fn for d is", d.grad_fn)
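# a is a user-created leaf tensor, so its grad_fn is None; d was produced by operations, so it has one.
# Backpropagate from the scalar loss L: gradients accumulate in the .grad of the leaf tensors.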
L.backward()
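# b is an intermediate (non-leaf) tensor, so its .grad is not retained by default and prints None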
print(b.grad)
###Output
None
|
Task1/Preprocessing - Other Features.ipynb | ###Markdown
General overview
###Code
df.info()
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np   # used below for np.log and np.random
import pandas as pd  # used below for pd.get_dummies
corr=df.corr()
sns.heatmap(corr, xticklabels=corr.columns.values, yticklabels=corr.columns.values)
###Output
_____no_output_____
###Markdown
Checking missing values
###Code
df.isnull().sum()
df_test.isnull().sum()
print(len(df))
print(len(df_test))
print(len(df.City.unique()))
print(len(df.State.unique()))
print(len(df.Type.unique()))
print(len(df.SWM.unique()))
df_test
###Output
_____no_output_____
###Markdown
Now dealing with each feature individually Removing obvious unnecessary features which are not required in prediction
###Code
df=df.drop(['City','State','Popuation [2001]'], axis=1)
df_test=df_test.drop(['City','State','Popuation [2001]'], axis=1)
###Output
_____no_output_____
###Markdown
1.Sex Ratio
###Code
# data distribution
df['Sex Ratio'].hist(bins=50)
# filling missing values with mean
mean_sr= df['Sex Ratio'].mean()
df['Sex Ratio'].fillna(mean_sr,inplace=True)
df_test['Sex Ratio'].fillna(mean_sr,inplace=True)
df['Sex Ratio'].isnull().sum()
# Dividing data points by 1000 to convert them in ratio
df['Sex Ratio']=df['Sex Ratio'].astype('int64')/1000
df_test['Sex Ratio']=df_test['Sex Ratio'].astype('int64')/1000
df['Sex Ratio']
###Output
_____no_output_____
###Markdown
2.SWM
###Code
# # data distribution
df['SWM'].value_counts().plot.bar()
# filling missing values with mode
mode_swm = df['SWM'].mode()[0]  # mode() returns a Series, so take its first value
df['SWM'].fillna(mode_swm,inplace=True)
df_test['SWM'].fillna(mode_swm,inplace=True)
df['SWM'].isnull().sum()
# Dealing with categorical values ... encoding them
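# one-hot encode SWM; one level ('MEDIUM') is dropped so the dummy columns are not perfectly collinear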
dummies_train = pd.get_dummies(df['SWM'])
dummies_train.drop(['MEDIUM'],axis=1,inplace=True)
dummies_test = pd.get_dummies(df_test['SWM'])
dummies_test.drop(['MEDIUM'],axis=1,inplace=True)
df = df.join(dummies_train)
df_test = df_test.join(dummies_test)
df_test.drop(['SWM'],axis=1,inplace=True)
df.drop(['SWM'],axis=1,inplace=True)
df
###Output
_____no_output_____
###Markdown
3.Median Age
###Code
df['Median Age'].hist(bins=50)
# filling missing values with median age
median_age=df['Median Age'].median()
df['Median Age'].fillna(median_age,inplace=True)
df_test['Median Age'].fillna(median_age,inplace=True)
df['Median Age'].isnull().sum()
df['Median Age']=df['Median Age'].astype('int64')
df_test['Median Age']=df_test['Median Age'].astype('int64')
###Output
_____no_output_____
###Markdown
4.Avg Temp
###Code
df['Avg Temp'].hist(bins=10)
F= np.log(df['Avg Temp'])
F.hist(bins=10)
## seems like no change when log transformation is applied
# filling missing values with mean temp
mean_temp=df['Avg Temp'].mean()
df['Avg Temp'].fillna(mean_temp,inplace=True)
df_test['Avg Temp'].fillna(mean_temp,inplace=True)
df['Avg Temp'].isnull().sum()
df['Avg Temp']=df['Avg Temp'].astype('int64')
df_test['Avg Temp']=df_test['Avg Temp'].astype('int64')
###Output
_____no_output_____
###Markdown
5.Hospitals
###Code
df['# of hospitals'].hist(bins=50)
# filling missing values with random int between 10-30
df['# of hospitals'].fillna(np.random.randint(low=10, high=30),inplace=True)
df_test['# of hospitals'].fillna(np.random.randint(low=10, high=30),inplace=True)
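# note: np.random.randint is called separately for train and test, so the two fill values may differ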
df['# of hospitals'].isnull().sum()
df['# of hospitals']=df['# of hospitals'].astype('int64')
df_test['# of hospitals']=df_test['# of hospitals'].astype('int64')
###Output
_____no_output_____
###Markdown
6.Toilets Avl
###Code
df['Toilets Avl'].hist(bins=50)
# filling missing values with mean
mean_toilet=df['Toilets Avl'].mean()
df['Toilets Avl'].fillna(mean_toilet,inplace=True)
df_test['Toilets Avl'].fillna(mean_toilet,inplace=True)
df['Toilets Avl'].isnull().sum()
df['Toilets Avl']=df['Toilets Avl'].astype('int64')
df_test['Toilets Avl']=df_test['Toilets Avl'].astype('int64')
###Output
_____no_output_____
###Markdown
7.Water Purity
###Code
df['Water Purity'].hist(bins=50)
# filling missing values with mean
mean_water=df['Water Purity'].mean()
df['Water Purity'].fillna(mean_water,inplace=True)
df_test['Water Purity'].fillna(mean_water,inplace=True)
df['Water Purity'].isnull().sum()
df['Water Purity']=df['Water Purity'].astype('int64')
df_test['Water Purity']=df_test['Water Purity'].astype('int64')
###Output
_____no_output_____
###Markdown
8.H Index
###Code
df['H Index'].hist(bins=50)
# filling missing values with mean
mean_index=df['H Index'].mean()
df['H Index'].fillna(mean_index,inplace=True)
df_test['H Index'].fillna(mean_index,inplace=True)
df['H Index'].isnull().sum()
###Output
_____no_output_____
###Markdown
9.Foreign Visitors
###Code
df['Foreign Visitors'].hist(bins=10)
max_viz=df['Foreign Visitors'].max()
min_viz=df['Foreign Visitors'].min()
min_viz
# filling missing values with random int between 100000-1000000
df['Foreign Visitors'].fillna(np.random.randint(low=100000, high=1000000),inplace=True)
df_test['Foreign Visitors'].fillna(np.random.randint(low=100000, high=1000000),inplace=True)
df['Foreign Visitors'].isnull().sum()
df['Foreign Visitors']=df['Foreign Visitors'].astype('int64')
df_test['Foreign Visitors']=df_test['Foreign Visitors'].astype('int64')
df.head(5)
df_test.head(5)
train_data = 'train_preprocessed.xlsx'   # placeholder target filename (left unspecified in the original)
test_data = 'test_preprocessed.xlsx'     # placeholder target filename (left unspecified in the original)
df.to_excel(train_data)        # pandas writes Excel files with to_excel (DataFrame.to_xlsx does not exist)
df_test.to_excel(test_data)
###Output
_____no_output_____ |
experiments/tuned_1v2/oracle.run2.framed/trials/12/trial.ipynb | ###Markdown
PTN Template This notebook serves as a template for single-dataset PTN experiments. It can be run on its own by setting STANDALONE to True (do a find for "STANDALONE" to see where), but it is intended to be executed as part of a *papermill.py script. See any of the experiments with a papermill script to get started with that workflow.
###Code
%load_ext autoreload
%autoreload 2
%matplotlib inline
import os, json, sys, time, random
import numpy as np
import torch
from torch.optim import Adam
from easydict import EasyDict
import matplotlib.pyplot as plt
from steves_models.steves_ptn import Steves_Prototypical_Network
from steves_utils.lazy_iterable_wrapper import Lazy_Iterable_Wrapper
from steves_utils.iterable_aggregator import Iterable_Aggregator
from steves_utils.ptn_train_eval_test_jig import PTN_Train_Eval_Test_Jig
from steves_utils.torch_sequential_builder import build_sequential
from steves_utils.torch_utils import get_dataset_metrics, ptn_confusion_by_domain_over_dataloader
from steves_utils.utils_v2 import (per_domain_accuracy_from_confusion, get_datasets_base_path)
from steves_utils.PTN.utils import independent_accuracy_assesment
from steves_utils.stratified_dataset.episodic_accessor import Episodic_Accessor_Factory
from steves_utils.ptn_do_report import (
get_loss_curve,
get_results_table,
get_parameters_table,
get_domain_accuracies,
)
from steves_utils.transforms import get_chained_transform
###Output
_____no_output_____
###Markdown
Required Parameters These are allowed parameters, not defaults. Each of these values needs to be present in the injected parameters (the notebook will raise an exception if they are not present). Papermill uses the cell tag "parameters" to inject the real parameters below this cell. Enable tags to see what I mean.
###Code
required_parameters = {
"experiment_name",
"lr",
"device",
"seed",
"dataset_seed",
"labels_source",
"labels_target",
"domains_source",
"domains_target",
"num_examples_per_domain_per_label_source",
"num_examples_per_domain_per_label_target",
"n_shot",
"n_way",
"n_query",
"train_k_factor",
"val_k_factor",
"test_k_factor",
"n_epoch",
"patience",
"criteria_for_best",
"x_transforms_source",
"x_transforms_target",
"episode_transforms_source",
"episode_transforms_target",
"pickle_name",
"x_net",
"NUM_LOGS_PER_EPOCH",
"BEST_MODEL_PATH",
"torch_default_dtype"
}
standalone_parameters = {}
standalone_parameters["experiment_name"] = "STANDALONE PTN"
standalone_parameters["lr"] = 0.0001
standalone_parameters["device"] = "cuda"
standalone_parameters["seed"] = 1337
standalone_parameters["dataset_seed"] = 1337
standalone_parameters["num_examples_per_domain_per_label_source"]=100
standalone_parameters["num_examples_per_domain_per_label_target"]=100
standalone_parameters["n_shot"] = 3
standalone_parameters["n_query"] = 2
standalone_parameters["train_k_factor"] = 1
standalone_parameters["val_k_factor"] = 2
standalone_parameters["test_k_factor"] = 2
standalone_parameters["n_epoch"] = 100
standalone_parameters["patience"] = 10
standalone_parameters["criteria_for_best"] = "target_accuracy"
standalone_parameters["x_transforms_source"] = ["unit_power"]
standalone_parameters["x_transforms_target"] = ["unit_power"]
standalone_parameters["episode_transforms_source"] = []
standalone_parameters["episode_transforms_target"] = []
standalone_parameters["torch_default_dtype"] = "torch.float32"
standalone_parameters["x_net"] = [
{"class": "nnReshape", "kargs": {"shape":[-1, 1, 2, 256]}},
{"class": "Conv2d", "kargs": { "in_channels":1, "out_channels":256, "kernel_size":(1,7), "bias":False, "padding":(0,3), },},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features":256}},
{"class": "Conv2d", "kargs": { "in_channels":256, "out_channels":80, "kernel_size":(2,7), "bias":True, "padding":(0,3), },},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features":80}},
{"class": "Flatten", "kargs": {}},
{"class": "Linear", "kargs": {"in_features": 80*256, "out_features": 256}}, # 80 units per IQ pair
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm1d", "kargs": {"num_features":256}},
{"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}},
]
# Parameters relevant to results
# These parameters will basically never need to change
standalone_parameters["NUM_LOGS_PER_EPOCH"] = 10
standalone_parameters["BEST_MODEL_PATH"] = "./best_model.pth"
# uncomment for CORES dataset
from steves_utils.CORES.utils import (
ALL_NODES,
ALL_NODES_MINIMUM_1000_EXAMPLES,
ALL_DAYS
)
standalone_parameters["labels_source"] = ALL_NODES
standalone_parameters["labels_target"] = ALL_NODES
standalone_parameters["domains_source"] = [1]
standalone_parameters["domains_target"] = [2,3,4,5]
standalone_parameters["pickle_name"] = "cores.stratified_ds.2022A.pkl"
# Uncomment these for ORACLE dataset
# from steves_utils.ORACLE.utils_v2 import (
# ALL_DISTANCES_FEET,
# ALL_RUNS,
# ALL_SERIAL_NUMBERS,
# )
# standalone_parameters["labels_source"] = ALL_SERIAL_NUMBERS
# standalone_parameters["labels_target"] = ALL_SERIAL_NUMBERS
# standalone_parameters["domains_source"] = [8,20, 38,50]
# standalone_parameters["domains_target"] = [14, 26, 32, 44, 56]
# standalone_parameters["pickle_name"] = "oracle.frame_indexed.stratified_ds.2022A.pkl"
# standalone_parameters["num_examples_per_domain_per_label_source"]=1000
# standalone_parameters["num_examples_per_domain_per_label_target"]=1000
# Uncomment these for Metahan dataset
# standalone_parameters["labels_source"] = list(range(19))
# standalone_parameters["labels_target"] = list(range(19))
# standalone_parameters["domains_source"] = [0]
# standalone_parameters["domains_target"] = [1]
# standalone_parameters["pickle_name"] = "metehan.stratified_ds.2022A.pkl"
# standalone_parameters["n_way"] = len(standalone_parameters["labels_source"])
# standalone_parameters["num_examples_per_domain_per_label_source"]=200
# standalone_parameters["num_examples_per_domain_per_label_target"]=100
standalone_parameters["n_way"] = len(standalone_parameters["labels_source"])
# Parameters
parameters = {
"experiment_name": "tuned_1v2:oracle.run2.framed",
"device": "cuda",
"lr": 0.0001,
"labels_source": [
"3123D52",
"3123D65",
"3123D79",
"3123D80",
"3123D54",
"3123D70",
"3123D7B",
"3123D89",
"3123D58",
"3123D76",
"3123D7D",
"3123EFE",
"3123D64",
"3123D78",
"3123D7E",
"3124E4A",
],
"labels_target": [
"3123D52",
"3123D65",
"3123D79",
"3123D80",
"3123D54",
"3123D70",
"3123D7B",
"3123D89",
"3123D58",
"3123D76",
"3123D7D",
"3123EFE",
"3123D64",
"3123D78",
"3123D7E",
"3124E4A",
],
"episode_transforms_source": [],
"episode_transforms_target": [],
"domains_source": [8, 32, 50],
"domains_target": [14, 20, 26, 38, 44],
"num_examples_per_domain_per_label_source": -1,
"num_examples_per_domain_per_label_target": -1,
"n_shot": 3,
"n_way": 16,
"n_query": 2,
"train_k_factor": 3,
"val_k_factor": 2,
"test_k_factor": 2,
"torch_default_dtype": "torch.float32",
"n_epoch": 50,
"patience": 3,
"criteria_for_best": "target_accuracy",
"x_net": [
{"class": "nnReshape", "kargs": {"shape": [-1, 1, 2, 256]}},
{
"class": "Conv2d",
"kargs": {
"in_channels": 1,
"out_channels": 256,
"kernel_size": [1, 7],
"bias": False,
"padding": [0, 3],
},
},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features": 256}},
{
"class": "Conv2d",
"kargs": {
"in_channels": 256,
"out_channels": 80,
"kernel_size": [2, 7],
"bias": True,
"padding": [0, 3],
},
},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features": 80}},
{"class": "Flatten", "kargs": {}},
{"class": "Linear", "kargs": {"in_features": 20480, "out_features": 256}},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm1d", "kargs": {"num_features": 256}},
{"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}},
],
"NUM_LOGS_PER_EPOCH": 10,
"BEST_MODEL_PATH": "./best_model.pth",
"pickle_name": "oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl",
"x_transforms_source": ["unit_power"],
"x_transforms_target": ["unit_power"],
"dataset_seed": 7,
"seed": 7,
}
# Set this to True if you want to run this template directly
STANDALONE = False
if STANDALONE:
print("parameters not injected, running with standalone_parameters")
parameters = standalone_parameters
if not 'parameters' in locals() and not 'parameters' in globals():
raise Exception("Parameter injection failed")
#Use an easy dict for all the parameters
p = EasyDict(parameters)
supplied_keys = set(p.keys())
if supplied_keys != required_parameters:
print("Parameters are incorrect")
if len(supplied_keys - required_parameters)>0: print("Shouldn't have:", str(supplied_keys - required_parameters))
if len(required_parameters - supplied_keys)>0: print("Need to have:", str(required_parameters - supplied_keys))
raise RuntimeError("Parameters are incorrect")
###################################
# Set the RNGs and make it all deterministic
###################################
np.random.seed(p.seed)
random.seed(p.seed)
torch.manual_seed(p.seed)
torch.use_deterministic_algorithms(True)
###########################################
# The stratified datasets honor this
###########################################
torch.set_default_dtype(eval(p.torch_default_dtype))
###################################
# Build the network(s)
# Note: It's critical to do this AFTER setting the RNG
# (This is due to the randomized initial weights)
###################################
x_net = build_sequential(p.x_net)
start_time_secs = time.time()
###################################
# Build the dataset
###################################
if p.x_transforms_source == []: x_transform_source = None
else: x_transform_source = get_chained_transform(p.x_transforms_source)
if p.x_transforms_target == []: x_transform_target = None
else: x_transform_target = get_chained_transform(p.x_transforms_target)
if p.episode_transforms_source == []: episode_transform_source = None
else: raise Exception("episode_transform_source not implemented")
if p.episode_transforms_target == []: episode_transform_target = None
else: raise Exception("episode_transform_target not implemented")
eaf_source = Episodic_Accessor_Factory(
labels=p.labels_source,
domains=p.domains_source,
num_examples_per_domain_per_label=p.num_examples_per_domain_per_label_source,
iterator_seed=p.seed,
dataset_seed=p.dataset_seed,
n_shot=p.n_shot,
n_way=p.n_way,
n_query=p.n_query,
train_val_test_k_factors=(p.train_k_factor,p.val_k_factor,p.test_k_factor),
pickle_path=os.path.join(get_datasets_base_path(), p.pickle_name),
x_transform_func=x_transform_source,
example_transform_func=episode_transform_source,
)
train_original_source, val_original_source, test_original_source = eaf_source.get_train(), eaf_source.get_val(), eaf_source.get_test()
eaf_target = Episodic_Accessor_Factory(
labels=p.labels_target,
domains=p.domains_target,
num_examples_per_domain_per_label=p.num_examples_per_domain_per_label_target,
iterator_seed=p.seed,
dataset_seed=p.dataset_seed,
n_shot=p.n_shot,
n_way=p.n_way,
n_query=p.n_query,
train_val_test_k_factors=(p.train_k_factor,p.val_k_factor,p.test_k_factor),
pickle_path=os.path.join(get_datasets_base_path(), p.pickle_name),
x_transform_func=x_transform_target,
example_transform_func=episode_transform_target,
)
train_original_target, val_original_target, test_original_target = eaf_target.get_train(), eaf_target.get_val(), eaf_target.get_test()
transform_lambda = lambda ex: ex[1] # Original is (<domain>, <episode>) so we strip down to episode only
train_processed_source = Lazy_Iterable_Wrapper(train_original_source, transform_lambda)
val_processed_source = Lazy_Iterable_Wrapper(val_original_source, transform_lambda)
test_processed_source = Lazy_Iterable_Wrapper(test_original_source, transform_lambda)
train_processed_target = Lazy_Iterable_Wrapper(train_original_target, transform_lambda)
val_processed_target = Lazy_Iterable_Wrapper(val_original_target, transform_lambda)
test_processed_target = Lazy_Iterable_Wrapper(test_original_target, transform_lambda)
datasets = EasyDict({
"source": {
"original": {"train":train_original_source, "val":val_original_source, "test":test_original_source},
"processed": {"train":train_processed_source, "val":val_processed_source, "test":test_processed_source}
},
"target": {
"original": {"train":train_original_target, "val":val_original_target, "test":test_original_target},
"processed": {"train":train_processed_target, "val":val_processed_target, "test":test_processed_target}
},
})
# Some quick unit tests on the data
from steves_utils.transforms import get_average_power, get_average_magnitude
q_x, q_y, s_x, s_y, truth = next(iter(train_processed_source))
assert q_x.dtype == eval(p.torch_default_dtype)
assert s_x.dtype == eval(p.torch_default_dtype)
print("Visually inspect these to see if they line up with expected values given the transforms")
print('x_transforms_source', p.x_transforms_source)
print('x_transforms_target', p.x_transforms_target)
print("Average magnitude, source:", get_average_magnitude(q_x[0].numpy()))
print("Average power, source:", get_average_power(q_x[0].numpy()))
q_x, q_y, s_x, s_y, truth = next(iter(train_processed_target))
print("Average magnitude, target:", get_average_magnitude(q_x[0].numpy()))
print("Average power, target:", get_average_power(q_x[0].numpy()))
###################################
# Build the model
###################################
model = Steves_Prototypical_Network(x_net, device=p.device, x_shape=(2,256))
optimizer = Adam(params=model.parameters(), lr=p.lr)
###################################
# train
###################################
jig = PTN_Train_Eval_Test_Jig(model, p.BEST_MODEL_PATH, p.device)
jig.train(
train_iterable=datasets.source.processed.train,
source_val_iterable=datasets.source.processed.val,
target_val_iterable=datasets.target.processed.val,
num_epochs=p.n_epoch,
num_logs_per_epoch=p.NUM_LOGS_PER_EPOCH,
patience=p.patience,
optimizer=optimizer,
criteria_for_best=p.criteria_for_best,
)
total_experiment_time_secs = time.time() - start_time_secs
###################################
# Evaluate the model
###################################
source_test_label_accuracy, source_test_label_loss = jig.test(datasets.source.processed.test)
target_test_label_accuracy, target_test_label_loss = jig.test(datasets.target.processed.test)
source_val_label_accuracy, source_val_label_loss = jig.test(datasets.source.processed.val)
target_val_label_accuracy, target_val_label_loss = jig.test(datasets.target.processed.val)
history = jig.get_history()
total_epochs_trained = len(history["epoch_indices"])
val_dl = Iterable_Aggregator((datasets.source.original.val,datasets.target.original.val))
confusion = ptn_confusion_by_domain_over_dataloader(model, p.device, val_dl)
per_domain_accuracy = per_domain_accuracy_from_confusion(confusion)
# Add a key to per_domain_accuracy for if it was a source domain
for domain, accuracy in per_domain_accuracy.items():
per_domain_accuracy[domain] = {
"accuracy": accuracy,
"source?": domain in p.domains_source
}
# Do an independent accuracy assesment JUST TO BE SURE!
# _source_test_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.test, p.device)
# _target_test_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.test, p.device)
# _source_val_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.val, p.device)
# _target_val_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.val, p.device)
# assert(_source_test_label_accuracy == source_test_label_accuracy)
# assert(_target_test_label_accuracy == target_test_label_accuracy)
# assert(_source_val_label_accuracy == source_val_label_accuracy)
# assert(_target_val_label_accuracy == target_val_label_accuracy)
experiment = {
"experiment_name": p.experiment_name,
"parameters": dict(p),
"results": {
"source_test_label_accuracy": source_test_label_accuracy,
"source_test_label_loss": source_test_label_loss,
"target_test_label_accuracy": target_test_label_accuracy,
"target_test_label_loss": target_test_label_loss,
"source_val_label_accuracy": source_val_label_accuracy,
"source_val_label_loss": source_val_label_loss,
"target_val_label_accuracy": target_val_label_accuracy,
"target_val_label_loss": target_val_label_loss,
"total_epochs_trained": total_epochs_trained,
"total_experiment_time_secs": total_experiment_time_secs,
"confusion": confusion,
"per_domain_accuracy": per_domain_accuracy,
},
"history": history,
"dataset_metrics": get_dataset_metrics(datasets, "ptn"),
}
ax = get_loss_curve(experiment)
plt.show()
get_results_table(experiment)
get_domain_accuracies(experiment)
print("Source Test Label Accuracy:", experiment["results"]["source_test_label_accuracy"], "Target Test Label Accuracy:", experiment["results"]["target_test_label_accuracy"])
print("Source Val Label Accuracy:", experiment["results"]["source_val_label_accuracy"], "Target Val Label Accuracy:", experiment["results"]["target_val_label_accuracy"])
json.dumps(experiment)
###Output
_____no_output_____ |
module2/LS_DS10_232.ipynb | ###Markdown
Lambda School Data Science*Unit 2, Sprint 3, Module 2*--- Wrangle ML datasets 🍌 In today's lesson, we’ll work with a dataset of [3 Million Instacart Orders, Open Sourced](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2)! Setup
###Code
# Download data
import requests
def download(url):
filename = url.split('/')[-1]
print(f'Downloading {url}')
r = requests.get(url)
with open(filename, 'wb') as f:
f.write(r.content)
print(f'Downloaded {filename}')
download('https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz')
# Uncompress data
import tarfile
tarfile.open('instacart_online_grocery_shopping_2017_05_01.tar.gz').extractall()
# Change directory to where the data was uncompressed
%cd instacart_2017_05_01
# Print the csv filenames
from glob import glob
for filename in glob('*.csv'):
print(filename)
###Output
departments.csv
products.csv
order_products__train.csv
aisles.csv
order_products__prior.csv
orders.csv
###Markdown
For each csv file, look at its shape & head
###Code
import pandas as pd
from IPython.display import display
def preview():
for filename in glob('*.csv'):
df = pd.read_csv(filename)
print(filename, df.shape)
display(df.head())
print('\n')
preview()
###Output
departments.csv (21, 2)
###Markdown
The original task was complex ...[The Kaggle competition said,](https://www.kaggle.com/c/instacart-market-basket-analysis/data):> The dataset for this competition is a relational set of files describing customers' orders over time. The goal of the competition is to predict which products will be in a user's next order.> orders.csv: This file tells to which set (prior, train, test) an order belongs. You are predicting reordered items only for the test set orders.Each row in the submission is an order_id from the test set, followed by product_id(s) predicted to be reordered.> sample_submission.csv: ```order_id,products17,39276 2925934,39276 29259137,39276 29259182,39276 29259257,39276 29259``` ... but we can simplify!Simplify the question, from "Which products will be reordered?" (Multi-class, [multi-label](https://en.wikipedia.org/wiki/Multi-label_classification) classification) to **"Will customers reorder this one product?"** (Binary classification)Which product? How about **the most frequently ordered product?** Questions:- What is the most frequently ordered product?- How often is this product included in a customer's next order?- Which customers have ordered this product before?- How can we get a subset of data, just for these customers?- What features can we engineer? We want to predict, will these customers reorder this product on their next order? What was the most frequently ordered product?
###Code
prior = pd.read_csv('order_products__prior.csv')
prior['product_id'].mode()
prior['product_id'].value_counts()
train = pd.read_csv('order_products__train.csv')
train['product_id'].mode()
train['product_id'].value_counts()
products = pd.read_csv('products.csv')
products[products['product_id']==24852]
prior = pd.merge(prior, products, on='product_id')
###Output
_____no_output_____
###Markdown
How often are bananas included in a customer's next order?There are [three sets of data](https://gist.github.com/jeremystan/c3b39d947d9b88b3ccff3147dbcf6c6b):> "prior": orders prior to that users most recent order (3.2m orders) "train": training data supplied to participants (131k orders) "test": test data reserved for machine learning competitions (75k orders)Customers' next orders are in the "train" and "test" sets. (The "prior" set has the orders prior to the most recent orders.)We can't use the "test" set here, because we don't have its labels (only Kaggle & Instacart have them), so we don't know what products were bought in the "test" set orders.So, we'll use the "train" set. It currently has one row per product_id and multiple rows per order_id.But we don't want that. Instead we want one row per order_id, with a binary column: "Did the order include bananas?"Let's wrangle! Technique 1
###Code
df = train.head(16).copy()
df['bananas'] = df['product_id'] == 24852
df.groupby('order_id')['bananas'].any()
train['bananas'] = train['product_id'] == 24852
train.groupby('order_id')['bananas'].any()
train_wrangled = train.groupby('order_id')['bananas'].any().reset_index()
target = 'bananas'
train_wrangled[target].value_counts(normalize=True)
###Output
_____no_output_____
###Markdown
Technique 2
###Code
df
# Group by order_id, get a list of product_ids for that order
df.groupby('order_id')['product_id'].apply(list)
# Group by order_id, get a list of product_ids for that order, check if that list includes bananas
def includes_bananas(product_ids):
return 24852 in list(product_ids)
df.groupby('order_id')['product_id'].apply(includes_bananas)
train = (train
.groupby('order_id')
.agg({'product_id': includes_bananas})
.reset_index()
.rename(columns={'product_id': 'bananas'}))
target = 'bananas'
train[target].value_counts(normalize=True)
###Output
_____no_output_____
###Markdown
Which customers have ordered this product before?- Customers are identified by `user_id`- Products are identified by `product_id`Do we have a table with both these id's? (If not, how can we combine this information?)
###Code
preview()
###Output
departments.csv (21, 2)
###Markdown
Answer:No, we don't have a table with both these id's. But:- `orders.csv` has `user_id` and `order_id`- `order_products__prior.csv` has `order_id` and `product_id`- `order_products__train.csv` has `order_id` and `product_id` too
###Code
# In the order_products__prior table, which orders included bananas?
BANANAS = 24852
prior[prior.product_id==BANANAS]
banana_prior_order_ids = prior[prior.product_id==BANANAS].order_id
# Look at the orders table, which orders included bananas?
orders = pd.read_csv('orders.csv')
orders.sample(n=5)
# In the orders table, which orders included bananas?
orders[orders.order_id.isin(banana_prior_order_ids)]
# Check this order id, confirm that yes it includes bananas
prior[prior.order_id==738281]
banana_orders = orders[orders.order_id.isin(banana_prior_order_ids)]
# In the orders table, which users have bought bananas?
banana_user_ids = banana_orders.user_id.unique()
###Output
_____no_output_____
###Markdown
How can we get a subset of data, just for these customers?We want *all* the orders from customers who have *ever* bought bananas.(And *none* of the orders from customers who have *never* bought bananas.)
###Code
# orders table, shape before getting subset
orders.shape
# orders table, shape after getting subset
orders = orders[orders.user_id.isin(banana_user_ids)]
orders.shape
# IDs of *all* the orders from customers who have *ever* bought bananas
subset_order_ids = orders.order_id.unique()
# order_products__prior table, shape before getting subset
prior.shape
# order_products__prior table, shape after getting subset
prior = prior[prior.order_id.isin(subset_order_ids)]
prior.shape
# order_products__train table, shape before getting subset
train.shape
# order_products__train table, shape after getting subset
train = train[train.order_id.isin(subset_order_ids)]
train.shape
# In this subset, how often were bananas reordered in the customer's most recent order?
train[target].value_counts(normalize=True)
###Output
_____no_output_____
###Markdown
What features can we engineer? We want to predict, will these customers reorder bananas on their next order?- Other fruit they buy- Time between banana orders- Frequency of banana orders by a customer- Organic or not- Time of day
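As a quick illustration of the "organic or not" idea above, here is a minimal sketch (not part of the original lesson) that only assumes the `product_name` column already present in `products.csv`:

```python
# Hypothetical sketch: flag products whose name mentions "Organic"
products['organic'] = products['product_name'].str.contains('organic', case=False)
products['organic'].value_counts(normalize=True)
```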
###Code
preview()
train.shape
train.head()
# Merge user_id, order_number, order_dow, order_hour_of_day, and days_since_prior_order
# with the training data
train = pd.merge(train, orders)
train.head()
###Output
_____no_output_____
###Markdown
- Frequency of banana orders - % of orders - Every n days on average - Total orders- Recency of banana orders - n of orders - n days
###Code
USER = 61911
prior = pd.merge(prior, orders[['order_id', 'user_id']])
prior['bananas'] = prior.product_id == BANANAS
# This user has ordered 196 products,
df = prior[prior.user_id==USER]
df
# This person has ordered bananas six times
df['bananas'].sum()
df[df['bananas']]
# How many unique orders for this user?
df['order_id'].nunique()
# What percentage of orders?
df['bananas'].sum() / df['order_id'].nunique()
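# Illustrative extension (not in the original lesson): the same banana-order share,
# computed for every user at once instead of a single user
user_banana_share = (prior.groupby('user_id')
                          .apply(lambda d: d['bananas'].sum() / d['order_id'].nunique()))
user_banana_share.describe()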
###Output
_____no_output_____
glycompare/.ipynb_checkpoints/clustering_enrichment-checkpoint.ipynb | ###Markdown
Table relative abd
###Code
# abundance_data_table = json_utility.load_json("../intermediate_file/NBT_dict_name_abundance_cross_profile.json")
# load glycoprofile Mass Spectrum m/z and glycan structure info
# load CHO paper abundance table
mz_abd_table = glycan_profile.load_cho_mz_abundance()
# load glycoprofile Mass Spectrum m/z and glycan structure info
profile_mz_to_id = glycan_profile.load_glycan_profile_dic()
# normalize CHO abundance table
norm_mz_abd_dict = glycan_profile.get_norm_mz_abd_table(mz_abd_table)
# load match_dict
match_dict = json_utility.load_json(__init__.json_address + "match_dict.json")
# digitalize the glycoprofile
glycoprofile_list = glycan_profile.get_glycoprofile_list(profile_mz_to_id, norm_mz_abd_dict, match_dict)
# generate table
table_generator = glycan_profile.MotifAbdTableGenerator(glycoprofile_list)
motif_abd_table = table_generator.table_against_wt_relative_abd()
# motif_abd_table.head()
# load motif vector and return edge_list
# motif_vector = json_utility.load_json("../intermediate_file/Unicarbkb_motif_vec_12259.json")
# motif_lib = gc_glycan_motif.GlycanMotifLib(motif_dict)
motif_lib = motif_class.MotifLabNGlycan(json_utility.load_json(__init__.merged_motif_dict_addr)) # unicarbkb_motifs_12259.json
tree_type_dp, edge_list = motif_lib.motif_dependence_tree()
dropper = motif_class.NodesDropper(motif_lib, motif_class.get_weight_dict(motif_abd_table))
# hier_enrich_glycoprofile_occurence(glycoprofile, scoredMotifs_occurence_vector, np.array(edge_list),motif_vector)
from importlib import reload  # reload lives in importlib on Python 3
reload(__init__)
reload(extract_motif)
reload(motif_class)
reload(glycan_profile)
reload(plot_glycan_utilities)
reload(clustering_analysis_pip)
dropper = motif_class.NodesDropper(motif_lib, motif_class.get_weight_dict(motif_abd_table))
import seaborn as sns
# sns.set("RdBu_r", 7)
dropper.drop_node()
print("", len(dropper.drop_node()))
df_ncore = motif_abd_table[motif_abd_table.index.isin(dropper.nodes_kept)]
# draw plot
# motif_with_n_glycan_core_all_motif(motif_, _table, weight_dict)
""" with n_glycan_core using jaccard for binary and use braycurtis for float
"""
df_ncore.to_csv(__init__.json_address + r"abundance_matrix.txt")
name_prefix = 'dropped'
# sns.palplot(sns.color_palette("RdBu_r", 7))
g = sns.clustermap(df_ncore.T, metric="braycurtis",method='single',cmap=sns.diverging_palette(247,10,99,54,1,20),linewidths=.01,figsize=(20,20),linecolor='black')
draw_profile_cluster(g, df_ncore, profile_name, name_prefix, color_threshold=0.95)
cccluster_dict = draw_motif_cluster(g, df_ncore, name_prefix, color_threshold=0.23)
sns.choose_diverging_palette()
247,10,99,33,1,10
import numpy as np
from scipy import stats
a = np.array([1,2,3,4,5,6,7,8,9,0])
a.mean()
a.var()
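# Hand-rolled t-type statistic comparing the sample mean with a hypothesised value of 1 (note the divisor of 8 here), followed by a two-sided p-value from the t distribution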
tt = (1-a.mean())/np.sqrt(a.var()/8)
stats.t.sf(np.abs(tt), len(a)-1)*2
from scipy.cluster import hierarchy
ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,400., 754., 564., 138., 219., 869., 669.])
Z = hierarchy.linkage(ytdist, 'single')
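# The linkage can be visualised with scipy's dendrogram helper, e.g. hierarchy.dendrogram(Z)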
###Output
_____no_output_____
###Markdown
Table Existence
###Code
motif_exist_table = table_generator.table_existance()
# motif_lib = motif_class.MotifLabNGlycan(json_utility.load_json(__init__.merged_motif_dict_addr)) # unicarbkb_motifs_12259.json
# tree_type_dp, edge_list = motif_lib.motif_dependence_tree()
import hierarchical_enrichment
scoredMotifs_occurence_vector=[sum(i) for i in np.array(motif_exist_table)]
method='chi_squared'
relative='child'
motif_hierarchy = np.array(edge_list)
motif_vec= motif_lib.motif_vec
hierarchical_enrichment.hier_enrich_glycoprofile_occurence(glycoprofile_list, scoredMotifs_occurence_vector, np.array(edge_list), motif_vec)
motif_hierarchy
motif_exist_table
###Output
_____no_output_____ |
Cox Done.ipynb | ###Markdown
The first step in any data analysis is acquiring and munging the dataOur starting data set can be found here: http://jakecoltman.com in the pyData postIt is designed to be roughly similar to the output from DCM's path to conversionDownload the file and transform it into something with the columns: id,lifetime,age,male,event,search,brand where lifetime is the total time that we observed someone not convert for and event should be 1 if we see a conversion and 0 if we don't. Note that all values should be converted into intsIt is useful to note that end_date = datetime.datetime(2016, 5, 3, 20, 36, 8, 92165)
###Code
# Imports assumed for this notebook: pandas/numpy for wrangling, PyMC2-era pymc for the Bayesian models,
# lifelines for Cox regression, and matplotlib for plotting.
from datetime import datetime
import numpy as np
import pandas as pd
import pymc as pm
import lifelines
from matplotlib import pyplot as plt
from numpy import log
running_id = 0
output = [[0]]
with open("E:/output.txt") as file_open:
for row in file_open.read().split("\n"):
cols = row.split(",")
if cols[0] == output[-1][0]:
output[-1].append(cols[1])
output[-1].append(True)
else:
output.append(cols)
output = output[1:]
for row in output:
if len(row) == 6:
row += [datetime(2016, 5, 3, 20, 36, 8, 92165), False]
output = output[1:-1]
def convert_to_days(dt):
day_diff = dt / np.timedelta64(1, 'D')
if day_diff == 0:
return 23.0
else:
return day_diff
df = pd.DataFrame(output, columns=["id", "advert_time", "male","age","search","brand","conversion_time","event"])
df["lifetime"] = pd.to_datetime(df["conversion_time"]) - pd.to_datetime(df["advert_time"])
df["lifetime"] = df["lifetime"].apply(convert_to_days)
df["male"] = df["male"].astype(int)
df["search"] = df["search"].astype(int)
df["brand"] = df["brand"].astype(int)
df["age"] = df["age"].astype(int)
df["event"] = df["event"].astype(int)
df = df.drop('advert_time', 1)
df = df.drop('conversion_time', 1)
df = df.set_index("id")
df = df.dropna(thresh=2)
df.median()
###Parametric Bayes
#Shout out to Cam Davidson-Pilon
## Example fully worked model using toy data
## Adapted from http://blog.yhat.com/posts/estimating-user-lifetimes-with-pymc.html
## Note that we've made some corrections
N = 2500
##Generate some random data
lifetime = pm.rweibull( 2, 5, size = N )
birth = pm.runiform(0, 10, N)
censor = ((birth + lifetime) >= 10)
lifetime_ = lifetime.copy()
lifetime_[censor] = 10 - birth[censor]
alpha = pm.Uniform('alpha', 0, 20)
beta = pm.Uniform('beta', 0, 20)
@pm.observed
def survival(value=lifetime_, alpha = alpha, beta = beta ):
return sum( (1-censor)*(log( alpha/beta) + (alpha-1)*log(value/beta)) - (value/beta)**(alpha))
mcmc = pm.MCMC([alpha, beta, survival ] )
mcmc.sample(50000, 30000)
pm.Matplot.plot(mcmc)
mcmc.trace("alpha")[:]
###Output
_____no_output_____
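###Markdown
 For reference, the log-likelihood implemented by the `survival` function above is the censored Weibull log-likelihood (same $\alpha$, $\beta$, observed times $t_i$ and censoring indicators $c_i$ as the code): $$\log L = \sum_i \left[(1-c_i)\left(\log\frac{\alpha}{\beta} + (\alpha-1)\log\frac{t_i}{\beta}\right) - \left(\frac{t_i}{\beta}\right)^{\alpha}\right]$$ Uncensored observations contribute the full log-density, while censored observations contribute only the log-survival term $-(t_i/\beta)^{\alpha}$.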
###Markdown
Problems: 1 - Try to fit your data from section 1 2 - Use the results to plot the distribution of the median Note that the median of a Weibull distribution is:$$β(log 2)^{1/α}$$
###Code
censor = np.array(df["event"].apply(lambda x: 0 if x else 1).tolist())
alpha = pm.Uniform("alpha", 0,50)
beta = pm.Uniform("beta", 0,50)
@pm.observed
def survival(value=df["lifetime"], alpha = alpha, beta = beta ):
return sum( (1-censor)*(np.log( alpha/beta) + (alpha-1)*np.log(value/beta)) - (value/beta)**(alpha))
mcmc = pm.MCMC([alpha, beta, survival ] )
mcmc.sample(10000)
def weibull_median(alpha, beta):
return beta * ((log(2)) ** ( 1 / alpha))
plt.hist([weibull_median(x[0], x[1]) for x in zip(mcmc.trace("alpha"), mcmc.trace("beta"))])
###Output
_____no_output_____
###Markdown
Problems: 4 - Try adjusting the number of samples for burn-in and thinning 5 - Try adjusting the prior and see how it affects the estimate
###Code
censor = np.array(df["event"].apply(lambda x: 0 if x else 1).tolist())
alpha = pm.Uniform("alpha", 0,50)
beta = pm.Uniform("beta", 0,50)
@pm.observed
def survival(value=df["lifetime"], alpha = alpha, beta = beta ):
return sum( (1-censor)*(np.log( alpha/beta) + (alpha-1)*np.log(value/beta)) - (value/beta)**(alpha))
mcmc = pm.MCMC([alpha, beta, survival ] )
mcmc.sample(10000, burn = 3000, thin = 20)
pm.Matplot.plot(mcmc)
#Solution to Q5
## Adjusting the priors impacts the overall result
## If we give a looser, less informative prior then we end up with a broader, shorter distribution
## If we give much more informative priors, then we get a tighter, taller distribution
censor = np.array(df["event"].apply(lambda x: 0 if x else 1).tolist())
## Note the narrowing of the prior
alpha = pm.Normal("alpha", 1.7, 10000)
beta = pm.Normal("beta", 18.5, 10000)
####Uncomment this to see the result of looser priors
## Note this ends up pretty much the same as we're already very loose
#alpha = pm.Uniform("alpha", 0, 30)
#beta = pm.Uniform("beta", 0, 30)
@pm.observed
def survival(value=df["lifetime"], alpha = alpha, beta = beta ):
return sum( (1-censor)*(np.log( alpha/beta) + (alpha-1)*np.log(value/beta)) - (value/beta)**(alpha))
mcmc = pm.MCMC([alpha, beta, survival ] )
mcmc.sample(10000, burn = 5000, thin = 20)
pm.Matplot.plot(mcmc)
#plt.hist([weibull_median(x[0], x[1]) for x in zip(mcmc.trace("alpha"), mcmc.trace("beta"))])
###Output
[-----------------100%-----------------] 10000 of 10000 complete in 18.4 secPlotting beta
Plotting alpha
###Markdown
Problems: 7 - Try testing whether the median is greater than a different value
###Code
medians = [weibull_median(x[0], x[1]) for x in zip(mcmc.trace("alpha"), mcmc.trace("beta"))]
testing_value = 14.9
number_of_greater_samples = sum([x >= testing_value for x in medians])
100 * (number_of_greater_samples / len(medians))
###Output
_____no_output_____
###Markdown
If we want to look at covariates, we need a new approach. We'll use Cox proportional hazards, a very popular regression model.To fit in python we use the module lifelines:http://lifelines.readthedocs.io/en/latest/
###Code
#Fitting solution
cf = lifelines.CoxPHFitter()
cf.fit(df, 'lifetime', event_col = 'event')
cf.summary
###Output
C:\Users\j.coltman\AppData\Local\Continuum\Anaconda3\lib\site-packages\lifelines\fitters\coxph_fitter.py:285: FutureWarning: sort(columns=....) is deprecated, use sort_values(by=.....)
df.sort(duration_col, inplace=True)
###Markdown
Once we've fit the data, we need to do something useful with it. Try to do the following things: 1 - Plot the baseline survival function 2 - Predict the functions for a particular set of features 3 - Plot the survival function for two different sets of features 4 - For your results in part 3 calculate how much more likely a death event is for one than the other for a given period of time
###Code
#Solution to 1
fig, axis = plt.subplots(nrows=1, ncols=1)
cf.baseline_survival_.plot(ax = axis, title = "Baseline Survival")
regressors = np.array([[1,45,0,0]])
survival = cf.predict_survival_function(regressors)
survival.head()
#Solution to plotting multiple regressors
fig, axis = plt.subplots(nrows=1, ncols=1, sharex=True)
regressor1 = np.array([[1,45,0,1]])
regressor2 = np.array([[1,23,1,1]])
survival_1 = cf.predict_survival_function(regressor1)
survival_2 = cf.predict_survival_function(regressor2)
plt.plot(survival_1,label = "45 year old male - display")
plt.plot(survival_2,label = "23 year old male - search")
plt.legend(loc = "upper right")
odds = survival_1 / survival_2
plt.plot(odds, c = "red")
###Output
_____no_output_____
###Markdown
Model selectionDifficult to do with classic tools (here)Problem: 1 - Calculate the BMA coefficient values 2 - Try running with different priors
###Code
#### BMA Coefficient values
#### Different priors
###Output
_____no_output_____ |
10 - Pandas Crash Course.ipynb | ###Markdown
Pandas Crash CoursePandas is a Python package that aims to make working with data as easy and intuitive as possible. It fills the role of a foundational real world data manipulation library and interfaces with many other Python packages.By the end of this file you should have seen simple examples of:1. Pandas Series and DataFrame objects1. Data IO1. Data types1. Indexing and setting data1. Dealing with missing data1. Concatinating and merging data1. Grouping Operations1. Operations on Pandas data objects1. Applying any function to Pandas data objects1. PlottingFurther Reading: http://pandas.pydata.org/pandas-docs/stable/10min.html https://pandas.pydata.org/pandas-docs/stable/comparison_with_sql.htmlcompare-with-sql-join Image Credit: David Jenkins at Bifengxia Panda Reserve in Chengdu
###Code
# Python imports
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Pandas Series and DataFrame objectsThere are two main data structures in pandas:- Series (1 dimensional data)- Dataframes (2 dimensional data)- There are other data structures for higher dimensional data, but they are less frequently used - Panel (3 dimensional data) - panel will be removed from future versions of Pandas and replaced with xarray - Xarray (>2 dimensions)Here, the 1- and 2-dimensional data sets are the focus of this lesson.Pandas DataFrames are analogous to R's data.frame, but aim to provide additional functionality. Both dataframes and series data structures have indices, which are shown on the left:
###Code
series1 = pd.Series([1,2,3,4])
print(series1)
###Output
0 1
1 2
2 3
3 4
dtype: int64
###Markdown
Dataframes use the IPython display method to look pretty, but will show just fine when printed also. (There's a way to make all of the dataframes print pretty via the IPython.display.display method, but this isn't necessary to view the values):
###Code
df1 = pd.DataFrame([[1,2,3,4],[10,20,30,40]])
print(df1)
df1
###Output
0 1 2 3
0 1 2 3 4
1 10 20 30 40
###Markdown
Indices can be named:
###Code
# Rename the columns
df1.columns = ['A','B','C','D']
df1.index = ['zero','one']
df1
# Create the dataframe with the columns
df1 = pd.DataFrame([[1,2,3,4],[10,20,30,40]], columns=['A','B','C',"D"], index=['zero','one'])
df1
###Output
_____no_output_____
###Markdown
Data Input Output
###Code
df1 = pd.DataFrame(np.random.randn(5,4), columns = ['A','B','C','D'], index=['zero','one','two','three','four'])
print(df1)
###Output
A B C D
zero -0.373621 -0.247423 -0.040302 0.033477
one -0.424199 -0.417990 -1.301303 0.908326
two -0.448194 -0.470595 1.020852 -0.576712
three 1.321021 -1.004106 0.956355 -0.185157
four 0.732942 2.053800 -1.436492 0.348923
###Markdown
CSV Files
###Code
df1.to_csv('datafiles/pandas_df1.csv')
!ls datafiles
df2 = pd.read_csv('datafiles/pandas_df1.csv', index_col=0)
print(df2)
###Output
A B C D
zero -0.373621 -0.247423 -0.040302 0.033477
one -0.424199 -0.417990 -1.301303 0.908326
two -0.448194 -0.470595 1.020852 -0.576712
three 1.321021 -1.004106 0.956355 -0.185157
four 0.732942 2.053800 -1.436492 0.348923
###Markdown
hdf5 files
###Code
df1.to_hdf('datafiles/pandas_df1.h5', 'df')
!ls datafiles
df2 = pd.read_hdf('datafiles/pandas_df1.h5', 'df')
print(df2)
###Output
A B C D
zero -0.373621 -0.247423 -0.040302 0.033477
one -0.424199 -0.417990 -1.301303 0.908326
two -0.448194 -0.470595 1.020852 -0.576712
three 1.321021 -1.004106 0.956355 -0.185157
four 0.732942 2.053800 -1.436492 0.348923
###Markdown
Data typesShow the datatypes of each column:
###Code
df2.dtypes
###Output
_____no_output_____
###Markdown
We can create dataframes of multiple datatypes:
###Code
col1 = range(6)
col2 = np.random.rand(6)
col3 = ['zero','one','two','three','four','five']
col4 = ['blue', 'cow','blue', 'cow','blue', 'cow']
df_types = pd.DataFrame( {'integers': col1, 'floats': col2, 'words': col3, 'cow color': col4} )
print(df_types)
df_types.dtypes
###Output
_____no_output_____
###Markdown
We can also set the 'cow color' column to a category:
###Code
df_types['cow color'] = df_types['cow color'].astype("category")
df_types.dtypes
###Output
_____no_output_____
###Markdown
Indexing and Setting DataPandas does a *lot* of different operations, here are the meat and potatoes. The following describes the indexing of data, but setting the data is as simple as a reassignment.
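###Code
# A minimal sketch of *setting* values by assignment (df_set and its labels are illustrative, not part of the original lesson)
df_set = pd.DataFrame(np.zeros((2, 2)), columns=['A', 'B'], index=['zero', 'one'])
df_set.loc['zero', 'A'] = 1.0   # label-based assignment
df_set.iloc[1, 1] = 2.0         # integer-position assignment
df_set['C'] = 3.0               # new column, broadcast to every row
df_set
###Output
_____no_output_____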
###Code
time_stamps = pd.date_range(start='2000-01-01', end='2000-01-20', freq='D') # Define index of time stamps
df1 = pd.DataFrame(np.random.randn(20,4), columns = ['A','B','C','D'], index=time_stamps)
print(df1)
###Output
A B C D
2000-01-01 -2.148320 -0.333352 -1.955087 -0.031653
2000-01-02 -0.363028 -0.735354 -1.003570 2.917665
2000-01-03 -0.070073 -1.502237 0.357330 0.293532
2000-01-04 -0.658409 -0.519531 0.372368 -0.082892
2000-01-05 -0.347255 -1.877360 1.798925 -1.196501
2000-01-06 0.910050 -1.860890 0.950236 1.729865
2000-01-07 0.274789 -1.605664 -0.550596 0.409954
2000-01-08 -1.692789 -0.353392 0.088221 -0.483079
2000-01-09 0.354391 0.950867 -0.641271 1.498960
2000-01-10 0.416353 0.307605 0.098817 -1.084056
2000-01-11 2.144184 -0.885058 2.406441 0.060464
2000-01-12 -0.314988 1.611354 -0.120403 -0.712474
2000-01-13 -1.149183 0.154171 -0.350990 -0.598516
2000-01-14 -1.522168 -0.481107 -0.472934 -0.844703
2000-01-15 0.867458 0.351842 1.367980 -0.729122
2000-01-16 -1.172752 0.513646 1.562067 0.769301
2000-01-17 -0.607056 -0.455895 -0.544137 -0.360197
2000-01-18 -0.706161 -2.056215 0.109552 0.662488
2000-01-19 1.731916 0.340571 0.170681 -2.129683
2000-01-20 -0.012410 0.357174 -0.095124 -0.010638
###Markdown
Head and Tail Print the beginning and ending entries of a pandas data structure
###Code
df1.head(3) # Show the first n rows, default is 5
df1.tail() # Show the last n rows
###Output
_____no_output_____
###Markdown
We can also separate the metadata (labels, etc) from the data, yielding a numpy-like output.
###Code
df1.columns
df1.values
###Output
_____no_output_____
###Markdown
Indexing DataPandas provides the means to index data via named columns, or as numpy like indices. Indexing is [row, column], just as it was in numpy.Data is visible via column:
###Code
df1['A'].head() # df1.A.head() is equivalent
###Output
_____no_output_____
###Markdown
Note that tab completion is enabled for column names:
###Code
df1.A
###Output
_____no_output_____
###Markdown
We can specify row ranges:
###Code
df1[:2]
###Output
_____no_output_____
###Markdown
Label based indexing (.loc)Slice based on the labels.
###Code
df1.loc[:'2000-01-5',"A"] # Note that this includes the upper index
###Output
_____no_output_____
###Markdown
Integer based indexing (.iloc)Slice based on the index number.
###Code
df1.iloc[:3,0] # Note that this does not include the upper index like numpy
###Output
_____no_output_____
###Markdown
Fast single element label indexing (.at) - fast .locIntended for fast, single indexes.
###Code
index_timestamp = pd.Timestamp('2000-01-03') # Create a timestamp object to index
df1.at[index_timestamp,"A"] # Index using timestamp (vs string)
###Output
_____no_output_____
###Markdown
Fast single element label indexing (.iat) - fast .ilocIntended for fast, single indexes.
###Code
df1.iat[3,0]
###Output
_____no_output_____
###Markdown
Logical indexingA condition is used to select the values within a slice or the entire Pandas object. Using a conditional statement, a true/false DataFrame is produced:
###Code
df1.head()>0.5
###Output
_____no_output_____
###Markdown
That matrix can then be used to index the DataFrame:
###Code
df1[df1>0.5].head() # Note that the values that were 'False' are 'NaN'
###Output
_____no_output_____
###Markdown
Logical indexing via `isin`It's also possible to filter rows by whether a column's values appear in a given set:
###Code
df_types
bool_series = df_types['cow color'].isin(['blue'])
print(bool_series) # Show the logical indexing
df_types[bool_series] # Index where the values are true
###Output
0 True
1 False
2 True
3 False
4 True
5 False
Name: cow color, dtype: bool
###Markdown
Sorting by column
###Code
df_types.sort_values(by="floats")
###Output
_____no_output_____
###Markdown
Dealing with Missing DataBy convention, pandas uses the `NaN` value to represent missing data. There are a few functions surrounding the handling of `NaN` values:
###Code
df_nan = pd.DataFrame(np.random.rand(6,2), columns = ['A','B'])
df_nan
df_nan['B'] = df_nan['B'].where(df_nan['B'] > 0.5) # Sets 'B' to NaN where 'B' <= 0.5
print(df_nan)
###Output
A B
0 0.647968 0.647968
1 0.224838 NaN
2 0.989680 NaN
3 0.125777 NaN
4 0.947133 0.947133
5 0.962330 0.962330
###Markdown
Print a logical DataFrame where `NaN` is located:
###Code
df_nan.isnull()
###Output
_____no_output_____
###Markdown
Drop all rows with `NaN`:
###Code
df_nan.dropna(how = 'any')
###Output
_____no_output_____
###Markdown
Replace `NaN` entries:
###Code
df_nan.fillna(value = -1)
###Output
_____no_output_____
###Markdown
Concatenating and Merging DataBringing together DataFrames or Series objects: Concatenate
###Code
df1 = pd.DataFrame(np.zeros([3,3], dtype=int))  # builtin int; np.int was removed in NumPy 1.24
df1
df2 = pd.concat([df1, df1], axis=0)
df2 = df2.reset_index(drop=True) # Renumber indexing
df2
###Output
_____no_output_____
###Markdown
AppendAdding an additional group after the first group:
###Code
newdf = pd.DataFrame({0: [1], 1:[1], 2:[1]})
print(newdf)
df3 = df2.append(newdf, ignore_index=True)
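# Note: DataFrame.append was removed in pandas 2.0; pd.concat([df2, newdf], ignore_index=True) is the equivalent there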
df3
###Output
0 1 2
0 1 1 1
###Markdown
SQL-like mergingPandas can do structured query language (SQL) like merges of data:
###Code
left = pd.DataFrame({'numbers': ['K0', 'K1', 'K2', 'K3'],
'English': ['one', 'two', 'three', 'four'],
'Spanish': ['uno', 'dos', 'tres', 'quatro'],
'German': ['erste', 'zweite','dritte','vierte']})
left
right = pd.DataFrame({'numbers': ['K0', 'K1', 'K2', 'K3'],
'French': ['un', 'deux', 'trois', 'quatre'],
'Afrikaans': ['een', 'twee', 'drie', 'vier']})
right
result = pd.merge(left, right, on='numbers')
result
###Output
_____no_output_____
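###Markdown
 The `how` argument selects the join type (`'left'`, `'right'`, `'outer'`, `'inner'`), mirroring the corresponding SQL joins; for example:
###Code
# Keep every row from `left`; unmatched rows would get NaN in the right-hand columns
pd.merge(left, right, on='numbers', how='left')
###Output
_____no_output_____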
###Markdown
Grouping OperationsOften, there is a need to summarize the data or change the output of the data to make it easier to work with, especially for categorical data types.
###Code
dfg = pd.DataFrame({'A': ['clogs','sandals','jellies']*2,
'B': ['socks','footies']*3,
'C': [1,1,1,3,2,2],
'D': np.random.rand(6)})
dfg
###Output
_____no_output_____
###Markdown
Pivot TableWithout changing the data in any way, summarize the output in a different format. Specify the indicies, columns, and values:
###Code
dfg.pivot_table(index=['A','B'], columns=['C'], values='D')
###Output
_____no_output_____
###Markdown
StackingColumn labels can be brought into the rows.
###Code
dfg.stack()
###Output
_____no_output_____
###Markdown
GroupbyGroupby groups values, creating a Python object to which functions can be applied:
###Code
dfg.groupby(['B']).count()
dfg.groupby(['A']).mean()
###Output
_____no_output_____
###Markdown
Operations on Pandas Data ObjectsWhether it's the entire data frame or a series within a single dataframe, there are a variety of methods that can be applied. Here's a list of a few helpful ones: Simple statistics (mean, stdev, etc).
###Code
dfg['D'].mean()
###Output
_____no_output_____
###Markdown
Shifting Note that values shifted out are dropped and `NaN` fills the vacated positions:
###Code
dfg['D']
dfg_Ds = dfg['D'].shift(2)
dfg_Ds
###Output
_____no_output_____
###Markdown
Add, subtract, multiply, divide:Operations are element-wise:
###Code
dfg['D'].div(dfg_Ds )
###Output
_____no_output_____
###Markdown
Histogram
###Code
dfg
dfg['C'].value_counts()
###Output
_____no_output_____
###Markdown
Describe Excluding NaN values, print some descriptive statistics about the collection of values.
###Code
df_types.describe()
###Output
_____no_output_____
###Markdown
TransposeExchange the rows and columns (flip about the diagonal):
###Code
df_types.T
###Output
_____no_output_____
###Markdown
Applying Any Function to Pandas Data ObjectsPandas objects have methods that allow functions to be applied with greater control, namely the `.apply` function:
###Code
def f(x): # Define function
return x + 1
dfg['C'].apply(f)
###Output
_____no_output_____
###Markdown
Lambda functions may also be used
###Code
dfg['C'].apply(lambda x: x + 1)
###Output
_____no_output_____
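###Markdown
 `apply` also works on a whole DataFrame, column-by-column by default or row-by-row with `axis=1`; a small sketch:
###Code
# Row-wise apply: combine columns C and D for each row
dfg[['C', 'D']].apply(lambda row: row['C'] * row['D'], axis=1)
###Output
_____no_output_____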
###Markdown
String functions:Pandas has access to string methods:
###Code
dfg['A'].str.title() # Make the first letter uppercase
###Output
_____no_output_____
###Markdown
PlottingPandas exposes the matplotlib library for use.
###Code
n = 100
X = np.linspace(0, 5, n)
Y1,Y2 = np.log((X)**2+2), np.sin(X)+2
dfp = pd.DataFrame({'X' : X, 'Y1': Y1, 'Y2': Y2})
dfp.head()
dfp.plot(x = 'X')
plt.show()
###Output
_____no_output_____
###Markdown
Matplotlib styles are available too:
###Code
style_name = 'classic'
plt.style.use(style_name)
dfp.plot(x = 'X')
plt.title('Log($x^2$) and Sine', fontsize=16)
plt.xlabel('X Label', fontsize=16)
plt.ylabel('Y Label', fontsize=16)
plt.show()
mpl.rcdefaults() # Reset matplotlib rc defaults
###Output
_____no_output_____ |
ddsp_48kHz_stereo.ipynb | ###Markdown
Copyright 2020 Google LLC.Licensed under the Apache License, Version 2.0 (the "License");
###Code
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
###Output
_____no_output_____
###Markdown
Train & Timbre Transfer--DDSP Autoencoder on GPU--48kHz/StereoMade by [Google Magenta](https://magenta.tensorflow.org/)--altered by [Demon Flex Council](https://soundcloud.com/demonflexcouncil)This notebook demonstrates how to install the DDSP library and train it for synthesis based on your own data using command-line scripts. If run inside of Colab, it will automatically use a free Google Cloud GPU.**A Little Background**A producer friend of mine turned me on to Magenta’s DDSP, and I’m glad he did. In my mind it represents the way forward for AI music. Finally we have a glimpse inside the black box, with access to musical parameters as well as neural net hyperparameters. And DDSP leverages decades of studio knowledge by utilizing traditional processors like synthesizers and effects. One can envision a time when DDSP-like elements will sit at the heart of production DAWs.DDSP will accept most audio sample rates and formats. However, native 48kHz/stereo datasets and primers will sound best. Output files are always 48kHz/stereo. You can upload datasets and primers via the browser or use Google Drive.The algorithm was designed to model single instruments played monophonically, but it can also produce interesting results with denser, polyphonic material and percussion. **Note that we prefix bash commands with a `!` inside of Colab, but you would leave them out if running directly in a terminal.** Install DependenciesFirst we install the required dependencies with `pip`.
###Code
%tensorflow_version 2.x
# !pip install -qU ddsp[data_preparation]
!pip install -qU git+https://github.com/DemonFlexCouncil/ddsp@ddsp
# Initialize global path for using google drive.
DRIVE_DIR = ''
# Helper Functions
sample_rate = 48000
n_fft = 6144
###Output
###Markdown
Setup Google Drive (Optional, Recommended)This notebook requires uploading audio and saving checkpoints. While you can do this with direct uploads / downloads, it is recommended to connect to your google drive account. This will enable faster file transfer, and regular saving of checkpoints so that you do not lose your work if the colab kernel restarts (common for training more than 12 hours). Login and mount your driveThis will require an authentication code. You should then be able to see your drive in the file browser on the left panel.
###Code
from google.colab import drive
drive.mount('/content/drive')
###Output
###Markdown
Set your base directory* In drive, put all of the audio files with which you would like to train in a single folder. * Typically works well with 10-20 minutes of audio from a single monophonic source (also, one acoustic environment). * Use the file browser in the left panel to find a folder with your audio, right-click **"Copy Path", paste below**, and run the cell.
###Code
#@markdown (ex. `/content/drive/My Drive/...`) Leave blank to skip loading from Drive.
DRIVE_DIR = '' #@param {type: "string"}
import os
assert os.path.exists(DRIVE_DIR)
print('Drive Folder Exists:', DRIVE_DIR)
###Output
###Markdown
Make directories to save model and data
###Code
#@markdown Check the box below if you'd like to train with latent vectors.
LATENT_VECTORS = False #@param{type:"boolean"}
!git clone https://github.com/DemonFlexCouncil/gin.git
if LATENT_VECTORS:
GIN_FILE = 'gin/solo_instrument.gin'
else:
GIN_FILE = 'gin/solo_instrument_noz.gin'
AUDIO_DIR_LEFT = 'data/audio-left'
AUDIO_DIR_RIGHT = 'data/audio-right'
MODEL_DIR_LEFT = 'data/model-left'
MODEL_DIR_RIGHT = 'data/model-right'
AUDIO_FILEPATTERN_LEFT = AUDIO_DIR_LEFT + '/*'
AUDIO_FILEPATTERN_RIGHT = AUDIO_DIR_RIGHT + '/*'
!mkdir -p $AUDIO_DIR_LEFT $AUDIO_DIR_RIGHT $MODEL_DIR_LEFT $MODEL_DIR_RIGHT
if DRIVE_DIR:
SAVE_DIR_LEFT = os.path.join(DRIVE_DIR, 'ddsp-solo-instrument-left')
SAVE_DIR_RIGHT = os.path.join(DRIVE_DIR, 'ddsp-solo-instrument-right')
INPUT_DIR = os.path.join(DRIVE_DIR, 'dataset-input')
PRIMERS_DIR = os.path.join(DRIVE_DIR, 'primers')
OUTPUT_DIR = os.path.join(DRIVE_DIR, 'resynthesis-output')
!mkdir -p "$SAVE_DIR_LEFT" "$SAVE_DIR_RIGHT" "$INPUT_DIR" "$PRIMERS_DIR" "$OUTPUT_DIR"
###Output
###Markdown
Upload training audioUpload training audio to the "dataset-input" folder inside the DRIVE_DIR folder if using Drive (otherwise prompts local upload.)
###Code
!pip install note_seq
import glob
import os
from ddsp.colab import colab_utils
from google.colab import files
import librosa
import numpy as np
from scipy.io.wavfile import write as write_audio
if DRIVE_DIR:
wav_files = glob.glob(os.path.join(INPUT_DIR, '*.wav'))
aiff_files = glob.glob(os.path.join(INPUT_DIR, '*.aiff'))
aif_files = glob.glob(os.path.join(INPUT_DIR, '*.aif'))
ogg_files = glob.glob(os.path.join(INPUT_DIR, '*.ogg'))
flac_files = glob.glob(os.path.join(INPUT_DIR, '*.flac'))
mp3_files = glob.glob(os.path.join(INPUT_DIR, '*.mp3'))
audio_files = wav_files + aiff_files + aif_files + ogg_files + flac_files + mp3_files
else:
uploaded_files = files.upload()
audio_files = list(uploaded_files.keys())
for fname in audio_files:
# Convert to 48kHz.
audio, unused_sample_rate = librosa.load(fname, sr=48000, mono=False)
if (audio.ndim == 2):
audio = np.swapaxes(audio, 0, 1)
# Mono to stereo.
if (audio.ndim == 1):
print('Converting mono to stereo.')
audio = np.stack((audio, audio), axis=-1)
target_name_left = os.path.join(AUDIO_DIR_LEFT,
os.path.basename(fname).replace(' ', '_').replace('aiff', 'wav').replace('aif', 'wav').replace('ogg', 'wav').replace('flac', 'wav').replace('mp3', 'wav'))
target_name_right = os.path.join(AUDIO_DIR_RIGHT,
os.path.basename(fname).replace(' ', '_').replace('aiff', 'wav').replace('aif', 'wav').replace('ogg', 'wav').replace('flac', 'wav').replace('mp3', 'wav'))
# Split to dual mono.
write_audio(target_name_left, sample_rate, audio[:, 0])
write_audio(target_name_right, sample_rate, audio[:, 1])
###Output
###Markdown
Preprocess raw audio into TFRecord datasetWe need to do some preprocessing on the raw audio you uploaded to get it into the correct format for training. This involves turning the full audio into short (4-second) examples, inferring the fundamental frequency (or "pitch") with [CREPE](http://github.com/marl/crepe), and computing the loudness. These features will then be stored in a sharded [TFRecord](https://www.tensorflow.org/tutorials/load_data/tfrecord) file for easier loading. Depending on the amount of input audio, this process usually takes a few minutes.* (Optional) Transfer dataset from drive. If you've already created a dataset, from a previous run, this cell will skip the dataset creation step and copy the dataset from `$DRIVE_DIR/data`
###Code
!pip install apache_beam
import glob
import os
TRAIN_TFRECORD_LEFT = 'data/train-left.tfrecord'
TRAIN_TFRECORD_RIGHT = 'data/train-right.tfrecord'
TRAIN_TFRECORD_FILEPATTERN_LEFT = TRAIN_TFRECORD_LEFT + '*'
TRAIN_TFRECORD_FILEPATTERN_RIGHT = TRAIN_TFRECORD_RIGHT + '*'
# Copy dataset from drive if dataset has already been created.
drive_data_dir = os.path.join(DRIVE_DIR, 'data')
drive_dataset_files = glob.glob(drive_data_dir + '/*')
if DRIVE_DIR and len(drive_dataset_files) > 0:
!cp "$drive_data_dir"/* data/
else:
# Make a new dataset.
if (not glob.glob(AUDIO_FILEPATTERN_LEFT)) or (not glob.glob(AUDIO_FILEPATTERN_RIGHT)):
raise ValueError('No audio files found. Please use the previous cell to '
'upload.')
!ddsp_prepare_tfrecord \
--input_audio_filepatterns=$AUDIO_FILEPATTERN_LEFT \
--output_tfrecord_path=$TRAIN_TFRECORD_LEFT \
--num_shards=10 \
--sample_rate=$sample_rate \
--alsologtostderr
!ddsp_prepare_tfrecord \
--input_audio_filepatterns=$AUDIO_FILEPATTERN_RIGHT \
--output_tfrecord_path=$TRAIN_TFRECORD_RIGHT \
--num_shards=10 \
--sample_rate=$sample_rate \
--alsologtostderr
# Copy dataset to drive for safe-keeping.
if DRIVE_DIR:
!mkdir "$drive_data_dir"/
print('Saving to {}'.format(drive_data_dir))
!cp $TRAIN_TFRECORD_FILEPATTERN_LEFT "$drive_data_dir"/
!cp $TRAIN_TFRECORD_FILEPATTERN_RIGHT "$drive_data_dir"/
###Output
###Markdown
Save dataset statistics for timbre transferQuantile normalization helps match loudness of timbre transfer inputs to the loudness of the dataset, so let's calculate it here and save in a pickle file.
###Code
from ddsp.colab import colab_utils
import ddsp.training
data_provider_left = ddsp.training.data.TFRecordProvider(TRAIN_TFRECORD_FILEPATTERN_LEFT, sample_rate=sample_rate)
data_provider_right = ddsp.training.data.TFRecordProvider(TRAIN_TFRECORD_FILEPATTERN_RIGHT, sample_rate=sample_rate)
dataset_left = data_provider_left.get_dataset(shuffle=False)
dataset_right = data_provider_right.get_dataset(shuffle=False)
if DRIVE_DIR:
PICKLE_FILE_PATH_LEFT = os.path.join(SAVE_DIR_LEFT, 'dataset_statistics_left.pkl')
PICKLE_FILE_PATH_RIGHT = os.path.join(SAVE_DIR_RIGHT, 'dataset_statistics_right.pkl')
else:
PICKLE_FILE_PATH_LEFT = os.path.join(MODEL_DIR_LEFT, 'dataset_statistics_left.pkl')
PICKLE_FILE_PATH_RIGHT = os.path.join(MODEL_DIR_RIGHT, 'dataset_statistics_right.pkl')
colab_utils.save_dataset_statistics(data_provider_left, PICKLE_FILE_PATH_LEFT, batch_size=1)
colab_utils.save_dataset_statistics(data_provider_right, PICKLE_FILE_PATH_RIGHT, batch_size=1)
###Output
###Markdown
Let's load the dataset in the `ddsp` library and have a look at one of the examples.
###Code
from ddsp.colab import colab_utils
import ddsp.training
from matplotlib import pyplot as plt
import numpy as np
data_provider_left = ddsp.training.data.TFRecordProvider(TRAIN_TFRECORD_FILEPATTERN_LEFT, sample_rate=sample_rate)
dataset_left = data_provider_left.get_dataset(shuffle=False)
data_provider_right = ddsp.training.data.TFRecordProvider(TRAIN_TFRECORD_FILEPATTERN_RIGHT, sample_rate=sample_rate)
dataset_right = data_provider_right.get_dataset(shuffle=False)
try:
ex_left = next(iter(dataset_left))
except StopIteration:
raise ValueError(
'TFRecord contains no examples. Please try re-running the pipeline with '
'different audio file(s).')
try:
ex_right = next(iter(dataset_right))
except StopIteration:
raise ValueError(
'TFRecord contains no examples. Please try re-running the pipeline with '
'different audio file(s).')
print('Top: Left, Bottom: Right')
colab_utils.specplot(ex_left['audio'])
colab_utils.specplot(ex_right['audio'])
f, ax = plt.subplots(6, 1, figsize=(14, 12))
x = np.linspace(0, 4.0, 1000)
ax[0].set_ylabel('loudness_db L')
ax[0].plot(x, ex_left['loudness_db'])
ax[1].set_ylabel('loudness_db R')
ax[1].plot(x, ex_right['loudness_db'])
ax[2].set_ylabel('F0_Hz L')
ax[2].set_xlabel('seconds')
ax[2].plot(x, ex_left['f0_hz'])
ax[3].set_ylabel('F0_Hz R')
ax[3].set_xlabel('seconds')
ax[3].plot(x, ex_right['f0_hz'])
ax[4].set_ylabel('F0_confidence L')
ax[4].set_xlabel('seconds')
ax[4].plot(x, ex_left['f0_confidence'])
ax[5].set_ylabel('F0_confidence R')
ax[5].set_xlabel('seconds')
ax[5].plot(x, ex_right['f0_confidence'])
###Output
###Markdown
Train ModelWe will now train a "solo instrument" model. This means the model is conditioned only on the fundamental frequency (f0) and loudness with no instrument ID or latent timbre feature. If you uploaded audio of multiple instruments, the neural network you train will attempt to model all timbres, but will likely associate certain timbres with different f0 and loudness conditions. First, let's start up a [TensorBoard](https://www.tensorflow.org/tensorboard) to monitor our loss as training proceeds. Initially, TensorBoard will report `No dashboards are active for the current data set.`, but once training begins, the dashboards should appear.
###Code
%reload_ext tensorboard
import tensorboard as tb
if DRIVE_DIR:
tb.notebook.start('--logdir "{}"'.format(SAVE_DIR_LEFT))
tb.notebook.start('--logdir "{}"'.format(SAVE_DIR_RIGHT))
else:
tb.notebook.start('--logdir "{}"'.format(MODEL_DIR_LEFT))
tb.notebook.start('--logdir "{}"'.format(MODEL_DIR_RIGHT))
###Output
_____no_output_____
###Markdown
We will now begin training. Note that we specify [gin configuration](https://github.com/google/gin-config) files for both the model architecture ([solo_instrument.gin](TODO)) and the dataset ([tfrecord.gin](TODO)), which are both predefined in the library. You could also create your own. We then override some of the specific params for `batch_size` (which is defined in the model gin file) and the tfrecord path (which is defined in the dataset file). Training Notes:* Models typically perform well when the loss drops to the range of ~5.0-7.0.* Depending on the dataset this can take anywhere from 5k-40k training steps usually.* On the colab GPU, this can take from around 3-24 hours. * We **highly recommend** saving checkpoints directly to your drive account as colab will restart naturally after about 12 hours and you may lose all of your checkpoints.* By default, checkpoints will be saved every 250 steps with a maximum of 10 checkpoints (at ~60MB/checkpoint this is ~600MB). Feel free to adjust these numbers depending on the frequency of saves you would like and space on your drive.* If you're restarting a session and `DRIVE_DIR` points to a directory that was previously used for training, training should resume at the last checkpoint.
###Code
#@markdown Enter number of steps to train. Restart runtime to interrupt training.
NUM_STEPS = 1000 #@param {type:"slider", min: 1000, max:40000, step:1000}
NUM_LOOPS = int(NUM_STEPS / 1000)
if DRIVE_DIR:
TRAIN_DIR_LEFT = SAVE_DIR_LEFT
TRAIN_DIR_RIGHT = SAVE_DIR_RIGHT
else:
TRAIN_DIR_LEFT = MODEL_DIR_LEFT
TRAIN_DIR_RIGHT = MODEL_DIR_RIGHT
for i in range (0, NUM_LOOPS):
!ddsp_run \
--mode=train \
--alsologtostderr \
--save_dir="$TRAIN_DIR_LEFT" \
--gin_file="$GIN_FILE" \
--gin_file=datasets/tfrecord.gin \
--gin_param="TFRecordProvider.file_pattern='$TRAIN_TFRECORD_FILEPATTERN_LEFT'" \
--gin_param="batch_size=6" \
--gin_param="train_util.train.num_steps=1000" \
--gin_param="train_util.train.steps_per_save=250" \
--gin_param="trainers.Trainer.checkpoints_to_keep=10"
!ddsp_run \
--mode=train \
--alsologtostderr \
--save_dir="$TRAIN_DIR_RIGHT" \
--gin_file="$GIN_FILE" \
--gin_file=datasets/tfrecord.gin \
--gin_param="TFRecordProvider.file_pattern='$TRAIN_TFRECORD_FILEPATTERN_RIGHT'" \
--gin_param="batch_size=6" \
--gin_param="train_util.train.num_steps=1000" \
--gin_param="train_util.train.steps_per_save=250" \
--gin_param="trainers.Trainer.checkpoints_to_keep=10"
# Remove extra gin files.
if DRIVE_DIR:
!cd "$SAVE_DIR_LEFT" && mv "operative_config-0.gin" "$DRIVE_DIR"
!cd "$SAVE_DIR_LEFT" && rm operative_config*
!cd "$DRIVE_DIR" && mv "operative_config-0.gin" "$SAVE_DIR_LEFT"
!cd "$SAVE_DIR_RIGHT" && mv "operative_config-0.gin" "$DRIVE_DIR"
!cd "$SAVE_DIR_RIGHT" && rm operative_config*
!cd "$DRIVE_DIR" && mv "operative_config-0.gin" "$SAVE_DIR_RIGHT"
else:
!cd "$MODEL_DIR_LEFT" && mv "operative_config-0.gin" "$AUDIO_DIR_LEFT"
!cd "$MODEL_DIR_LEFT" && rm operative_config*
!cd "$AUDIO_DIR_LEFT" && mv "operative_config-0.gin" "$MODEL_DIR_LEFT"
!cd "$MODEL_DIR_RIGHT" && mv "operative_config-0.gin" "$AUDIO_DIR_RIGHT"
!cd "$MODEL_DIR_RIGHT" && rm operative_config*
!cd "$AUDIO_DIR_RIGHT" && mv "operative_config-0.gin" "$MODEL_DIR_RIGHT"
###Output
###Markdown
ResynthesisCheck how well the model reconstructs the training data
###Code
!pip install note_seq
from ddsp.colab.colab_utils import play, specplot, download
import ddsp.training
import gin
from matplotlib import pyplot as plt
import numpy as np
from scipy.io.wavfile import write as write_audio
data_provider_left = ddsp.training.data.TFRecordProvider(TRAIN_TFRECORD_FILEPATTERN_LEFT, sample_rate=sample_rate)
data_provider_right = ddsp.training.data.TFRecordProvider(TRAIN_TFRECORD_FILEPATTERN_RIGHT, sample_rate=sample_rate)
dataset_left = data_provider_left.get_batch(batch_size=1, shuffle=False)
dataset_right = data_provider_right.get_batch(batch_size=1, shuffle=False)
try:
batch_left = next(iter(dataset_left))
except OutOfRangeError:
raise ValueError(
'TFRecord contains no examples. Please try re-running the pipeline with '
'different audio file(s).')
try:
batch_right = next(iter(dataset_right))
except OutOfRangeError:
raise ValueError(
'TFRecord contains no examples. Please try re-running the pipeline with '
'different audio file(s).')
# Parse the gin configs.
if DRIVE_DIR:
gin_file_left = os.path.join(SAVE_DIR_LEFT, 'operative_config-0.gin')
gin_file_right = os.path.join(SAVE_DIR_RIGHT, 'operative_config-0.gin')
else:
gin_file_left = os.path.join(MODEL_DIR_LEFT, 'operative_config-0.gin')
gin_file_right = os.path.join(MODEL_DIR_RIGHT, 'operative_config-0.gin')
gin.parse_config_file(gin_file_left)
gin.parse_config_file(gin_file_right)
# Load models
model_left = ddsp.training.models.Autoencoder()
model_right = ddsp.training.models.Autoencoder()
if DRIVE_DIR:
model_left.restore(SAVE_DIR_LEFT)
model_right.restore(SAVE_DIR_RIGHT)
else:
model_left.restore(MODEL_DIR_LEFT)
model_right.restore(MODEL_DIR_RIGHT)
# Resynthesize audio.
audio_left = batch_left['audio']
audio_right = batch_right['audio']
outputs_left = model_left(batch_left, training=False)
audio_gen_left = model_left.get_audio_from_outputs(outputs_left)
outputs_right = model_right(batch_right, training=False)
audio_gen_right = model_right.get_audio_from_outputs(outputs_right)
# Merge to stereo.
audio_left_stereo = np.expand_dims(np.squeeze(audio_left.numpy()), axis=1)
audio_right_stereo = np.expand_dims(np.squeeze(audio_right.numpy()), axis=1)
audio_stereo = np.concatenate((audio_left_stereo, audio_right_stereo), axis=1)
audio_gen_left_stereo = np.expand_dims(np.squeeze(audio_gen_left.numpy()), axis=1)
audio_gen_right_stereo = np.expand_dims(np.squeeze(audio_gen_right.numpy()), axis=1)
audio_gen_stereo = np.concatenate((audio_gen_left_stereo, audio_gen_right_stereo), axis=1)
# Play.
print('Original Audio')
play(audio_stereo, sample_rate=sample_rate)
print('Resynthesis')
play(audio_gen_stereo, sample_rate=sample_rate)
# Plot.
print('Spectrograms: Top two are Original Audio L/R, bottom two are Resynthesis L/R')
specplot(audio_left)
specplot(audio_right)
specplot(audio_gen_left)
specplot(audio_gen_right)
WRITE_PATH = OUTPUT_DIR + "/resynthesis.wav"
write_audio("resynthesis.wav", sample_rate, audio_gen_stereo)
write_audio(WRITE_PATH, sample_rate, audio_gen_stereo)
!ffmpeg-normalize resynthesis.wav -o resynthesis.wav -t -15 -ar 48000 -f
download("resynthesis.wav")
###Output
###Markdown
Timbre Transfer Install & ImportInstall ddsp, define some helper functions, and download the model. This transfers a lot of data and should take a minute or two.
###Code
# Ignore a bunch of deprecation warnings
import warnings
warnings.filterwarnings("ignore")
import copy
import os
import time
import crepe
import ddsp
import ddsp.training
from ddsp.colab import colab_utils
from ddsp.colab.colab_utils import (
auto_tune, detect_notes, fit_quantile_transform,
get_tuning_factor, download, play, record,
specplot, upload, DEFAULT_SAMPLE_RATE)
import gin
from google.colab import files
import librosa
import matplotlib.pyplot as plt
import numpy as np
import pickle
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
# Helper Functions
sample_rate = 48000
n_fft = 2048
print('Done!')
###Output
###Markdown
Primer Audio File
###Code
from google.colab import files
from ddsp.colab.colab_utils import play
import re
#@markdown * Audio should be monophonic (single instrument / voice).
#@markdown * Extracts fundamental frequency (f0) and loudness features.
#@markdown * Choose an audio file on Drive or upload an audio file.
#@markdown * If you are using Drive, place the audio file in the "primers" folder inside the DRIVE_DIR folder. Enter the file name below.
PRIMER_FILE = "" #@param {type:"string"}
DRIVE_OR_UPLOAD = "Drive" #@param ["Drive", "Upload (.wav)"]
# Append the .wav extension if it is missing.
match = re.search(r'\.wav$', PRIMER_FILE)
if not match:
  PRIMER_FILE = PRIMER_FILE + ".wav"
if DRIVE_OR_UPLOAD == "Drive":
PRIMER_PATH = PRIMERS_DIR + "/" + PRIMER_FILE
# Convert to 48kHz.
audio, unused_sample_rate = librosa.load(PRIMER_PATH, sr=48000, mono=False)
if (audio.ndim == 2):
audio = np.swapaxes(audio, 0, 1)
else:
# Load audio sample here (.wav file)
# Just use the first file.
audio_files = files.upload()
fnames = list(audio_files.keys())
audios = []
for fname in fnames:
audio, unused_sample_rate = librosa.load(fname, sr=48000, mono=False)
if (audio.ndim == 2):
audio = np.swapaxes(audio, 0, 1)
audios.append(audio)
audio = audios[0]
# Mono to stereo.
if (audio.ndim == 1):
print('Converting mono to stereo.')
audio = np.stack((audio, audio), axis=-1)
# Setup the session.
ddsp.spectral_ops.reset_crepe()
# Compute features.
audio_left = np.squeeze(audio[:, 0]).astype(np.float32)
audio_right = np.squeeze(audio[:, 1]).astype(np.float32)
audio_left = audio_left[np.newaxis, :]
audio_right = audio_right[np.newaxis, :]
start_time = time.time()
audio_features_left = ddsp.training.metrics.compute_audio_features(audio_left, n_fft=n_fft, sample_rate=sample_rate)
audio_features_right = ddsp.training.metrics.compute_audio_features(audio_right, n_fft=n_fft, sample_rate=sample_rate)
audio_features_left['loudness_db'] = audio_features_left['loudness_db'].astype(np.float32)
audio_features_right['loudness_db'] = audio_features_right['loudness_db'].astype(np.float32)
audio_features_mod_left = None
audio_features_mod_right = None
print('Audio features took %.1f seconds' % (time.time() - start_time))
play(audio, sample_rate=sample_rate)
TRIM = -15
# Plot Features.
fig, ax = plt.subplots(nrows=6,
ncols=1,
sharex=True,
figsize=(6, 16))
ax[0].plot(audio_features_left['loudness_db'][:TRIM])
ax[0].set_ylabel('loudness_db L')
ax[1].plot(audio_features_right['loudness_db'][:TRIM])
ax[1].set_ylabel('loudness_db R')
ax[2].plot(librosa.hz_to_midi(audio_features_left['f0_hz'][:TRIM]))
ax[2].set_ylabel('f0 [midi] L')
ax[3].plot(librosa.hz_to_midi(audio_features_right['f0_hz'][:TRIM]))
ax[3].set_ylabel('f0 [midi] R')
ax[4].plot(audio_features_left['f0_confidence'][:TRIM])
ax[4].set_ylabel('f0 confidence L')
_ = ax[4].set_xlabel('Time step [frame] L')
ax[5].plot(audio_features_right['f0_confidence'][:TRIM])
ax[5].set_ylabel('f0 confidence R')
_ = ax[5].set_xlabel('Time step [frame] R')
###Output
###Markdown
Load the Model
###Code
def find_model_dir(dir_name):
# Iterate through directories until model directory is found
for root, dirs, filenames in os.walk(dir_name):
for filename in filenames:
if filename.endswith(".gin") and not filename.startswith("."):
model_dir = root
break
return model_dir
if DRIVE_DIR:
model_dir_left = find_model_dir(SAVE_DIR_LEFT)
model_dir_right = find_model_dir(SAVE_DIR_RIGHT)
else:
model_dir_left = find_model_dir(MODEL_DIR_LEFT)
model_dir_right = find_model_dir(MODEL_DIR_RIGHT)
gin_file_left = os.path.join(model_dir_left, 'operative_config-0.gin')
gin_file_right = os.path.join(model_dir_right, 'operative_config-0.gin')
# Load the dataset statistics.
DATASET_STATS_LEFT = None
DATASET_STATS_RIGHT = None
dataset_stats_file_left = os.path.join(model_dir_left, 'dataset_statistics_left.pkl')
dataset_stats_file_right = os.path.join(model_dir_right, 'dataset_statistics_right.pkl')
print(f'Loading dataset statistics from {dataset_stats_file_left}')
try:
if tf.io.gfile.exists(dataset_stats_file_left):
with tf.io.gfile.GFile(dataset_stats_file_left, 'rb') as f:
DATASET_STATS_LEFT = pickle.load(f)
except Exception as err:
print('Loading dataset statistics from pickle failed: {}.'.format(err))
print(f'Loading dataset statistics from {dataset_stats_file_right}')
try:
if tf.io.gfile.exists(dataset_stats_file_right):
with tf.io.gfile.GFile(dataset_stats_file_right, 'rb') as f:
DATASET_STATS_RIGHT = pickle.load(f)
except Exception as err:
print('Loading dataset statistics from pickle failed: {}.'.format(err))
# Parse gin config,
with gin.unlock_config():
gin.parse_config_file(gin_file_left, skip_unknown=True)
# Assumes only one checkpoint in the folder, 'ckpt-[iter]'.
if DRIVE_DIR:
latest_checkpoint_fname_left = os.path.basename(tf.train.latest_checkpoint(SAVE_DIR_LEFT))
latest_checkpoint_fname_right = os.path.basename(tf.train.latest_checkpoint(SAVE_DIR_RIGHT))
else:
latest_checkpoint_fname_left = os.path.basename(tf.train.latest_checkpoint(MODEL_DIR_LEFT))
latest_checkpoint_fname_right = os.path.basename(tf.train.latest_checkpoint(MODEL_DIR_RIGHT))
ckpt_left = os.path.join(model_dir_left, latest_checkpoint_fname_left)
ckpt_right = os.path.join(model_dir_right, latest_checkpoint_fname_right)
# Ensure dimensions and sampling rates are equal
time_steps_train = gin.query_parameter('DefaultPreprocessor.time_steps')
n_samples_train = gin.query_parameter('Additive.n_samples')
hop_size = int(n_samples_train / time_steps_train)
time_steps = int(audio_left.shape[1] / hop_size)
n_samples = time_steps * hop_size
# print("===Trained model===")
# print("Time Steps", time_steps_train)
# print("Samples", n_samples_train)
# print("Hop Size", hop_size)
# print("\n===Resynthesis===")
# print("Time Steps", time_steps)
# print("Samples", n_samples)
# print('')
gin_params = [
'Additive.n_samples = {}'.format(n_samples),
'FilteredNoise.n_samples = {}'.format(n_samples),
'DefaultPreprocessor.time_steps = {}'.format(time_steps),
'oscillator_bank.use_angular_cumsum = True', # Avoids cumsum accumulation errors.
]
with gin.unlock_config():
gin.parse_config(gin_params)
# Trim all input vectors to correct lengths
for key in ['f0_hz', 'f0_confidence', 'loudness_db']:
audio_features_left[key] = audio_features_left[key][:time_steps]
audio_features_right[key] = audio_features_right[key][:time_steps]
audio_features_left['audio'] = audio_features_left['audio'][:, :n_samples]
audio_features_right['audio'] = audio_features_right['audio'][:, :n_samples]
# Set up the model just to predict audio given new conditioning
model_left = ddsp.training.models.Autoencoder()
model_right = ddsp.training.models.Autoencoder()
model_left.restore(ckpt_left)
model_right.restore(ckpt_right)
# Build model by running a batch through it.
start_time = time.time()
unused_left = model_left(audio_features_left, training=False)
unused_right = model_right(audio_features_right, training=False)
print('Restoring model took %.1f seconds' % (time.time() - start_time))
#@title Modify conditioning
#@markdown These models were not explicitly trained to perform timbre transfer, so they may sound unnatural if the incoming loudness and frequencies are very different from the training data (which will always be somewhat true).
#@markdown ## Note Detection
#@markdown You can leave this at 1.0 for most cases
threshold = 1 #@param {type:"slider", min: 0.0, max:2.0, step:0.01}
#@markdown ## Automatic
ADJUST = True #@param{type:"boolean"}
#@markdown Quiet parts without notes detected (dB)
quiet = 30 #@param {type:"slider", min: 0, max:60, step:1}
#@markdown Force pitch to nearest note (amount)
autotune = 0 #@param {type:"slider", min: 0.0, max:1.0, step:0.1}
#@markdown ## Manual
#@markdown Shift the pitch (octaves)
pitch_shift = 0 #@param {type:"slider", min:-2, max:2, step:1}
#@markdown Adjust the overall loudness (dB)
loudness_shift = 0 #@param {type:"slider", min:-20, max:20, step:1}
audio_features_mod_left = {k: v.copy() for k, v in audio_features_left.items()}
audio_features_mod_right = {k: v.copy() for k, v in audio_features_right.items()}
## Helper functions.
def shift_ld(audio_features, ld_shift=0.0):
"""Shift loudness by a number of ocatves."""
audio_features['loudness_db'] += ld_shift
return audio_features
def shift_f0(audio_features, pitch_shift=0.0):
"""Shift f0 by a number of ocatves."""
audio_features['f0_hz'] *= 2.0 ** (pitch_shift)
audio_features['f0_hz'] = np.clip(audio_features['f0_hz'],
0.0,
librosa.midi_to_hz(110.0))
return audio_features
mask_on_left = None
mask_on_right = None
if ADJUST and DATASET_STATS_LEFT is not None and DATASET_STATS_RIGHT is not None:
# Detect sections that are "on".
mask_on_left, note_on_value_left = detect_notes(audio_features_left['loudness_db'],
audio_features_left['f0_confidence'],
threshold)
mask_on_right, note_on_value_right = detect_notes(audio_features_right['loudness_db'],
audio_features_right['f0_confidence'],
threshold)
if np.any(mask_on_left) or np.any(mask_on_right):
# Shift the pitch register.
target_mean_pitch_left = DATASET_STATS_LEFT['mean_pitch']
target_mean_pitch_right = DATASET_STATS_RIGHT['mean_pitch']
pitch_left = ddsp.core.hz_to_midi(audio_features_left['f0_hz'])
pitch_right = ddsp.core.hz_to_midi(audio_features_right['f0_hz'])
mean_pitch_left = np.mean(pitch_left[mask_on_left])
mean_pitch_right = np.mean(pitch_right[mask_on_right])
p_diff_left = target_mean_pitch_left - mean_pitch_left
p_diff_right = target_mean_pitch_right - mean_pitch_right
p_diff_octave_left = p_diff_left / 12.0
p_diff_octave_right = p_diff_right / 12.0
round_fn_left = np.floor if p_diff_octave_left > 1.5 else np.ceil
round_fn_right = np.floor if p_diff_octave_right > 1.5 else np.ceil
p_diff_octave_left = round_fn_left(p_diff_octave_left)
p_diff_octave_right = round_fn_right(p_diff_octave_right)
audio_features_mod_left = shift_f0(audio_features_mod_left, p_diff_octave_left)
audio_features_mod_right = shift_f0(audio_features_mod_right, p_diff_octave_right)
# Quantile shift the note_on parts.
_, loudness_norm_left = colab_utils.fit_quantile_transform(
audio_features_left['loudness_db'],
mask_on_left,
inv_quantile=DATASET_STATS_LEFT['quantile_transform'])
_, loudness_norm_right = colab_utils.fit_quantile_transform(
audio_features_right['loudness_db'],
mask_on_right,
inv_quantile=DATASET_STATS_RIGHT['quantile_transform'])
# Turn down the note_off parts.
mask_off_left = np.logical_not(mask_on_left)
mask_off_right = np.logical_not(mask_on_right)
loudness_norm_left[mask_off_left] -= quiet * (1.0 - note_on_value_left[mask_off_left][:, np.newaxis])
loudness_norm_right[mask_off_right] -= quiet * (1.0 - note_on_value_right[mask_off_right][:, np.newaxis])
loudness_norm_left = np.reshape(loudness_norm_left, audio_features_left['loudness_db'].shape)
loudness_norm_right = np.reshape(loudness_norm_right, audio_features_right['loudness_db'].shape)
audio_features_mod_left['loudness_db'] = loudness_norm_left
audio_features_mod_right['loudness_db'] = loudness_norm_right
# Auto-tune.
if autotune:
f0_midi_left = np.array(ddsp.core.hz_to_midi(audio_features_mod_left['f0_hz']))
f0_midi_right = np.array(ddsp.core.hz_to_midi(audio_features_mod_right['f0_hz']))
tuning_factor_left = get_tuning_factor(f0_midi_left, audio_features_mod_left['f0_confidence'], mask_on_left)
tuning_factor_right = get_tuning_factor(f0_midi_right, audio_features_mod_right['f0_confidence'], mask_on_right)
f0_midi_at_left = auto_tune(f0_midi_left, tuning_factor_left, mask_on_left, amount=autotune)
f0_midi_at_right = auto_tune(f0_midi_right, tuning_factor_right, mask_on_right, amount=autotune)
audio_features_mod_left['f0_hz'] = ddsp.core.midi_to_hz(f0_midi_at_left)
audio_features_mod_right['f0_hz'] = ddsp.core.midi_to_hz(f0_midi_at_right)
else:
print('\nSkipping auto-adjust (no notes detected or ADJUST box empty).')
else:
  print('\nSkipping auto-adjust (box not checked or no dataset statistics found).')
# Manual Shifts.
audio_features_mod_left = shift_ld(audio_features_mod_left, loudness_shift)
audio_features_mod_right = shift_ld(audio_features_mod_right, loudness_shift)
audio_features_mod_left = shift_f0(audio_features_mod_left, pitch_shift)
audio_features_mod_right = shift_f0(audio_features_mod_right, pitch_shift)
# Plot Features.
has_mask_left = int(mask_on_left is not None)
has_mask_right = int(mask_on_right is not None)
n_plots = 4 + has_mask_left + has_mask_right
fig, axes = plt.subplots(nrows=n_plots,
ncols=1,
sharex=True,
figsize=(2*n_plots, 10))
if has_mask_left:
ax = axes[0]
ax.plot(np.ones_like(mask_on_left[:TRIM]) * threshold, 'k:')
ax.plot(note_on_value_left[:TRIM])
ax.plot(mask_on_left[:TRIM])
ax.set_ylabel('Note-on Mask L')
ax.set_xlabel('Time step [frame]')
ax.legend(['Threshold', 'Likelihood','Mask'])
if has_mask_right:
ax = axes[0 + has_mask_left]
ax.plot(np.ones_like(mask_on_right[:TRIM]) * threshold, 'k:')
ax.plot(note_on_value_right[:TRIM])
ax.plot(mask_on_right[:TRIM])
ax.set_ylabel('Note-on Mask R')
ax.set_xlabel('Time step [frame]')
ax.legend(['Threshold', 'Likelihood','Mask'])
ax = axes[0 + has_mask_left + has_mask_right]
ax.plot(audio_features_left['loudness_db'][:TRIM])
ax.plot(audio_features_mod_left['loudness_db'][:TRIM])
ax.set_ylabel('loudness_db L')
ax.legend(['Original','Adjusted'])
ax = axes[1 + has_mask_left + has_mask_right]
ax.plot(audio_features_right['loudness_db'][:TRIM])
ax.plot(audio_features_mod_right['loudness_db'][:TRIM])
ax.set_ylabel('loudness_db R')
ax.legend(['Original','Adjusted'])
ax = axes[2 + has_mask_left + has_mask_right]
ax.plot(librosa.hz_to_midi(audio_features_left['f0_hz'][:TRIM]))
ax.plot(librosa.hz_to_midi(audio_features_mod_left['f0_hz'][:TRIM]))
ax.set_ylabel('f0 [midi] L')
_ = ax.legend(['Original','Adjusted'])
ax = axes[3 + has_mask_left + has_mask_right]
ax.plot(librosa.hz_to_midi(audio_features_right['f0_hz'][:TRIM]))
ax.plot(librosa.hz_to_midi(audio_features_mod_right['f0_hz'][:TRIM]))
ax.set_ylabel('f0 [midi] R')
_ = ax.legend(['Original','Adjusted'])
!pip3 install ffmpeg-normalize
from scipy.io.wavfile import write as write_audio
#@title #Resynthesize Audio
af_left = audio_features_left if audio_features_mod_left is None else audio_features_mod_left
af_right = audio_features_right if audio_features_mod_right is None else audio_features_mod_right
# Run a batch of predictions.
start_time = time.time()
outputs_left = model_left(af_left, training=False)
audio_gen_left = model_left.get_audio_from_outputs(outputs_left)
outputs_right = model_right(af_right, training=False)
audio_gen_right = model_right.get_audio_from_outputs(outputs_right)
print('Prediction took %.1f seconds' % (time.time() - start_time))
# Merge to stereo.
audio_gen_left = np.expand_dims(np.squeeze(audio_gen_left.numpy()), axis=1)
audio_gen_right = np.expand_dims(np.squeeze(audio_gen_right.numpy()), axis=1)
audio_gen_stereo = np.concatenate((audio_gen_left, audio_gen_right), axis=1)
# Play
print('Resynthesis with primer')
play(audio_gen_stereo, sample_rate=sample_rate)
WRITE_PATH = OUTPUT_DIR + "/resynthesis_primer.wav"
write_audio("resynthesis_primer.wav", sample_rate, audio_gen_stereo)
write_audio(WRITE_PATH, sample_rate, audio_gen_stereo)
!ffmpeg-normalize resynthesis_primer.wav -o resynthesis_primer.wav -t -15 -ar 48000 -f
colab_utils.download("resynthesis_primer.wav")
###Output
|
AlphaVantage Test.ipynb | ###Markdown
Searching and Basic Functions
Search
There is no obvious ticker-search helper in the Python wrapper or per-function documentation, but it is possible to get a suggestion page based on a web request. Consider the following example. Simply change 'keywords=TICKER_YOU_WANT_TO_SEARCH' and 'apikey=YOUR_API_KEY'. https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords=hot&apikey=NQFFPAG3ZLJNCK5X
Directory
Since AlphaVantage's documentation page is not updated for Python, it is hard to tell which methods and parameters each class exposes. Frequently use dir() to check the available inputs for each function.
###Code
# Example:
#dir(ts)
#dir(ts.get_daily)
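# Added sketch: the SYMBOL_SEARCH endpoint shown above can also be queried
# directly from Python. The requests library is an assumption (it is not
# imported elsewhere in this notebook); the keyword and API key are the same
# placeholders used in the URL above.
import requests
search_params = {'function': 'SYMBOL_SEARCH', 'keywords': 'hot', 'apikey': 'NQFFPAG3ZLJNCK5X'}
print(requests.get('https://www.alphavantage.co/query', params=search_params).json())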
###Output
_____no_output_____
###Markdown
Plotting Time Series
Since the output format is pandas, we can use it to plot the intraday (per-minute) price.
###Code
MSFT_intra, MSFT_meta = ts.get_intraday(symbol = "MSFT", interval = '1min', outputsize = 'full')
print(MSFT_intra.head(5))
MSFT_intra['5. volume'].plot()
# choose either '1. open', '2. high', etc. It is found from the columns printed from head()
plt.title('Intraday T-series for MSFT (1min)')
plt.show()
###Output
1. open 2. high 3. low 4. close 5. volume
date
2020-09-25 20:00:00 207.05 207.10 207.05 207.10 1074.0
2020-09-25 19:57:00 207.00 207.00 207.00 207.00 541.0
2020-09-25 19:53:00 207.05 207.05 207.05 207.05 305.0
2020-09-25 19:51:00 207.01 207.01 207.00 207.00 524.0
2020-09-25 19:50:00 207.13 207.13 207.13 207.13 149.0
###Markdown
Technical Indicators
You can also plot technical indicators. Make sure you import TechIndicators and set the output format to pandas.
###Code
from alpha_vantage.techindicators import TechIndicators
import matplotlib.pyplot as plt
ti = TechIndicators(key = 'NQFFPAG3ZLJNCK5X', output_format = 'pandas')
# dir(ti)
MSFT_ti, MSFT_ti_meta = ti.get_bbands(symbol = 'MSFT', interval = '60min', time_period = 50)
# bbands refers to Bollinger Bands
# time_period is the lookback window: the number of data points (here 50
# sixty-minute bars) used to compute each band value.
MSFT_ti.plot()
plt.title('BBbands indicator for MSFT (60 min)')
plt.show()
# The default figure is cramped for several years of 60-minute bars; see the
# overlay sketch below for a more readable version.
dir(ti.get_bbands)
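# Added sketch: overlay a 60-minute close series on the bands so the plot has
# a price reference, and enlarge the figure so the long time axis is readable.
# Assumes the TimeSeries client `ts` created earlier in this notebook is still
# in scope.
MSFT_60min, _ = ts.get_intraday(symbol = 'MSFT', interval = '60min', outputsize = 'full')
ax = MSFT_ti.plot(figsize=(12, 6))
MSFT_60min['4. close'].plot(ax=ax, label='close')
ax.set_title('Bollinger Bands and close price for MSFT (60 min)')
ax.legend()
plt.show()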
###Output
_____no_output_____
###Markdown
Sector Performance
We can also plot sector performance with AlphaVantage.
###Code
from alpha_vantage.sectorperformance import SectorPerformances
import matplotlib.pyplot as plt
sp = SectorPerformances(key = 'NQFFPAG3ZLJNCK5X', output_format = 'pandas')
# dir(sp)
sector, sector_meta = sp.get_sector()
#print(sector)
# A list of available columns to print and plot
# Rank G: 1 Year, H: 3-Year, I: 5-Year, J: 10-year
sector['Rank H: Year Performance'].plot(kind = 'bar')
plt.title('3-Year Performance (%) per Sector')
plt.tight_layout()
plt.grid()
plt.show()
###Output
_____no_output_____
###Markdown
Cryptocurrencies
AlphaVantage also supports cryptocurrencies like BTC:
###Code
from alpha_vantage.cryptocurrencies import CryptoCurrencies
import matplotlib.pyplot as plt
cc = CryptoCurrencies(key = 'NQFFPAG3ZLJNCK5X', output_format = 'pandas')
# dir(cc)
btc_weekly, btc_weekly_meta = cc.get_digital_currency_weekly(symbol ='BTC', market ='EUR')
print(btc_weekly.head(2))
# View the available columns to extract.
# The full weekly history is returned, so earlier years can be selected by
# slicing the date index, e.g. btc_weekly.loc['2017'].
btc_weekly['4b. close (USD)'].plot()
plt.tight_layout()
plt.title('Weekly close value for Bitcoin (USD)')
plt.grid()
plt.show()
###Output
1a. open (EUR) 1b. open (USD) 2a. high (EUR) 2b. high (USD) \
date
2020-09-27 9383.796604 10920.28 9442.727398 10988.86
2020-09-20 8879.009412 10332.84 9606.793547 11179.79
3a. low (EUR) 3b. low (USD) 4a. close (EUR) 4b. close (USD) \
date
2020-09-27 8710.569426 10136.82 9260.409717 10776.69
2020-09-20 8775.463762 10212.34 9383.796604 10920.28
5. volume 6. market cap (USD)
date
2020-09-27 302319.240343 302319.240343
2020-09-20 374339.558814 374339.558814
###Markdown
Foreign Exchange (FX)
The forex endpoint has no metadata, so it is only available in JSON and pandas formats (using the 'csv' format will raise an error).
###Code
from alpha_vantage.foreignexchange import ForeignExchange
from pprint import pprint
fx = ForeignExchange(key = 'NQFFPAG3ZLJNCK5X')
# There's no metadata in this call (??)
MYRtoUSD, _ = fx.get_currency_exchange_rate(from_currency= 'MYR', to_currency = 'USD')
pprint(MYRtoUSD)
# to make things more neat
# MYRtoUSD['5. Exchange Rate'].plot()
# plt.tight_layout()
# plt.title('Daily close value for Ripple (USD)')
# plt.grid()
# plt.show()
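# Added sketch: historical FX series are also available. The method name
# get_currency_exchange_daily and its parameters are assumed from the
# alpha_vantage package and may differ between versions; matplotlib's plt is
# assumed to still be in scope from the earlier cells.
fx_pd = ForeignExchange(key = 'NQFFPAG3ZLJNCK5X', output_format = 'pandas')
eurusd_daily, _ = fx_pd.get_currency_exchange_daily(from_symbol = 'EUR', to_symbol = 'USD', outputsize = 'compact')
eurusd_daily['4. close'].plot()
plt.title('Daily EUR/USD close')
plt.grid()
plt.show()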
###Output
_____no_output_____
###Markdown
End of Tutorial
Testing Data
###Code
dir(ti)
msft_p, r = ts.get_daily_adjusted(symbol = 'MSFT', outputsize = 'compact')  # outputsize accepts 'compact' (latest 100 points) or 'full'
msft_sma, re = ti.get_sma(symbol = 'MSFT', interval = 'daily', time_period = 50)
msft_wma, ree = ti.get_wma(symbol = 'MSFT', interval = 'daily', time_period = 50)
# you still need a metadata for the plots to work
# metadata uses the same name, yet it still works..?
# Technical indicators have a 50-day delay, does the plot accurately capture that?
# Answer: Yes. Note the blue line is much shorter than green and yellow.
# Yes, the outputsize for TimeSeries and TechIndicator is inconsistent.
# Annoying I know...
print(msft_sma.head(2))
print(msft_wma.head(2))
print(msft_p.head(2))
# Monthly RSI. (Daily RSI is too noisy to predict)
msft_rsi, r = ti.get_rsi(symbol = 'MSFT', interval = 'monthly')
print(msft_rsi.tail(2))
#Plotting
msft_p['4. close'].plot()
msft_wma['WMA'].plot()
msft_sma['SMA'].plot()
# you are allowed to plot in the same chart.
# what if I want to separate the chart?
plt.title('SMA and WMA against adjusted closing (MSFT, daily)')
plt.show()
# Separate plot: .plot() returns a matplotlib Axes, so rename it by setting the title on that Axes.
rsi_ax = msft_rsi['RSI'].plot()
rsi_ax.set_title('Monthly RSI for MSFT')
plt.show()
###Output
SMA
date
2000-01-11 31.9904
2000-01-12 32.0764
WMA
date
2000-01-11 33.8182
2000-01-12 33.8909
1. open 2. high 3. low 4. close 5. adjusted close 6. volume \
date
2020-09-25 203.55 209.04 202.54 207.82 207.82 29437312.0
2020-09-24 199.85 205.57 199.20 203.19 203.19 31202493.0
7. dividend amount 8. split coefficient
date
2020-09-25 0.0 1.0
2020-09-24 0.0 1.0
RSI
date
2020-08-31 85.6510
2020-09-25 75.5467
###Markdown
Update 25 September 2020: Fundamental Data
You may now obtain fundamental data from AlphaVantage's API. Remember to use dir() to find out the available functions.
###Code
from alpha_vantage.fundamentaldata import FundamentalData
fd = FundamentalData(key = 'NQFFPAG3ZLJNCK5X', output_format = 'pandas')
MSFTfd, d = fd.get_company_overview(symbol = 'MSFT')
# Seems like the key isn't fixed for fundamental data yet
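# Added sketch: inspect the overview DataFrame generically, without relying on
# specific column names (they follow the OVERVIEW endpoint's JSON keys).
print(MSFTfd.shape)
print(MSFTfd.T.head(10))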
###Output
_____no_output_____
###Markdown
Stock Prediction with Agatha
Using an LSTM network, Agatha aims to predict close prices for a user-specified number of days into the future. The training data used for reference comes from AlphaVantage.
###Code
# pip install agatha
from agatha import getOrTrainModel, predictFuture
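# Added sketch: Agatha's own API is not exercised in this notebook, so the
# lines below only prepare the kind of AlphaVantage close-price series the
# package would train on. The exact getOrTrainModel/predictFuture signatures
# are left to the package documentation; `ts` is the TimeSeries client
# created earlier in this notebook.
msft_daily, _ = ts.get_daily(symbol = 'MSFT', outputsize = 'full')
close_series = msft_daily['4. close']
print(close_series.tail())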
###Output
_____no_output_____ |
MobileNetV2/mobilenetv2.ipynb | ###Markdown
Data augmentation
###Code
data_augmentation = tf.keras.Sequential([
tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal"),
])
plt.figure(figsize=(10,10))
for images,_ in train_ds.take(2):
for i in range(9):
ax = plt.subplot(3,3,i+1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.axis("off")
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.prefetch(buffer_size=AUTOTUNE)
test_ds = test_ds.prefetch(buffer_size=AUTOTUNE)
###Output
_____no_output_____
###Markdown
Define the model
###Code
preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input
IMG_SHAPE = (160,160,3)
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,include_top=False,weights="imagenet")
image_batch,label_batch = next(iter(train_ds))
feature_batch = base_model(image_batch)
base_model.trainable = False
base_model.summary()
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
feature_batch_average = global_average_layer(feature_batch)
prediction_layer = tf.keras.layers.Dense(2)
prediction_batch = prediction_layer(feature_batch_average)
def get_model():
inputs = tf.keras.Input(shape=(160,160,3))
x = data_augmentation(inputs)
x = preprocess_input(x)
x = base_model(x,training=False)
x = global_average_layer(x)
x = tf.keras.layers.Dropout(0.2,seed=1337)(x)
outputs = prediction_layer(x)
model = tf.keras.Model(inputs,outputs)
return model
model = get_model()
model.summary()
model.compile(tf.keras.optimizers.Adam(),tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),metrics=["accuracy"])
###Output
Model: "mobilenetv2_1.00_160"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 160, 160, 3) 0
__________________________________________________________________________________________________
Conv1 (Conv2D) (None, 80, 80, 32) 864 input_1[0][0]
__________________________________________________________________________________________________
bn_Conv1 (BatchNormalization) (None, 80, 80, 32) 128 Conv1[0][0]
__________________________________________________________________________________________________
Conv1_relu (ReLU) (None, 80, 80, 32) 0 bn_Conv1[0][0]
__________________________________________________________________________________________________
expanded_conv_depthwise (Depthw (None, 80, 80, 32) 288 Conv1_relu[0][0]
__________________________________________________________________________________________________
expanded_conv_depthwise_BN (Bat (None, 80, 80, 32) 128 expanded_conv_depthwise[0][0]
__________________________________________________________________________________________________
expanded_conv_depthwise_relu (R (None, 80, 80, 32) 0 expanded_conv_depthwise_BN[0][0]
__________________________________________________________________________________________________
expanded_conv_project (Conv2D) (None, 80, 80, 16) 512 expanded_conv_depthwise_relu[0][0
__________________________________________________________________________________________________
expanded_conv_project_BN (Batch (None, 80, 80, 16) 64 expanded_conv_project[0][0]
__________________________________________________________________________________________________
block_1_expand (Conv2D) (None, 80, 80, 96) 1536 expanded_conv_project_BN[0][0]
__________________________________________________________________________________________________
block_1_expand_BN (BatchNormali (None, 80, 80, 96) 384 block_1_expand[0][0]
__________________________________________________________________________________________________
block_1_expand_relu (ReLU) (None, 80, 80, 96) 0 block_1_expand_BN[0][0]
__________________________________________________________________________________________________
block_1_pad (ZeroPadding2D) (None, 81, 81, 96) 0 block_1_expand_relu[0][0]
__________________________________________________________________________________________________
block_1_depthwise (DepthwiseCon (None, 40, 40, 96) 864 block_1_pad[0][0]
__________________________________________________________________________________________________
block_1_depthwise_BN (BatchNorm (None, 40, 40, 96) 384 block_1_depthwise[0][0]
__________________________________________________________________________________________________
block_1_depthwise_relu (ReLU) (None, 40, 40, 96) 0 block_1_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_1_project (Conv2D) (None, 40, 40, 24) 2304 block_1_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_1_project_BN (BatchNormal (None, 40, 40, 24) 96 block_1_project[0][0]
__________________________________________________________________________________________________
block_2_expand (Conv2D) (None, 40, 40, 144) 3456 block_1_project_BN[0][0]
__________________________________________________________________________________________________
block_2_expand_BN (BatchNormali (None, 40, 40, 144) 576 block_2_expand[0][0]
__________________________________________________________________________________________________
block_2_expand_relu (ReLU) (None, 40, 40, 144) 0 block_2_expand_BN[0][0]
__________________________________________________________________________________________________
block_2_depthwise (DepthwiseCon (None, 40, 40, 144) 1296 block_2_expand_relu[0][0]
__________________________________________________________________________________________________
block_2_depthwise_BN (BatchNorm (None, 40, 40, 144) 576 block_2_depthwise[0][0]
__________________________________________________________________________________________________
block_2_depthwise_relu (ReLU) (None, 40, 40, 144) 0 block_2_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_2_project (Conv2D) (None, 40, 40, 24) 3456 block_2_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_2_project_BN (BatchNormal (None, 40, 40, 24) 96 block_2_project[0][0]
__________________________________________________________________________________________________
block_2_add (Add) (None, 40, 40, 24) 0 block_1_project_BN[0][0]
block_2_project_BN[0][0]
__________________________________________________________________________________________________
block_3_expand (Conv2D) (None, 40, 40, 144) 3456 block_2_add[0][0]
__________________________________________________________________________________________________
block_3_expand_BN (BatchNormali (None, 40, 40, 144) 576 block_3_expand[0][0]
__________________________________________________________________________________________________
block_3_expand_relu (ReLU) (None, 40, 40, 144) 0 block_3_expand_BN[0][0]
__________________________________________________________________________________________________
block_3_pad (ZeroPadding2D) (None, 41, 41, 144) 0 block_3_expand_relu[0][0]
__________________________________________________________________________________________________
block_3_depthwise (DepthwiseCon (None, 20, 20, 144) 1296 block_3_pad[0][0]
__________________________________________________________________________________________________
block_3_depthwise_BN (BatchNorm (None, 20, 20, 144) 576 block_3_depthwise[0][0]
__________________________________________________________________________________________________
block_3_depthwise_relu (ReLU) (None, 20, 20, 144) 0 block_3_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_3_project (Conv2D) (None, 20, 20, 32) 4608 block_3_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_3_project_BN (BatchNormal (None, 20, 20, 32) 128 block_3_project[0][0]
__________________________________________________________________________________________________
block_4_expand (Conv2D) (None, 20, 20, 192) 6144 block_3_project_BN[0][0]
__________________________________________________________________________________________________
block_4_expand_BN (BatchNormali (None, 20, 20, 192) 768 block_4_expand[0][0]
__________________________________________________________________________________________________
block_4_expand_relu (ReLU) (None, 20, 20, 192) 0 block_4_expand_BN[0][0]
__________________________________________________________________________________________________
block_4_depthwise (DepthwiseCon (None, 20, 20, 192) 1728 block_4_expand_relu[0][0]
__________________________________________________________________________________________________
block_4_depthwise_BN (BatchNorm (None, 20, 20, 192) 768 block_4_depthwise[0][0]
__________________________________________________________________________________________________
block_4_depthwise_relu (ReLU) (None, 20, 20, 192) 0 block_4_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_4_project (Conv2D) (None, 20, 20, 32) 6144 block_4_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_4_project_BN (BatchNormal (None, 20, 20, 32) 128 block_4_project[0][0]
__________________________________________________________________________________________________
block_4_add (Add) (None, 20, 20, 32) 0 block_3_project_BN[0][0]
block_4_project_BN[0][0]
__________________________________________________________________________________________________
block_5_expand (Conv2D) (None, 20, 20, 192) 6144 block_4_add[0][0]
__________________________________________________________________________________________________
block_5_expand_BN (BatchNormali (None, 20, 20, 192) 768 block_5_expand[0][0]
__________________________________________________________________________________________________
block_5_expand_relu (ReLU) (None, 20, 20, 192) 0 block_5_expand_BN[0][0]
__________________________________________________________________________________________________
block_5_depthwise (DepthwiseCon (None, 20, 20, 192) 1728 block_5_expand_relu[0][0]
__________________________________________________________________________________________________
block_5_depthwise_BN (BatchNorm (None, 20, 20, 192) 768 block_5_depthwise[0][0]
__________________________________________________________________________________________________
block_5_depthwise_relu (ReLU) (None, 20, 20, 192) 0 block_5_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_5_project (Conv2D) (None, 20, 20, 32) 6144 block_5_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_5_project_BN (BatchNormal (None, 20, 20, 32) 128 block_5_project[0][0]
__________________________________________________________________________________________________
block_5_add (Add) (None, 20, 20, 32) 0 block_4_add[0][0]
block_5_project_BN[0][0]
__________________________________________________________________________________________________
block_6_expand (Conv2D) (None, 20, 20, 192) 6144 block_5_add[0][0]
__________________________________________________________________________________________________
block_6_expand_BN (BatchNormali (None, 20, 20, 192) 768 block_6_expand[0][0]
__________________________________________________________________________________________________
block_6_expand_relu (ReLU) (None, 20, 20, 192) 0 block_6_expand_BN[0][0]
__________________________________________________________________________________________________
block_6_pad (ZeroPadding2D) (None, 21, 21, 192) 0 block_6_expand_relu[0][0]
__________________________________________________________________________________________________
block_6_depthwise (DepthwiseCon (None, 10, 10, 192) 1728 block_6_pad[0][0]
__________________________________________________________________________________________________
block_6_depthwise_BN (BatchNorm (None, 10, 10, 192) 768 block_6_depthwise[0][0]
__________________________________________________________________________________________________
block_6_depthwise_relu (ReLU) (None, 10, 10, 192) 0 block_6_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_6_project (Conv2D) (None, 10, 10, 64) 12288 block_6_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_6_project_BN (BatchNormal (None, 10, 10, 64) 256 block_6_project[0][0]
__________________________________________________________________________________________________
block_7_expand (Conv2D) (None, 10, 10, 384) 24576 block_6_project_BN[0][0]
__________________________________________________________________________________________________
block_7_expand_BN (BatchNormali (None, 10, 10, 384) 1536 block_7_expand[0][0]
__________________________________________________________________________________________________
block_7_expand_relu (ReLU) (None, 10, 10, 384) 0 block_7_expand_BN[0][0]
__________________________________________________________________________________________________
block_7_depthwise (DepthwiseCon (None, 10, 10, 384) 3456 block_7_expand_relu[0][0]
__________________________________________________________________________________________________
block_7_depthwise_BN (BatchNorm (None, 10, 10, 384) 1536 block_7_depthwise[0][0]
__________________________________________________________________________________________________
block_7_depthwise_relu (ReLU) (None, 10, 10, 384) 0 block_7_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_7_project (Conv2D) (None, 10, 10, 64) 24576 block_7_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_7_project_BN (BatchNormal (None, 10, 10, 64) 256 block_7_project[0][0]
__________________________________________________________________________________________________
block_7_add (Add) (None, 10, 10, 64) 0 block_6_project_BN[0][0]
block_7_project_BN[0][0]
__________________________________________________________________________________________________
block_8_expand (Conv2D) (None, 10, 10, 384) 24576 block_7_add[0][0]
__________________________________________________________________________________________________
block_8_expand_BN (BatchNormali (None, 10, 10, 384) 1536 block_8_expand[0][0]
__________________________________________________________________________________________________
block_8_expand_relu (ReLU) (None, 10, 10, 384) 0 block_8_expand_BN[0][0]
__________________________________________________________________________________________________
block_8_depthwise (DepthwiseCon (None, 10, 10, 384) 3456 block_8_expand_relu[0][0]
__________________________________________________________________________________________________
block_8_depthwise_BN (BatchNorm (None, 10, 10, 384) 1536 block_8_depthwise[0][0]
__________________________________________________________________________________________________
block_8_depthwise_relu (ReLU) (None, 10, 10, 384) 0 block_8_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_8_project (Conv2D) (None, 10, 10, 64) 24576 block_8_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_8_project_BN (BatchNormal (None, 10, 10, 64) 256 block_8_project[0][0]
__________________________________________________________________________________________________
block_8_add (Add) (None, 10, 10, 64) 0 block_7_add[0][0]
block_8_project_BN[0][0]
__________________________________________________________________________________________________
block_9_expand (Conv2D) (None, 10, 10, 384) 24576 block_8_add[0][0]
__________________________________________________________________________________________________
block_9_expand_BN (BatchNormali (None, 10, 10, 384) 1536 block_9_expand[0][0]
__________________________________________________________________________________________________
block_9_expand_relu (ReLU) (None, 10, 10, 384) 0 block_9_expand_BN[0][0]
__________________________________________________________________________________________________
block_9_depthwise (DepthwiseCon (None, 10, 10, 384) 3456 block_9_expand_relu[0][0]
__________________________________________________________________________________________________
block_9_depthwise_BN (BatchNorm (None, 10, 10, 384) 1536 block_9_depthwise[0][0]
__________________________________________________________________________________________________
block_9_depthwise_relu (ReLU) (None, 10, 10, 384) 0 block_9_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_9_project (Conv2D) (None, 10, 10, 64) 24576 block_9_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_9_project_BN (BatchNormal (None, 10, 10, 64) 256 block_9_project[0][0]
__________________________________________________________________________________________________
block_9_add (Add) (None, 10, 10, 64) 0 block_8_add[0][0]
block_9_project_BN[0][0]
__________________________________________________________________________________________________
block_10_expand (Conv2D) (None, 10, 10, 384) 24576 block_9_add[0][0]
__________________________________________________________________________________________________
block_10_expand_BN (BatchNormal (None, 10, 10, 384) 1536 block_10_expand[0][0]
__________________________________________________________________________________________________
block_10_expand_relu (ReLU) (None, 10, 10, 384) 0 block_10_expand_BN[0][0]
__________________________________________________________________________________________________
block_10_depthwise (DepthwiseCo (None, 10, 10, 384) 3456 block_10_expand_relu[0][0]
__________________________________________________________________________________________________
block_10_depthwise_BN (BatchNor (None, 10, 10, 384) 1536 block_10_depthwise[0][0]
__________________________________________________________________________________________________
block_10_depthwise_relu (ReLU) (None, 10, 10, 384) 0 block_10_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_10_project (Conv2D) (None, 10, 10, 96) 36864 block_10_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_10_project_BN (BatchNorma (None, 10, 10, 96) 384 block_10_project[0][0]
__________________________________________________________________________________________________
block_11_expand (Conv2D) (None, 10, 10, 576) 55296 block_10_project_BN[0][0]
__________________________________________________________________________________________________
block_11_expand_BN (BatchNormal (None, 10, 10, 576) 2304 block_11_expand[0][0]
__________________________________________________________________________________________________
block_11_expand_relu (ReLU) (None, 10, 10, 576) 0 block_11_expand_BN[0][0]
__________________________________________________________________________________________________
block_11_depthwise (DepthwiseCo (None, 10, 10, 576) 5184 block_11_expand_relu[0][0]
__________________________________________________________________________________________________
block_11_depthwise_BN (BatchNor (None, 10, 10, 576) 2304 block_11_depthwise[0][0]
__________________________________________________________________________________________________
block_11_depthwise_relu (ReLU) (None, 10, 10, 576) 0 block_11_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_11_project (Conv2D) (None, 10, 10, 96) 55296 block_11_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_11_project_BN (BatchNorma (None, 10, 10, 96) 384 block_11_project[0][0]
__________________________________________________________________________________________________
block_11_add (Add) (None, 10, 10, 96) 0 block_10_project_BN[0][0]
block_11_project_BN[0][0]
__________________________________________________________________________________________________
block_12_expand (Conv2D) (None, 10, 10, 576) 55296 block_11_add[0][0]
__________________________________________________________________________________________________
block_12_expand_BN (BatchNormal (None, 10, 10, 576) 2304 block_12_expand[0][0]
__________________________________________________________________________________________________
block_12_expand_relu (ReLU) (None, 10, 10, 576) 0 block_12_expand_BN[0][0]
__________________________________________________________________________________________________
block_12_depthwise (DepthwiseCo (None, 10, 10, 576) 5184 block_12_expand_relu[0][0]
__________________________________________________________________________________________________
block_12_depthwise_BN (BatchNor (None, 10, 10, 576) 2304 block_12_depthwise[0][0]
__________________________________________________________________________________________________
block_12_depthwise_relu (ReLU) (None, 10, 10, 576) 0 block_12_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_12_project (Conv2D) (None, 10, 10, 96) 55296 block_12_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_12_project_BN (BatchNorma (None, 10, 10, 96) 384 block_12_project[0][0]
__________________________________________________________________________________________________
block_12_add (Add) (None, 10, 10, 96) 0 block_11_add[0][0]
block_12_project_BN[0][0]
__________________________________________________________________________________________________
block_13_expand (Conv2D) (None, 10, 10, 576) 55296 block_12_add[0][0]
__________________________________________________________________________________________________
block_13_expand_BN (BatchNormal (None, 10, 10, 576) 2304 block_13_expand[0][0]
__________________________________________________________________________________________________
block_13_expand_relu (ReLU) (None, 10, 10, 576) 0 block_13_expand_BN[0][0]
__________________________________________________________________________________________________
block_13_pad (ZeroPadding2D) (None, 11, 11, 576) 0 block_13_expand_relu[0][0]
__________________________________________________________________________________________________
block_13_depthwise (DepthwiseCo (None, 5, 5, 576) 5184 block_13_pad[0][0]
__________________________________________________________________________________________________
block_13_depthwise_BN (BatchNor (None, 5, 5, 576) 2304 block_13_depthwise[0][0]
__________________________________________________________________________________________________
block_13_depthwise_relu (ReLU) (None, 5, 5, 576) 0 block_13_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_13_project (Conv2D) (None, 5, 5, 160) 92160 block_13_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_13_project_BN (BatchNorma (None, 5, 5, 160) 640 block_13_project[0][0]
__________________________________________________________________________________________________
block_14_expand (Conv2D) (None, 5, 5, 960) 153600 block_13_project_BN[0][0]
__________________________________________________________________________________________________
block_14_expand_BN (BatchNormal (None, 5, 5, 960) 3840 block_14_expand[0][0]
__________________________________________________________________________________________________
block_14_expand_relu (ReLU) (None, 5, 5, 960) 0 block_14_expand_BN[0][0]
__________________________________________________________________________________________________
block_14_depthwise (DepthwiseCo (None, 5, 5, 960) 8640 block_14_expand_relu[0][0]
__________________________________________________________________________________________________
block_14_depthwise_BN (BatchNor (None, 5, 5, 960) 3840 block_14_depthwise[0][0]
__________________________________________________________________________________________________
block_14_depthwise_relu (ReLU) (None, 5, 5, 960) 0 block_14_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_14_project (Conv2D) (None, 5, 5, 160) 153600 block_14_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_14_project_BN (BatchNorma (None, 5, 5, 160) 640 block_14_project[0][0]
__________________________________________________________________________________________________
block_14_add (Add) (None, 5, 5, 160) 0 block_13_project_BN[0][0]
block_14_project_BN[0][0]
__________________________________________________________________________________________________
block_15_expand (Conv2D) (None, 5, 5, 960) 153600 block_14_add[0][0]
__________________________________________________________________________________________________
block_15_expand_BN (BatchNormal (None, 5, 5, 960) 3840 block_15_expand[0][0]
__________________________________________________________________________________________________
block_15_expand_relu (ReLU) (None, 5, 5, 960) 0 block_15_expand_BN[0][0]
__________________________________________________________________________________________________
block_15_depthwise (DepthwiseCo (None, 5, 5, 960) 8640 block_15_expand_relu[0][0]
__________________________________________________________________________________________________
block_15_depthwise_BN (BatchNor (None, 5, 5, 960) 3840 block_15_depthwise[0][0]
__________________________________________________________________________________________________
block_15_depthwise_relu (ReLU) (None, 5, 5, 960) 0 block_15_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_15_project (Conv2D) (None, 5, 5, 160) 153600 block_15_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_15_project_BN (BatchNorma (None, 5, 5, 160) 640 block_15_project[0][0]
__________________________________________________________________________________________________
block_15_add (Add) (None, 5, 5, 160) 0 block_14_add[0][0]
block_15_project_BN[0][0]
__________________________________________________________________________________________________
block_16_expand (Conv2D) (None, 5, 5, 960) 153600 block_15_add[0][0]
__________________________________________________________________________________________________
block_16_expand_BN (BatchNormal (None, 5, 5, 960) 3840 block_16_expand[0][0]
__________________________________________________________________________________________________
block_16_expand_relu (ReLU) (None, 5, 5, 960) 0 block_16_expand_BN[0][0]
__________________________________________________________________________________________________
block_16_depthwise (DepthwiseCo (None, 5, 5, 960) 8640 block_16_expand_relu[0][0]
__________________________________________________________________________________________________
block_16_depthwise_BN (BatchNor (None, 5, 5, 960) 3840 block_16_depthwise[0][0]
__________________________________________________________________________________________________
block_16_depthwise_relu (ReLU) (None, 5, 5, 960) 0 block_16_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_16_project (Conv2D) (None, 5, 5, 320) 307200 block_16_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_16_project_BN (BatchNorma (None, 5, 5, 320) 1280 block_16_project[0][0]
__________________________________________________________________________________________________
Conv_1 (Conv2D) (None, 5, 5, 1280) 409600 block_16_project_BN[0][0]
__________________________________________________________________________________________________
Conv_1_bn (BatchNormalization) (None, 5, 5, 1280) 5120 Conv_1[0][0]
__________________________________________________________________________________________________
out_relu (ReLU) (None, 5, 5, 1280) 0 Conv_1_bn[0][0]
==================================================================================================
Total params: 2,257,984
Trainable params: 0
Non-trainable params: 2,257,984
__________________________________________________________________________________________________
###Markdown
Train and test the model on the test dataset
###Code
if __name__=="__main__":
initial_epochs = 1
loss0,accuracy0 = model.evaluate(val_ds)
print("Initial loss: {:.2f} %".format(100*loss0))
print("Initial accuracy: {:.2f} %".format(100*accuracy0))
checkpoint = tf.keras.callbacks.ModelCheckpoint("airbus.h5",save_weights_only=False,monitor="val_accuracy",save_best_only=True)
model.fit(train_ds,epochs=initial_epochs,validation_data=val_ds,callbacks=[checkpoint])
best = tf.keras.models.load_model("airbus.h5")
loss,accuracy = best.evaluate(test_ds)
print("\nTest accuracy: {:.2f} %".format(100*accuracy))
print("Test loss: {:.2f} %".format(100*loss))
###Output
1788/1788 [==============================] - 75s 40ms/step - loss: 1.0186 - accuracy: 0.3431
Initial loss: 101.86 %
Initial accuracy: 34.31 %
11173/11173 [==============================] - 513s 46ms/step - loss: 0.0602 - accuracy: 0.9783 - val_loss: 0.0456 - val_accuracy: 0.9837
|
notebooks/score_matching/NF_implicit.ipynb | ###Markdown
Normalizing Flow with implicit coupling layers
###Code
!pip install --quiet --upgrade dm-haiku optax tensorflow-probability
!pip install --quiet git+https://github.com/astrodeepnet/sbi_experiments.git@ramp_bijector
%pylab inline
%load_ext autoreload
%autoreload 2
import jax
import jax.numpy as jnp
import numpy as onp
import haiku as hk
import optax
from functools import partial
from tqdm import tqdm
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
tfpk = tfp.math.psd_kernels
d=2
batch_size = 1024
from sbiexpt.distributions import get_two_moons
from sbiexpt.bijectors import ImplicitRampBijector
@jax.jit
def get_batch(seed):
two_moons = get_two_moons(sigma= 0.05)
batch = two_moons.sample(batch_size, seed=seed) / 5 + 0.45
return batch
batch = get_batch(jax.random.PRNGKey(0))
hist2d(batch[:,0], batch[:,1],100, range=[[0,1],[0,1]]); gca().set_aspect('equal');
class CustomCoupling(hk.Module):
"""This is the coupling layer used in the Flow."""
def __call__(self, x, output_units, **condition_kwargs):
# NN to get a b and c
net = hk.Linear(128)(x)
net = jax.nn.leaky_relu(net)
net = hk.Linear(128)(net)
net = jax.nn.leaky_relu(net)
log_a_bound=4
min_density_lower_bound=1e-4
log_a = jax.nn.tanh(hk.Linear(output_units)(net)) * log_a_bound
b = jax.nn.sigmoid(hk.Linear(output_units)(net))
c = min_density_lower_bound + jax.nn.sigmoid(hk.Linear(output_units)(net)) * (1 - min_density_lower_bound)
return ImplicitRampBijector(lambda x: x**5, jnp.exp(log_a),b,c)
class Flow(hk.Module):
"""A normalizing flow using the coupling layers defined
above."""
def __call__(self):
chain = tfb.Chain([
tfb.RealNVP(d//2, bijector_fn=CustomCoupling(name = 'b1')),
tfb.Permute([1,0]),
tfb.RealNVP(d//2, bijector_fn=CustomCoupling(name = 'b2')),
tfb.Permute([1,0]),
tfb.RealNVP(d//2, bijector_fn=CustomCoupling(name = 'b3')),
tfb.Permute([1,0]),
tfb.RealNVP(d//2, bijector_fn=CustomCoupling(name = 'b4')),
tfb.Permute([1,0]),
])
nvp = tfd.TransformedDistribution(
tfd.Independent(tfd.TruncatedNormal(0.5*jnp.ones(d),
0.3*jnp.ones(d),
0.01,0.99),
reinterpreted_batch_ndims=1),
bijector=chain)
return nvp
model_NF = hk.without_apply_rng(hk.transform(lambda x : Flow()().log_prob(x)))
model_inv = hk.without_apply_rng(hk.transform(lambda x : Flow()().bijector.inverse(x)))
model_sample = hk.without_apply_rng(hk.transform(lambda : Flow()().sample(1024, seed=next(rng_seq))))
rng_seq = hk.PRNGSequence(12)
params = model_NF.init(next(rng_seq), jnp.zeros([1,d]))
# TO DO
@jax.jit
def loss_fn(params, batch):
log_prob = model_NF.apply(params, batch)
return -jnp.mean(log_prob)
@jax.jit
def update(params, opt_state, batch):
"""Single SGD update step."""
loss, grads = jax.value_and_grad(loss_fn)(params, batch)
updates, new_opt_state = optimizer.update(grads, opt_state)
new_params = optax.apply_updates(params, updates)
return loss, new_params, new_opt_state
learning_rate=0.0002
optimizer = optax.adam(learning_rate)
opt_state = optimizer.init(params)
losses = []
master_seed = hk.PRNGSequence(0)
for step in tqdm(range(5000)):
batch = get_batch(next(master_seed))
l, params, opt_state = update(params, opt_state, batch)
losses.append(l)
plot(losses[25:])
x = jnp.stack(jnp.meshgrid(jnp.linspace(0.1,0.9,128),
jnp.linspace(0.1,0.9,128)),-1)
im = model_NF.apply(params, x.reshape(-1,2)).reshape([128,128])
contourf(x[...,0],x[...,1],jnp.exp(im),100); colorbar()
hist2d(batch[:,0], batch[:,1],100, range=[[0,1],[0,1]]);gca().set_aspect('equal');
x = model_inv.apply(params, batch)
hist2d(x[:,0], x[:,1],100, range=[[0,1],[0,1]]);gca().set_aspect('equal');
x = model_sample.apply(params)
hist2d(x[:,0], x[:,1],100, range=[[0,1],[0,1]]);gca().set_aspect('equal');
coupl = hk.without_apply_rng(hk.transform(lambda x: CustomCoupling(name = 'b2')(x,1)))
predicate = lambda module_name, name, value: 'flow/b2' in module_name
params_b1 = hk.data_structures.filter(predicate, params)
params_b1=hk.data_structures.to_mutable_dict(params_b1)
params_b1={k.split('flow/')[1]:params_b1[k] for k in params_b1.keys()}
t = jnp.linspace(0,1)
bij = coupl.apply(params_b1, t.reshape([50,1]))
plot(t,bij(t)[30])
inv = bij.inverse(1*bij(t))
plot(t,inv.T)
plot(bij.forward_log_det_jacobian(t.reshape([50,1])))
plot(bij.inverse_log_det_jacobian(t.reshape([50,1])))
###Output
_____no_output_____ |
CEP 2/Solution_Deep Q Learning Stock Trading_CEP_2.ipynb | ###Markdown
PG AI - Reinforcement Learning **Problem Statement** Prepare an agent by implementing Deep Q-Learning that can perform unsupervised stock trading. The aim of this project is to train an agent that uses Q-learning and neural networks to predict the profit or loss by building a model and implementing it on a dataset that is available for evaluation. The stock trading environment provides the agent with a set of actions: * Buy * Sell * Sit This project has the following sections: * Import the libraries * Create a DQN agent * Preprocess the data * Train and build the model * Evaluate the model and agent **Steps to perform** In the section **create a DQN agent**, create a class called agent where: * Action size is defined as 3 * Experience replay memory is a deque of maximum length 1000 * An empty inventory list holds the stocks that have already been bought * The agent must possess the following hyperparameters: * gamma = 0.95 * epsilon = 1.0 * epsilon_final = 0.01 * epsilon_decay = 0.995 Note: It is advised to compare the results using different hyperparameter values. * The neural network has 3 hidden layers * Action and experience replay are defined **Solution** **Import the libraries**
###Code
##import keras
#from keras.models import Sequential
##from keras.models import load_model
#from keras.layers import Dense
#from keras.optimizers import Adam
#import numpy as np
#import random
#from collections import deque
import random
import gym
import numpy as np
from collections import deque
#from keras import backend as K
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
import tensorflow as tf
import tensorflow.keras
#from keras.models import Sequential
from tensorflow.keras.models import load_model
###Output
_____no_output_____
###Markdown
**Create a DQN agent**
###Code
#Action space include 3 actions: Buy, Sell, and Sit
#Setting up the experience replay memory to deque with 1000 elements inside it
#Empty list with inventory is created that contains the stocks that were already bought
#Setting up gamma to 0.95, that helps to maximize the current reward over the long-term
#Epsilon parameter determines whether to use a random action or to use the model for the action.
#In the beginning random actions are encouraged, hence epsilon is set to 1.0 when the model is not trained.
#And over time epsilon is reduced to 0.01 in order to decrease the random actions and use the trained model
#We then set the speed of decreasing epsilon in the epsilon_decay parameter
class Agent:
def __init__(self, state_size, is_eval=False, model_name=""):
self.state_size = state_size # normalized previous days
self.action_size = 3 # sit, buy, sell
self.memory = deque(maxlen=1000)
self.inventory = []
self.model_name = model_name
self.is_eval = is_eval
self.gamma = 0.95
self.epsilon = 1.0
self.epsilon_min = 0.01
self.epsilon_decay = 0.995
self.model = load_model("" + model_name) if is_eval else self._model()
#Defining our neural network:
#Define the neural network function called _model and it just takes the keyword self
#Define the model with Sequential()
#Define states i.e. the previous n days and stock prices of the days
#Defining 3 hidden layers in this network
#Changing the activation function to relu because mean-squared error is used for the loss
def _model(self):
model = Sequential()
model.add(Dense(units=64, input_dim=self.state_size, activation="relu"))
model.add(Dense(units=32, activation="relu"))
model.add(Dense(units=8, activation="relu"))
model.add(Dense(self.action_size, activation="linear"))
model.compile(loss="mse", optimizer=Adam(lr=0.001))
return model
def act(self, state):
if not self.is_eval and np.random.rand() <= self.epsilon:
return random.randrange(self.action_size)
options = self.model.predict(state)
return np.argmax(options[0])
def expReplay(self, batch_size):
mini_batch = []
l = len(self.memory)
for i in range(l - batch_size + 1, l):
mini_batch.append(self.memory[i])
for state, action, reward, next_state, done in mini_batch:
target = reward
if not done:
target = reward + self.gamma * np.amax(self.model.predict(next_state)[0])
target_f = self.model.predict(state)
target_f[0][action] = target
self.model.fit(state, target_f, epochs=1, verbose=0)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
###Output
_____no_output_____
###Markdown
**Preprocess the stock market data**
###Code
import math
# prints formatted price
def formatPrice(n):
return ("-$" if n < 0 else "$") + "{0:.2f}".format(abs(n))
# returns the vector containing stock data from a fixed file
def getStockDataVec(key):
vec = []
lines = open("" + key + ".csv", "r").read().splitlines()
for line in lines[1:]:
vec.append(float(line.split(",")[4]))
return vec
# returns the sigmoid
def sigmoid(x):
return 1 / (1 + math.exp(-x))
# returns an n-day state representation ending at time t
def getState(data, t, n):
d = t - n + 1
block = data[d:t + 1] if d >= 0 else -d * [data[0]] + data[0:t + 1] # pad with t0
res = []
for i in range(n - 1):
res.append(sigmoid(block[i + 1] - block[i]))
return np.array([res])
###Output
_____no_output_____
###Markdown
**Train and build the model**
###Code
import sys
if len(sys.argv) != 4:
print ("Usage: python train.py [stock] [window] [episodes]")
exit()
#stock_name = input("Enter stock_name, window_size, Episode_count")
#window_size = input()
#episode_count = input()
stock_name = "GSPC_Training_Dataset"
window_size = 10
episode_count = 1
#Fill the given information when prompted:
#Enter stock_name = GSPC_Training_Dataset
#window_size = 10
#Episode_count = 100 or it can be 10 or 20 or 30 and so on.
agent = Agent(window_size)
data = getStockDataVec(stock_name)
l = len(data) - 1
batch_size = 32
for e in range(episode_count + 1):
print ("Episode " + str(e) + "/" + str(episode_count))
state = getState(data, 0, window_size + 1)
total_profit = 0
agent.inventory = []
for t in range(l):
action = agent.act(state)
# sit
next_state = getState(data, t + 1, window_size + 1)
reward = 0
if action == 1: # buy
agent.inventory.append(data[t])
print ("Buy: " + formatPrice(data[t]))
elif action == 2 and len(agent.inventory) > 0: # sell
bought_price = agent.inventory.pop(0)
reward = max(data[t] - bought_price, 0)
total_profit += data[t] - bought_price
print ("Sell: " + formatPrice(data[t]) + " | Profit: " + formatPrice(data[t] - bought_price))
done = True if t == l - 1 else False
agent.memory.append((state, action, reward, next_state, done))
state = next_state
if done:
print ("--------------------------------")
print ("Total Profit: " + formatPrice(total_profit))
if len(agent.memory) > batch_size:
agent.expReplay(batch_size)
#if e % 10 == 0:
agent.model.save("model_ep" + str(e))
###Output
Usage: python train.py [stock] [window] [episodes]
Episode 0/1
WARNING:tensorflow:From C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\training\tracking\tracking.py:111: Model.state_updates (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.
Instructions for updating:
This property should not be used in TensorFlow 2.0, as updates are applied automatically.
WARNING:tensorflow:From C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\training\tracking\tracking.py:111: Layer.updates (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.
Instructions for updating:
This property should not be used in TensorFlow 2.0, as updates are applied automatically.
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1333.34
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1298.35
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1295.86 | Profit: -$37.48
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1300.80 | Profit: $2.45
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1326.65
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1329.47 | Profit: $2.82
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1354.95
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1364.17
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1373.73 | Profit: $18.78
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1366.01
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1349.47
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1340.89
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1332.53
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1330.31 | Profit: -$33.86
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1318.80 | Profit: -$47.21
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1315.92 | Profit: -$33.55
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1326.61
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1301.53 | Profit: -$39.36
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1278.94
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1255.27 | Profit: -$77.26
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1245.86
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1267.65
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1257.94 | Profit: -$68.67
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1239.94 | Profit: -$39.00
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1234.18 | Profit: -$11.68
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1253.80 | Profit: -$13.85
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1166.71
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1173.56 | Profit: $6.85
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1150.53
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1170.81
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1142.62 | Profit: -$7.91
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1122.14
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1117.58 | Profit: -$53.23
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1139.83 | Profit: $17.69
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1103.25
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1137.59
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1168.38 | Profit: $65.13
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1191.81 | Profit: $54.22
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1238.16
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1209.47
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1228.75
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1234.52 | Profit: -$3.64
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1253.05 | Profit: $43.58
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1249.46 | Profit: $20.71
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1267.43
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1248.58
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1266.61 | Profit: -$0.82
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1263.51
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1261.20
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1255.18 | Profit: $6.60
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1245.67 | Profit: -$17.84
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1248.92 | Profit: -$12.28
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1255.82
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1267.11
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1283.57 | Profit: $27.75
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1270.03
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1276.96 | Profit: $9.85
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1264.96
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1254.39 | Profit: -$15.64
INFO:tensorflow:Assets written to: model_ep0\assets
Buy: $1255.85
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1241.60 | Profit: -$23.36
INFO:tensorflow:Assets written to: model_ep0\assets
Sell: $1219.87 | Profit: -$35.98
INFO:tensorflow:Assets written to: model_ep0\assets
INFO:tensorflow:Assets written to: model_ep0\assets
###Markdown
**Evaluate the model and agent**
###Code
if len(sys.argv) != 3:
print ("Usage: python evaluate.py [stock] [model]")
exit()
stock_name = "GSPC_Evaluation_Dataset"
#model_name = r"C:\Users\saaim\Jupyter\PGP-AI---RL\CEP 2-20201025T144252Z-001\CEP 2\model_ep2\saved_model.pb"
#Note:
#Fill the given information when prompted:
#Enter stock_name = GSPC_Evaluation_Dataset
#Model_name = respective model name
for i in range(episode_count):
model_name = r"model_ep" + str(i)
print ("\n--------------------------------\n" + model_name + "\n--------------------------------\n")
model = tf.keras.models.load_model(model_name)
window_size = model.layers[0].input.shape.as_list()[1]
agent = Agent(window_size, True, model_name)
data = getStockDataVec(stock_name)
l = len(data) - 1
batch_size = 32
state = getState(data, 0, window_size + 1)
total_profit = 0
agent.inventory = []
for t in range(l):
action = agent.act(state)
# sit
next_state = getState(data, t + 1, window_size + 1)
reward = 0
if action == 1: # buy
agent.inventory.append(data[t])
print ("Buy: " + formatPrice(data[t]))
elif action == 2 and len(agent.inventory) > 0: # sell
bought_price = agent.inventory.pop(0)
reward = max(data[t] - bought_price, 0)
total_profit += data[t] - bought_price
print ("Sell: " + formatPrice(data[t]) + " | Profit: " + formatPrice(data[t] - bought_price))
done = True if t == l - 1 else False
agent.memory.append((state, action, reward, next_state, done))
state = next_state
if done:
print (stock_name + " Total Profit: " + formatPrice(total_profit))
print ("--------------------------------")
###Output
_____no_output_____ |
research/notebook/DetectSuddenChanges-paper.ipynb | ###Markdown
Soil moisture level shift detection brainstorm* Reliable soil moisture sensor * cluster* Interested in moisture change levels instead of the level itself* Noisy environment * apply moving average * change level threshold * moisture probes might not be installed in the same way * a worm might be near the probes * a root might be near the probes * noise is natural in analog devices* Monitoring sudden soil moisture sensor changes caused by: * rain (negative change level) * irrigation (negative change level) * hard sun (positive change level) * manual interventions (positive change level) * system malfunction (positive or negative change level)* Linear regression * know moisture trends: positive alpha is expected * score evaluates alpha* consider multiple sensors * consensus/voting? * sensors might not be in sync * delays between change levels * time proximity references* https://centre-borelli.github.io/ruptures-docs/* https://techrando.com/2019/08/14/a-brief-introduction-to-change-point-detection-using-python/* https://charles.doffy.net/files/sp-review-2020.pdf* http://www.laurentoudre.fr/publis/TOG-SP-19.pdf Test location* LATITUDE: -22.019989* LONGITUDE: -47.312531 Hardware Server Side* Raspberry Pi 3B Client Side* **ESP8266*** **74hc4051N**: 8-channel analog multiplexers/demultiplexers* **Capacitive Soil Moisture Sensor** Data flow* Every 30 seconds ESP8266 collects and sends sensor data to the Raspberry Pi* Communication between ESP8266 and Raspberry Pi happens over a private wifi network, using the MQTT protocol.
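One way to implement the linear-regression idea from the list above is to estimate the local slope (alpha) of each probe over a short rolling window and flag windows whose slope magnitude is unusually large. The sketch below is a minimal illustration of that idea, operating on a single smoothed probe series; the window length and slope threshold are illustrative assumptions, not tuned values.

```python
import numpy as np
import pandas as pd

def rolling_slope(series: pd.Series, window: int = 30) -> pd.Series:
    """Least-squares slope (alpha) of the readings inside each rolling window."""
    x = np.arange(window)
    return series.rolling(window).apply(lambda y: np.polyfit(x, y, 1)[0], raw=True)

def slope_ruptures(series: pd.Series, window: int = 30, threshold: float = 0.5) -> pd.Series:
    """Windows whose local trend is unusually steep: negative slopes suggest rain or
    irrigation, positive slopes suggest drying, hard sun or manual intervention."""
    alpha = rolling_slope(series, window)
    return alpha[alpha.abs() > threshold]
```

Applied column-wise to the smoothed probe dataframe, the per-probe results could then be combined with a simple vote, which is one way to realise the consensus idea listed above.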
###Code
# CONSTANTS
MOISTURES_PROBES = ['MUX0','MUX1','MUX2','MUX3','MUX4','MUX5','MUX6','MUX7']
ROLLING_WINDOW = 30 # RUPTURE_LEVEL_THRESHOLD and PCT_CHANGE_PERIOD are affected by this value
RUPTURE_LEVEL_THRESHOLD = 0.015
PCT_CHANGE_PERIOD = 10 # RUPTURE_LEVEL_THRESHOLD is affected by this value
# UTILS
def plot(series):
plt.rcParams['figure.figsize'] = [15,7]
plt.rcParams['timezone'] = 'America/Sao_Paulo'
fig,ax = plt.subplots()
x = mdate.epoch2num(series.index)
fmt = mdate.DateFormatter('%y-%m-%d %H:%M')
ax.xaxis.set_major_formatter(fmt)
plt.xticks( rotation=25 )
plt.plot_date(x, series, linestyle='solid', marker='None')
plt.legend(MOISTURES_PROBES)
plt.show()
# RAW DATA FROM 2020-12-26 15:30 TO 2020-12-26 17:30
df = pandas.read_pickle('./detect_sudden_change_dataset.pkl')
plot(df)
df.describe()
# Filtering the noise ...
dfr = df.rolling(ROLLING_WINDOW).mean().dropna()
plot(dfr)
#Percentage change between the current and a prior element
# Finding negative or positive slopes ...
# percent change over given number of period.
pct_change_series = dfr.pct_change(periods=PCT_CHANGE_PERIOD).dropna()
plot(pct_change_series)
def detect_rupture(data):
#Percentage change between the current and a prior element
# Finding negative or positive slopes ...
# percent change over given number of period.
pct_change_series = data.pct_change(periods=PCT_CHANGE_PERIOD).dropna()
### Gathering data
ruptures={}
min_probes={}
max_probes={}
for mux in MOISTURES_PROBES:
min_entry={}
min_entry['epoch'] = pct_change_series[mux].idxmin()
min_entry['timestamp'] = datetime.datetime.fromtimestamp(min_entry['epoch']).strftime('%Y-%m-%d %H:%M:%S')
min_entry['value'] = pct_change_series[mux][min_entry['epoch']]
if min_entry['value'] < -RUPTURE_LEVEL_THRESHOLD:
min_probes[mux] = min_entry
max_entry={}
max_entry['epoch'] = pct_change_series[mux].idxmax()
max_entry['timestamp'] = datetime.datetime.fromtimestamp(max_entry['epoch']).strftime('%Y-%m-%d %H:%M:%S')
max_entry['value'] = pct_change_series[mux][max_entry['epoch']]
if max_entry['value'] > RUPTURE_LEVEL_THRESHOLD:
max_probes[mux] = max_entry
ruptures['downward'] = pandas.DataFrame(data=min_probes).T
ruptures['upward'] = pandas.DataFrame(data=max_probes).T
return ruptures
ruptures = detect_rupture(dfr)
ruptures['downward']
ruptures['upward']
###Output
_____no_output_____
###Markdown
DARK SKY forecast weather log (2020-12-26) URL: https://api.darksky.net/forecast/[key]/-22.019989,-47.312531,1609007400?units=si&lang=pt&exclude=currently,flags,daily
###Code
hourly_forecast = pandas.read_pickle('./detect_sudden_change_darksky_dataset.pkl')
start_time = 1608998400 # 1PM
#rupture estimate time: 1609010280 ~4PM
end_time = 1609020000 # 7PM
hourly_forecast[(hourly_forecast.index > start_time) & (hourly_forecast.index < end_time)]
###Output
_____no_output_____ |
energy.ipynb | ###Markdown
[← Back to Index](index.html) Energy and RMSE The **energy** ([Wikipedia](https://en.wikipedia.org/wiki/Energy_(signal_processing%29); FMP, p. 66) of a signal corresponds to the total magnitude of the signal. For audio signals, that roughly corresponds to how loud the signal is. The energy in a signal is defined as$$ \sum_n \left| x(n) \right|^2 $$ The **root-mean-square energy (RMSE)** in a signal is defined as$$ \sqrt{ \frac{1}{N} \sum_n \left| x(n) \right|^2 } $$ Let's load a signal:
###Code
x, sr = librosa.load('audio/simple_loop.wav')
sr
x.shape
librosa.get_duration(x, sr)
###Output
_____no_output_____
###Markdown
Listen to the signal:
###Code
ipd.Audio(x, rate=sr)
###Output
_____no_output_____
###Markdown
Plot the signal:
###Code
librosa.display.waveplot(x, sr=sr)
###Output
_____no_output_____
###Markdown
Compute the short-time energy using a list comprehension:
###Code
hop_length = 256
frame_length = 512
energy = numpy.array([
sum(abs(x[i:i+frame_length]**2))
for i in range(0, len(x), hop_length)
])
energy.shape
###Output
_____no_output_____
###Markdown
Compute the RMSE using [`librosa.feature.rmse`](https://librosa.github.io/librosa/generated/librosa.feature.rmse.html):
###Code
rmse = librosa.feature.rmse(x, frame_length=frame_length, hop_length=hop_length, center=True)
rmse.shape
rmse = rmse[0]
###Output
_____no_output_____
###Markdown
Plot both the energy and RMSE along with the waveform:
###Code
frames = range(len(energy))
t = librosa.frames_to_time(frames, sr=sr, hop_length=hop_length)
librosa.display.waveplot(x, sr=sr, alpha=0.4)
plt.plot(t, energy/energy.max(), 'r--') # normalized for visualization
plt.plot(t[:len(rmse)], rmse/rmse.max(), color='g') # normalized for visualization
plt.legend(('Energy', 'RMSE'))
###Output
_____no_output_____
###Markdown
Questions Write a function, `strip`, that removes leading silence from a signal. Make sure it works for a variety of signals recorded in different environments and with different signal-to-noise ratios (SNR).
###Code
def strip(x, frame_length, hop_length):
# Compute RMSE.
rmse = librosa.feature.rmse(x, frame_length=frame_length, hop_length=hop_length, center=True)
# Identify the first frame index where RMSE exceeds a threshold.
thresh = 0.01
frame_index = 0
while rmse[0][frame_index] < thresh:
frame_index += 1
# Convert units of frames to samples.
start_sample_index = librosa.frames_to_samples(frame_index, hop_length=hop_length)
# Return the trimmed signal.
return x[start_sample_index:]
###Output
_____no_output_____
###Markdown
Let's see if it works.
###Code
y = strip(x, frame_length, hop_length)
ipd.Audio(y, rate=sr)
librosa.display.waveplot(y, sr=sr)
###Output
_____no_output_____
###Markdown
[← Back to Index](index.html) Energy and RMSE The **energy** ([Wikipedia](https://en.wikipedia.org/wiki/Energy_(signal_processing%29); FMP, p. 66) of a signal corresponds to the total magnitude of the signal. For audio signals, that roughly corresponds to how loud the signal is. The energy in a signal is defined as$$ \sum_n \left| x(n) \right|^2 $$ The **root-mean-square energy (RMSE)** in a signal is defined as$$ \sqrt{ \frac{1}{N} \sum_n \left| x(n) \right|^2 } $$ Let's load a signal:
###Code
x, sr = librosa.load('audio/simple_loop.wav')
sr
x.shape
librosa.get_duration(x, sr)
###Output
_____no_output_____
###Markdown
Listen to the signal:
###Code
ipd.Audio(x, rate=sr)
###Output
_____no_output_____
###Markdown
Plot the signal:
###Code
librosa.display.waveplot(x, sr=sr)
###Output
_____no_output_____
###Markdown
Compute the short-time energy using a list comprehension:
###Code
hop_length = 256
frame_length = 512
energy = numpy.array([
sum(abs(x[i:i+frame_length]**2))
for i in range(0, len(x), hop_length)
])
energy.shape
plt.plot(energy)
###Output
_____no_output_____
###Markdown
Compute the RMSE using [`librosa.feature.rmse`](https://librosa.github.io/librosa/generated/librosa.feature.rmse.html):
###Code
rmse = librosa.feature.rms(x, frame_length=frame_length, hop_length=hop_length, center=True)
rmse.shape
rmse = rmse[0]
plt.plot(rmse)
###Output
_____no_output_____
###Markdown
Plot both the energy and RMSE along with the waveform:
###Code
frames = range(len(energy))
t = librosa.frames_to_time(frames, sr=sr, hop_length=hop_length)
len(energy), len(frames), len(t)
librosa.display.waveplot(x, sr=sr, alpha=0.4)
plt.plot(t, energy/energy.max(), 'r--') # normalized for visualization
plt.plot(t[:len(rmse)], rmse/rmse.max(), color='g') # normalized for visualization
plt.legend(('Energy', 'RMSE'))
###Output
_____no_output_____
###Markdown
Questions Write a function, `strip`, that removes leading silence from a signal. Make sure it works for a variety of signals recorded in different environments and with different signal-to-noise ratios (SNR).
###Code
def strip(x, frame_length, hop_length):
# Compute RMSE.
rmse = librosa.feature.rms(x, frame_length=frame_length, hop_length=hop_length, center=True)
# Identify the first frame index where RMSE exceeds a threshold.
thresh = 0.01
frame_index = 0
while rmse[0][frame_index] < thresh:
frame_index += 1
# Convert units of frames to samples.
start_sample_index = librosa.frames_to_samples(frame_index, hop_length=hop_length)
# Return the trimmed signal.
return x[start_sample_index:]
###Output
_____no_output_____
###Markdown
Let's see if it works.
###Code
y = strip(x, frame_length, hop_length)
len(x), len(y)
ipd.Audio(y, rate=sr)
librosa.display.waveplot(y, sr=sr)
###Output
_____no_output_____
###Markdown
Smart Infrastructure Energy Management
###Code
import numpy
import pandas
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_selection import RFE
from sklearn.ensemble import ExtraTreesRegressor
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix
from sklearn.model_selection import cross_validate
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.utils import np_utils
from sklearn.model_selection import StratifiedKFold
from keras.constraints import maxnorm
#from sklearn.metrics import explained_variance_score
#from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# load dataset
dataframe = pandas.read_csv("energy_dataset.csv")
# Assign names to Columns
dataframe.columns = ['relative_compactness', 'surface_area', 'wall_area', 'roof_area', 'overall_height', 'orientation', 'glazing_area', 'glazing_area_distribution', 'heating_load', 'cooling_load']
print("Head:", dataframe.head())
print("Statistical Description:", dataframe.describe())
print("Shape:", dataframe.shape)
print("Data Types:", dataframe.dtypes)
###Output
Data Types: relative_compactness float64
surface_area float64
wall_area float64
roof_area float64
overall_height float64
orientation int64
glazing_area float64
glazing_area_distribution int64
heating_load float64
cooling_load float64
dtype: object
###Markdown
'overall_height' has the highest correlation with 'heating_load' and 'cooling_load' (a positive correlation), followed by 'roof_area' for both outputs (a negative correlation); 'orientation' has the least correlation
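A quick numeric check of these correlation statements (a small sketch, assuming `dataframe` with the column names assigned above; the heatmap further below shows the same information graphically):

```python
# Correlation of every input feature with the two targets,
# ordered by the strength of the relationship with 'heating_load'
targets = ['heating_load', 'cooling_load']
corr = dataframe.corr()[targets].drop(index=targets)
print(corr.reindex(corr['heating_load'].abs().sort_values(ascending=False).index))
```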
###Code
dataset = dataframe.values
X = dataset[:,0:8]
Y = dataset[:,8]
Y2 = dataset[:,9]
#Feature Selection
model = ExtraTreesRegressor()
rfe = RFE(model, 3)
fit = rfe.fit(X, Y)
print("Number of Features: ", fit.n_features_)
print("Selected Features: ", fit.support_)
print("Feature Ranking: ", fit.ranking_)
###Output
Number of Features: 3
Selected Features: [False True False True True False False False]
Feature Ranking: [4 1 3 1 1 6 2 5]
###Markdown
'wall_area', 'roof_area' and 'overall_height' were the top 3 features selected for predicting 'heating_load' using Recursive Feature Elimination; the 2nd and 3rd selected features were actually among the attributes with the highest correlation with 'heating_load'
###Code
#Feature Selection
model = ExtraTreesRegressor()
rfe = RFE(model, 3)
fit = rfe.fit(X, Y2)
print("Number of Features: ", fit.n_features_)
print("Selected Features: ", fit.support_)
print("Feature Ranking: ", fit.ranking_)
###Output
Number of Features: 3
Selected Features: [ True False True False True False False False]
Feature Ranking: [1 3 1 6 1 5 2 4]
###Markdown
'wall_area', 'glazing_area' and 'overall_height' were the top 3 features selected for predicting 'cooling_load' using Recursive Feature Elimination
###Code
plt.hist((dataframe.heating_load))
plt.hist((dataframe.cooling_load))
plt.hist((dataframe.heating_load))
plt.hist((dataframe.cooling_load))
###Output
_____no_output_____
###Markdown
Most of the dataset's samples fall between 10 and 20 for both the 'heating_load' and 'cooling_load' regression outputs, with a positive skew
###Code
dataframe.plot(kind='density', subplots=True, layout=(3,4), sharex=False, sharey=False)
###Output
C:\Users\Arnav\Anaconda3\envs\gpu\lib\site-packages\pandas\plotting\_matplotlib\tools.py:298: MatplotlibDeprecationWarning:
The rowNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().rowspan.start instead.
layout[ax.rowNum, ax.colNum] = ax.get_visible()
C:\Users\Arnav\Anaconda3\envs\gpu\lib\site-packages\pandas\plotting\_matplotlib\tools.py:298: MatplotlibDeprecationWarning:
The colNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().colspan.start instead.
layout[ax.rowNum, ax.colNum] = ax.get_visible()
C:\Users\Arnav\Anaconda3\envs\gpu\lib\site-packages\pandas\plotting\_matplotlib\tools.py:304: MatplotlibDeprecationWarning:
The rowNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().rowspan.start instead.
if not layout[ax.rowNum + 1, ax.colNum]:
C:\Users\Arnav\Anaconda3\envs\gpu\lib\site-packages\pandas\plotting\_matplotlib\tools.py:304: MatplotlibDeprecationWarning:
The colNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().colspan.start instead.
if not layout[ax.rowNum + 1, ax.colNum]:
C:\Users\Arnav\Anaconda3\envs\gpu\lib\site-packages\pandas\plotting\_matplotlib\tools.py:298: MatplotlibDeprecationWarning:
The rowNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().rowspan.start instead.
layout[ax.rowNum, ax.colNum] = ax.get_visible()
C:\Users\Arnav\Anaconda3\envs\gpu\lib\site-packages\pandas\plotting\_matplotlib\tools.py:298: MatplotlibDeprecationWarning:
The colNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().colspan.start instead.
layout[ax.rowNum, ax.colNum] = ax.get_visible()
C:\Users\Arnav\Anaconda3\envs\gpu\lib\site-packages\pandas\plotting\_matplotlib\tools.py:304: MatplotlibDeprecationWarning:
The rowNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().rowspan.start instead.
if not layout[ax.rowNum + 1, ax.colNum]:
C:\Users\Arnav\Anaconda3\envs\gpu\lib\site-packages\pandas\plotting\_matplotlib\tools.py:304: MatplotlibDeprecationWarning:
The colNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().colspan.start instead.
if not layout[ax.rowNum + 1, ax.colNum]:
###Markdown
The majority of the features have a positive skew except for a few; 'orientation' and 'overall_height' have a fairly even distribution
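The skew can also be quantified directly; a small sketch, assuming `dataframe` as loaded above (values near zero indicate a roughly symmetric distribution, positive values a right skew):

```python
# Skewness of every column, strongest positive skew first
print(dataframe.skew().sort_values(ascending=False))
```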
###Code
axes = plt.subplots(nrows=2, ncols=3, figsize=(6, 6))
dataframe.plot(kind='box', subplots=True, layout=(3,4), sharex=False, sharey=False)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(dataframe.corr(), vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = numpy.arange(0,9,1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(dataframe.columns)
ax.set_yticklabels(dataframe.columns)
###Output
_____no_output_____
###Markdown
'overall_height' has the highest positive correlation, as expected
###Code
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
num_instances = len(X)
models = []
models.append(('LiR', LinearRegression()))
models.append(('Ridge', Ridge()))
models.append(('Lasso', Lasso()))
models.append(('ElasticNet', ElasticNet()))
models.append(('Bag_Re', BaggingRegressor()))
models.append(('RandomForest', RandomForestRegressor()))
models.append(('ExtraTreesRegressor', ExtraTreesRegressor()))
models.append(('KNN', KNeighborsRegressor()))
models.append(('CART', DecisionTreeRegressor()))
models.append(('SVM', SVR()))
# Evaluations
results = []
names = []
scoring = []
for name, model in models:
# Fit the model
model.fit(X, Y)
predictions = model.predict(X)
# Evaluate the model
kfold = KFold(n_splits=10)
cv_results = cross_val_score(model, X, Y, cv=10)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
#boxplot algorithm Comparison
fig = plt.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
###Output
_____no_output_____
###Markdown
'ExtraTrees Regressor' and 'Random Forest' are the best estimators/models for 'heating_load'
###Code
# Evaluations
results = []
names = []
scoring = []
for name, model in models:
# Fit the model
model.fit(X, Y2)
predictions = model.predict(X)
# Evaluate the model
kfold = KFold(n_splits=10)
cv_results = cross_val_score(model, X, Y2, cv=10)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
#boxplot algorithm Comparison
fig = plt.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()
###Output
_____no_output_____
###Markdown
'Random Forest' and 'Bagging Regressor' are the best estimators/models for 'cooling_load'; they can be explored further and their hyperparameters tuned, as sketched below
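One possible starting point for that tuning is a small grid search over a few Random Forest hyperparameters; the parameter grid, cross-validation setting and scoring choice below are illustrative assumptions rather than recommended values.

```python
from sklearn.model_selection import GridSearchCV

param_grid = {
    'n_estimators': [50, 100, 200],
    'max_depth': [None, 5, 10],
    'min_samples_leaf': [1, 2, 4],
}
grid = GridSearchCV(RandomForestRegressor(random_state=seed),
                    param_grid, cv=5, scoring='r2', n_jobs=-1)
grid.fit(X, Y2)  # tune against the 'cooling_load' target
print(grid.best_params_, grid.best_score_)
```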
###Code
# Define 10-fold Cross Valdation Test Harness
kfold = KFold(n_splits=5, shuffle=True, random_state=seed)
cvscores = []
for train ,test in kfold.split(X,Y,groups=None):
# create model
model = Sequential()
model.add(Dense(15, input_dim=8, init='uniform', activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(8, init='uniform', activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Dense(5, init='uniform', activation='relu'))
model.add(Dense(1, init='uniform', activation='relu'))
# Compile model
model.compile(loss='mean_absolute_error', optimizer='sgd')
# Fit the model
model.fit(X[train], Y[train], epochs=300, batch_size=10, verbose=0)
# Evaluate the model
scores = model.evaluate(X[test], Y[test], verbose=0)
print("%s: %.2f%%" % ("score", 100-scores))
cvscores.append(100-scores)
print("%.2f%% (+/- %.2f%%)" % (numpy.mean(cvscores), numpy.std(cvscores)))
# Define 10-fold Cross Valdation Test Harness
kfold = KFold(n_splits=3, shuffle=True, random_state=seed)
cvscores = []
for train, test in kfold.split(X, Y2):
# create model
model = Sequential()
model.add(Dense(15, input_dim=8, init='uniform', activation='relu'))
#model.add(Dropout(0.2))
#model.add(Dense(8, init='uniform', activation='relu', kernel_constraint=maxnorm(3)))
#model.add(Dropout(0.2))
model.add(Dense(5, init='uniform', activation='relu'))
model.add(Dense(1, init='uniform', activation='relu'))
# Compile model
model.compile(loss='mean_absolute_error', optimizer='sgd')
# Fit the model
model.fit(X[train], Y2[train], epochs=300, batch_size=10, verbose=0)
# Evaluate the model
scores = model.evaluate(X[test], Y2[test], verbose=0)
print("%s: %.2f%%" % ("score", 100-scores))
cvscores.append(100-scores)
print("%.2f%% (+/- %.2f%%)" % (numpy.mean(cvscores), numpy.std(cvscores)))
cvscores
###Output
_____no_output_____
###Markdown
[← Back to Index](index.html) Energy and RMSE The **energy** ([Wikipedia](https://en.wikipedia.org/wiki/Energy_(signal_processing%29); FMP, p. 66) of a signal corresponds to the total magnitude of the signal. For audio signals, that roughly corresponds to how loud the signal is. The energy in a signal is defined as$$ \sum_n \left| x(n) \right|^2 $$ The **root-mean-square energy (RMSE)** in a signal is defined as$$ \sqrt{ \frac{1}{N} \sum_n \left| x(n) \right|^2 } $$ Let's load a signal:
###Code
x, sr = librosa.load('audio/simple_loop.wav')
###Output
_____no_output_____
###Markdown
Listen to the signal:
###Code
ipd.Audio(x, rate=sr)
###Output
_____no_output_____
###Markdown
Plot the signal:
###Code
librosa.display.waveplot(x, sr=sr)
###Output
_____no_output_____
###Markdown
Compute the short-time energy using a list comprehension:
###Code
hop_length = 256
frame_length = 1024
energy = numpy.array([
sum(abs(x[i:i+frame_length]**2))
for i in range(0, len(x), hop_length)
])
energy.shape
###Output
_____no_output_____
###Markdown
Compute the RMSE using [`librosa.feature.rmse`](https://librosa.github.io/librosa/generated/librosa.feature.rmse.html):
###Code
rmse = librosa.feature.rmse(x, frame_length=frame_length, hop_length=hop_length)[0]
rmse.shape
###Output
_____no_output_____
###Markdown
Plot both the energy and RMSE along with the waveform:
###Code
frames = range(len(energy))
t = librosa.frames_to_time(frames, sr=sr, hop_length=hop_length)
librosa.display.waveplot(x, sr=sr, alpha=0.4)
plt.plot(t, energy/energy.max(), 'r--') # normalized for visualization
plt.plot(t[:len(rmse)], rmse/rmse.max(), color='g') # normalized for visualization
plt.legend(('Energy', 'RMSE'))
###Output
_____no_output_____ |
Anomaly_Detection_Example.ipynb | ###Markdown
Anomaly Detection Example “Outliers are not necessarily a bad thing. These are just observations that are not following the same pattern as the other ones. But it can be the case that an outlier is very interesting. For example, if in a biological experiment, a rat is not dead whereas all others are, then it would be very interesting to understand why. This could lead to new scientific discoveries. So, it is important to detect outliers.” – Pierre Lafaye de Micheaux, Author and Statistician The following example was inspired by this example. It uses a Python toolkit dedicated to Outlier Detection called PyOD; additional info is available here. PyOD is a comprehensive and scalable Python toolkit for detecting outlying objects in multivariate data. This exciting yet challenging field is commonly referred to as Outlier Detection or Anomaly Detection.
###Code
#install the needed toolkit
!pip install pyod
#import std packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# Import models from PyOD
from pyod.models.abod import ABOD
from pyod.models.cblof import CBLOF
from pyod.models.hbos import HBOS
from pyod.models.iforest import IForest
from pyod.models.knn import KNN
from pyod.models.lof import LOF
#Import data-generation tool from PyOD
from pyod.utils.data import generate_data, get_outliers_inliers
###Output
_____no_output_____
###Markdown
Setup
###Code
random_state = np.random.RandomState(3)
outliers_fraction = 0.1
# Define six outlier detection tools to be compared
#
classifiers = {
'Angle-based Outlier Detector (ABOD)': ABOD(contamination=outliers_fraction),
'Histogram-base Outlier Detection (HBOS)': HBOS(contamination=outliers_fraction),
'Cluster-based Local Outlier Factor (CBLOF)':CBLOF(contamination=outliers_fraction,check_estimator=False, random_state=random_state),
'Isolation Forest': IForest(contamination=outliers_fraction,random_state=random_state),
'K Nearest Neighbors (KNN)': KNN(contamination=outliers_fraction),
'Average KNN': KNN(method='mean',contamination=outliers_fraction)
}
###Output
_____no_output_____
###Markdown
Data gathering and visualization
###Code
#generate random data with two features
X_train, Y_train,X_test, Y_test = generate_data(n_train=500,n_test=200, n_features=2,random_state=3,contamination=outliers_fraction)
# store outliers and inliers in different numpy arrays
x_outliers, x_inliers = get_outliers_inliers(X_train,Y_train)
xt_outliers, xt_inliers = get_outliers_inliers(X_test,Y_test)
n_inliers = len(x_inliers)
n_outliers = len(x_outliers)
#separate the two features and use it to plot the data
F1 = X_train[:,[0]].reshape(-1,1)
F2 = X_train[:,[1]].reshape(-1,1)
# create a meshgrid
xx , yy = np.meshgrid(np.linspace(-10, 10, 200), np.linspace(-10, 10, 200))
# scatter plot
plt.figure(figsize=[15,9])
plt.scatter(x_outliers[:,0],x_outliers[:,1],c='black',edgecolor='k',label='Outliers')
plt.scatter(x_inliers[:,0],x_inliers[:,1],c='white',edgecolor='k',label='Inliers')
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Train different models, evaluate and visualize results
###Code
#set the figure size
plt.figure(figsize=(19, 20))
dfx = pd.DataFrame(X_train)
dfx['y'] = Y_train
for i, (clf_name,clf) in enumerate(classifiers.items()) :
# fit the dataset to the model
clf.fit(X_train)
# predict raw anomaly score
scores_pred = clf.decision_function(X_train)*-1
# prediction of a datapoint category outlier or inlier
y_pred = clf.predict(X_train)
# no of errors in prediction
n_errors = (y_pred != Y_train).sum()
dfx['outlier'] = y_pred.tolist()
# IX1 - inlier feature 1, IX2 - inlier feature 2
IX1 = np.array(dfx[0][dfx['outlier'] == 0]).reshape(-1,1)
IX2 = np.array(dfx[1][dfx['outlier'] == 0]).reshape(-1,1)
# OX1 - outlier feature 1, OX2 - outlier feature 2
OX1 = dfx[0][dfx['outlier'] == 1].values.reshape(-1,1)
OX2 = dfx[1][dfx['outlier'] == 1].values.reshape(-1,1)
# True - outlier feature 1, OX2 - outlier feature 2
TX1 = dfx[0][dfx['y'] == 1].values.reshape(-1,1)
TX2 = dfx[1][dfx['y'] == 1].values.reshape(-1,1)
text ='No of mis-detected outliers : '+clf_name+" "+str(n_errors)
if(n_errors==0):
text ="\033[1m"+"\033[91m"+'No of mis-detected outliers : '+clf_name+" "+str(n_errors)+"\033[0m"
print(text)
# rest of the code is to create the visualization
# threshold value to consider a datapoint inlier or outlier
threshold = stats.scoreatpercentile(scores_pred,100 *outliers_fraction)
# decision function calculates the raw anomaly score for every point
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) * -1
Z = Z.reshape(xx.shape)
subplot = plt.subplot(2, 3, i + 1)
# fill blue colormap from minimum anomaly score to threshold value
subplot.contourf(xx, yy, Z, levels = np.linspace(Z.min(), threshold, 10),cmap=plt.cm.Blues_r)
# draw red contour line where anomaly score is equal to threshold
a = subplot.contour(xx, yy, Z, levels=[threshold],linewidths=2, colors='red')
# fill orange contour lines where range of anomaly score is from threshold to maximum anomaly score
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],colors='orange')
# scatter plot of inliers with white dots
b = subplot.scatter(IX1,IX2, c='white',s=100, edgecolor='k')
# scatter plot of detected outliers with black dots
c = subplot.scatter(OX1,OX2, c='black',s=100, edgecolor='k')
# scatter plot of true outliers with red dots
d = subplot.scatter(x_outliers[:,0],x_outliers[:,1], c='red',s=20,)
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c, d],
['learned decision function', 'inliers', 'detected outliers','true outliers'],
loc='lower right')
subplot.set_title(clf_name)
subplot.set_xlim((-10, 10))
subplot.set_ylim((-10, 10))
plt.show()
###Output
_____no_output_____
###Markdown
Test Dataset
###Code
#set the figure size
plt.figure(figsize=(19, 20))
dfxt = pd.DataFrame(X_test)
dfxt['y'] = Y_test
for i, (clf_name,clf) in enumerate(classifiers.items()) :
# predict raw anomaly score
scores_pred = clf.decision_function(X_test)*-1
# prediction of a datapoint category outlier or inlier
y_pred = clf.predict(X_test)
# no of errors in prediction
n_errors = (y_pred != Y_test).sum()
dfxt['outlier'] = y_pred.tolist()
# IX1 - inlier feature 1, IX2 - inlier feature 2
    IX1 = np.array(dfxt[0][dfxt['outlier'] == 0]).reshape(-1,1)
    IX2 = np.array(dfxt[1][dfxt['outlier'] == 0]).reshape(-1,1)
# OX1 - outlier feature 1, OX2 - outlier feature 2
OX1 = dfxt[0][dfxt['outlier'] == 1].values.reshape(-1,1)
OX2 = dfxt[1][dfxt['outlier'] == 1].values.reshape(-1,1)
# True - outlier feature 1, OX2 - outlier feature 2
TX1 = dfxt[0][dfxt['y'] == 1].values.reshape(-1,1)
TX2 = dfxt[1][dfxt['y'] == 1].values.reshape(-1,1)
text ='No of mis-detected outliers : '+clf_name+" "+str(n_errors)
if(n_errors==0):
text ="\033[1m"+"\033[91m"+'No of mis-detected outliers : '+clf_name+" "+str(n_errors)+"\033[0m"
print(text)
# rest of the code is to create the visualization
# threshold value to consider a datapoint inlier or outlier
threshold = stats.scoreatpercentile(scores_pred,100 *outliers_fraction)
# decision function calculates the raw anomaly score for every point
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) * -1
Z = Z.reshape(xx.shape)
subplot = plt.subplot(2, 3, i + 1)
# fill blue colormap from minimum anomaly score to threshold value
subplot.contourf(xx, yy, Z, levels = np.linspace(Z.min(), threshold, 10),cmap=plt.cm.Blues_r)
# draw red contour line where anomaly score is equal to threshold
a = subplot.contour(xx, yy, Z, levels=[threshold],linewidths=2, colors='red')
# fill orange contour lines where range of anomaly score is from threshold to maximum anomaly score
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],colors='orange')
# scatter plot of inliers with white dots
#b = subplot.scatter(X_train[:-n_outliers, 0], X_train[:-n_outliers, 1], c='white',s=100, edgecolor='k')
b = subplot.scatter(IX1,IX2, c='white',s=100, edgecolor='k')
# scatter plot of outliers with black dots
#c = subplot.scatter(X_train[-n_outliers:, 0], X_train[-n_outliers:, 1], c='black',s=100, edgecolor='k')
c = subplot.scatter(OX1,OX2, c='black',s=100, edgecolor='k')
# scatter plot of true outliers with red dots
d = subplot.scatter(xt_outliers[:,0],xt_outliers[:,1], c='red',s=20,)
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c, d],
['learned decision function', 'inliers', 'detected outliers','true outliers'],
loc='lower right')
subplot.set_title(clf_name)
subplot.set_xlim((-10, 10))
subplot.set_ylim((-10, 10))
plt.show()
###Output
_____no_output_____ |
decision_trees_practice/decision_trees_2.ipynb | ###Markdown
Decision Trees Using a decision tree to predict contact lens type 1. Collect data: the provided text file 2. Prepare data: parse the tab-delimited data rows 3. Analyze data: quickly inspect the data to make sure it is parsed correctly, and plot the final tree diagram 4. Train the algorithm: use the create_tree() function from earlier 5. Test the algorithm: write a test function to verify that the decision tree correctly classifies the given data instances 6. Use the algorithm: store the tree data structure so that it does not have to be rebuilt the next time it is used
###Code
from decision_trees_practice.trees import create_tree
import os
project_path = os.getcwd()
filename = os.path.join(project_path,'decision_trees_practice','lenses.txt')
lensers_labels = ['age','prescript','astigmatic','tearRate']
fr = open(filename,'r')
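# Sketch of the remaining steps listed above, assuming create_tree(dataset, labels)
# has the same signature as the function built earlier in this module;
# the pickle file name is illustrative.
import pickle

# Step 2: parse the tab-delimited rows of lenses.txt
lenses = [line.strip().split('\t') for line in fr.readlines()]

# Step 4: train - build the tree with the previously written create_tree()
lenses_tree = create_tree(lenses, lensers_labels[:])

# Step 6: store the tree structure so it does not have to be rebuilt next time
with open(os.path.join(project_path, 'decision_trees_practice', 'lenses_tree.pkl'), 'wb') as fw:
    pickle.dump(lenses_tree, fw)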
###Output
_____no_output_____ |
weblog_analysis_python.ipynb | ###Markdown
1. Data ETL 1.1 Data Loading Load weblog files from Google Drive into a Pandas dataframe and monitor the number of files processed
###Code
# To access files on Google Drive
from google.colab import drive
drive.mount('/content/drive')
# Location of the tar file on Google Drive
file_name = '/content/drive/MyDrive/weblog.tar.gz'
!tar -tzf $file_name
import tarfile
import pandas as pd
# Empty list to hold dataframe for each file
data = []
# Variable to store number of files processed
filecount = 0
# Define column headers
col_names=['date','time','s-sitename','s-ip','cs-method','cs-uri-stem',
'cs-uri-query','s-port','cs-username', 'c-ip','cs(User-Agent)',
'cs(Referer)','sc-status','sc-substatus','sc-win32-status']
with tarfile.open(file_name) as tar:
for logfile in [n for n in tar.getnames() if n.endswith('.log')]:
print('Processing: ', logfile)
df = pd.read_csv(tar.extractfile(logfile),
delim_whitespace=True, # fields are space delimited
comment= '#', # To remove 4 info lines at top of logfile
header = None, # Header has extra 'Fields:' column
na_values='-', # NA columns are marked with - in logfile
names=col_names, # Set column headers
error_bad_lines=False, encoding = 'iso-8859-1')
data.append(df)
filecount+= 1
# Cleanup
del(df)
print('Number of files processed: ', filecount)
# Concatenate all dataframes in the list into a single dataframe
df_web = pd.concat(data)
# View 5 rows of the data
df_web.head()
# View data types and total rows for the dataframe
df_web.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 295612 entries, 0 to 77462
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 date 295612 non-null object
1 time 295612 non-null object
2 s-sitename 295612 non-null object
3 s-ip 295612 non-null object
4 cs-method 295612 non-null object
5 cs-uri-stem 295612 non-null object
6 cs-uri-query 18883 non-null object
7 s-port 295612 non-null int64
8 cs-username 0 non-null float64
9 c-ip 295612 non-null object
10 cs(User-Agent) 295554 non-null object
11 cs(Referer) 238989 non-null object
12 sc-status 295439 non-null float64
13 sc-substatus 295439 non-null float64
14 sc-win32-status 295439 non-null float64
dtypes: float64(4), int64(1), object(10)
memory usage: 36.1+ MB
###Markdown
1.2 Data Cleaning Remove columns with > 15% NAs, then remove remaining records with any NAs.
###Code
# Find NAs in each column
df_web.isna().sum()
# Find % of NAs in each column
round(df_web.isna().sum()/len(df_web) * 100)
###Output
_____no_output_____
###Markdown
The following columns have a high percentage of NAs and need to be dropped: * cs-uri-query 94% * cs-username 100% * cs(Referer) 19%
###Code
# Drop 3 columns with > 15% NAs
df_web = df_web.drop(['cs-uri-query', 'cs-username', 'cs(Referer)'], axis=1)
df_web.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 295612 entries, 0 to 77462
Data columns (total 12 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 date 295612 non-null object
1 time 295612 non-null object
2 s-sitename 295612 non-null object
3 s-ip 295612 non-null object
4 cs-method 295612 non-null object
5 cs-uri-stem 295612 non-null object
6 s-port 295612 non-null int64
7 c-ip 295612 non-null object
8 cs(User-Agent) 295554 non-null object
9 sc-status 295439 non-null float64
10 sc-substatus 295439 non-null float64
11 sc-win32-status 295439 non-null float64
dtypes: float64(3), int64(1), object(8)
memory usage: 29.3+ MB
###Markdown
Drop remaining rows containing NAs.
###Code
# Drop rows with NAs
df_web.dropna(axis=0, how= 'any', inplace= True)
# Find updated number of NAs in each column
df_web.isna().sum()
# Updated data counts
df_web.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 295381 entries, 0 to 77462
Data columns (total 12 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 date 295381 non-null object
1 time 295381 non-null object
2 s-sitename 295381 non-null object
3 s-ip 295381 non-null object
4 cs-method 295381 non-null object
5 cs-uri-stem 295381 non-null object
6 s-port 295381 non-null int64
7 c-ip 295381 non-null object
8 cs(User-Agent) 295381 non-null object
9 sc-status 295381 non-null float64
10 sc-substatus 295381 non-null float64
11 sc-win32-status 295381 non-null float64
dtypes: float64(3), int64(1), object(8)
memory usage: 29.3+ MB
###Markdown
Data has been cleaned. The number of rows remaining is 295,381. 2. Data Analysis
###Code
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
2.1 Traffic Analysis Group web requests by hour and day to determine hourly traffic trends.
###Code
# Group number of requests by hour of day
# Create a new column with the hour values e.g., 00, 01, 02, .., 23
df_web['hour'] = df_web['time'].str.slice(0, 2)
# Count number of requests by hour
hour_distr = df_web['hour'].value_counts().sort_index()
hour_distr
# Create a bar chart of the web traffic distribution by hour
%matplotlib inline
plt.style.use('seaborn-pastel')
# Figure Size
fig = plt.figure(figsize =(10, 7))
plt.bar(hour_distr.index, hour_distr.values)
plt.xlabel('Hour of Day', fontweight ='bold', fontsize = 15)
plt.ylabel('Number of Web Requests', fontweight ='bold', fontsize = 15)
plt.title('Requests per Hour', fontweight ='bold', fontsize = 18)
plt.show()
# Group number of requests by hour and day
# Create a new column with the date and hour values e.g., 2006-11-0100, 2006-11-0101, 2006-11-0102, ..
df_web['date_hour'] = df_web.date.str.cat(df_web.hour)
# Count number of requests by date and hour
date_hour_distr = df_web['date_hour'].value_counts().sort_index()
date_hour_distr
# Create a bar chart of the web traffic distribution by date and hour
%matplotlib inline
plt.style.use('seaborn-pastel')
# Figure Size
fig = plt.figure(figsize =(10, 7))
plt.bar(date_hour_distr.index, date_hour_distr.values)
plt.xlabel('Hourly Traffic: 1 - 5 Nov 2006', fontweight ='bold', fontsize = 15)
plt.xticks([])
plt.ylabel('Number of Web Requests', fontweight ='bold', fontsize = 15)
plt.title('Requests per Hour', fontweight ='bold', fontsize = 18)
plt.show()
# Filter for times with traffic above 5000 requests per hour
df_5k = date_hour_distr[(date_hour_distr >= 5000)].sort_index().reset_index()
df_5k.columns = ['Date_Hour', 'Traffic']
df_5k
# Filter for times with traffic below 200 requests per hour
df_low = date_hour_distr[(date_hour_distr < 200)].sort_index().reset_index()
df_low.columns = ['Date_Hour', 'Traffic']
df_low
# Find least request in an hour
print('Least request per hour is', date_hour_distr.min(), 'at', date_hour_distr.idxmin())
###Output
Least request per hour is 171 at 2006-11-0321
###Markdown
2.2 Server Analysis View the server response breakdown using the sc-status column.
###Code
df_web['sc-status'] = df_web['sc-status'].astype(int)
# Group number of requests by server code status
srv_code_distr = df_web['sc-status'].value_counts()
srv_code_distr
# how many types of status reported?
len(srv_code_distr)
# list all server status codes
srv_code_distr.index.sort_values()
# only a few status codes are returned frequently
# the remaining codes make up only a small percentage
# save data into a dict for extraction into 'other' category
sc_dict = srv_code_distr.to_dict()
# separate the frequent codes
big_slice = { k:v for (k,v) in sc_dict.items() if v >= 1000 }
big_slice
# collect the remaining codes into the 'other' category
other_slice = { k:v for (k,v) in sc_dict.items() if v < 1000 }
other_slice
# add the 'other' category into the big_slice dict
big_slice['other'] = sum(other_slice.values())
big_slice
# Create a pie chart of the Server Response Code distribution
%matplotlib inline
# create a figure with 2 side by side plots
fig, (ax1, ax2) = plt.subplots(nrows= 1, ncols= 2, figsize=(12, 7),
gridspec_kw={'width_ratios': [3, 1]})
# Pie chart
# explode the wedge for the smallest values
explode = [0, 0, 0, 0.1, 0.7]
# line properties
wedge_props = {'linewidth': 1, 'edgecolor': 'gray'}
# set pie chart text to bold
text_props = {'weight': 'bold'}
ax1.pie(big_slice.values(), autopct='%.2f%%', labels=big_slice.keys(),
explode = explode, wedgeprops=wedge_props,
startangle=40, textprops=text_props)
ax1.set_title('Server Status Codes', fontdict= {'fontweight': 'bold', 'fontsize': 18})
# Other bar
xpos = 0
bottom = 0
width = 2
colors = ['aqua', 'lightblue', 'plum', 'yellow', 'indigo', 'blue', 'red']
for i, (k,v) in enumerate(other_slice.items()):
height = v
ax2.bar(xpos, height, width, bottom=bottom, color= colors[i] )
bottom += height
ax2.set_title('Other Status Codes')
ax2.legend(other_slice.keys())
ax2.axis('off')
ax2.set_xlim(- 1.5 * width, 2.5 * width)
plt.show()
###Output
_____no_output_____
###Markdown
2.3 Geographic AnalysisUse **DbIpCity** from **ip2geotools** to find the geolocation information from given IPs for a single hour - 3rd Nov 2006 9pm.
###Code
# discard the stdout stream for this cell
%%capture
!pip install ip2geotools
###Output
_____no_output_____
###Markdown
2.3.1 Requests by Country
###Code
# Extract records for 3rd Nov 2006 21:00 - 21:59
df_03Nov_9pm = df_web[(df_web['date_hour'] == '2006-11-0321')].copy() # copy so new columns can be added without chained-assignment warnings
# How many records?
print(len(df_03Nov_9pm.index))
###Output
171
###Markdown
There are 171 requests in total. Pass the IP address from each row to the DbIpCity.get().country method to extract the source country for that IP address, and store the country information in a new column in the dataframe.
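###Markdown
Before applying the lookup to every row, it can help to see what a single lookup returns. The cell below is only a sketch (it assumes internet access and the free API tier) showing the country and city attributes of the response for the first client IP in this hour.
###Code
from ip2geotools.databases.noncommercial import DbIpCity
# Sketch: geolocate a single IP address to inspect the response attributes (requires internet access)
sample_ip = df_03Nov_9pm['c-ip'].iloc[0]
sample_loc = DbIpCity.get(sample_ip, api_key='free')
print(sample_ip, '->', sample_loc.country, sample_loc.city)
###Output
_____no_output_____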
###Code
from ip2geotools.databases.noncommercial import DbIpCity
# for each row, lookup the country information using IP Address
df_03Nov_9pm['country'] = df_03Nov_9pm.loc[:, 'c-ip'].apply(lambda x: DbIpCity.get(x, api_key='free').country)
# Get distribution of requests by Country
country_distr = df_03Nov_9pm['country'].value_counts()
country_distr
# Verify the sum of requests by countries matches number of rows in df
country_distr.sum()
# Create a pie chart of the country distribution
%matplotlib inline
plt.style.use('seaborn-pastel')
# explode the wedge for the smallest values
explode = [0, 0, 0, 0.3, 0.5, 0.7]
# line properties
wedge_props = {'linewidth': 1, 'edgecolor': 'gray'}
# text properties
text_props = {'weight': 'bold'}
plt.pie(country_distr.values, labels=country_distr.index,
autopct='%.2f%%', explode=explode,
wedgeprops=wedge_props, textprops=text_props,
radius = 2.5)
plt.title('Request by Country', y = 1.6,
fontdict= {'fontweight': 'bold'})
plt.show()
###Output
_____no_output_____
###Markdown
2.3.2 Requests by City
###Code
# discard the stdout stream for this cell
%%capture
# for each row, lookup the city information using IP Address
df_03Nov_9pm['city'] = df_03Nov_9pm.loc[:, 'c-ip'].apply(lambda x: DbIpCity.get(x, api_key='free').city)
# Get distribution of requests by City
city_distr = df_03Nov_9pm['city'].value_counts()
print(city_distr)
# Verify the sum of requests by cities matches number of rows in df
city_distr.sum()
# how many origin cities
len(city_distr)
# Create a pie chart of the city distribution
plt.style.use('seaborn-pastel')
# explode the wedge for the smallest values
explode = [0, 0, 0, 0, 0.3, 0.6, 0.9, 1.2, 0.3, 0.6, 0.9]
plt.pie(city_distr.values, labels=city_distr.index,
autopct='%1.1f%%', explode=explode,
wedgeprops=wedge_props, textprops=text_props,
radius = 2.5)
plt.title('Request by City',
y = 1.6,
fontdict= {'fontweight': 'bold'})
plt.show()
# Get top 3 cities by number of request.
# Since the series is already sorted, use head()
city_distr.head(3)
# View request distribution by country and city
df_03Nov_9pm.groupby(['country', 'city']).size().reset_index(name='count')
###Output
_____no_output_____ |
PythonDataBasics_ndarrays.ipynb | ###Markdown
Now that we have this list of array coordinates as tuples, we can use it to access those locations directly. Here we use those locations (there should be 3 *NaN*'s) to replace the missing values with a very small porosity value (0.001).
###Code
print('Value at the first NaN indices is ' + str(porosity_map[nan_list_tuple[0]]) + '.') # get value at first index
porosity_map[nan_list_tuple[0]] = 0.001 # set the NaN's to a low porosity value
porosity_map[nan_list_tuple[1]] = 0.001
porosity_map[nan_list_tuple[2]] = 0.001
print('Value at the first NaN indices after setting to 0.001 is ' + str(porosity_map[nan_list_tuple[0]]) + '.')
###Output
_____no_output_____
###Markdown
Making ArraysThere are various methods to make *ndarray*s from scratch. In some cases, our arrays are small enough we can just write them like this.
###Code
my_array = np.array([[0,1,2],[4,5,6],[7,8,9]]) # make an ndarray by scratch
print(my_array.shape)
my_array
###Output
_____no_output_____
###Markdown
We now have a 3 x 3 *ndarray*.We can also use NumPy's *rand* to make an *ndarray* of any shape with random values between 0 and 1 and *zeros* to make an array of any shape with 0's.
###Code
from scipy import stats # summary stats
rand_array = np.random.rand(100,100) # make 100 x 100 node array with random values
print('Shape of the random array = ' + str(rand_array.shape))
print(stats.describe(rand_array.flatten()))
pixelplt(rand_array,xmin,xmax,ymin,ymax,cell_size,0,1,"Random Values","X(m)","Y(M)","Random",cmap,"random")
zero_array = np.zeros((100,100)) # make 100 x 100 node array with zeros
print('Shape of the zero array = ' + str(zero_array.shape))
print(stats.describe(zero_array.flatten()))
pixelplt(zero_array,xmin,xmax,ymin,ymax,cell_size,-1,1,"Zeros","X(m)","Y(M)","Zeros",cmap,"zeros")
###Output
_____no_output_____
###Markdown
OperationsWe can search for values in our array with any criteria we like. In this example we identify all nodes with porosity values greater than 15%, the result of *porosity > 15.0* is a boolean array (true and false) with true when that criteria is met. We apply that to the *porosity_map* *ndarray* to return all node values with true in a new array. We can check the size of that array to get the total number of nodes with porosity values greater than 15.
###Code
greater_than = porosity_map[porosity_map > 15.0] # make boolean array and get values that meet criteria
print(greater_than)
print('There are ' + str(greater_than.size) + ' of a total of ' + str(porosity_map.flatten().size) + '.')
###Output
_____no_output_____
###Markdown
We can actually plot the boolean array (true = 1 and false = 0 numerically) to get a map of the nodes that meet the criteria. We do that below with porosity > 13% because it looks more interesting than only 25 nodes for the porosity > 15% case.
###Code
thresh_porosity_map = porosity_map > 13.0
pixelplt(thresh_porosity_map,xmin,xmax,ymin,ymax,cell_size,0,1,"Porosity > 13%","X(m)","Y(M)","Boolean",cmap,"threshold")
###Output
_____no_output_____
###Markdown
How would you get a list of the indices that meet the criteria in the *porosity_map* array? We repeat the command to get the locations with porosity > 15%, *loc_high_por*. Then we simply grab the ix and iy index values from this result. Note that np.nonzero returns one array of indices per axis, so it is indexed as loc_high_por[0 for iy, 1 for ix][0 to number of matching nodes - 1].
###Code
loc_high_por = np.nonzero(porosity_map > 15) # get the indices with high porosity
print('Loc #1, ix = ' + str(loc_high_por[1][0]) + ' and iy = ' + str(loc_high_por[0][0]) + '.')
print(' With a value of ', str(porosity_map[loc_high_por[0][0],loc_high_por[1][0]]) + '.')
print('Loc #2, ix = ' + str(loc_high_por[1][1]) + ' and iy = ' + str(loc_high_por[0][1]) + '.')
print(' With a value of ', str(porosity_map[loc_high_por[0][1],loc_high_por[1][1]]) + '.')
loc_high_por
###Output
_____no_output_____
###Markdown
Perhaps you want to do something more creative with your *ndarray*. The most flexible approach is to use a loop and iterate over the array. Let's add noise to our porosity map. To do this we take the previously calculated random array, center it (shift its mean to approximately 0.0 by subtracting 0.5), multiply it by a factor of 5 so that the result is more noticeable, and add it to the *porosity_map* array.
###Code
porosity_map_noise = np.zeros((100,100)) # use of loops to maniputale ndarrays
for iy in range(ny):
for ix in range(nx):
porosity_map_noise[iy,ix] = porosity_map[iy,ix] + (rand_array[iy,ix]-0.5)*5
print(stats.describe(porosity_map_noise.flatten()))
pixelplt(porosity_map_noise,xmin,xmax,ymin,ymax,cell_size,0,16,"Porosity With Noise","X(m)","Y(M)","Porosity (%)",cmap,"Residual")
###Output
DescribeResult(nobs=10000, minmax=(0.2943955412382694, 17.5222066796764), mean=10.015014588520044, variance=6.123237021433289, skewness=0.06359438025667884, kurtosis=-0.350145166325619)
###Markdown
We could have done the above without the loops, by using the simple statement below. We can use algebraic operators on *ndarray*s like in the example below, provided the *ndarray*s are all the same size.
###Code
porosity_map_noise2 = porosity_map + (rand_array-0.5) * 5 # using array algebra to repeat the previous looped method
print(stats.describe(porosity_map_noise2.flatten()))
pixelplt(porosity_map_noise2,xmin,xmax,ymin,ymax,cell_size,0,16,"Porosity With Noise","X(m)","Y(M)","Porosity (%)",cmap,"Residual2")
###Output
DescribeResult(nobs=10000, minmax=(0.2943955412382694, 17.5222066796764), mean=10.015014588520044, variance=6.123237021433289, skewness=0.06359438025667884, kurtosis=-0.350145166325619)
###Markdown
Let's write our new *ndarray* to a file for storage and to apply with other software such as GSLIB.
###Code
ndarray2GSLIB(porosity_map_noise,"porosity_noise_GSLIB.dat","porosity_noise") # write out 2D array to a GSLIB Geo-EAS ASCII file
###Output
_____no_output_____
###Markdown
Regular Gridded Data Structures / ndarrays in Python for Engineers and Geoscientists Michael Pyrcz, Associate Professor, University of Texas at Austin Contacts: [Twitter/@GeostatsGuy](https://twitter.com/geostatsguy) | [GitHub/GeostatsGuy](https://github.com/GeostatsGuy) | [www.michaelpyrcz.com](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446)This is a tutorial for / demonstration of **Regular Gridded Data Structures in Python**. In Python, a common tool for dealing with Regular Gridded Data Structures is the *ndarray* from the **NumPy Python package** (by Jim Hugunin et al.). This tutorial includes the methods and operations that would commonly be required for Engineers and Geoscientists working with Regularly Gridded Data Structures for the purpose of:1. Data Checking and Cleaning2. Data Mining / Inferential Data Analysis3. Predictive Modelingfor Data Analytics, Geostatistics and Machine Learning. Regular Data StructuresIn Python we will commonly store our data in two formats, tables and arrays. For sample data with typically multiple features $1,\ldots,m$ over $1,\ldots,n$ samples we will work with tables. For exhaustive 2D maps and 3D models (usually representing a single feature) on a regular grid over $[1,\ldots,n_{1}], [1,\ldots,n_{2}],\ldots,[1,\ldots,n_{ndim}]$, where $n_{dim}$ is the number of dimensions, we will work with arrays. Of course, it is always possible to add another dimension to our array to include multiple features, $1,\ldots,m$, over all locations.In geostatistical workflows the tables are typically sample data from wells and drill holes and the grids are the interpolated or simulated models or secondary data from sources such as seismic inversion.The NumPy package provides a convenient *ndarray* object for working with regularly gridded data. In the following tutorial we will focus on practical methods with *ndarray*s. There is another section available on Tabular Data Structures that focuses on DataFrames at https://github.com/GeostatsGuy/PythonNumericalDemos/blob/master/PythonDataBasics_DataFrame.ipynb. Project GoalLearn the basics for working with Regular Gridded Data Structures in Python to build practical subsurfrace modeling and machine learning workflows. CaveatsI included methods that I have found useful for building my geo-engineering workflows for subsurface modeling. I think they should be accessible to most geoscientists and engineers. Certainly, there are more advanced, more compact, more efficient methods to accomplish the same tasks. I tried to keep the methods simple. I appreciate feedback and I will use it to improve this tutorial periodically. Load the required librariesThe following code loads the required libraries.
###Code
import os # set current working directory
import numpy as np # ndarrays
import matplotlib.pyplot as plt # plotting
from scipy import stats # summary stats
###Output
_____no_output_____
###Markdown
If you get a package import error, you may have to first install some of these packages. This can usually be accomplished by opening up a command window on Windows and then typing 'python -m pip install [package-name]'. More assistance is available with the respective package docs. Declare functionsThese are the functions we have included here:1. GSLIB2ndarray - load GSLIB Geo-EAS format regular grid data 1D or 2D to NumPy *ndarray*2. ndarray2GSLIB - write NumPy array to GSLIB Geo-EAS format regular grid data 1D or 2D3. pixelplt - plot 2D NumPy arrays with same parameters as GSLIB's pixelplt I include and demonstrate the GSLIB Geo-EAS file read and write functions, because (1) *ndarray* read and write member functions are convenience functions that are limited and (2) for geostatistical modeling it is convenient to read and write from Geo-EAS, the format used in GSLIB by Deutsch and Journel (1998). Also, I included a function that reimplements the 2D array plotting program 'pixelplt' from GSLIB. The inputs are simple and the method is consistent with GSLIB, and by using it we postpone having to learn the MatPlotLib package for plotting.Warning, there has been no attempt to make these functions robust in the presence of bad inputs. If you get a crazy error, check the inputs. Are the arrays the correct dimension? Is the parameter order mixed up? Make sure the inputs are consistent with the descriptions in this document.
###Code
# utility to convert 1D or 2D numpy ndarray to a GSLIB Geo-EAS file for use with GSLIB methods
def ndarray2GSLIB(array,data_file,col_name):
file_out = open(data_file, "w")
file_out.write(data_file + '\n')
file_out.write('1 \n')
file_out.write(col_name + '\n')
if array.ndim == 2:
ny = (array.shape[0])
nx = (array.shape[1])
ncol = 1
for iy in range(0, ny):
for ix in range(0, nx):
file_out.write(str(array[ny-1-iy,ix])+ '\n')
elif array.ndim == 1:
nx = len(array)
for ix in range(0, nx):
file_out.write(str(array[ix])+ '\n')
    else:
        print("Error: array must be 1D or 2D")
    file_out.close()
    return
# utility to convert GSLIB Geo-EAS files to a 1D or 2D numpy ndarray for use with Python methods
def GSLIB2ndarray(data_file,kcol,nx,ny):
colArray = []
if ny > 1:
array = np.ndarray(shape=(ny,nx),dtype=float,order='F')
else:
array = np.zeros(nx)
with open(data_file) as myfile: # read first two lines
head = [next(myfile) for x in range(2)]
line2 = head[1].split()
ncol = int(line2[0]) # get the number of columns
for icol in range(0, ncol): # read over the column names
head = [next(myfile) for x in range(1)]
if icol == kcol:
col_name = head[0].split()[0]
if ny > 1:
for iy in range(0,ny):
for ix in range(0,nx):
head = [next(myfile) for x in range(1)]
array[ny-1-iy][ix] = head[0].split()[kcol]
else:
for ix in range(0,nx):
head = [next(myfile) for x in range(1)]
array[ix] = head[0].split()[kcol]
return array,col_name
# pixel plot, reimplemention in Python of GSLIB pixelplt with MatPlotLib methods (commented out image file creation)
def pixelplt(array,xmin,xmax,ymin,ymax,step,vmin,vmax,title,xlabel,ylabel,vlabel,cmap,fig_name):
xx, yy = np.meshgrid(np.arange(xmin, xmax, step),np.arange(ymax, ymin, -1*step))
plt.figure(figsize=(8,6))
im = plt.contourf(xx,yy,array,cmap=cmap,vmin=vmin,vmax=vmax,levels=np.linspace(vmin,vmax,100))
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
cbar = plt.colorbar(im,orientation = 'vertical',ticks=np.linspace(vmin,vmax,10))
cbar.set_label(vlabel, rotation=270, labelpad=20)
# plt.savefig(fig_name + '.' + image_type,dpi=dpi)
plt.show()
return im
###Output
_____no_output_____
###Markdown
Set the working directoryI always like to do this so I don't lose files and to simplify subsequent read and writes (avoid including the full address each time). Also, in this case make sure to place the required (see below) data file in this directory. When we are done with this tutorial we will write our new dataset back to this directory.
###Code
os.chdir("c:/PGE383") # set the working directory
###Output
_____no_output_____
###Markdown
Loading and WritingLet's load the 2D porosity map from the provided binary file. This file was created with the NumPy *ndarray* member function 'tofile'. Note: this and the read from file member function, *fromfile*, are convenience functions. They do not store any information about the array. So when we read our 100 x 100 array this results in a 10,000 node 1D array. Let's try it for ourselves. We can read the binary to an array like this:
###Code
porosity_map = np.fromfile('porosity_truth_map.dat')
###Output
_____no_output_____
###Markdown
Next, let's look at the shape member:
###Code
porosity_map.shape
###Output
_____no_output_____
###Markdown
Confirmed, the shape is (10000,), a 10,000 node 1D array. Given we know it is actually a 100x100 array, we can use the *ndarray* member function *reshape* to correct this. Note, you get an error if the sizes are inconsistent, $\prod^{i} n_{i} \neq n_{1D}$ where $n_{i}$ is the number of nodes for axis $i$ and $n_{1D}$ is the number of nodes in the 1D vector that was read in. We reshape the array to 100x100, print the results and then get the 'ndarray' member 'shape' elements 0 and 1 to confirm the $n_{1} = n_{2} = 100$.
###Code
porosity_map = np.reshape(porosity_map,[100,100]) # reshape the array to 100 x 100
print(porosity_map.shape)
ny = porosity_map.shape[0] # get the array nx
nx = porosity_map.shape[1] # get the array ny
print('Our 2D array has number of x cells = ' + str(nx) + ', and y cells = ' + str(ny) + '.' )
###Output
_____no_output_____
###Markdown
Let's close the loop and write out the array and read it back in, to demonstrate the *ndarray* writing member function *tofile*.
###Code
porosity_map.tofile("porosity_test.dat") # save our 2D array to a 1D binary file
porosity_test = np.fromfile('porosity_test.dat') # read the 1D binary back to a 1D array
check = np.array_equal(porosity_map.flatten(),porosity_test) # check if the read in array is the same as flatten orig.
print('The array we wrote out and read back in are the same, we closed the loop,' + str(check) + '.')
###Output
_____no_output_____
###Markdown
It worked! We used the NumPy function 'array_equal' to test if the arrays are the same. Did you notice I added the *flatten* member function? This caused the 100x100 'porosity_map' array to be passed to the *array_equal* as a 10,000 node 1D array, the same as 'porosity_test' array was loaded. We can write an array and read it back in and we get the same thing. Let's check out using .csv files to store a 2D ndarray.
###Code
np.savetxt("porosity_map.csv", porosity_map, delimiter=",")
###Output
_____no_output_____
###Markdown
The 2D ndarray is saved with each line containing a row and each column delimited by a comma. In this format the 2D grid can be directly loaded into Excel. One can use conditional formatting to conduct a very quick check of the 'look' of the data. E.g. confirm that it is not upside down, scrambled etc.
###Code
porosity_map_test = np.loadtxt("porosity_map.csv", delimiter=",") # load the csv file back into a 2D ndarray
test = np.array_equal(porosity_map,porosity_map_test) # check if the arrays are the same
print(test)
###Output
_____no_output_____
###Markdown
OK, we confirmed that the saved and reloaded 2D ndarray is the same as the original 2D ndarray. This save and load method works. Let's perform the same test for the included GeostatsPy functions to save and load gridded data in Geo-EAS format (this is the format used by GSLIB programs).
###Code
ndarray2GSLIB(porosity_map,"porosity_map_GSLIB.out","porosity") # save the gridded data to Geo-EAS format
porosity_map_test2, col_name = GSLIB2ndarray("porosity_map_GSLIB.out",0,nx,ny)
test = np.array_equal(porosity_map,porosity_map_test2) # check if the arrays are the same
print(test)
###Output
_____no_output_____
###Markdown
OK, we confirmed that the GeostatsPy methods for saving and loading 2D gridded data work. VisualizationLet's look at the dataset that we loaded. Instead of working with the MatPlotLib package directly (common data visualization package for Python) we will use the *pixelplt* reimplimentation from our set of functions from my effort to bring GSLIB to Python, the 'in-progress' GeostatsPy package. This function uses MatPlotLib with the function parameters to build a nice figure, so we can procastinate learning MatPlotLib for now! First let's set some parameters, including the spatial limits of the plot, the cell sizes in the plot and the min and max feature values and color map for the color bar. Our regular grid is 100 x 100 cells of 10 m cells (i.e. squares), 1,000 x 1,000 m in extents and we assume the origin, low left corder is at coordinate 0,0. Our porosity values are contained within the interval between 4 to 16%.
###Code
xmin = 0.0;xmax = 1000.0; ymin = 0.0; ymax = 1000.0; cell_size = 10.0; vmin = 4.0; vmax = 16.0; cmap = plt.cm.plasma
###Output
_____no_output_____
###Markdown
Now we are ready to plot the 2D array with the *pixelplt* reimplementation from our GSLIB in Python.
###Code
pixelplt(porosity_map,xmin,xmax,ymin,ymax,cell_size,vmin,vmax,"Porosity Truth Map","X(m)","Y(M)","Porosity (%)",cmap,"Porosity_Map")
###Output
_____no_output_____
###Markdown
The NumPy package *ndarray* docs recommend that users consider making their own functions to read and write *ndarray*s from ASCII files. We have coded functions to do this using the GSLIB Geo-EAS format, to support geostatistical workflows that utilize GSLIB programs as part of the GeostatsPy package that we are developing. We included the read and write functions here for this tutorial. You can look at a truncated representation of the *ndarray* like this. Sometimes a good way to check data is to just look at it.
###Code
print(porosity_map)
###Output
_____no_output_____
###Markdown
You can see that the 2D array is actually an array of arrays, e.g. an array of $1,\ldots,n_{x}$ of arrays of $1,\ldots,n_{y}$. To show this we can include an index for x and we will get a slice for all values with equal $x$ index. Let's look at the first slice of $y$ values with x index equal to zero.
###Code
porosity_map[0]
###Output
_____no_output_____
###Markdown
If we add another index we get a single node from the 2D array. Let's get the first and last values from this slice with $x$ index equal to zero. We will print them and you can confirm they are the first and last values from the output above.
###Code
print(porosity_map[0][0]) # get first and last value for ix = 0 slice
print(porosity_map[0][99])
###Output
_____no_output_____
###Markdown
Alternatively, you can use this notation to access a single cell in a *ndarray*.
###Code
print(porosity_map[0,0]) # get first and last value for ix = 0 slice
print(porosity_map[0,99])
###Output
_____no_output_____
###Markdown
You could get access to a range of values of the array like this (see below). We get the results for *porosity_map* indices $ix = 0$ and $iy = 0,1,\ldots,9$.
###Code
print(porosity_map[0][0:10]) # get first 10 values for the ix = 0 slice
###Output
_____no_output_____
###Markdown
If you want to see the entire array without truncated representation then you can change the print options threshold in NumPy to infinity like this. Note, this is probably not a good idea if you are working with very large arrays. For this example you can literally look through 10,000 values!
###Code
np.set_printoptions(threshold=np.inf) # remove truncation from array visualization
print(porosity_map)
###Output
_____no_output_____
###Markdown
Summary StatisticsLet's try some summary statistics. Here's a convenient method from SciPy. Like many of the methods it anticipates a 1D array so we do a *flatten* on the 2D array to convert it to a 1D array before passing it.
###Code
por_stats = stats.describe(porosity_map.flatten()) # array summary statistics (avoid shadowing the stats module)
por_stats
###Output
_____no_output_____
###Markdown
We also have a variety of built-in summary statistic calculations that we may apply on *ndarray*s. Note, these methods work directly with our 2D array; therefore, they do not require flattening to a 1D array.
###Code
mean_por = porosity_map.mean() # array summary statistics
stdev_por = porosity_map.std()
min_por = porosity_map.min()
max_por = porosity_map.max()
print('Summary Statistics of Porosity \n Mean = ' + str(mean_por) + ', StDev = ' + str(stdev_por))
print(' Min = ' + str(min_por) + ', Max = ' + str(max_por))
###Output
_____no_output_____
###Markdown
We can also do this with NumPy functions that work with arrays that calculate the previous summary statistics and more.
###Code
mean_por = np.mean(porosity_map) # array summary statistics
stdev_por = np.std(porosity_map)
min_por = np.min(porosity_map)
max_por = np.max(porosity_map)
P10_por,P90_por = np.percentile(porosity_map,[10,90]) # percentiles are specified on a 0-100 scale
print('Summary Statistics of Porosity \n Mean = ' + str(mean_por) + ', StDev = ' + str(stdev_por))
print(' Min = ' + str(min_por) + ', Max = ' + str(max_por))
print(' P10 = ' + str(P10_por) + ', P90 = ' + str(P90_por))
###Output
_____no_output_____
###Markdown
Checking and ManipulatingWe can read and write individual values of our array with indices $ix = 0,\ldots,nx-1$ and $iy = 0,\ldots,ny-1$.
###Code
local_por = porosity_map[0,0] # get porosity at location 0,0
print('Porosity at location 0,0 in our ndarray is ' + str(local_por) + '.')
porosity_map[0,0] = 10.0000 # change the porosity value at location 0,0
print('Porosity at location 0,0 in our ndarray is now ' + str(porosity_map[0,0]) + '.')
###Output
_____no_output_____
###Markdown
We can also check for *NaN*s, invalid or missing values in our *ndarray*.
###Code
porosity_map[0,0] = np.nan
print('Porosity at location 0,0 in our ndarray is now ' + str(porosity_map[0,0]) + '.')
###Output
_____no_output_____
###Markdown
We can check for any *NaN*'s in our array with the following code. First, let's add a couple more *NaN* values to make this example more interesting.
###Code
porosity_map[0,1] = np.nan # add another NaN
porosity_map[2,1] = np.nan # add another NaN
result = np.isnan(porosity_map).any()
result
###Output
_____no_output_____
###Markdown
Ok, so now we know that we have *NaN*'s in our array. This could cause issues with our calculations. We can get a list of indices with *NaN*'s in our *ndarray*.
###Code
nan_list = np.argwhere(np.isnan(porosity_map)) # get list of indices of array with NaNs
print(nan_list)
###Output
_____no_output_____
###Markdown
We now have a list of the indices (0,0), (0,1) and (2,1) with *NaN*'s. These are exactly the array indices that we assigned to NaN. If you convert this list of indices by mapping them with *map* to *tuple*s and make that into a new list we get something we can use to directly interact with the *NaN*'s in our 2D *ndarray*.
###Code
nan_list_tuple = list(map(tuple, nan_list)) # convert index list to tuple list
print(nan_list_tuple) # check the tuple list
print(porosity_map[nan_list_tuple[0]]) # get the values at the indices
print(porosity_map[nan_list_tuple[1]])
print(porosity_map[nan_list_tuple[2]])
###Output
_____no_output_____ |
notebooks/line-sink-ditch.ipynb | ###Markdown
Line-sink ditchA string of line-sinks for which the total discharge is specified
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from ttim import *
ml = ModelMaq(kaq=10, z=[10, 0], Saq=1e-4, tmin=0.01, tmax=10)
x = np.linspace(-100, 100, 21)
y = np.zeros(len(x))
lsd = LineSinkDitchString(ml, xy=list(zip(x, y)), tsandQ=[(0, 100)])
ml.solve()
t = 2
print(f'Discharge at time t={t}:, {lsd.discharge(t)}')
for i, Q in enumerate(lsd.discharge_list(t=t)):
print(f'Discharge of segment {i}: {Q}')
ml = ModelMaq(kaq=10, z=[10, 0], Saq=1e-4, tmin=0.01, tmax=10)
x = np.linspace(-100, 100, 21)
y = np.zeros(len(x))
lsd = LineSinkDitchString(ml, xy=list(zip(x, y)), tsandQ=[(0, 100)], Astorage=100)
ml.solve()
t = 2
print(f'Discharge at time t={t}:, {lsd.discharge(t)}')
np.sum(lsd.headinside(2, derivative=1) * lsd.Astorage)
###Output
_____no_output_____
autompg_linearregressoin.ipynb | ###Markdown
###Code
##데이터 코딩
import pandas as pd
df = pd.read_csv('./auto-mpg.csv', header=None)
df.columns = ['mpg','cylinders','displacement','horsepower','weight',
'acceleration','model year','origin','name']
df.info()
df.describe(include='all')
df[['horsepower','name']].describe(include='all')
df['horsepower'].value_counts()
df['horsepower'].unique()
df_horsepower = df['horsepower'].replace(to_replace='?', value=None, inplace=False)
df_horsepower.unique()
# assign the converted values back so the numeric horsepower column is available for scaling below
df['horsepower'] = df_horsepower.astype('float')
df['name'].unique()
###Output
_____no_output_____
###Markdown
Check columns * Continuous: displacement, horsepower, weight, acceleration, mpg* Categorical: model year, name, cylinders, origin
###Code
df['name'].value_counts()
###Output
_____no_output_____
###Markdown
Normalization step
###Code
Y = df['mpg']
X_continuous = df[['displacement','horsepower','weight','acceleration']]
X_category = df[['model year','cylinders','origin']]
from sklearn import preprocessing
scaler = preprocessing.StandardScaler()
type(scaler)
scaler.fit(X_continuous)
X = scaler.transform(X_continuous)
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
type(lr)
lr.fit(X,Y)
lr.score(X,Y)
import pickle
# pickle.dump writes to an open file handle, not a path string
with open('./autompg_lr.pkl', 'wb') as f:
    pickle.dump(lr, f)
###Output
_____no_output_____ |
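###Markdown
The categorical columns identified above (`X_category`) are not used in the fit. Below is a minimal sketch of how they could be folded in, assuming one-hot encoding is appropriate for these columns.
###Code
import numpy as np
import pandas as pd
# Sketch: one-hot encode the categorical columns and combine them with the scaled continuous features
X_cat_dummies = pd.get_dummies(X_category.astype('category'))
X_full = np.hstack([X, X_cat_dummies.values])
lr_full = LinearRegression()
lr_full.fit(X_full, Y)
lr_full.score(X_full, Y)
###Output
_____no_output_____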
Lab/18--des-gas-station-solns.ipynb | ###Markdown
CX 4230, Spring 2016: [18] Discrete event simulation of a gas stationRecall the introduction to queueing models and discrete event simulators from the last class: [link](https://t-square.gatech.edu/access/content/group/gtc-59b8-dc03-5a67-a5f4-88b8e4d5b69a/cx4230-sp16--17-queueing.pdf). In this notebook, you will implement it. Exponential random numbersRecall that in a queueing model, it is common to assume that customer interarrival times and service times are independent and identically distributed random variables. Classically, the most commonly assumed distribution is _exponential_.More specifically, an exponentially distributed random variable $X \sim \mathcal{E}(\lambda)$ has the probability density function,$$ f_X(x) = \frac{1}{\lambda} \cdot \exp\left(-\frac{x}{\lambda}\right),$$where $\lambda$ is the mean of the distribution.Using Numpy, these are easy to generate using the function, `numpy.random.exponential()`: http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.random.exponential.html.Here is a quick demo.
###Code
from numpy.random import exponential
X_MEAN = 10.0
X_COUNT = 5
x_values = exponential (X_MEAN, X_COUNT)
print ("X ~ Exp(%g):" % X_MEAN)
for (i, x_i) in enumerate (x_values):
print (" X_%d = %g" % (i, x_i))
###Output
X ~ Exp(10):
X_0 = 1.08368
X_1 = 7.63503
X_2 = 0.21214
X_3 = 18.6386
X_4 = 1.58331
###Markdown
As a sanity check, let's generate a large number of values and compare the sample mean to the desired (true) mean.
###Code
from numpy import mean
# @Demo
N_BIG = 1000
big_mean = mean (exponential (X_MEAN, N_BIG))
print ("\nSample mean of %d values: %g" % (N_BIG, big_mean))
###Output
Sample mean of 1000 values: 10.445
###Markdown
Priority queuesTo maintain the future event list, you need some kind of priority queue data structure. One classical choice is to use a heap, for which there is a standard implementation in Python: [link](http://www.bogotobogo.com/python/python_PriorityQueue_heapq_Data_Structure.php)Here's a quick demo.
###Code
from heapq import heappush, heappop, heapify
# Method 1: Convert any Python list into a heap
h1 = list (x_values)
print ("Initial values:", h1)
heapify (h1)
print ("\nHeapified:", h1)
print ("\nExtracting mins...")
for i in range (len (h1)):
print (i, ":", heappop (h1))
# Method 2: Insert values into the heap one at a time
print ("Inserting...")
h2 = []
for (i, x_i) in enumerate (x_values):
print (i, ":", x_i)
heappush (h2, x_i)
print ("\nHeap:", h2)
print ("\nExtracting minima...")
for i in range (len (h2)):
print (i, ":", heappop (h2))
###Output
Inserting...
0 : 1.08367611765
1 : 7.63503268684
2 : 0.212139653586
3 : 18.63858829
4 : 1.58330730396
Heap: [0.21213965358639475, 1.5833073039635692, 1.0836761176519276, 18.638588289987613, 7.6350326868415213]
Extracting minima...
0 : 0.212139653586
1 : 1.08367611765
2 : 1.58330730396
3 : 7.63503268684
4 : 18.63858829
###Markdown
A generic discrete event simulation engineWe can build a simple, generic discrete event simulation engine. This engine manages the future event list, which you'll recall is a priority queue of timestamped events. It continually removes the event with the lowest timestamp and processes it. Suppose we represent an event by a tuple, `(t, e)`, where `t` is the event's timestamp and `e` is an event handler. An event handler is simply a function. Let's suppose that this function takes two arguments, `e (t, s)`, where `t` is (again) the timestamp and `s` is the system state, encoded in an application-specific way. When `e (t, s)` executes, it may update the state `s`. **Exercise.** Complete the following function, which implements a generic discrete event simulation engine. The future events list is `events`. The initial system state is `initial_state`; the starter code below makes a copy of this state as a variable `s`, which your simulator can modify. Additionally, you should correct the last `print()` statement so that instead of a pair of `None` values it prints the timestamp and event name (name of the event handler).
###Code
from copy import deepcopy
def simulate (events, initial_state):
s = deepcopy (initial_state)
print ("\nFuture event list:\n%s" % str (events))
print ("\nt=0: %s" % str (s))
while events:
# @YOUSE: Get event and process it
(t, e) = heappop (events)
e (t, s)
#print ("t=%d: event '%s' => '%s'" % (None, None, str (s)))
print ("t=%d: event '%s' => '%s'" % (t, e.__name__, str (s)))
###Output
_____no_output_____
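###Markdown
Before instantiating the gas station model, here is a tiny hypothetical illustration of the convention the engine expects: an event is just a `(t, e)` tuple, and the handler `e (t, s)` receives the timestamp and the state.
###Code
# A toy event handler, purely to illustrate the (t, e) convention -- not part of the gas station model
def toy_handler (t, s):
    print ("toy event fired at t = %g" % t)
    return s

toy_events = [(1.5, toy_handler)]
simulate (toy_events, {})
###Output
_____no_output_____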
###Markdown
Instantiating the simulatorFor the gas station model, we asked you to assume the interarrival times, pumping times, and shopping times were exponential i.i.d. random variables. So, let's start by defining some parameters for these distributions. Let's also pre-generate some number of car arrivals.
###Code
# Event parameters
MEAN_INTERARRIVAL_TIME = 15.0 # minutes
MEAN_PUMPING_TIME = 5.0 # minutes
MEAN_SHOPPING_TIME = 10.0 # minutes
# Number of customers (cars)
NUM_CARS = 5
# Pre-generate some interarrival times
car_interarrival_times = exponential (MEAN_INTERARRIVAL_TIME, NUM_CARS)
print ("Interrival times (in minutes) of all cars:\n", car_interarrival_times)
###Output
Interarrival times (in minutes) of all cars:
[ 1.25351011 3.78744989 3.05267482 22.83193381 21.42100338]
###Markdown
Recall that the state consists of the logical simulation time (`now`) and three state variables: `AtPump`, `AtStore`, and `PumpFree`. Let's create this state.
###Code
now = 0.0 # Current (logical) simulation time
state = {'AtPump': 0 # no. cars at pump or waiting
, 'AtStore': 0 # no. cars at store
, 'PumpFree': True # True <==> pump is available
}
###Output
_____no_output_____
###Markdown
Let's represent an _event_ as a tuple, `(t, e)`, where `t` is the timestamp of the event and `e` is an event handler, implemented as a Python function.If the future event list is stored in a global priority queue called `events`, the following function will insert an event into that queue.
###Code
def schedule (t, e):
"""
Schedules a new event `e` at time `t`.
"""
global events
print (" ==> '%s' @ t=%g" % (e.__name__, t))
heappush (events, (t, e))
###Output
_____no_output_____
###Markdown
**Exercise.** Implement an event handler to process a car arrival event. Assume that event handlers take as input the timestamp `t` of the event and the state `s` of the system at time `t`.
###Code
def arrives (t, s):
"""
Processes an arrival event at time `t` for a system in state `s`.
Schedules a pumping event if the pump is free. Returns the new
system state.
"""
# @YOUSE
s['AtPump'] += 1
if s['PumpFree']:
s['PumpFree'] = False
schedule (t + exponential (MEAN_PUMPING_TIME), finishes)
return s
###Output
_____no_output_____
###Markdown
**Exercise.** Implement a function to process the event for a car that finishes pumping gas.
###Code
def finishes (t, s):
"""
Processes a finished-pumping event at time `t` for a system in
state `s`. Schedules a pumping event if any cars are waiting.
Returns the new system state.
"""
# @YOUSE
s['AtPump'] -= 1
s['AtStore'] += 1
schedule (t + exponential (MEAN_SHOPPING_TIME), departs)
if s['AtPump'] > 0:
schedule (t + exponential (MEAN_PUMPING_TIME), finishes)
else:
s['PumpFree'] = True
return s
###Output
_____no_output_____
###Markdown
**Exercise.** Implement a function to process the event for a car that leaves the store.
###Code
def departs (t, s):
"""
Processes a departure from the station event at
time `t` for a system in state `s`.
"""
# @YOUSE
s['AtStore'] -= 1
return s
###Output
_____no_output_____
###Markdown
**Exercise.** Create an initial future events list by converting the raw interarrival times into arrival events and inserting them into the future events list.
###Code
# Hint: This function may prove useful
from numpy import cumsum
events = [] # Future event list, initially empty
# @YOUSE: Create initial events from all car arrivals
for t in cumsum (car_interarrival_times):
schedule (t, arrives)
# Test code
print ("\nContents of `events[:]`:")
for (i, event) in enumerate (events):
print ("[%d] t=%g: %s" % (i, event[0], event[1].__name__))
# More test code: If everything worked, so should this simulation!
simulate (events, state)
###Output
Future event list:
[(1.2535101084918805, <function arrives at 0x1049e8840>), (5.040959995107885, <function arrives at 0x1049e8840>), (8.0936348179354294, <function arrives at 0x1049e8840>), (30.925568631470099, <function arrives at 0x1049e8840>), (52.346572008225948, <function arrives at 0x1049e8840>)]
t=0: {'PumpFree': True, 'AtStore': 0, 'AtPump': 0}
==> 'finishes' @ t=2.28553
t=1: event 'arrives' => '{'PumpFree': False, 'AtStore': 0, 'AtPump': 1}'
==> 'departs' @ t=3.99141
t=2: event 'finishes' => '{'PumpFree': True, 'AtStore': 1, 'AtPump': 0}'
t=3: event 'departs' => '{'PumpFree': True, 'AtStore': 0, 'AtPump': 0}'
==> 'finishes' @ t=5.94144
t=5: event 'arrives' => '{'PumpFree': False, 'AtStore': 0, 'AtPump': 1}'
==> 'departs' @ t=6.98998
t=5: event 'finishes' => '{'PumpFree': True, 'AtStore': 1, 'AtPump': 0}'
t=6: event 'departs' => '{'PumpFree': True, 'AtStore': 0, 'AtPump': 0}'
==> 'finishes' @ t=10.5891
t=8: event 'arrives' => '{'PumpFree': False, 'AtStore': 0, 'AtPump': 1}'
==> 'departs' @ t=30.5016
t=10: event 'finishes' => '{'PumpFree': True, 'AtStore': 1, 'AtPump': 0}'
t=30: event 'departs' => '{'PumpFree': True, 'AtStore': 0, 'AtPump': 0}'
==> 'finishes' @ t=32.9676
t=30: event 'arrives' => '{'PumpFree': False, 'AtStore': 0, 'AtPump': 1}'
==> 'departs' @ t=34.8229
t=32: event 'finishes' => '{'PumpFree': True, 'AtStore': 1, 'AtPump': 0}'
t=34: event 'departs' => '{'PumpFree': True, 'AtStore': 0, 'AtPump': 0}'
==> 'finishes' @ t=54.9389
t=52: event 'arrives' => '{'PumpFree': False, 'AtStore': 0, 'AtPump': 1}'
==> 'departs' @ t=57.5688
t=54: event 'finishes' => '{'PumpFree': True, 'AtStore': 1, 'AtPump': 0}'
t=57: event 'departs' => '{'PumpFree': True, 'AtStore': 0, 'AtPump': 0}'
|
CloudTorrentClient_Public.ipynb | ###Markdown
**Important Notes:*** **`VERY IMPORTANT:`** FIRST OF ALL SAVE A COPY OF THIS NOTEBOOK USING THE FILE MENU IN THE TOP LEFT SIDE OF YOUR SCREEN. YOU CAN NOT EDIT OR SAVE THIS NOTEBOOK. I OWN IT! MAKE A COPY, OPEN YOUR OWN COPY AND THEN FOLLOW THE REST OF THE INSTRUCTIONS.* Your google colab notebooks will automatically turn off after 1.5 hours of inactivity. (Inactivity means not interacting with the google colab browser tab)* Your google colab notebooks will automatically turn off after 12 hours of work. (Maximum allowed run time) Mount your Google Drive > **Note1**: you can only upload 750 GBs per day to google drive. uploads more than that amount result in a 1 day upload ban on your account.> **Note2**: however if you hit 750GB limit while uploading, your currently uplading file will not be interrupted.Example: you have uploaded 749GBs. you will queue a 4GB file for uploading. your 4GB file will get uploaded then you will receive the 1 day upload ban.
###Code
from google.colab import drive
drive.mount('/content/drive')
###Output
_____no_output_____
###Markdown
**Important note:**when setting `DownloadPath`, the directory for your main personal google drive is: `drive/MyDrive` if you have a shared drive you would like to use as download path then the path is: `drive/Shareddrives/YourDriveName` Signup at [ngrok.io](https://ngrok.com) and save your auth token in the field below for later use. (double click to edit)**Token:** ```save your token here```
###Code
#@markdown <h4>⬅️ Click here to START server</h4>
#@markdown <br><center><img src='https://i.ibb.co/SKGZS75/DOoxSuO.png' height="50" alt="netdata"/></center>
#@markdown <center><h3>SimpleTorrent is a self-hosted remote torrent client.</h3></center><br>
import os
import time
import pathlib
import urllib.request
from IPython.display import clear_output
# script version
Version = '1.0'
#####################################
USE_FREE_TOKEN = False
NgrokTOKEN = "Enter Your ngrok Token Here" # @param {type:"string"}
DownloadPath = "drive/Shareddrives/YourDriveName/" #@param {type:"string"}
HOME = os.path.expanduser("~")
if not os.path.exists(f"{HOME}/.ipython/ttmg.py"):
hCode = "https://raw.githubusercontent.com/Pavithran-R/" \
"Colab-SimpleTorrent/master/res/ttmg.py"
urllib.request.urlretrieve(hCode, f"{HOME}/.ipython/ttmg.py")
from ttmg import (
runSh,
findProcess,
loadingAn,
updateCheck,
ngrok
)
# making environment for simple-torrent
pathlib.Path('downloads').mkdir(mode=0o777, exist_ok=True)
pathlib.Path('torrents').mkdir(mode=0o777, exist_ok=True)
configPath = pathlib.Path('cloud-torrent.json')
configsdata = r"""
{{
"AutoStart": true,
"EngineDebug": false,
"MuteEngineLog": true,
"ObfsPreferred": true,
"ObfsRequirePreferred": false,
"DisableTrackers": false,
"DisableIPv6": false,
"DownloadDirectory": "{}",
"WatchDirectory": "torrents/",
"EnableUpload": true,
"EnableSeeding": true,
"IncomingPort": 50007,
"DoneCmd": "{}/doneCMD.sh",
"SeedRatio": 10,
"UploadRate": "Unlimited",
"DownloadRate": "Unlimited",
"TrackerListURL": "https://trackerslist.com/all.txt",
"AlwaysAddTrackers": true,
"ProxyURL": ""
}}
""".format(DownloadPath,HOME)
with open(configPath, "w+") as configFile:
configFile.write(configsdata)
#####################################
if updateCheck("Checking updates ...", Version): # VERSION CHECKING ...
!kill -9 -1 &
clear_output()
# Simple Torrent installing ...
loadingAn()
if os.path.isfile("/usr/local/bin/cloud-torrent") is False:
dcmd = "wget -qq https://raw.githubusercontent.com/Pavithran-R/" \
"Colab-SimpleTorrent/master/res/scripts/" \
"simpleCloudInstaller.sh -O ./simpleCloudInstaller.sh"
runSh(dcmd)
runSh('bash ./simpleCloudInstaller.sh')
runSh('rm -rf ./simpleCloudInstaller.sh')
#Opening cloud-torrent in background
if not findProcess("cloud-torrent", "cloud-torrent"):
PORT = 4444
try:
urllib.request.urlopen(f"http://localhost:{PORT}")
except:
cmdC = f'cloud-torrent --port {PORT} ' \
'-t "SimpleTorrent" ' \
'-c cloud-torrent.json ' \
'--host 0.0.0.0 --disable-log-time ' \
'&'
for run in range(10):
runSh(cmdC, shell=True)
time.sleep(3)
try:
urllib.request.urlopen(f"http://localhost:{PORT}")
break
except:
print("Error: Simple-Torrent not starting. Retrying ...")
# START_SERVER
clear_output()
Server = ngrok(
NgrokTOKEN, USE_FREE_TOKEN, [['simple-torrent', 4444, 'http'],
['peerflix-server', 4445, 'http']], 'us',
[f"{HOME}/.ngrok2/ngrok01.yml", 4040]
).start('simple-torrent')
###Output
_____no_output_____
BiofilmTwoDModel/BiofilmTwoDsolverTutorial.ipynb | ###Markdown
Biofilm 2D solver class tutorial**Maintainer: Brendan Harding**\**Initial development: July 2020**\**Last updated: August 2020**This notebook acts as a brief tutorial on using the Biofilm 2D solver class contained in ```BiofilmTwoDLubricationClass.py```.The class implements solvers for the biofilm model described in: - *A Thin-Film Lubrication Model for Biofilm Expansion Under Strong Adhesion*,\A. Tam, B. Harding, J.E.F. Green, S. Balasuriya, and B.J. Binder,\To be submitted soon, 2020. which builds upon the model developed by Alex Tam in his PhD thesis: - *Mathematical Modelling of Pattern Formation in Yeast Biofilms*,\Alex Tam,\The University of Adelaide, 2019.For details of the equations solved by the class one should refer to the aforementioned paper. Note that the 2D solver does take quite a bit longer than the 1D solver to produce solutions (and the time taken for various methods implemented within this class also varies greatly). Now, onto the tutorial, the following cell will load a few standard Python libraries, set a couple of plotting parameters, and import the solver class. Note: if you don't have latex on your system you should change the ```usetex=True``` option to ```usetex=False``` (or just comment out this line with a at the front).
###Code
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.rc('text',usetex=True)
plt.rc('font',size=14)
from BiofilmTwoDLubricationClass import BiofilmTwoDLubricationModel
from BiofilmTwoDPlottingHelper import *
###Output
_____no_output_____
###Markdown
Accessing documentationThe class itself contains quite a bit of documentation (although it is far from complete). You can print the entire documentation using ```help(BiofilmTwoDLubricationModel)```. You'll see some documentation for the entire class, then a list of available methods and their arguments along with a brief description for each.The documentation for a specific class method can also be printed on its own using ```help(BiofilmTwoDLubricationModel.solve)``` for example.(The class also contains a large number of *private* methods, but these are not shown in the help as it is not expected that the typical user should call them directly. More advanced users can look directly at the class code to learn about these.)
###Code
help(BiofilmTwoDLubricationModel)
###Output
Help on class BiofilmTwoDLubricationModel in module BiofilmTwoDLubricationClass:
class BiofilmTwoDLubricationModel(builtins.object)
| BiofilmTwoDLubricationModel(R=2.0, dr=0.0078125, nxi=33, dt=None, params=None, solver='DCN', verbose=False)
|
| Helper class for solving the PDEs describing the development of
| a radially symmetric and thin yeast biofilm over time.
| The model/system that is solved includes the biofilm height,
| the cell concentration, and the nutrient concentrations in both
| the biofilm and the substrate.
|
| Methods defined here:
|
| __init__(self, R=2.0, dr=0.0078125, nxi=33, dt=None, params=None, solver='DCN', verbose=False)
| Initialise the class
|
| With no arguments a default problem set up is initialised.
|
| Optionally you may pass the following:
| R: The radius of the domain (or petri dish). If not specified
| a default value of 2 is used.
| dr: The grid spacing used for the discretisation of the domain.
| If not specified a default value of 0.5**7 is used.
| dt: The time step size, if not specified 0.25*dr is used.
| params: Parameters for the system of equations. These should
| be passed as a dictionary. Any which are not specified will
| be set to a default value (specifically corresponding to
| Table 6.1 in Alex's thesis).
| solver: specify which solver to use.
| verbose: Set to True to output convergence information when solving
|
| get_Phi_n(self)
| Returns the current cumulative cell volume fraction Phi_n (=int_0^{h xi} phi_n dz).
| (Note this is given with respect to the re-scaled coordinates r,xi.)
|
| get_dt(self)
| Get the current time step size (dt) which is used by default
| (i.e. if dt is not specified when solve is called then this value is used)
|
| get_g_b(self)
| Returns the biofilm nutrient concentration g_b.
|
| get_g_s(self)
| Returns the substrate nutrient concentration g_s.
|
| get_h(self)
| Returns the current biofilm height h.
|
| get_parameters(self, param=None)
| Get the current problem parameters.
| If a specific parameter is not requested
| then all are returned in a dictionary.
|
| get_phi_n(self)
| Returns the current cell volume fraction phi_n.
| (Note this is given with respect to the re-scaled coordinates r,xi.)
|
| get_phi_n_bar(self)
| Returns the vertically averaged cell volume fraction bar{phi_n} =(1/h) int_0^{h} phi_n dz.
| (Note this is given with respect to the re-scaled coordinates r,xi.)
|
| get_r(self)
| Returns the array for the radial coordinates.
|
| get_t(self)
| Get the current solution time T.
|
| get_xi(self)
| Returns the array for the radial coordinates.
|
| set_Phi_n(self, Phi_n)
| Update the cumulative cell volume fraction Phi_n (=int_0^{h xi} phi_n dz).
| For example, use this to set the initial condition.
| (Note this over-writes the current solution in the class.)
| It is expected that Phi_n be provided in the re-scaled coordinates r,xi.
| Accepts a callable function Phi_n(r,xi), or an array (with correct shape).
|
| set_dt(self, dt)
| Set/change the time step size (dt) which is used by default
| (i.e. if dt is not specified when solve is called then this value is used)
|
| set_g_b(self, g_b)
| Update the biofilm nutrient concentration g_b.
| For example, use this to set the initial condition.
| (Note this over-writes the current solution in the class)
| Accepts a callable function g_b(r), or an array (with correct length).
|
| set_g_s(self, g_s)
| Update the substrate nutrient concentration g_s.
| For example, use this to set the initial condition.
| (Note this over-writes the current solution in the class)
| Accepts a callable function g_s(r), or an array (with correct length).
|
| set_h(self, h)
| Update the biofilm height h.
| For example, use this to set the initial condition.
| (Note this over-writes the current solution in the class.)
| Accepts a callable function h(r), or an array (with correct length).
| Note: This will not alter Phi_n=int_0^{h xi} phi_n dz. If it is desired that this
| too be changed it should be done separately via set_Phi_n or set_phi_n.
|
| set_parameters(self, params)
| Set the current problem parameters.
| Parameters should be passed using a dictionary.
|
| set_phi_n(self, phi_n)
| Update the cell volume fraction phi_n.
| For example, use this to set the initial condition.
| (Note this over-writes the current solution in the class.)
| It is expected that phi_n be provided in re-scaled coordinates r,xi.
| Accepts a callable function phi_n(r,xi), or an array (with correct length).
| Note: This internally updates Phi_n=\int_0^{h xi} phi_n dz using the existing h.
| If h is also to be updated, it should be done first!
|
| set_solver(self, solver)
| Set/change the solver used by the class
|
| set_t(self, t)
| Set/change the current solution time t.
|
| set_verbosity(self, verbose)
| Set the verbosity for the solvers (True or False).
|
| solve(self, T, dt=None)
| Solve the biofilm evolution for a duration T (from the current time)
|
| Optional: dt can be provided to override that stored internally.
|
| ----------------------------------------------------------------------
| Data descriptors defined here:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
###Markdown
 Getting started
Okay, now let's get started. The following cell initialises an instance of the class using a default setup (no arguments). We then fetch and plot the initial conditions so you can see how to do this from the interface. Initial conditions can be changed using the corresponding ```set_...``` method, e.g. ```set_g_s```. For each of these you can either pass a function, which the class will then sample on an appropriate grid/array, or you can pass an array directly (although it must be the same length as the ```r``` variable within the class).
###Code
# Initialise the class using all internal defaults
BLM_2D = BiofilmTwoDLubricationModel()
# Fetch the initial conditions
r = BLM_2D.get_r() # 1D array of r values
xi = BLM_2D.get_xi() # 1D array of xi values (xi is the vertical coordinates when 'stretched' to a rectangle)
R,XI = np.meshgrid(r,xi) # 2D arrays of the r and xi values
h = BLM_2D.get_h()
Phi_n = BLM_2D.get_Phi_n() # This is the internal Phi_n field which is solved for
phi_n = BLM_2D.get_phi_n() # This is phi_n derived from Phi_n
phi_n_bar = BLM_2D.get_phi_n_bar() # This is the average of phi_n with respect to z, and is derived from Phi_n
g_s = BLM_2D.get_g_s()
g_b = BLM_2D.get_g_b()
# Plot the initial conditions of the 1D fields, include a legend
fig,[ax1,ax2] = Plot1DFields(r,h,phi_n_bar,g_s,g_b)
plt.show()
# Plot the initial condition of the 2D Phi_n field (in both the 'physical' and 'stretched' domains)
fig,axes = Plot2DField(R,XI,h,Phi_n,r'$\Phi_n$')
plt.show()
# Plot the initial condition of the 2D phi_n field (in both the 'physical' and 'stretched' domains)
fig,axes = Plot2DField(R,XI,h,phi_n,r'$\phi_n$')
plt.show()
###Output
_____no_output_____
###Markdown
 Getting and setting parameters
You can get and set the parameters for the class using the ```get_parameters``` and ```set_parameters``` methods. If ```get_parameters()``` is called with no arguments it returns all the parameters as a dictionary. Alternatively, specific parameters can be fetched by passing their name as a string, e.g. ```get_parameters('Pe')```. To use ```set_parameters``` you must pass a dictionary of the parameters you wish to set. E.g. to set $\mathrm{Pe}=10$ and $\Upsilon=5$ you would call ```set_parameters({'Pe':10,'Upsilon':5})```. You need only include those parameters you wish to change. Alternatively, the dictionary returned by ```get_parameters()``` can also be edited directly (it is a reference rather than a copy), although I advise against this approach. Note: there are a couple of parameters which cannot be changed, ```R``` and ```dr``` in particular. If, for some reason, you wanted to change these, the best thing to do is create a new instance of the class with the desired ```R``` and ```dr``` specified during initialisation. You then need to manually reset other parameters and initial conditions as needed. Here we will change the slip parameter $\lambda^{\ast}$ to a finite number, say $100$. I also set the pre-cursor thickness to a very small number, then update the initial condition accordingly.
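As a quick aside before making those changes, a single parameter can be read back by name (a minimal sketch using the ```BLM_2D``` instance created above):
###Code
# Hedged example: fetch one named parameter rather than the full dictionary
print('Current Peclet number Pe =', BLM_2D.get_parameters('Pe'))
###Output
_____no_output_____
###Markdown
Now let's make the changes described above.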
###Code
params = BLM_2D.get_parameters()
print(params)
BLM_2D.set_parameters({'lambda_ast':100.0,'b':1.0E-9})
print(BLM_2D.get_parameters())
BLM_2D.set_h(params['b']+(params['H0']-params['b'])*(r<1)*(1-r**2)**4)
###Output
{'b': 0.0001, 'H0': 0.1, 'Psi_m': 0.111, 'Psi_d': 0.0, 'gamma_ast': 1.0, 'D': 1.05, 'Pe': 3.94, 'Upsilon': 3.15, 'Q_b': 8.65, 'Q_s': 2.09, 'h_ast': 0.002, 'lambda_ast': inf}
{'b': 1e-09, 'H0': 0.1, 'Psi_m': 0.111, 'Psi_d': 0.0, 'gamma_ast': 1.0, 'D': 1.05, 'Pe': 3.94, 'Upsilon': 3.15, 'Q_b': 8.65, 'Q_s': 2.09, 'h_ast': 0.002, 'lambda_ast': 100.0}
###Markdown
 Solving
Okay, now let's run the (default) solver for a duration of $T=2$ units and plot the result. Note that the call to solve returns the solutions at the end (in the order $h,\Phi_n,g_s,g_b$). Calling solve again will continue to evolve the solution for the specified period of time from the current solution. Beware: currently, if you evolve for so long that the biofilm reaches the right hand wall then the solver will most likely fail. (This will be fixed at some point in the future.)
###Code
# Solve for 2 units in time (this may take several minutes)
solution = BLM_2D.solve(2.0)
# Plot the solutions
fig,[ax1,ax2] = Plot1DFields(r,solution[0],BLM_2D.get_phi_n_bar(),solution[2],solution[3])
plt.show()
###Output
_____no_output_____
###Markdown
 More complex use case
Okay, now let's re-initialise the class on a larger domain and solve over several time periods, plotting the solutions as we go. Note: the increased domain size and longer simulation time mean this may take many hours to complete using the default solver. It may be advisable to run this on a remote compute server (e.g. cloud or HPC resource).
###Code
# Initialise the class
BLM_2D = BiofilmTwoDLubricationModel(R=10.0,dr=0.5**6,nxi=17,dt=0.5**7,params={'lambda_ast':100.0,'b':1.0E-9})
# Fetch a copy of the domain and initial conditions
r = BLM_2D.get_r()
xi = BLM_2D.get_xi()
R,XI = np.meshgrid(r,xi)
h = BLM_2D.get_h()
Phi_n = BLM_2D.get_Phi_n()
phi_n = BLM_2D.get_phi_n()
phi_n_bar = BLM_2D.get_phi_n_bar()
g_s = BLM_2D.get_g_s()
g_b = BLM_2D.get_g_b()
# Plot the initial conditions of the 1D fields, include a legend
fig,[ax1,ax2] = Plot1DFields(r,h,phi_n_bar,g_s,g_b)
plt.show()
# Optionally plot the initial condition of the 2D Phi_n field (in both the 'physical' and 'stretched' domains)
if False:
fig,axes = Plot2DField(R,XI,h,Phi_n,r'$\Phi_n$')
plt.show()
# Optionally plot the initial condition of the 2D phi_n field (in both the 'physical' and 'stretched' domains)
if True:
    fig,axes = Plot2DField(R,XI,h,phi_n,r'$\phi_n$')
plt.show()
# Set some parameters for evolving a longer simulation
dT = 5.0
nT = 10
results = [[h.copy(),Phi_n.copy(),g_s.copy(),g_b.copy()]] # store copy of initial condition
# Now evolve, recording results and plotting after each dT step
for k in range(nT):
timestamp = datetime.now().strftime("%H:%M:%S %d/%m/%Y")
print("Solving for step {:03d} of {:03d} (with dT={:g}) (time stamp: {:s})".format(k+1,nT,dT,timestamp))
solution = BLM_2D.solve(dT)
h = solution[0]
Phi_n = solution[1]
g_s = solution[2]
g_b = solution[3]
results.append([h.copy(),Phi_n.copy(),g_s.copy(),g_b.copy()]) # ensure copies are recorded
phi_n_bar = BLM_2D.get_phi_n_bar()
fig,[ax1,ax2] = Plot1DFields(r,h,phi_n_bar,g_s,g_b)
plt.show()
if False: # optionally plot the Phi_n field:
fig,axes = Plot2DField(R,XI,h,Phi_n,r'$\Phi_n$')
plt.show()
if True: # optionally plot the phi_n field
phi_n = BLM_2D.get_phi_n()
fig,axes = Plot2DField(R,XI,h,phi_n,r'$\phi_n$')
plt.show()
###Output
_____no_output_____
###Markdown
 Alternative plot of results
The following cell takes all of the results computed in the cell above, stored in the list ```results```, and plots them analogously to figure 6.4 in Alex Tam's thesis (albeit we have included slip in this case). Note: these can be saved by calling ```plt.savefig(...)``` with suitable arguments immediately before ```plt.show()```.
###Code
# Plot all of the h solutions together...
plt.plot(r,results[0][0],'k--',lw=1)
for i in range(1,len(results)):
plt.plot(r,results[i][0],'k-',lw=1)
plt.xlabel(r'$r$',labelpad=0)
plt.ylabel(r'$h$',rotation=0,labelpad=10)
plt.show()
# Plot all of the phi_n solutions together...
plt.plot(r,results[0][1][-1,:]/results[0][0],'k--',lw=1)
for i in range(1,len(results)):
phi_n_bar = results[i][1][-1,:]/results[i][0]
if True: # optional smoothing, helps smooth out erroneous peaks at edge
phi_n_bar[1:-1] = 0.25*(phi_n_bar[2:]+2*phi_n_bar[1:-1]+phi_n_bar[:-2])
plt.plot(r,phi_n_bar,'k-',lw=1)
plt.xlabel(r'$r$',labelpad=0)
plt.ylabel(r'$\bar{\phi}_n$',rotation=0,labelpad=10)
plt.ylim(-0.05,1.05)
plt.show()
# Plot all of the g_s solutions together...
plt.plot(r,results[0][2],'k--',lw=1)
for i in range(1,len(results)):
plt.plot(r,results[i][2],'k-',lw=1)
plt.xlabel(r'$r$',labelpad=0)
plt.ylabel(r'$g_s$',rotation=0,labelpad=10)
plt.ylim(-0.05,1.05)
plt.show()
# Plot all of the g_b solutions together...
plt.plot(r,results[0][3],'k--',lw=1)
for i in range(1,len(results)):
plt.plot(r,results[i][3],'k-',lw=1)
plt.xlabel(r'$r$',labelpad=0)
plt.ylabel(r'$g_b$',rotation=0,labelpad=10)
plt.ylim(-0.05,1.05)
plt.show()
###Output
_____no_output_____ |
Analysis and Prediction of Default of Credit Card Clients Dataset.ipynb | ###Markdown
 Analysis and Prediction of Credit Card Client Defaults
====================
An analysis of Taiwanese credit card clients who defaulted in 2005. The data come from Kaggle, [Default Payments of Credit Card Clients in Taiwan from 2005](https://www.kaggle.com/uciml/default-of-credit-card-clients-dataset), containing the profiles and default outcomes of 30,000 Taiwanese credit card clients. The data include basic attributes such as gender, education, credit limit and age, together with repayment status, bill statements and payment amounts from April to September 2005. Besides analysing the distribution of client attributes, this study uses the repayment status, bill statements and payment amounts to build a model that analyses and predicts whether a client will default. The model combines KMeans clustering (machine learning) with a Linear Probability Model (statistics): clients are first clustered by repayment status with KMeans, and a Linear Probability Model then estimates the default probability of each cluster. With this approach the full population, whose overall default rate is about 22%, is split into 19 clusters whose default rates range from about 10% to 78%. Finally, converting the cluster probabilities through a chosen probability threshold gives an accuracy of 81.15%, close to the roughly 82% achieved by other models on Kaggle.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
import random
from sklearn.cluster import KMeans
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn import linear_model
pd.options.display.max_columns = 35
pd.options.display.max_rows = 100
rawdata = pd.read_csv('UCI_Credit_Card.csv', index_col='ID')
print('Shape of Raw Data:', rawdata.shape)
print('A total of 30,000 records, with 24 variables including the default flag')
rawdata.rename(columns={
'PAY_0':'PAY_Sept', 'PAY_2':'PAY_Aug',
'PAY_3':'PAY_Jul', 'PAY_4':'PAY_Jun',
'PAY_5':'PAY_May', 'PAY_6':'PAY_Apr',
'BILL_AMT1':'BILL_AMT_Sept','BILL_AMT2':'BILL_AMT_Aug',
'BILL_AMT3':'BILL_AMT_Jul', 'BILL_AMT4':'BILL_AMT_Jun',
'BILL_AMT5':'BILL_AMT_May', 'BILL_AMT6':'BILL_AMT_Apr',
'PAY_AMT1':'PAY_AMT_Sept','PAY_AMT2':'PAY_AMT_Aug',
'PAY_AMT3':'PAY_AMT_Jul', 'PAY_AMT4':'PAY_AMT_Jun',
'PAY_AMT5':'PAY_AMT_May', 'PAY_AMT6':'PAY_AMT_Apr',
'default.payment.next.month':'is_default'
}, inplace=True)
repay_status = rawdata[['PAY_Sept','PAY_Aug','PAY_Jul','PAY_Jun','PAY_May','PAY_Apr']]
bill_statement = rawdata[['BILL_AMT_Sept','BILL_AMT_Aug','BILL_AMT_Jul',
'BILL_AMT_Jun','BILL_AMT_May','BILL_AMT_Apr',]]
prev_payment = rawdata[['PAY_AMT_Sept','PAY_AMT_Aug','PAY_AMT_Jul',
'PAY_AMT_Jun','PAY_AMT_May','PAY_AMT_Apr']]
rawdata.head(5)
###Output
Shape of Raw Data: (30000, 24)
A total of 30,000 records, with 24 variables including the default flag
###Markdown
 is_default: check the number and proportion of defaulting clients
###Code
is_default = rawdata['is_default']
show_default = pd.concat([is_default.value_counts(),
is_default.value_counts(normalize=True)],
axis=1)
show_default.columns = ['Count', 'Percentage']
show_default.index = ['Non-default', 'Default']
print('Overall number and proportion of defaulting clients:')
show_default
###Output
Overall number and proportion of defaulting clients:
###Markdown
 1. Demographic Factors: Limit Balance (credit limit)
###Code
# Limit Balance (credit limit)
limit_bal = rawdata.LIMIT_BAL
print('Descriptive statistics of the credit limit:')
print(limit_bal.describe().round())
%matplotlib inline
fig, ax = plt.subplots(figsize=(15,7))
# Distribution of credit limits across all clients
n, bins, patches = plt.hist(limit_bal, bins=200)
ax.text(50000,3365,'($50,000, 3365)')
ax.text(200000,1528,'($200,000, 1528)')
ax.text(360000,881,'($360,000, 881)')
ax.text(500000,722,'($500,000, 722)')
ax.text(930000,50,'($1,000,000, max)')
ax.text(167484,2500,'Average: $167484')
# Distribution of credit limits among defaulting clients
n, bins, patches = plt.hist(limit_bal[is_default==1], bins=200)
# Draw the mean as a red line
plt.axvline(x=167484.0, color='red')
plt.legend(['Average Limit Balance of All Clients',
'All Clients',
'Default Clients'])
plt.title('Histogram of Limit Balance', fontsize=20)
plt.ylabel('Clients')
plt.xlabel('NT Dollars')
plt.show()
###Output
Descriptive statistics of the credit limit:
count 30000.0
mean 167484.0
std 129748.0
min 10000.0
25% 50000.0
50% 140000.0
75% 240000.0
max 1000000.0
Name: LIMIT_BAL, dtype: float64
###Markdown
 There is an interesting pattern here: the counts spike at a few particular credit limits, namely 1. (\$50,000, 3365) 2. (\$200,000, 1528) 3. (\$360,000, 881) 4. (\$500,000, 722). There is probably a reason for this, perhaps some approval thresholds; it would be worth investigating later, e.g. by pulling out the records at these limits and checking whether their default rate is lower, i.e. whether these limits are approved so often precisely because those clients tend not to default. Gender
###Code
gender_map = {1:'Male', 2:'Female'}
gender = rawdata.SEX.map(gender_map)
default_rate_by_gender = gender[is_default==1].value_counts() / \
gender.value_counts()
gender_stats = pd.concat([gender.value_counts(),
gender.value_counts(normalize=True),
gender[is_default==1].value_counts(),
default_rate_by_gender], axis=1)
gender_stats.columns = ['Count', 'Proportion', 'Defaults', 'Default rate']
print('Gender data:')
gender_stats
###Output
Gender data:
###Markdown
 In the overall data the gender split is roughly 60% female and 40% male. As for default rates, males default at 24.16%, higher than the 20.78% observed for females.
###Code
# Gender plot
fig, ax = plt.subplots(figsize=(10,5))
ax.text(0,18300,'18,112')
ax.text(0,3000,'3,763 (20.8%)')
ax.text(1,12300,'11,888')
ax.text(1,2200,'2,873 (24.2%)')
plt.bar(gender.value_counts().index,
gender.value_counts())
plt.bar(gender[is_default==1].value_counts().index,
gender[is_default==1].value_counts())
plt.legend(['All Clients',
'Default Clients'])
plt.title('Default Clients by Gender', fontsize=20)
plt.ylabel('Clients')
plt.show()
###Output
_____no_output_____
###Markdown
 Education
###Code
edu_map = {1:'Graduate school', 2:'University', 3:'High school', 4:'Others',
5:'Unknown', 6:'Unknown'}
education = rawdata.EDUCATION.map(edu_map)
default_rate_by_education = education[is_default==1].value_counts() / \
education.value_counts()
education_stats = pd.concat([education.value_counts(),
education.value_counts(normalize=True),
education[is_default==1].value_counts(),
default_rate_by_education], axis=1)
education_stats.columns = ['Count', 'Proportion', 'Defaults', 'Default rate']
print('Education data:')
education_stats
fig, ax = plt.subplots(figsize=(10,5))
ax.text(0,14030,'14,030')
ax.text(0,3330,'3,330 (23.7%)')
ax.text(1,10585,'10,585')
ax.text(1,2036,'2,036 (19.2%)')
ax.text(2,4917,'4,917')
ax.text(2,1237,'1,237 (25.1%)')
plt.bar(education.value_counts().index,
education.value_counts())
plt.bar(education[is_default==1].value_counts().index,
education[is_default==1].value_counts())
plt.legend(['All Clients',
'Default Clients'])
plt.title('Default Clients by Education', fontsize=20)
plt.ylabel('Clients')
plt.show()
###Output
_____no_output_____
###Markdown
 Marriage
###Code
marri_map = {1:'Married', 2:'Single', 3:'Others'}
marriage = rawdata.MARRIAGE.map(marri_map)
default_rate_by_marriage = marriage[is_default==1].value_counts() / \
marriage.value_counts()
marriage_stats = pd.concat([marriage.value_counts(),
marriage.value_counts(normalize=True),
marriage[is_default==1].value_counts(),
default_rate_by_marriage], axis=1)
marriage_stats.columns = ['Count', 'Proportion', 'Defaults', 'Default rate']
print('Marital status data:')
marriage_stats
fig, ax = plt.subplots(figsize=(10,5))
ax.text(0,15964,'15,964')
ax.text(0,3341,'3,341(20.9%)')
ax.text(1,13659,'13,659')
ax.text(1,3206,'3,206 (23.5%)')
plt.bar(marriage.value_counts().index,
marriage.value_counts())
plt.bar(marriage[is_default==1].value_counts().index,
marriage[is_default==1].value_counts())
plt.legend(['All Clients',
'Default Clients'])
plt.title('Default Clients by Marriage', fontsize=20)
plt.ylabel('Clients')
plt.show()
###Output
_____no_output_____
###Markdown
 Age
###Code
age = rawdata.AGE
age_bins = [20, 25, 30, 35, 40, 45, 50, 55, 60, np.Inf]
age_map = {
pd.Interval(20.0, 25.0, closed='right'):'20-25',
pd.Interval(25.0, 30.0, closed='right'):'26-30',
pd.Interval(30.0, 35.0, closed='right'):'31-35',
pd.Interval(35.0, 40.0, closed='right'):'36-40',
pd.Interval(40.0, 45.0, closed='right'):'41-45',
pd.Interval(45.0, 50.0, closed='right'):'46-50',
pd.Interval(50.0, 55.0, closed='right'):'51-55',
pd.Interval(55.0, 60.0, closed='right'):'56-60',
pd.Interval(60.0, np.Inf, closed='right'):'60-'}
age = pd.cut(age, bins=age_bins)  # bin the raw ages into the intervals defined above
age = age.map(age_map)
age.value_counts()
default_rate_by_age = age[is_default==1].value_counts() / \
age.value_counts()
age_stats = pd.concat([age.value_counts(),
age.value_counts(normalize=True),
age[is_default==1].value_counts(),
default_rate_by_age], axis=1)
age_stats.columns = ['Count', 'Proportion', 'Defaults', 'Default rate']
print('Age data:')
age_stats.sort_index()
# Age
age = rawdata.AGE
fig, ax = plt.subplots(figsize=(15,7))
ax.text(35,1400,'Average age: 35')
n, bins, patches = plt.hist(age, bins=200)
n, bins, patches = plt.hist(age[is_default==1], bins=200)
# Draw the mean as a red line
plt.axvline(x=35, color='red')
plt.legend(['Average Age',
'All Clients',
'Default Clients'])
plt.title('Default Clients by Age', fontsize=20)
plt.ylabel('Clients')
plt.show()
###Output
_____no_output_____
###Markdown
 2. Bill Statement  Abnormal bill statement records (all values 0 or negative)
Bill amounts should be zero or positive. What a negative value means, and why it occurs, still needs to be investigated; it may be a recording error or carry some other meaning. Records that are all zero most likely mean the client never used the card, and with no usage data it is hard to predict whether such a client will default.
###Code
# Abnormal bill statements: all values are 0 or negative
abnormal_bill_record = bill_statement.loc[(bill_statement<=0).all(axis=1)]
print('Inspect the abnormal bill statement records:')
abnormal_bill_record.head(10)
abnormal_default = is_default[abnormal_bill_record.index]
abnormal_default_show = pd.concat([abnormal_default.value_counts(),
abnormal_default.value_counts(normalize=True)], axis=1)
abnormal_default_show.columns = ['Count', 'Percentage']
abnormal_default_show.index = ['Non-default', 'Default']
print('Defaults and default rate among clients with abnormal bill records:')
abnormal_default_show
###Output
Defaults and default rate among clients with abnormal bill records:
###Markdown
 The default rate among clients with abnormal bill records is 35.9%, higher than the overall default rate of 22.12% computed earlier. Because the samples differ, we cannot tell whether this is simply sampling error or whether abnormal-record clients genuinely default more often; a statistical test is needed. Testing whether the default rate of clients with abnormal bill records differs significantly from the full population
Here a Linear Probability Model is used for the test. A dummy variable marks clients with abnormal bill records as 1 and normal clients as 0, and the default flag is regressed on it; if the dummy variable has a significant coefficient, the default rate of abnormal-record clients differs from that of the full population.
###Code
# Create a dummy variable for clients with abnormal bill records
rawdata['is_bill_abnormal'] = 0
rawdata.loc[abnormal_bill_record.index, 'is_bill_abnormal'] = 1
# Use OLS to test whether the default rate differs when the bill statements are abnormal
# (i.e. all records are 0 or negative). Abnormal-record clients are coded 1 in the dummy variable, otherwise 0.
model = sm.OLS(rawdata.is_default,
sm.add_constant(rawdata.is_bill_abnormal))
result = model.fit()
result.summary2()
###Output
_____no_output_____
###Markdown
 The coefficient on is_bill_abnormal is 0.1426 and highly significant, which means clients with abnormal bill records default 14.26 percentage points more often than normal clients, i.e. their default rate is 21.66% + 14.26% = 35.92%, with statistical support. 3. Repay Status
Combining KMeans clustering (machine learning) with a Linear Probability Model (statistics) to compute the default rate of each cluster:
- Data exploration shows that clusters built from repayment status explain client default rates very well, so KMeans is used to cluster clients by their April to September repayment status; a Linear Probability Model then computes each cluster's default rate and checks whether the clustering is significant.
- The adjusted R-squared of the Linear Probability Model is then used to choose how many clusters KMeans should produce.
Cluster clients by repayment status using KMeans
###Code
# Cluster clients into 10 groups by repayment status (n_clusters=10)
n_clusters = 10
kmean_model = KMeans(n_clusters=n_clusters, random_state=1).fit(repay_status)
cluster_label = kmean_model.predict(repay_status)
# KMean Cluster Label Data
cluster_label = pd.Series(cluster_label)
cluster_label.index = is_default.index
# Inspect the number of clients in each KMeans cluster
cluster_counts = cluster_label.value_counts().sort_index()
cluster_counts.index = ['cluster_'+str(i) for i in cluster_counts.index]
cluster_counts = pd.DataFrame(cluster_counts)
cluster_counts.columns = ['Count']
print('Number of clients in each KMeans cluster:')
cluster_counts
###Output
Number of clients in each KMeans cluster:
###Markdown
 Compute each cluster's default rate from the clustering result with a Linear Probability Model
###Code
# Turn the KMeans cluster labels into dummy variables, used to compute each cluster's default rate
cluster_dummy = pd.get_dummies(cluster_label, prefix='cluster')
cluster_dummy = cluster_dummy.join(is_default)
# Linear Probability Model
model = sm.OLS(cluster_dummy.is_default,
sm.add_constant(cluster_dummy.iloc[:,:-2]))
result = model.fit()
result.summary2()
###Output
_____no_output_____
###Markdown
 The coefficients of cluster_0 through cluster_8 all differ significantly from the constant (cluster_9), i.e. every cluster has a different default rate; adding each coefficient to the constant gives that cluster's default rate. E.g. the default rate of cluster_5 is 0.1960 + 0.4886 = 68.46%, while cluster_9 is at 19.6%. The adjusted R-squared of the model is 0.131; increasing the number of clusters should raise it.
###Code
cluster_ols_params = result.params
cluster_default_rate = (cluster_ols_params[1:]+cluster_ols_params[0]).append(
pd.Series(cluster_ols_params[0],
index=['cluster_'+str(len(cluster_ols_params)-1)]))
cluster_default_rate = pd.DataFrame(cluster_default_rate)
cluster_default_rate.columns = ['Default rate']
cluster_default_rate.join(cluster_counts)
###Output
_____no_output_____
###Markdown
 Clustering clients with KMeans first and then fitting a Linear Probability Model quickly gives each cluster's default rate while also testing whether each cluster is significant. Here cluster_0 and cluster_2 have the lowest default rates, only about 14%, while cluster_5 and cluster_8 have the highest, close to 70%. Choosing the number of clusters
So far 10 clusters were simply picked, giving an adjusted R-squared of 0.131. Using more clusters raises the adjusted R-squared of the Linear Probability Model, i.e. increases its explanatory power, but splitting too finely adds too many parameters and can reduce it again. We therefore examine the results for 2 to 50 clusters and choose the final number of clusters based on the adjusted R-squared and on the number of insignificant coefficients.
###Code
def loop_n_cluster(n_clusters):
kmean_model = KMeans(n_clusters=n_clusters, random_state=1).fit(repay_status)
cluster_label = kmean_model.predict(repay_status)
# KMean Cluster Label Data
cluster_label = pd.Series(cluster_label)
cluster_label.index = is_default.index
# KMean Cluster Dummy Data
cluster_dummy = pd.get_dummies(cluster_label, prefix='cluster')
cluster_dummy = cluster_dummy.join(is_default)
# Linear Probability Model
model = sm.OLS(cluster_dummy.is_default,
sm.add_constant(cluster_dummy.iloc[:,:-2]))
result = model.fit()
    # Return the adjusted R-squared and the number of significant coefficients (p-value <= 0.05)
return result.rsquared_adj, (result.pvalues>0.05).value_counts()[False]
# Examine the results for 2 to 50 clusters
cluster_n_choose = pd.DataFrame(columns=['Number of clusters', 'Adjusted R2',
                                         'Insignificant coefficients'])
for n in range(2,51):
rsq, significant = loop_n_cluster(n)
cluster_n_choose = cluster_n_choose.append(
        pd.DataFrame({'Number of clusters':[n],
                      'Adjusted R2':[rsq],
                      'Insignificant coefficients':[n-significant]}))
cluster_n_choose.set_index('Number of clusters', inplace=True)
# Plot the adjusted R2 and the number of insignificant coefficients for each cluster count
fig, ax = plt.subplots(figsize=(10,10))
plt.subplot(211)
plt.plot(cluster_n_choose['Adjusted R2'])
ax.text(19,0.1,'Number of Cluster = 19')
plt.title('Adjusted $R^2$')
plt.axvline(x=19, color='red')
plt.xlabel('Number of Clusters')
plt.subplot(212)
plt.plot(cluster_n_choose['Insignificant coefficients'])
plt.axvline(x=19, color='red')
plt.title('Number of Insignificant Coefficients')
plt.xlabel('Number of Clusters')
plt.show()
###Output
_____no_output_____
###Markdown
 Beyond 19 clusters the adjusted R-squared increases more slowly while the number of insignificant coefficients starts to rise quickly, so 19 clusters is chosen as the KMeans hyperparameter. n_cluster = 19
###Code
# Cluster clients into 19 groups by repayment status (n_clusters=19)
n_clusters = 19
kmean_model = KMeans(n_clusters=n_clusters, random_state=1).fit(repay_status)
cluster_label = kmean_model.predict(repay_status)
# KMean Cluster Label Data
cluster_label = pd.Series(cluster_label)
cluster_label.index = is_default.index
# Inspect the number of clients in each KMeans cluster
cluster_counts = cluster_label.value_counts().sort_index()
cluster_counts.index = ['cluster_'+str(i) for i in cluster_counts.index]
cluster_counts = pd.DataFrame(cluster_counts)
cluster_counts.columns = ['Count']
# Turn the KMeans cluster labels into dummy variables, used to compute each cluster's default rate
cluster_dummy = pd.get_dummies(cluster_label, prefix='cluster')
cluster_dummy = cluster_dummy.join(is_default)
# Linear Probability Model
model = sm.OLS(cluster_dummy.is_default,
sm.add_constant(cluster_dummy.iloc[:,:-2]))
result = model.fit()
result.summary2()
cluster_ols_params = result.params
cluster_default_rate = (cluster_ols_params[1:]+cluster_ols_params[0]).append(
pd.Series(cluster_ols_params[0],
index=['cluster_'+str(len(cluster_ols_params)-1)]))
cluster_default_rate = pd.DataFrame(cluster_default_rate)
cluster_default_rate.columns = ['Default rate']
print('Default rate and cluster size for n_cluster=19:')
cluster_default_rate.join(cluster_counts)
###Output
Default rate and cluster size for n_cluster=19:
###Markdown
 Set a default-probability threshold (critical probability) and compute the model accuracy
Given a default-probability threshold, a client is predicted to default if the default probability of their cluster exceeds the threshold; the overall accuracy is then computed under this rule.
###Code
default_rate_map = cluster_default_rate
default_rate_map.index = list(range(n_clusters))
cluster_simul = cluster_label.map(default_rate_map.iloc[:,0])
cluster_simul = pd.DataFrame(cluster_simul).join(is_default)
cluster_simul.rename(columns={0:'model_prob'}, inplace=True)
crit_prob = np.arange(0.1,1.0,0.01)
for crit in crit_prob:
cluster_simul[str(round(crit,2))] = \
cluster_simul['model_prob'].apply(lambda default_prob:
1 if default_prob>crit else 0)
model_accuracy = [accuracy_score(cluster_simul.is_default, cluster_simul[c])
for c in cluster_simul.columns[2:]]
model_accuracy = pd.Series(model_accuracy)
model_accuracy.index = crit_prob
fig, ax = plt.subplots(figsize=(10,5))
plt.plot(model_accuracy)
ax.text(0.45, 0.7, 'Critical Probability = 45% ~ 56% \n Max Accuracy = 81.15%')
plt.axvline(x=0.45, color='red')
plt.title('Model Accuracy of Critical Probability')
plt.xlabel('Critical Probability')
plt.ylabel('Accuracy')
plt.show()
###Output
_____no_output_____ |
Scrapping_Oddschecker/Oddschecker.ipynb | ###Markdown
This Notebook is superseded by the series of .py files
###Code
import pandas as pd
import numpy as np
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup as soup
import string
import itertools
from datetime import datetime, timedelta
import operator
url = 'https://www.oddschecker.com/'
country_code = ['UK','IRE','USA','AUS']
def get_soup(base_url, sport = 'horses', event_url = None):
'''Uses beautiful soup to get parse the url
base_url = str, www.oddschecker.com/
sport = str, which sport do you want to look at
    event_url = str, the url extension which will take you to the specific event page'''
if sport == 'horses':
sport = 'horse-racing'
url = base_url + sport
if event_url != None:
url += event_url
req = Request(url , headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
return soup(webpage, "html.parser")
page_soup = get_soup(url)
print(type(page_soup))
def get_races(bsoup, country_codes, sport = 'horses'):
'''Will return a dictionary of the events displayed on www.oddschecker.com
Only does horse_racing atm.
dict structure = events[countrycode][venue][list of event times]
bsoup = the page parse with beautifulsoup4
country_codes = countries you want to get events for
sport = the sport you want'''
events = {code:{} for code in country_codes}
# website has both todays and tomorrows races on it. Need to only get todays races
# this returns two objects as UK and International races are in different sections
today = bsoup.findAll('div', {'data-day' : 'today'})
for i in range(len(today)):
result = today[i].findAll('div', {'class' : 'race-details'})
containers = result if i == 0 else containers + result
for container in containers:
txt = container.find('div', {'class' : 'venue-details'}).text
for code in country_codes:
# extract country code and venue
if code in txt[:3]:
cc = code
venue = txt.replace(code, '')
break
# get event times
events[cc][venue] = {} # dictionary for event times
times = [x.text for x in container.findAll('div', {'class' : 'racing-time'})]
for t in times:
# convert to datetime
d_time_now = datetime.combine(datetime.today(),datetime.strptime(t, '%H:%M').time())
# have a datetime 5 hours before the race as a marker to start collecting data
start_data_collection = d_time_now - timedelta(hours=5)
# print(f'd_time_now = {d_time_now} , start_data_collection = {start_data_collection}')
events[cc][venue][t] = start_data_collection
return events
events = get_races(page_soup, country_code)
class race():
def __init__(self,base_url, sport, cc, venue, time):
'''have a race as a class which we can add horse classes to.'''
self.url = base_url
self.sport = sport
self.cc = cc
self.venue = venue
self.time = time
# this returns that the day is 1/1/1990 need to make it today
self.datetime = datetime.combine(datetime.today(),datetime.strptime(self.time, '%H:%M').time())
print(self.venue)
print(self.time)
self.url_ext = '/' + self.venue.replace(' ','-') + '/' + self.time + '/' + 'winner'
# soup the url
soup = get_soup(self.url, self.sport, event_url = self.url_ext)
        # Get race data in a dictionary. THIS METHOD DOESN'T MATCH UP THE TITLE OF THE TYPE TO THE VALUES
race_info_container = soup.find('div', {'class':'content-right'}).findAll('li')
self.race_info = {x.text.split(':')[0] : x.text.split(':')[1] for x in race_info_container}
# These containers are the rows in the table on the url
containers = soup.findAll('tr', {'class' : 'diff-row evTabRow bc'})
# init horse class
self.horses = [horse(container) for container in containers]
self.rank_horses()
def __str__(self):
return f'{self.venue}, {self.cc} at {self.time}'
def get_current_odds(self):
'''Will update the odds in the horses class'''
# soup the url
soup = get_soup(base_url = self.url, sport = self.sport, event_url = self.url_ext)
for horse in self.horses:
#this should find the row for the horse we want
container = soup.findAll('tr', {'data-bname': horse.name})
if len(container) != 1 :
return 'Error - more than one row with horse name found - fix the bug'
horse.update_odds(container[0])
def rank_horses(self):
'''Orders the horses based on the value of their latest odds to find the favourite.'''
win_prob = [(h.name , h.latest_prob.values[0]) for h in self.horses]
win_prob.sort(key=operator.itemgetter(1), reverse = True)
# This just orders the horse objects in the list, need to assign ranks to the horse (with time stamp)
# And to some object associated with the race?
class horse():
def __init__(self, container):
'''Creates a horse object. Will initialise the dataframe to contain the odds data '''
try:
self.name = container.find('a', {'class' : 'popup selTxt'}).text
except:
self.name = container.find('a', {'class' : 'popup selTxt has-tip'}).text
# this also contains jockey form, need to seperate if we are going to use
self.jockey = container.find('div' ,{'class' :'bottom-row jockey'}).text
# Get the odds
odds = self.get_odds(container)
#start a dataframe of the odds
self.odds = pd.DataFrame(odds,columns = [datetime.now().replace(second = 0, microsecond=0)])
self.latest_odds = self.odds
self.stats = pd.DataFrame(self.get_stats())
def __str__(self):
return f'{self.name} ridden by {self.jockey}'
def get_odds(self, container):
'''returns a list of the odds for the horse
the container needs to be the row in the main table with the odds info in it.'''
odds = container.findAll('p') # these come as strings of fractional odds
odds_list = []
for odd in odds:
if '/' in odd.text:
numbers = odd.text.split('/')
new_odd = float(numbers[0]) / float(numbers[1]) + 1.0
elif odd.text == 'SP':
new_odd = None
else:
new_odd = float(odd.text) + 1.0
odds_list.append(new_odd)
return odds_list
def get_stats(self):
'''Return some basic stats for the horses odds at a certain time'''
mean = self.latest_odds.mean()
std = self.latest_odds.std()
maxx = self.latest_odds.max()
minn = self.latest_odds.min()
self.latest_prob = 1 / mean # use this to try and order the horses and give them a rank.
return pd.Series( (self.latest_prob, mean,std,maxx,minn), index = ['win_prob','mean','std','max','min'],
name = datetime.now().replace(second = 0, microsecond=0))
def update_odds(self, container):
'''Appends another column of raw odds and stats to their respective dataframes'''
self.latest_odds = pd.Series(self.get_odds(container),
name = datetime.now().replace(second = 0, microsecond=0) )
self.odds = pd.concat([self.odds, self.latest_odds], axis = 1)
self.stats = pd.concat([self.stats, self.get_stats()], axis = 1)
events.keys()
events['UK'].keys()
# This would be the loop structure required to access all the points in the event dict
# Run this cell to init the objects
for (cc,v) in events.items():
for venue, times in v.items():
for time in times:
x = race(url,'horses', cc, venue, time)
break
break
break
x.race_info
x.race_info
for hor in x.horses:
print(f'Name: {hor.name} , proability of win: {hor.latest_prob.values}')
for (cc,v) in events.items():
for venue, times in v.items():
for time in times:
x.get_current_odds()
# set some sort of pause statement here depending on how long we want between requests
break
break
break
x.datetime
x.horses[0].odds
# Cell to run
# 1. get days races
# 2. Have a while loop running every X number of minutes until after the last race of the day
# 3. check time against time race starts, once it is Y number of hours before the start.
#Start collecting odds data. Odds data will get appended every X minutes as the while loop runs round
# 4. re calculate odds stats (need to see what those are as haven't read the paper fully)
# 5. Highlight if we should bet based on betting strategy
# 6. Stop grabbing odds data once race has started.
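# --- Hedged sketch of the loop described above (illustrative only) ---
# It assumes the get_soup/get_races helpers and the race class defined earlier in this
# notebook; the 5-minute polling interval is an arbitrary placeholder, and the
# betting-strategy step (5.) is left out.
import time

def run_collection_loop(poll_minutes=5):
    todays_events = get_races(get_soup(url), country_code)   # 1. get today's races
    active = {}   # (cc, venue, time) -> race instance currently being tracked
    while True:   # 2. keep polling every poll_minutes
        now = datetime.now()
        races_left = False
        for cc, venues in todays_events.items():
            for venue, times in venues.items():
                for t, start_collecting in times.items():
                    race_start = start_collecting + timedelta(hours=5)
                    if now >= race_start:
                        continue              # 6. race has started, stop collecting for it
                    races_left = True
                    key = (cc, venue, t)
                    if now >= start_collecting and key not in active:
                        active[key] = race(url, 'horses', cc, venue, t)   # 3. start tracking
                    elif key in active:
                        active[key].get_current_odds()    # 3./4. append odds and recompute stats
        if not races_left:
            break                             # all of today's races have started
        time.sleep(poll_minutes * 60)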
###Output
_____no_output_____ |
Amazon_Fine_Food_Reviews.ipynb | ###Markdown
 Amazon Fine Food Reviews Analysis
Data Source: https://www.kaggle.com/snap/amazon-fine-food-reviews
The Amazon Fine Food Reviews dataset consists of reviews of fine foods from Amazon.
Number of reviews: 568,454
Number of users: 256,059
Number of products: 74,258
Timespan: Oct 1999 - Oct 2012
Number of Attributes/Columns in data: 10
Attribute Information:
1. Id
2. ProductId - unique identifier for the product
3. UserId - unique identifier for the user
4. ProfileName
5. HelpfulnessNumerator - number of users who found the review helpful
6. HelpfulnessDenominator - number of users who indicated whether they found the review helpful or not
7. Score - rating between 1 and 5
8. Time - timestamp for the review
9. Summary - brief summary of the review
10. Text - text of the review
Objective: Given a *review*, **determine** whether the review is **positive** (rating of 4 or 5) or **negative** (rating of 1 or 2).
[Q] How to determine if a review is positive or negative? [Ans] We could use the Score/Rating. A rating of 4 or 5 could be considered a positive review. A rating of 1 or 2 could be considered negative. A rating of 3 is neutral and ignored. This is an approximate and proxy way of determining the polarity (positivity/negativity) of a review.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sqlite3
from google.colab import drive
drive.mount('/content/gdrive')
from IPython.display import YouTubeVideo, Image
plt.style.use('fivethirtyeight')
%matplotlib inline
###Output
Mounted at /content/gdrive
###Markdown
Loading the dataSince I chose to work on google colab I will import the dataset directly from kaggle.
###Code
!pip install kaggle
from google.colab import files
# Saving kaggle.json
# files.upload()
!mkdir -p ~/.kaggle
!cp kaggle.json ~/.kaggle/
# change the permission
!chmod 600 ~/.kaggle/kaggle.json
!kaggle datasets download -d snap/amazon-fine-food-reviews
from zipfile import ZipFile
file_name = "amazon-fine-food-reviews.zip"
with ZipFile(file_name, 'r') as zp:
zp.extractall()
print('done')
###Output
done
###Markdown
 The dataset is available in two forms:
1. .csv file
2. SQLite Database
In order to load the data, we have used the SQLite database as it is easier to query and visualise the data efficiently. Since we only want the global sentiment of the recommendations (positive or negative), we will purposefully ignore all Scores equal to 3. If the score is above 3, the recommendation will be set to "positive"; otherwise, it will be set to "negative". [1]. Reading Data
Using the SQLite table to read the data.
###Code
con = sqlite3.connect('/content/database.sqlite')
###Output
_____no_output_____
###Markdown
Filtering only positive and negative reviews i.e. not taking into consideration those reviews with Score=3
###Code
filtered_data = pd.read_sql_query("""select * from Reviews where Score != 3""", con)
# changing reviews with score less the 3 to be negative(0) and more than 3
# to be positive(1)
filtered_data.loc[:, 'Score'] = filtered_data.loc[:, 'Score'].map(lambda x: 1 if x > 3 else 0)
print(f"Number of datapoints: {filtered_data.shape}")
filtered_data.head()
###Output
Number of datapoints: (525814, 10)
###Markdown
 [2] Data Cleaning: Deduplication
Now let's try to see if our dataset contains any duplicate entries.
###Code
display = pd.read_sql("""
select ProductId, UserId, ProfileName, Score, Time,
Text, count(*) as deduplication_cnt from Reviews
group by UserId having deduplication_cnt > 1
""", con)
print(display.shape)
display.head()
display['deduplication_cnt'].sum()
###Output
_____no_output_____
###Markdown
It is observed (as shown in the table below) that the reviews data had many duplicate entries. Hence it was necessary to remove duplicates in order to get unbiased results for the analysis of the data. Following is an example:
###Code
display= pd.read_sql_query("""
SELECT *
FROM Reviews
WHERE Score != 3 AND UserId="AR5J8UI46CURR"
ORDER BY ProductID
""", con)
display
###Output
_____no_output_____
###Markdown
 As can be seen above, the same user has multiple reviews with the same values for HelpfulnessNumerator, HelpfulnessDenominator, Score, Time, Summary and Text, and on analysis it was found that ProductId=B000HDOPZG was Loacker Quadratini Vanilla Wafer Cookies, 8.82-Ounce Packages (Pack of 8), ProductId=B000HDL1RQ was Loacker Quadratini Lemon Wafer Cookies, 8.82-Ounce Packages (Pack of 8), and so on. It was inferred that reviews with the same parameters other than ProductId belong to the same product, just in a different flavour or quantity. Hence, in order to reduce redundancy, it was decided to eliminate the rows having the same parameters. The method used was to first sort the data according to ProductId and then keep only the first review among the duplicates and delete the others. For example, in the case above only the review for ProductId=B000HDL1RQ remains. This ensures that there is only one representative for each product; deduplication without sorting could leave different representatives for the same product.
###Code
# sorting data according to productID ascending order
sorted_data = filtered_data.sort_values(by='ProductId',ascending=True,
axis=0, inplace=False,
kind='quicksort', na_position='last')
#Deduplication of entries
final = sorted_data.drop_duplicates(subset={
"UserId","ProfileName","Time","Text"
}, keep='first', inplace=False)
final.shape
#Checking to see how much % of data still remains
final.shape[0] / filtered_data.shape[0] *100.0
###Output
_____no_output_____
###Markdown
 We can see that we have lost more than **30% of our data.** Let's continue our data cleaning, knowing that this step is based on common sense and there is no fixed rule that must be followed to succeed in this task. We could check, for example, whether the value of HelpfulnessNumerator is greater than HelpfulnessDenominator, which should not be practically possible, since HelpfulnessDenominator = HelpfulnessNumerator + the number of users who marked the review as not helpful.
###Code
display= pd.read_sql_query("""
SELECT *
FROM Reviews
WHERE Score != 3 AND
HelpfulnessNumerator > HelpfulnessDenominator
ORDER BY ProductID
""", con)
display
# Let's remove them
final = final[final.HelpfulnessNumerator <= final.HelpfulnessDenominator]
final.shape
###Output
_____no_output_____
###Markdown
Let's check how many positive and negative reviews are present in our dataset
###Code
np.round(final.loc[:,'Score'].value_counts(normalize=True), 3) * 100
###Output
_____no_output_____
###Markdown
 We see that we have an *imbalanced* dataset: positive reviews outnumber negative ones roughly 84:16. Now our data is ready for **Text processing**; before moving on, let's pickle it for later use.
###Code
import pickle
final.to_pickle('data_before_possecing.pkl')
###Output
_____no_output_____
###Markdown
 [3]. Text Preprocessing.
Now that we have finished deduplication, our data requires some preprocessing before we go further with the analysis and the prediction model. Hence, in the preprocessing phase we do the following, in the order below:
1. Begin by removing the html tags
2. Remove any punctuation or limited set of special characters like , or . etc.
3. Check if the word is made up of English letters and is not alpha-numeric
4. Check to see if the length of the word is greater than 2 (it is said that there are no adjectives of 2 letters)
5. Convert the word to lowercase
6. Remove stopwords
7. Finally, Snowball-stem the word (it was observed to work better than Porter stemming)
After which we collect the words used to describe positive and negative reviews. Our goal is to go from **raw text** to a **vector** form so that we can apply the full **power of linear algebra**. The text pre-processing phase will make the data cleaner.
###Code
# let's print some random review
sent_0 = final.loc[:,'Text'].values[0]
print(sent_0)
print("="*50)
sent_1 = final.loc[:,'Text'].values[1]
print(sent_1)
print("="*50)
sent_500 = final.loc[:,'Text'].values[500]
print(sent_500)
print("="*50)
sent_1000 = final.loc[:,'Text'].values[1000]
print(sent_1000)
print("="*50)
sent_1500 = final.loc[:,'Text'].values[1500]
print(sent_1500)
print("="*50)
sent_4900 = final.loc[:,'Text'].values[4900]
print(sent_4900)
print("="*50)
###Output
this witty little book makes my son laugh at loud. i recite it in the car as we're driving along and he always can sing the refrain. he's learned about whales, India, drooping roses: i love all the new words this book introduces and the silliness of it all. this is a classic book i am willing to bet my son will STILL be able to recite from memory when he is in college
==================================================
I grew up reading these Sendak books, and watching the Really Rosie movie that incorporates them, and love them. My son loves them too. I do however, miss the hard cover version. The paperbacks seem kind of flimsy and it takes two hands to keep the pages open.
==================================================
As many other reviewers have suggested, the best way to use this trap is NOT to bury it, just simply tamp the ground over the tunnel and make 2 slots for the jaws of the trap. It never misses, when it trips, you have one less critter ruining your lawn!
==================================================
I was really looking forward to these pods based on the reviews. Starbucks is good, but I prefer bolder taste.... imagine my surprise when I ordered 2 boxes - both were expired! One expired back in 2005 for gosh sakes. I admit that Amazon agreed to credit me for cost plus part of shipping, but geez, 2 years expired!!! I'm hoping to find local San Diego area shoppe that carries pods so that I can try something different than starbucks.
==================================================
Great ingredients although, chicken should have been 1st rather than chicken broth, the only thing I do not think belongs in it is Canola oil. Canola or rapeseed is not someting a dog would ever find in nature and if it did find rapeseed in nature and eat it, it would poison them. Today's Food industries have convinced the masses that Canola oil is a safe and even better oil than olive or virgin coconut, facts though say otherwise. Until the late 70's it was poisonous until they figured out a way to fix that. I still like it but it could be better.
==================================================
Can't do sugar. Have tried scores of SF Syrups. NONE of them can touch the excellence of this product.<br /><br />Thick, delicious. Perfect. 3 ingredients: Water, Maltitol, Natural Maple Flavor. PERIOD. No chemicals. No garbage.<br /><br />Have numerous friends & family members hooked on this stuff. My husband & son, who do NOT like "sugar free" prefer this over major label regular syrup.<br /><br />I use this as my SWEETENER in baking: cheesecakes, white brownies, muffins, pumpkin pies, etc... Unbelievably delicious...<br /><br />Can you tell I like it? :)
==================================================
###Markdown
 We can easily see that our data contains numbers, punctuation, html tags, etc. For that we will need to import the re library of Python, which allows us to execute operations on regular expressions (regex), as well as the beautifulsoup library, which allows us to parse html documents.
###Code
import re
import bs4 as bs
###Output
_____no_output_____
###Markdown
Before continuing on the whole document, let's do some demonstrations
###Code
string = "Why is this $[...] when the same product is available for \
$[...] here?<br />http://www.amazon.com/VICTOR-FLY-MAGNET-BAIT-REFILL/dp/B00004RBDY<br /><br />The Victor \
M380 and M502 traps are unreal, of course -- total fly genocide. Pretty stinky, but only right nearby."
# remove urls from text
print(string)
print('=='*50)
pattern = re.compile(r'https?\S+')
string = re.sub(pattern, '', string)
print(string)
# remove html tags
soup = bs.BeautifulSoup(string, 'lxml')
text = soup.get_text()
print(string)
print('=='*50)
print(text)
###Output
Why is this $[...] when the same product is available for $[...] here?<br /> /><br />The Victor M380 and M502 traps are unreal, of course -- total fly genocide. Pretty stinky, but only right nearby.
====================================================================================================
Why is this $[...] when the same product is available for $[...] here? />The Victor M380 and M502 traps are unreal, of course -- total fly genocide. Pretty stinky, but only right nearby.
###Markdown
 The English language has a number of contractions. For instance:
you've -> you have
he's -> he is
These can sometimes cause a headache when you are doing natural language processing.
###Code
# let's make a function that clarify it
def decontracted(phrase):
# specific
phrase = re.sub(r"won't", "will not", phrase)
phrase = re.sub(r"can\'t", "can not", phrase)
# general
phrase = re.sub(r"n\'t", " not", phrase)
phrase = re.sub(r"\'re", " are", phrase)
phrase = re.sub(r"\'s", " is", phrase)
phrase = re.sub(r"\'d", " would", phrase)
phrase = re.sub(r"\'ll", " will", phrase)
phrase = re.sub(r"\'t", " not", phrase)
phrase = re.sub(r"\'ve", " have", phrase)
phrase = re.sub(r"\'m", " am", phrase)
return phrase
string_ = "Hey, I'm sorry; but these reviews do nobody any good beyond reminding us to look before ordering."
print(string_)
string_ = decontracted(string_)
print(string_)
#remove words with numbers
print(string)
print('==' * 50)
string = re.sub("\S*\d\S*", "", string).strip()
print(string)
# remove special caracteres
print(string)
print('=='*50)
string = re.sub('[^A-Za-z0-9]+', ' ', string)
print(string)
# https://gist.github.com/sebleier/554280
# we are removing the words from the stop words list: 'no', 'nor', 'not'
# <br /><br /> ==> after the above steps, we are getting "br br"
# we are including them into stop words list
# instead of <br /> if we have <br/> these tags would have revmoved in the 1st step
stopwords= set(['br', 'the', 'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've",\
"you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', \
'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their',\
'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', \
'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', \
'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', \
'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after',\
'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further',\
'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more',\
'most', 'other', 'some', 'such', 'only', 'own', 'same', 'so', 'than', 'too', 'very', \
's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', \
've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn',\
"hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn',\
"mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", \
'won', "won't", 'wouldn', "wouldn't"])
###Output
_____no_output_____
###Markdown
Now let's combine all the above:
###Code
# Combining all the above steps
from tqdm import tqdm
preprocessed_reviews = []
# tqdm is for printing the status bar
import bs4 as bs
import re
for sentance in tqdm(final['Text'].values):
sentance = re.sub(r"http\S+", "", sentance)
sentance = bs.BeautifulSoup(sentance, 'lxml').get_text()
sentance = decontracted(sentance)
sentance = re.sub("\S*\d\S*", "", sentance).strip()
sentance = re.sub('[^A-Za-z]+', ' ', sentance)
# https://gist.github.com/sebleier/554280
sentance = ' '.join(e.lower() for e in sentance.split() if e.lower() not in stopwords)
preprocessed_reviews.append(sentance.strip())
# We capture our own data in a new dataframe and save it in pickle
# form for later use. Note that we can be satisfied with the preprocessed_reviews list.
data_clean = pd.DataFrame(preprocessed_reviews)
data_clean = data_clean.rename(columns={0:'Text'})
# let's pickle it
data_clean.to_pickle('corpus.pkl')
data_clean.loc[:, 'Text'].values[52]
###Output
_____no_output_____
###Markdown
[4] FeaturizationNow that we have our a collection of text **corpus** let's convert it to vectors. [4.1] BAG OF WORDSBoW's technique consists of creating a lexical dictionary containing a set of all the words in the reviews.
###Code
YouTubeVideo(id="IKgBLTeQQL8", width=950, height=450)
###Output
_____no_output_____
###Markdown
 Document-Term Matrix
For many of the techniques we'll be using in future notebooks, the text must be tokenized, meaning broken down into smaller pieces. The most common tokenization technique is to break down text into words. We can do this using scikit-learn's CountVectorizer, where every row will represent a different document and every column will represent a different word.
###Code
# We are going to create a document-term matrix using CountVectorizer,
# and exclude common English stop words
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
data_cv = cv.fit_transform(data_clean.Text)
print("the type of count vectorizer ",type(data_cv))
###Output
the type of count vectorizer <class 'scipy.sparse.csr.csr_matrix'>
###Markdown
We see that our return object is a sparse matrix. What is a sparse matrix?
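In short, a sparse matrix stores only the non-zero entries together with their coordinates instead of every cell. A minimal sketch of the idea on a small made-up matrix:
###Code
# Hedged illustration: a mostly-zero matrix stored in CSR form keeps only the non-zeros
import numpy as np
from scipy.sparse import csr_matrix
dense = np.array([[0, 0, 3], [4, 0, 0], [0, 0, 0]])
sparse = csr_matrix(dense)
print(sparse.nnz, 'non-zero values stored out of', dense.size, 'cells')
print(sparse)   # printed as (row, col)  value triples
###Output
_____no_output_____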
###Code
YouTubeVideo(id="4MoSrMkWovM", width=950, height=450)
print("the shape of out text BOW vectorizer ", data_cv.get_shape())
print("the number of unique words ", data_cv.get_shape()[1])
print("some feature names ", cv.get_feature_names()[:10])
# let's pickle it
pickle.dump(cv, open("cv.pkl", "wb"))
###Output
_____no_output_____
###Markdown
What is Tokenization?
###Code
YouTubeVideo(id="6ZVf1jnEKGI", width=950, height=450)
###Output
_____no_output_____
###Markdown
 What are stemming and lemmatization?
###Code
YouTubeVideo(id="JpxCt3kvbLk", width=950, height=450)
###Output
_____no_output_____
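###Markdown
A minimal sketch of the difference between the two, assuming the nltk package (and its wordnet data) is available in the environment; the example words are chosen only for illustration:
###Code
# Hedged example: stemming chops suffixes, lemmatization maps words to dictionary forms
import nltk
nltk.download('wordnet', quiet=True)
from nltk.stem import SnowballStemmer, WordNetLemmatizer
stemmer = SnowballStemmer('english')
lemmatizer = WordNetLemmatizer()
for w in ['tasty', 'tastes', 'tasting', 'better']:
    print(w, '->', stemmer.stem(w), '/', lemmatizer.lemmatize(w, pos='a'))
###Output
_____no_output_____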
###Markdown
[4.2] Bi-Grams and n-Grams
###Code
YouTubeVideo(id="GiyMGBuu45w", width=950, height=450)
#bi-gram, tri-gram and n-gram
#removing stop words like "not" should be avoided before building n-grams
# count_vect = CountVectorizer(ngram_range=(1,2))
# please do read the CountVectorizer documentation http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html
# you can choose these numbers min_df=10, max_features=5000, of your choice
count_vect = CountVectorizer(ngram_range=(1,2), min_df=10, max_features=5000)
final_bigram_counts = count_vect.fit_transform(data_clean.Text)
print("the type of count vectorizer ",type(final_bigram_counts))
print("the shape of out text BOW vectorizer ",final_bigram_counts.get_shape())
print("the number of unique words including both unigrams and bigrams ", final_bigram_counts.get_shape()[1])
###Output
the type of count vectorizer <class 'scipy.sparse.csr.csr_matrix'>
the shape of out text BOW vectorizer (364171, 116756)
the number of unique words including both unigrams and bigrams 116756
###Markdown
[4.3] TF-IDF
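Before computing it, recall roughly what the score does: each word $t$ in a document $d$ is weighted by $\mathrm{tfidf}(t,d)=\mathrm{tf}(t,d)\times \mathrm{idf}(t)$, where, in scikit-learn's default smoothed form, $\mathrm{idf}(t)=\ln\frac{1+n}{1+\mathrm{df}(t)}+1$, with $n$ the number of documents and $\mathrm{df}(t)$ the number of documents containing $t$; the resulting row vectors are then L2-normalised. Words that appear in almost every review are therefore down-weighted relative to rarer, more informative words.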
###Code
YouTubeVideo(id="D2V1okCEsiE", width=950, height=450)
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
tf_idf_vect = TfidfVectorizer(ngram_range=(1,2), min_df=10)
tf_idf_vect.fit(data_clean.Text)
print("some sample features(unique words in the corpus)",tf_idf_vect.get_feature_names()[0:10])
print('='*50)
final_tf_idf = tf_idf_vect.transform(data_clean.Text)
print("the type of count vectorizer ",type(final_tf_idf))
print("the shape of out text TFIDF vectorizer ",final_tf_idf.get_shape())
print("the number of unique words including both unigrams and bigrams ", final_tf_idf.get_shape()[1])
###Output
some sample features(unique words in the corpus) ['aa', 'aaa', 'aaaaa', 'aaah', 'aafco', 'ab', 'aback', 'abandon', 'abandoned', 'abbey']
==================================================
the type of count vectorizer <class 'scipy.sparse.csr.csr_matrix'>
the shape of out text TFIDF vectorizer (364171, 203034)
the number of unique words including both unigrams and bigrams 203034
###Markdown
[4.4] Word2Vec
###Code
YouTubeVideo(id="Otde6VGvhWM", width=950, height=450)
###Output
_____no_output_____
###Markdown
Train your own Word2Vec model using your own text corpus
###Code
i=0
list_of_sentance=[]
for sentance in preprocessed_reviews:
list_of_sentance.append(sentance.split())
###Output
_____no_output_____
###Markdown
 Using Google News Word2Vectors
A pretrained model by Google. It is a 3.3G file; once you load it into memory it occupies ~9Gb, so please do this step only if you have >12G of RAM. To use this code snippet, download "GoogleNews-vectors-negative300.bin" from https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit; it's 1.9GB in size.
###Code
# s_your_ram_gt_16g=False
# want_to_use_google_w2v = False
# want_to_train_w2v = True
# if want_to_use_google_w2v and is_your_ram_gt_16g:
# if os.path.isfile('GoogleNews-vectors-negative300.bin'):
# w2v_model=KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)
# print(w2v_model.wv.most_similar('great'))
# print(w2v_model.wv.most_similar('worst'))
# else:
# print("you don't have google's word2vec file, keep want_to_train_w2v = True, to train your own w2v ")
# if want_to_train_w2v:
# min_count = 5 considers only words that occured atleast 5 times
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
w2v_model=Word2Vec(list_of_sentance,min_count=5,size=50, workers=4)
print(w2v_model.wv.most_similar('great'))
print('='*50)
print(w2v_model.wv.most_similar('worst'))
w2v_words = list(w2v_model.wv.vocab)
print("number of words that occured minimum 5 times ",len(w2v_words))
print("sample words ", w2v_words[0:50])
###Output
number of words that occured minimum 5 times 33573
sample words ['witty', 'little', 'book', 'makes', 'son', 'laugh', 'loud', 'recite', 'car', 'driving', 'along', 'always', 'sing', 'refrain', 'learned', 'whales', 'india', 'drooping', 'roses', 'love', 'new', 'words', 'introduces', 'silliness', 'classic', 'willing', 'bet', 'still', 'able', 'memory', 'college', 'grew', 'reading', 'sendak', 'books', 'watching', 'really', 'rosie', 'movie', 'incorporates', 'loves', 'however', 'miss', 'hard', 'cover', 'version', 'seem', 'kind', 'flimsy', 'takes']
###Markdown
[4.4.1] Converting text into vectors using wAvg W2V, TFIDF-W2V [4.4.1.1] Avg W2v
###Code
# average Word2Vec
# compute average word2vec for each review.
sent_vectors = []; # the avg-w2v for each sentence/review is stored in this list
for sent in tqdm(list_of_sentance): # for each review/sentence
    sent_vec = np.zeros(50) # word vectors have length 50 here; change this to 300 if you use Google's w2v
cnt_words =0; # num of words with a valid vector in the sentence/review
for word in sent: # for each word in a review/sentence
if word in w2v_words:
vec = w2v_model.wv[word]
sent_vec += vec
cnt_words += 1
if cnt_words != 0:
sent_vec /= cnt_words
sent_vectors.append(sent_vec)
print(len(sent_vectors))
print(len(sent_vectors[0]))
###Output
100%|██████████| 364171/364171 [16:29<00:00, 367.86it/s]
###Markdown
[4.4.1.2] TFIDF weighted W2v
###Code
# S = ["abc def pqr", "def def def abc", "pqr pqr def"]
model = TfidfVectorizer()
model.fit(preprocessed_reviews)
# we are building a dictionary with the word as key and its idf as value
dictionary = dict(zip(model.get_feature_names(), list(model.idf_)))
# TF-IDF weighted Word2Vec
tfidf_feat = model.get_feature_names() # tfidf words/col-names
# final_tf_idf is the sparse matrix with row= sentence, col=word and cell_val = tfidf
tfidf_sent_vectors = []; # the tfidf-w2v for each sentence/review is stored in this list
row=0;
for sent in tqdm(list_of_sentance): # for each review/sentence
sent_vec = np.zeros(50) # as word vectors are of zero length
weight_sum =0; # num of words with a valid vector in the sentence/review
for word in sent: # for each word in a review/sentence
if word in w2v_words and word in tfidf_feat:
vec = w2v_model.wv[word]
# tf_idf = tf_idf_matrix[row, tfidf_feat.index(word)]
# to reduce the computation we are
# dictionary[word] = idf value of word in whole courpus
# sent.count(word) = tf valeus of word in this review
tf_idf = dictionary[word]*(sent.count(word)/len(sent))
sent_vec += (vec * tf_idf)
weight_sum += tf_idf
if weight_sum != 0:
sent_vec /= weight_sum
tfidf_sent_vectors.append(sent_vec)
row += 1
###Output
100%|██████████| 364171/364171 [4:28:57<00:00, 22.57it/s]
|
NoteBooks/Curso de WebScraping/Unificado/web-scraping-master/Clases/Módulo 4_ APIs/Clases no script/M4C4 - Obteniendo la discografía.ipynb | ###Markdown
Module 4: APIs - Spotify
In this module we will use APIs to obtain information about the artists, albums and tracks available on Spotify. But first... what is an **API**? An API (*Application Programming Interface*) is an interface for programming applications: a set of functions, methods, rules and definitions that will let us develop applications (in this case a scraper) that communicate with Spotify's servers. APIs are designed and developed by companies interested in having (public or private) applications built on top of their services. Spotify has public, well-documented APIs that we will be using throughout this project.
REST: a term you will surely come across when looking for information online is **REST** or *RESTful*. It stands for *representational state transfer*, and an API being REST or RESTful means that it follows certain architectural principles, such as a client/server communication protocol (which will be HTTP) and, among other things, a set of defined operations that we know as **methods**. We have already been using the GET method to make requests to web servers.
Documentation: as mentioned before, APIs are designed by the same companies that want (public or private) applications to consume their services or data. That is why the way an API is used varies depending on the service we want to consume: using Spotify's APIs is not the same as using Twitter's. For this reason it is very important to read the available documentation, usually found in the developers section of each site. Here is the [link to Spotify's](https://developer.spotify.com/documentation/).
JSON: JSON stands for *JavaScript Object Notation* and is a format for describing objects that became so popular that it is now considered language-independent. In fact, we will use it in this project even though we are working in Python, because it is the format in which we will receive the responses to the requests we make through the APIs. For us it will be nothing more than a dictionary with a few particularities that we will see throughout the course.
Useful links for this class:
- [Spotify documentation - Artists](https://developer.spotify.com/documentation/web-api/reference/artists/)
- [Iron Maiden on Spotify](https://open.spotify.com/artist/6mdiAmATAx73kdxrNrnlao)
###Code
import requests
id_im = '6mdiAmATAx73kdxrNrnlao'
url_base = 'https://api.spotify.com/v1'
ep_artist = '/artists/{artist_id}'
url_base+ep_artist.format(artist_id=id_im)
r = requests.get(url_base+ep_artist.format(artist_id=id_im))
r.status_code
r.json()
token_url = 'https://accounts.spotify.com/api/token'
params = {'grant_type': 'client_credentials'}
headers = {'Authorization': 'Basic NDRiN2IzNmVjMTQ1NDY3ZjlhOWVlYWY3ZTQxN2NmOGI6N2I0YWE3YTBlZjQ4NDQwNDhhYjFkMjI0MzBhMWViMWY='}
r = requests.post(token_url, data=params, headers=headers)
r.status_code
r.json()
token = r.json()['access_token']
token
header = {"Authorization": "Bearer {}".format(token)}
r = requests.get(url_base+ep_artist.format(artist_id=id_im), headers=header)
r.status_code
r.json()
url_busqueda = 'https://api.spotify.com/v1/search'
search_params = {'q': "Iron+Maiden", 'type':'artist', 'market':'AR'}
busqueda = requests.get(url_busqueda, headers=header, params=search_params)
busqueda.status_code
busqueda.json()
import pandas as pd
df = pd.DataFrame(busqueda.json()['artists']['items'])
df.head()
df.sort_values(by='popularity', ascending=False).iloc[0]['id']
import base64
def get_token(client_id, client_secret):
encoded = base64.b64encode(bytes(client_id+':'+client_secret, 'utf-8'))
params = {'grant_type':'client_credentials'}
header={'Authorization': 'Basic ' + str(encoded, 'utf-8')}
r = requests.post('https://accounts.spotify.com/api/token', headers=header, data=params)
if r.status_code != 200:
print('Error en la request.', r.json())
return None
print('Token válido por {} segundos.'.format(r.json()['expires_in']))
return r.json()['access_token']
client_id = '44b7b36ec145467f9a9eeaf7e417cf8b'
client_secret = '7b4aa7a0ef4844048ab1d22430a1eb1f'
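# A minimal usage sketch (assuming the credentials above are still valid):
# request a fresh token with the helper defined above and repeat the artist query.
token = get_token(client_id, client_secret)
header = {'Authorization': 'Bearer {}'.format(token)}
r = requests.get(url_base + ep_artist.format(artist_id=id_im), headers=header)
r.status_code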
###Output
_____no_output_____ |
keras_tf_pytorch/Keras/.ipynb_checkpoints/EJERCICIO_KERAS-checkpoint.ipynb | ###Markdown
CIFAR10 image set exercise - Classifying diverse images - Keras - Andrés de la Rosa
###Code
#Import all required modules
import time
import matplotlib.pyplot as plt
import numpy as np
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.constraints import maxnorm
from keras.optimizers import SGD
from keras.layers import Activation
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras import backend as K
import tensorflow as tf
import multiprocessing as mp
from keras.datasets import cifar10
import os
###Output
_____no_output_____
###Markdown
This time I load CIFAR10 directly from the Keras built-in datasets instead of from the page where the files were originally hosted, as I did in the TensorFlow exercise. This made the work much easier because I did not have to define the image preprocessing functions myself, keeping in mind that when I use my own images I will have to do that preprocessing (a commented sketch of this is included after the normalization step in the code below).
###Code
batch_size = 32
num_classes = 10
epochs = 5
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# x_train - training data(images), y_train - labels(digits)
print(x_train.shape[0], 'training images')
print(x_test.shape[0], 'test images')
#Convert labels to one-hot encoding
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
#Normalize the input
x_train /= 255
x_test /= 255
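# Sketch (assumption, not part of the original exercise): preprocessing one of my own images
# the same way before calling model.predict; 'my_image.png' is a hypothetical path.
# from keras.preprocessing.image import load_img, img_to_array
# own_img = img_to_array(load_img('my_image.png', target_size=(32, 32))).astype('float32') / 255
# own_img = np.expand_dims(own_img, axis=0)  # shape (1, 32, 32, 3), ready for model.predict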
#Define the 3-block model with relu and max pooling, using 'same' padding so no pixels are lost
model = Sequential()
#First block with relu and max pooling
model.add(Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
#Second block with relu and max pooling
model.add(Conv2D(64, (3, 3), padding='same', input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
#Third block with relu and max pooling
model.add(Conv2D(128, (3, 3), padding='same', input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
#Flatten the feature maps
model.add(Flatten())
#Finish with the fully connected layers
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Dense(num_classes))
model.add(Activation('softmax'))  # a softmax output is required when using categorical_crossentropy
# Compile the model to track its accuracy
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
opt = SGD(lr=0.001, momentum=0.9, decay=1e-6, nesterov=False)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_split=0.2,
shuffle=True)
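# Sketch (assumption): evaluate the trained network on the held-out test set afterwards.
# test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
# print('Test loss: %.4f - test accuracy: %.4f' % (test_loss, test_acc))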
###Output
WARNING:tensorflow:From C:\Users\andre\Anaconda3\envs\practicas\lib\site-packages\tensorflow\python\ops\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.
Train on 40000 samples, validate on 10000 samples
Epoch 1/5
40000/40000 [==============================] - 90s 2ms/step - loss: 2.7842 - acc: 0.0940 - val_loss: 2.2797 - val_acc: 0.0535
Epoch 2/5
40000/40000 [==============================] - 87s 2ms/step - loss: 2.4305 - acc: 0.0831 - val_loss: 2.2902 - val_acc: 0.1257
Epoch 3/5
40000/40000 [==============================] - 85s 2ms/step - loss: 2.3079 - acc: 0.0809 - val_loss: 2.3157 - val_acc: 0.0378
Epoch 4/5
40000/40000 [==============================] - 86s 2ms/step - loss: 2.2870 - acc: 0.0596 - val_loss: 2.2332 - val_acc: 0.0420
Epoch 5/5
40000/40000 [==============================] - 87s 2ms/step - loss: 2.2247 - acc: 0.0481 - val_loss: 3.8114 - val_acc: 0.0318
|
data_formatting/PPI_STRING.ipynb | ###Markdown
Load APID PPI network data
###Code
APID_gene_id, APID_network = load_data.load_PPI_Y2H_or_APID(data_folder, ppi_data)
APID_network
###Output
_____no_output_____
###Markdown
Load STRING PPI network data. Data downloaded from: https://stringdb-static.org/download/protein.links.v10.5/9606.protein.links.v10.5.txt.gz (9606 = Homo sapiens)
###Code
string_ppi = pd.read_table(data_folder + '9606.protein.links.v10.5.txt', delim_whitespace=True)
print("Raw STRING PPI number: ", string_ppi.shape[0])
string_ppi.head()
# sort top 10% of combine_score
top10_string = string_ppi.nlargest(round(string_ppi.shape[0]/10), 'combined_score')
print("Top 10% score of STRING PPI number: ", top10_string.shape[0])
# remove the Homo sapiens taxon prefix ('9606.'); regex=False so the '.' is treated literally
top10_string['protein1'] = top10_string['protein1'].str.replace('9606.', '', regex=False)
top10_string['protein2'] = top10_string['protein2'].str.replace('9606.', '', regex=False)
# rename
top10_string.rename(columns={'protein1': 'ensembl_1',
'protein2': 'ensembl_2'}, inplace = True)
top10_string.head()
top10_string.shape
# unique values: proteins
top10_string['ensembl_1'].nunique()
top10_string['ensembl_2'].nunique()
###Output
_____no_output_____
###Markdown
ID mapping with BioMart data: Ensembl protein ID (ENSP) -> EntrezGene ID. Note that STRING uses Ensembl protein IDs (ENSP), not Ensembl gene IDs (ENSG). Mapping downloaded from __[BioMart](https://grch37.ensembl.org/biomart/martview)__, Human genes __(GRCh37.p13)__, with the attributes:
- Gene stable ID (ENSG)
- Protein stable ID (ENSP)
- EntrezGene ID
- HGNC symbol
File: "mart_export.txt" (2018/06/25)
###Code
df_biomart = pd.read_csv(data_folder + "mart_export.txt", sep="\t", index_col=False)
df_biomart.head()
df_biomart.shape
# keep only ENSP ID and EntrezGene ID
df_biomart_ENSP_Entrez = df_biomart[['Protein stable ID', 'EntrezGene ID']]
df_biomart_ENSP_Entrez.head()
print("NaN in ENSP ID: {} ({}%)"
.format(df_biomart_ENSP_Entrez['Protein stable ID'].isnull().sum(),
round(df_biomart_ENSP_Entrez['Protein stable ID'].isnull().sum()*100/df_biomart.shape[0], 1)))
print("NaN in EntrezGene ID: {} ({}%)"
.format(df_biomart_ENSP_Entrez['EntrezGene ID'].isnull().sum(),
round(df_biomart_ENSP_Entrez['EntrezGene ID'].isnull().sum()*100/df_biomart.shape[0], 1)))
# remove ENSP and EntrezGene NaN rows
df_biomart_compact = df_biomart_ENSP_Entrez[pd.notnull(df_biomart_ENSP_Entrez['Protein stable ID'])]
df_biomart_compact = df_biomart_compact[pd.notnull(df_biomart_compact['EntrezGene ID'])]
df_biomart_compact.shape
df_biomart_compact.head()
print('Duplicated ENSP number: ',
df_biomart_compact.duplicated(subset=['Protein stable ID'], keep=False).sum())
print('Duplicated EntrezGene number: ',
df_biomart_compact.duplicated(subset=['EntrezGene ID'], keep=False).sum())
###Output
Duplicated ENSP number: 15163
Duplicated EntrezGene number: 108095
###Markdown
Example: one ENSG/ENSP/gene symbol can map to several EntrezGene IDs
###Code
df_biomart[df_biomart['Protein stable ID']=='ENSP00000459754']
# remove all ENSP duplicates rows
df_biomart_uniqENSP = df_biomart_compact.drop_duplicates(subset=['Protein stable ID'], keep=False)
removed_ensp = df_biomart_compact.shape[0] - df_biomart_uniqENSP.shape[0]
print("Removed ENSP duplicates rows: {} ({}%)"
.format(removed_ensp, round(removed_ensp*100/df_biomart_compact.shape[0], 1)))
df_biomart_uniqENSP.shape
# merge based on ensembl_1 (_x)
str_mart1 = top10_string.merge(df_biomart_uniqENSP, how='left', left_on='ensembl_1', right_on='Protein stable ID')
# str_mart1.shape
# merge based on ensembl_2 (_y)
str_mart2 = str_mart1.merge(df_biomart_uniqENSP, how='left', left_on='ensembl_2', right_on='Protein stable ID')
str_mart2.shape
df_string = str_mart2[['ensembl_1', 'ensembl_2', 'EntrezGene ID_x', 'EntrezGene ID_y']]
df_string = df_string.rename(columns={'EntrezGene ID_x': 'EntrezGene ID_1',
'EntrezGene ID_y': 'EntrezGene ID_2'})
df_string.head()
print("NaN in EntrezGene ID_1: {} ({}%)"
.format(df_string['EntrezGene ID_1'].isnull().sum(),
round(df_string['EntrezGene ID_1'].isnull().sum()*100/df_string.shape[0], 1)))
print("NaN in EntrezGene ID_2: {} ({}%)"
.format(df_string['EntrezGene ID_2'].isnull().sum(),
round(df_string['EntrezGene ID_2'].isnull().sum()*100/df_string.shape[0], 1)))
# remove EntrezGene NaN rows
df_string = df_string[pd.notnull(df_string['EntrezGene ID_1'])]
df_string = df_string[pd.notnull(df_string['EntrezGene ID_2'])]
df_string.shape
df_string_compact = df_string.drop_duplicates(subset=['EntrezGene ID_1', 'EntrezGene ID_2'])
print("Removed duplicated EntrezGene PPI: {} ({}%)"
.format(df_string.shape[0]-df_string_compact.shape[0],
round((df_string.shape[0]-df_string_compact.shape[0])*100/df_string.shape[0], 1)))
df_string_compact.shape
###Output
Removed duplicated EntrezGene PPI: 1994 (0.2%)
###Markdown
Create STRING PPI network matrix
###Code
# EntrezGene ID in lists
entrez1 = df_string_compact['EntrezGene ID_1']#.tolist()
entrez2 = df_string_compact['EntrezGene ID_2']#.tolist()
# from float to int
# entrez1 = [int(i) for i in entrez1]
# entrez2 = [int(i) for i in entrez2]
def coordinate(prot_list, all_list):
coo_list = []
for prot in prot_list:
i = all_list.index(prot)
coo_list.append(i)
return coo_list
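# Note: list.index() inside the loop makes coordinate() quadratic in the number of proteins.
# A sketch of an equivalent lookup using a dict (same output, single pass over each list):
def coordinate_fast(prot_list, all_list):
    index_of = {prot: i for i, prot in enumerate(all_list)}
    return [index_of[prot] for prot in prot_list]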
def create_adjacency_matrix(prot1, prot2):
# remove if self interaction
prot1, prot2 = zip(*((x, y) for x, y in zip(prot1, prot2) if x!=y))
# prot1, prot2 = list(prot1), list(prot2)
edge_list = np.vstack((prot1, prot2)).T
gene_id_ppi = (edge_list.flatten()).tolist()
gene_id_ppi = list(set(gene_id_ppi))
# From ID list to coordinate list
print(' ==== coordinates ')
# coo1 = coordinate(prot1.tolist(), gene_id_ppi)
# coo2 = coordinate(prot2.tolist(), gene_id_ppi)
coo1 = coordinate(list(prot1), gene_id_ppi)
coo2 = coordinate(list(prot2), gene_id_ppi)
# Adjacency matrix
print(' ==== Adjacency matrix ')
n = len(gene_id_ppi)
weight = np.ones(len(coo1)) # if interaction -> 1
network = sp.coo_matrix((weight, (coo1, coo2)), shape=(n, n))
network = network + network.T # symmetric matrix
network.setdiag(0)
# savemat(PPI_file, {'adj_mat': network, 'entrez_id': gene_id_ppi},
# do_compression=True)
return gene_id_ppi, network
STRING_gene_id, STRING_network = create_adjacency_matrix(entrez1, entrez2)
STRING_network
savemat(data_folder + 'PPI_STRING_v10_5.mat',
{'adj_mat': STRING_network, 'entrez_id': STRING_gene_id},do_compression=True)
996925/2
list1 = [1, 1, 2, 3, 4, 5, 2]
list2 = [2, 4, 6, 6, 6, 5, 1]
df = pd.DataFrame(
{'prot1': list1,
'prot2': list2})
gene, net = create_adjacency_matrix(df['prot1'], df['prot2'])
df
gene
net
net.todense()
l1, l2 = zip(*((x, y) for x, y in zip(list1, list2) if x!=y))
l1, l2 = list(l1), list(l2)
l1 = list(l1)
l1
l2 = list(l2)
l2
###Output
_____no_output_____ |
TensorFlow-GettingStarted/GettingStarted1.ipynb | ###Markdown
Example 1
###Code
import tensorflow as tf
# y = mx + b
m = tf.constant(3.0, name='m')
b = tf.constant(1.5, name='b')
x = tf.placeholder(dtype='float32', name='x')
y = m*x + b
sess = tf.Session()
y.eval({x: 2}, session=sess)
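# The same graph can be evaluated for several inputs at once by feeding a list to the placeholder:
sess.run(y, feed_dict={x: [0.0, 1.0, 2.0]})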
###Output
_____no_output_____
###Markdown
Example 2: Basic matrix arithmetic
###Code
M = tf.constant([[1,2], [3,4]], dtype='float32')
v = tf.constant([5,2], dtype='float32')
sess.run(M+v)
#Element wise multiplication
sess.run(M*v)
###Output
_____no_output_____
###Markdown
Example 3: Matrix multiplication
###Code
sess.run(tf.matmul(M, tf.reshape(v, [2,1])))
###Output
_____no_output_____ |
dense-model.ipynb | ###Markdown
Import Libraries
###Code
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Dense, Dropout, LSTM
from sklearn.metrics import mean_absolute_error
from datetime import datetime
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
###Output
_____no_output_____
###Markdown
Import data
###Code
crypto_df = pd.read_csv("../input/g-research-crypto-forecasting/train.csv")
crypto_df.head()
asset_details = pd.read_csv('../input/g-research-crypto-forecasting/asset_details.csv')
asset_details
# Select Asset_ID = 6 for Ethereum
crypto_df = crypto_df[crypto_df["Asset_ID"]==6]
crypto_df.info(show_counts =True)
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 1956200 entries, 5 to 24236799
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 timestamp 1956200 non-null int64
1 Asset_ID 1956200 non-null int64
2 Count 1956200 non-null float64
3 Open 1956200 non-null float64
4 High 1956200 non-null float64
5 Low 1956200 non-null float64
6 Close 1956200 non-null float64
7 Volume 1956200 non-null float64
8 VWAP 1956200 non-null float64
9 Target 1955860 non-null float64
dtypes: float64(8), int64(2)
memory usage: 164.2 MB
###Markdown
Preprocessing
###Code
df = crypto_df.copy()
# rename column timestamp to Date and Close to Price
df.rename(columns={'timestamp': 'Date', 'Close': 'Price'}, inplace=True)
# set the timestamp (in seconds) as index
df.set_index('Date', inplace=True)
# fill missing minutes: reindex on the timestamp index with one row every 60 seconds
df = df.reindex(range(df.index[0], df.index[-1] + 60, 60), method='pad')
df = df.fillna(0)
# Convert to date array
timesteps = df.index.to_numpy()
prices = df['Price'].to_numpy()
timesteps[:10], prices[:10]
###Output
_____no_output_____
###Markdown
Modeling Dense model
###Code
# Create Window dataset
HORIZON = 1 # predict 1 step at a time
WINDOW_SIZE = 7 # use the previous 7 timesteps to predict the horizon
# Create function to label windowed data
def get_labelled_windows(x, horizon=1):
"""
Input: [1, 2, 3, 4, 5, 6] -> Output: ([1, 2, 3, 4, 5], [6])
"""
return x[:, :-horizon], x[:, -horizon:]
# Test the window labelling function
test_window, test_label = get_labelled_windows(tf.expand_dims(tf.range(8)+1, axis=0), horizon=HORIZON)
print(f"Window: {tf.squeeze(test_window).numpy()} -> Label: {tf.squeeze(test_label).numpy()}")
# Create function to view NumPy arrays as windows
def make_windows(x, window_size=7, horizon=1):
"""
Turns a 1D array into a 2D array of sequential windows of window_size.
"""
window_step = np.expand_dims(np.arange(window_size+horizon), axis=0)
window_indexes = window_step + np.expand_dims(np.arange(len(x)-(window_size+horizon-1)), axis=0).T
windowed_array = x[window_indexes]
windows, labels = get_labelled_windows(windowed_array, horizon=horizon)
return windows, labels
full_windows, full_labels = make_windows(prices, window_size=WINDOW_SIZE, horizon=HORIZON)
len(full_windows), len(full_labels)
# Create function for train-test-split
def make_train_test_splits(windows, labels, test_split=0.2):
"""
Splits matching pairs of windows and labels into train and test splits.
"""
split_size = int(len(windows) * (1-test_split))
train_windows = windows[:split_size]
train_labels = labels[:split_size]
test_windows = windows[split_size:]
test_labels = labels[split_size:]
return train_windows, test_windows, train_labels, test_labels
train_windows, test_windows, train_labels, test_labels = make_train_test_splits(full_windows, full_labels)
len(train_windows), len(test_windows), len(train_labels), len(test_labels)
train_windows[:5], train_labels[:5]
# Create model callbacks
import os
# Create a function to implement a ModelCheckpoint callback with a specific filename
def create_model_checkpoint(model_name, save_path="model_experiments"):
return tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(save_path, model_name), # create filepath to save model
verbose=0, # only output a limited amount of text
save_best_only=True) # save only the best model to file
###Output
_____no_output_____
###Markdown
Dense model - window = 7 horizon = 1
###Code
import tensorflow as tf
from tensorflow.keras import layers
# Set random seed for reproducible results
tf.random.set_seed(42)
# Construct the model
dense_model = tf.keras.Sequential(
[
layers.Dense(128, activation="relu"),
layers.Dense(HORIZON, activation="linear") # linear activation is the same as having no activation
], name="dense_model_1") # name of the model to save
# Compile the model
dense_model.compile(loss="mae",
optimizer=tf.keras.optimizers.Adam(),
metrics=["mae"])
# Fit the model
dense_model.fit(x=train_windows, # train windows of 7 timesteps of Ethereum prices
y=train_labels, # horizon value of 1 (using the previous 7 timesteps to predict next day)
epochs=100,
verbose=1,
batch_size=128,
validation_data=(test_windows, test_labels),
callbacks=[create_model_checkpoint(model_name=dense_model.name)]) # create ModelCheckpoint callback
# to save best model
# Evaluate model on the test data
dense_model.evaluate(test_windows, test_labels)
# Load in saved best performing model and evaluate on the test data
dense_model = tf.keras.models.load_model("model_experiments/dense_model_1")
dense_model.evaluate(test_windows, test_labels)
# Function for forecasting on the test dataset
def make_preds(model, input_data):
"""
Uses model to make predictions on input_data.
"""
forecast = model.predict(input_data)
# return 1D array of predictions
return tf.squeeze(forecast)
# Make predictions using dense_model on the test dataset and view the results
dense_model_preds = make_preds(dense_model, test_windows)
len(dense_model_preds), dense_model_preds[:10]
# Function to evaluate prediction
def evaluate_preds(y_true, y_pred):
# Make sure float32 (for metric calculations)
y_true = tf.cast(y_true, dtype=tf.float32)
y_pred = tf.cast(y_pred, dtype=tf.float32)
# Calculate various metrics
mae = tf.keras.metrics.mean_absolute_error(y_true, y_pred)
mse = tf.keras.metrics.mean_squared_error(y_true, y_pred)
rmse = tf.sqrt(mse)
mape = tf.keras.metrics.mean_absolute_percentage_error(y_true, y_pred)
return {"mae": mae.numpy(),
"mse": mse.numpy(),
"rmse": rmse.numpy(),
"mape": mape.numpy()}
# Evaluate prediction
dense_model_results = evaluate_preds(y_true=tf.squeeze(test_labels), # reduce to right shape
y_pred=dense_model_preds)
dense_model_results
###Output
_____no_output_____ |
BiLSTM Models/Models/Duplicate_Question_2Bi-Lstm_Layer.ipynb | ###Markdown
Checking Output class label difference
###Code
data['is_duplicate'].value_counts()
import matplotlib.pyplot as plt
import pandas as pd
data['is_duplicate'].value_counts().plot(kind='bar', color='green')
'''plt.minorticks_on()
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.show()'''
data.shape
print(data.dtypes)
print(data['question1'].dtypes)
print(data['question2'].dtypes)
type(data['question1'])
###Output
id int64
qid1 int64
qid2 int64
question1 object
question2 object
is_duplicate int64
dtype: object
object
object
###Markdown
Setting target or label for each input
###Code
label_oneDimension=data['is_duplicate']
label_oneDimension.head(2)
import numpy as np
from keras.utils.np_utils import to_categorical
label_twoDimension = to_categorical(data['is_duplicate'], num_classes=2)
label_twoDimension[0:1]
question_one=data['question1'].astype(str)
print(question_one.head())
question_two=data['question2'].astype(str)
print(question_two.head())
###Output
0 What is the step by step guide to invest in share market?
1 What would happen if the Indian government stole the Kohinoor (Koh-i-Noor) d...
2 How can Internet speed be increased by hacking through DNS?
3 Find the remainder when [math]23^{24}[/math] is divided by 24,23?
4 Which fish would survive in salt water?
Name: question2, dtype: object
###Markdown
Reading test data and preprocessing
###Code
#Data reading
'''
data_test = pd.read_csv('drive/My Drive/Summer Internship 2020 July/My Test File/Sunil/test.csv')
data_test_sample=data_test.dropna()
#data_test_sample=data_test_sample.head(100)
data_test_sample.head()
'''
'''
question_one_test=data_test_sample['question1'].astype(str)
print(question_one_test.head())
'''
'''
question_two_test=data_test_sample['question2'].astype(str)
print(question_two_test.head())
'''
###Output
_____no_output_____
###Markdown
Fitting text on a single tokenized object
###Code
from keras.preprocessing.text import Tokenizer
tok_all = Tokenizer(filters='!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~', lower=True, char_level = False)
tok_all.fit_on_texts(question_one+question_two)
#tok_all.fit_on_texts(question_one+question_two+question_one_test+question_two_test)
vocabulary_all=len(tok_all.word_counts)
print(vocabulary_all)
###Output
89983
###Markdown
Train data Sequencing and Encoding
###Code
#Encoding question 1
encoded_q1=tok_all.texts_to_sequences(question_one)
print(question_one[0])
encoded_q1[0]
#Encoding question 2
encoded_q2=tok_all.texts_to_sequences(question_two)
print(question_two[0])
encoded_q2[0]
###Output
What is the step by step guide to invest in share market?
###Markdown
Pre-Padding on Train data
###Code
#####Padding encoded sequence of words
from keras.preprocessing import sequence
max_length=100
padded_docs_q1 = sequence.pad_sequences(encoded_q1, maxlen=max_length, padding='pre')
#####Padding encoded sequence of words
from keras.preprocessing import sequence
max_length=100
padded_docs_q2 = sequence.pad_sequences(encoded_q2, maxlen=max_length, padding='pre')
###Output
_____no_output_____
###Markdown
Encoding on Test data
###Code
'''
#Encoding question 1
encoded_q1_test=tok_all.texts_to_sequences(question_one_test)
print(question_one_test[0])
encoded_q1_test[0]
'''
'''#Encoding question 1
encoded_q2_test=tok_all.texts_to_sequences(question_two_test)
print(question_two_test[0])
encoded_q2_test[0]'''
###Output
_____no_output_____
###Markdown
Pre-Padding on test data
###Code
'''#####Padding encoded sequence of words
padded_docs_q1_test = sequence.pad_sequences(encoded_q1_test, maxlen=max_length, padding='pre')
padded_docs_q2_test = sequence.pad_sequences(encoded_q2_test, maxlen=max_length, padding='pre')'''
###Output
_____no_output_____
###Markdown
Reading Embedding Vector from Glove
###Code
import os
import numpy as np
embeddings_index = {}
f = open('drive/My Drive/ML Internship IIIT Dharwad/Copy of glove.6B.300d.txt')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Loaded %s word vectors.' % len(embeddings_index))
#create embedding matrix
embedding_matrix = np.zeros((vocabulary_all+1, 300))
for word, i in tok_all.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
###Output
_____no_output_____
###Markdown
Defining Input Shape for Model
###Code
Question1_shape= Input(shape=[max_length])
Question1_shape.shape
Question2_shape= Input(shape=[max_length])
Question2_shape.shape
###Output
_____no_output_____
###Markdown
Bi-lstm Network
###Code
Bi_lstm2_network = Sequential()
# Adding Embedding layer
Bi_lstm2_network.add(Embedding(vocabulary_all+1,300,weights=[embedding_matrix], input_length=max_length, trainable=False))
# Adding 2 Bi-Lstm layers
Bi_lstm2_network.add(Bidirectional(LSTM(32, return_sequences=True)))
Bi_lstm2_network.add(Dropout(0.2))
Bi_lstm2_network.add(Bidirectional(LSTM(64, return_sequences=False)))
Bi_lstm2_network.add(Dropout(0.2))
# Adding Dense layer
Bi_lstm2_network.add(Dense(128,activation="linear"))
Bi_lstm2_network.add(Dropout(0.3))
###Output
_____no_output_____
###Markdown
Printing Model summary
###Code
Bi_lstm2_network.summary()
from keras.utils.vis_utils import plot_model
plot_model(Bi_lstm2_network, to_file='Bi_lstm2_network.png', show_shapes=True, show_layer_names=True)
###Output
_____no_output_____
###Markdown
Create the Siamese network from the Bi-LSTM model and store the output feature vectors
###Code
Question1_Bi_lstm_feature=Bi_lstm2_network(Question1_shape)
Question2_Bi_lstm_feature=Bi_lstm2_network(Question2_shape)
###Output
_____no_output_____
###Markdown
Adding and multiplying features obtained from the Siamese Bi-LSTM network
###Code
from keras import backend as K
from keras.optimizers import Adam
lamda_function=Lambda(lambda tensor:K.abs(tensor[0]-tensor[1]),name="Absolute_distance")
abs_distance_vector=lamda_function([Question1_Bi_lstm_feature,Question2_Bi_lstm_feature])
lamda_function2=Lambda(lambda tensor:K.abs(tensor[0]*tensor[1]),name="Hamadard_multiplication") #abs() returns absolute value
hamadard_vector=lamda_function2([Question1_Bi_lstm_feature,Question2_Bi_lstm_feature])
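# Both feature tensors have shape (batch_size, 128); the two Lambda layers above compute the
# element-wise absolute difference and the element-wise (Hadamard) product of the two question
# representations produced by the shared Bi-LSTM network.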
###Output
_____no_output_____
###Markdown
Adding abs_distance_vector and hamadard_vector
###Code
from keras.layers import Add
added_vecotr = Add()([abs_distance_vector, hamadard_vector])
###Output
_____no_output_____
###Markdown
Final Model prediction
###Code
predict=Dense(2,activation="sigmoid")(added_vecotr)
###Output
_____no_output_____
###Markdown
Creating sequential model using Model() class and compilation
###Code
from sklearn.metrics import roc_auc_score, roc_curve, accuracy_score
Siamese2_Network=Model(inputs=[Question1_shape,Question2_shape],outputs=predict)
Siamese2_Network.compile(loss = "binary_crossentropy", optimizer=Adam(lr=0.00003), metrics=["accuracy"])
Siamese2_Network.summary()
###Output
Model: "functional_1"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 100)] 0
__________________________________________________________________________________________________
input_2 (InputLayer) [(None, 100)] 0
__________________________________________________________________________________________________
sequential (Sequential) (None, 128) 27163008 input_1[0][0]
input_2[0][0]
__________________________________________________________________________________________________
Absolute_distance (Lambda) (None, 128) 0 sequential[0][0]
sequential[1][0]
__________________________________________________________________________________________________
Hamadard_multiplication (Lambda (None, 128) 0 sequential[0][0]
sequential[1][0]
__________________________________________________________________________________________________
add (Add) (None, 128) 0 Absolute_distance[0][0]
Hamadard_multiplication[0][0]
__________________________________________________________________________________________________
dense_1 (Dense) (None, 2) 258 add[0][0]
==================================================================================================
Total params: 27,163,266
Trainable params: 168,066
Non-trainable params: 26,995,200
__________________________________________________________________________________________________
###Markdown
Plot model
###Code
from keras.utils import plot_model
plot_model(Siamese2_Network, to_file='Siamese2_Network.png',show_shapes=True, show_layer_names=True)
###Output
_____no_output_____
###Markdown
Setting hyperparameter for training
###Code
from keras.callbacks import EarlyStopping, ReduceLROnPlateau,ModelCheckpoint
earlystopper = EarlyStopping(patience=8, verbose=1)
#checkpointer = ModelCheckpoint(filepath = 'cnn_model_one_.{epoch:02d}-{val_loss:.6f}.hdf5',
# verbose=1,
# save_best_only=True, save_weights_only = True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.9,
patience=2, min_lr=0.00001, verbose=1)
'''from collections import Counter
from imblearn.over_sampling import SMOTE
x = data['question1']
z = data['question2']
y = label_oneDimension
l = label_twoDimension
# Increase the no of duplicate question pair samples from 149263 to 255027
sm = SMOTE(random_state=42,ratio={1:255027})
padded_docs_q1_SM, padded_docs_q2_SM = sm.fit_sample(x, z)
print('SMOTE dataset shape {}'.format(Counter(padded_docs_q2_SM)))'''
###Output
_____no_output_____
###Markdown
Data split into train and validation set
###Code
# Splitting data into train and test
from sklearn.model_selection import train_test_split
q1_train, q1_val,q2_train, q2_val, label_train, label_val, label_oneD_train, label_oneD_val = train_test_split(padded_docs_q1,padded_docs_q2, label_twoDimension, label_oneDimension, test_size=0.30,
random_state=42)
###Output
_____no_output_____
###Markdown
Model fitting or training
###Code
history = Siamese2_Network.fit([q1_train,q2_train],label_train,
batch_size=32,epochs=100,validation_data=([q1_val,q2_val],label_val),callbacks=[earlystopper, reduce_lr])
###Output
Epoch 1/100
2001/8844 [=====>........................] - ETA: 38:11 - loss: 0.6323 - accuracy: 0.6353
###Markdown
Model Prediction
###Code
Siamese2_Network_predictions = Siamese2_Network.predict([q1_val,q2_val])
#Siamese2_Network_predictions = Siamese2_Network.predict([padded_docs_q1_test,padded_docs_q2_test])
#Siamese2_Network_predictions_testData = Siamese2_Network.predict([padded_docs_q1_test,padded_docs_q1_test])
###Output
_____no_output_____
###Markdown
Log loss
###Code
from sklearn.metrics import log_loss
log_loss_val= log_loss(label_val,Siamese2_Network_predictions)
log_loss_val
###Output
_____no_output_____
###Markdown
Classification report
###Code
predictions = np.zeros_like(Siamese2_Network_predictions)
predictions[np.arange(len(Siamese2_Network_predictions)), Siamese2_Network_predictions.argmax(1)] = 1
predictionInteger=(np.argmax(predictions, axis=1))
#print('np.argmax(a, axis=1): {0}'.format(np.argmax(predictions, axis=1)))
predictionInteger
from sklearn.metrics import classification_report
print(classification_report(label_val,predictions))
from sklearn.metrics import precision_recall_fscore_support
print ("Precision, Recall, F1_score : macro ",precision_recall_fscore_support(label_oneD_val,predictionInteger, average='macro'))
print ("Precision, Recall, F1_score : micro ",precision_recall_fscore_support(label_oneD_val,predictionInteger, average='micro'))
print ("Precision, Recall, F1_score : weighted ",precision_recall_fscore_support(label_oneD_val,predictionInteger, average='weighted'))
###Output
_____no_output_____
###Markdown
Final train and val loss
###Code
min_val_loss = min(history.history["val_loss"])
min_train_loss = min(history.history["loss"])
max_val_acc = max(history.history["val_accuracy"])
max_train_acc = max(history.history["accuracy"])
print("min_train_loss=%g, min_val_loss=%g, max_train_acc=%g, max_val_acc=%g" % (min_train_loss,min_val_loss,max_train_acc,max_val_acc))
###Output
_____no_output_____
###Markdown
Plot epoch Vs loss
###Code
from matplotlib import pyplot as plt
plt.plot(history.history["loss"],color = 'red', label = 'train_loss')
plt.plot(history.history["val_loss"],color = 'blue', label = 'val_loss')
plt.title('Loss Visualisation')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.savefig('2Layer_CNN_lossPlot_siamese.pdf',dpi=1000)
from google.colab import files
files.download('2Layer_CNN_lossPlot_siamese.pdf')
###Output
_____no_output_____
###Markdown
Plot Epoch Vs Accuracy
###Code
plt.plot(history.history["accuracy"],color = 'red', label = 'train_accuracy')
plt.plot(history.history["val_accuracy"],color = 'blue', label = 'val_accuracy')
plt.title('Accuracy Visualisation')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.savefig('2Layer_CNN_accuracyPlot_siamese.pdf',dpi=1000)
files.download('2Layer_CNN_accuracyPlot_siamese.pdf')
###Output
_____no_output_____
###Markdown
Area Under Curve- ROC
###Code
#pred_test = Siamese2_Network.predict([padded_docs_q1_test,padded_docs_q2_test])
pred_train = Siamese2_Network.predict([q1_train,q2_train])
pred_val = Siamese2_Network.predict([q1_val,q2_val])
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
def plot_AUC_ROC(y_true, y_pred):
n_classes = 2 #change this value according to class value
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_true[:, i], y_pred[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_true.ravel(), y_pred.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
############################################################################################
lw = 2
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange'])
#classes_list1 = ["DE","NE","DK"]
classes_list1 = ["Non-duplicate","Duplicate"]
for i, color,c in zip(range(n_classes), colors,classes_list1):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='{0} (AUC = {1:0.2f})'
''.format(c, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic curve')
plt.legend(loc="lower right")
#plt.show()
plt.savefig('2Layer_CNN_RocPlot_siamese.pdf',dpi=1000)
files.download('2Layer_CNN_RocPlot_siamese.pdf')
# Plot of a ROC curve for a specific class
'''
plt.figure()
lw = 2
plt.plot(fpr[0], tpr[0], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[0])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
'''
plot_AUC_ROC(label_val,pred_val)
from sklearn.metrics import roc_auc_score, roc_curve, accuracy_score
auc_val = roc_auc_score(label_val,pred_val)
accuracy_val = accuracy_score(label_val,pred_val>0.5)
auc_train = roc_auc_score(label_train,pred_train)
accuracy_train = accuracy_score(label_train,pred_train>0.5)
print("auc_train=%g, auc_val=%g, accuracy_train=%g, accuracy_val=%g" % (auc_train, auc_val, accuracy_train, accuracy_val))
'''
fpr_train, tpr_train, thresholds_train = roc_curve(label_train,pred_train)
fpr_test, tpr_test, thresholds_test = roc_curve(label_val,pred_val)
#fpr_train, tpr_train, thresholds_train = roc_curve(label_oneD_train,pred_train_final)
#fpr_test, tpr_test, thresholds_test = roc_curve(label_oneD_val,pred_val_final)
plt.plot(fpr_train,tpr_train, color="blue", label="train roc, auc=%g" % (auc_train,))
plt.plot(fpr_test,tpr_test, color="green", label="val roc, auc=%g" % (auc_val,))
plt.plot([0,1], [0,1], color='orange', linestyle='--')
plt.xticks(np.arange(0.0, 1.1, step=0.1))
plt.xlabel("Flase Positive Rate", fontsize=15)
plt.yticks(np.arange(0.0, 1.1, step=0.1))
plt.ylabel("True Positive Rate", fontsize=15)
plt.title('ROC Curve Analysis', fontweight='bold', fontsize=15)
plt.legend(prop={'size':13}, loc='lower right')
plt.savefig('AUC_CURVE_cnn4.pdf',dpi=1000)
#files.download('AUC_CURVE_cnn4.pdf')
'''
###Output
_____no_output_____ |
Untitled25.ipynb | ###Markdown
Dataframe 3
###Code
df=pd.read_csv("employees.csv")
df.info()
df['Start Date']=pd.to_datetime(df['Start Date'])
df
df['Last Login Time']=pd.to_datetime(df['Last Login Time'])
df
df.info()
df['Senior Management']=df['Senior Management'].astype(bool)
df.info()
df['Gender']=df['Gender'].astype('category')
df.info()
df['Team']=df['Team'].astype('category')
df.info()
df
df=pd.read_csv("employees.csv",parse_dates=[['Start Date','Last Login Time']])
df
df[df['Gender']=='Male']
df[df['Team']=='Finance']
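# Boolean masks can be combined; for example, male employees in the Finance team:
df[(df['Gender'] == 'Male') & (df['Team'] == 'Finance')]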
###Output
_____no_output_____
###Markdown
###Code
from google.colab import drive
drive.mount('/gdrive')
import zipfile
dataset_path = "/gdrive/My Drive/BRATS2015_Training.zip"
zfile = zipfile.ZipFile(dataset_path)
zfile.extractall()
import zipfile
dataset_path = "/gdrive/My Drive/BRATS2015_Testing.zip"
zfile = zipfile.ZipFile(dataset_path)
zfile.extractall()
import tensorflow as tf
print(tf.__version__)
!pip install tensorflow==1.5.0
!pip install simpleitk
import SimpleITK as sitk # For loading the dataset
import numpy as np # For data manipulation
!pip install niftynet==0.2.0
!python /content/brats17/test.py /content/brats17/config15/test_all_class.txt
!python /content/brats17/train.py /content/brats17/config15/train_wt_ax.txt
###Output
_____no_output_____ |
notebooks/03_categorical_pipeline_ex_02.ipynb | ###Markdown
📝 Exercise 02
The goal of this exercise is to evaluate the impact of feature preprocessing on a pipeline that uses a decision-tree-based classifier instead of logistic regression.
- The first question is to empirically evaluate whether scaling numerical features is helpful or not;
- The second question is to evaluate whether it is empirically better (both from a computational and a statistical perspective) to use integer coded or one-hot encoded categories.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "fnlwgt", "education-num"])
###Output
_____no_output_____
###Markdown
As in the previous notebooks, we use the utility `make_column_selector` to only select columns with a specific data type. Besides, we list in advance all categories for the categorical columns.
###Code
from sklearn.compose import make_column_selector as selector
numerical_columns_selector = selector(dtype_exclude=object)
categorical_columns_selector = selector(dtype_include=object)
numerical_columns = numerical_columns_selector(data)
categorical_columns = categorical_columns_selector(data)
###Output
_____no_output_____
###Markdown
Reference pipeline (no numerical scaling and integer-coded categories). First let's time the pipeline we used in the main notebook to serve as a reference:
###Code
%%time
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
categorical_preprocessor = OrdinalEncoder(handle_unknown="use_encoded_value",
unknown_value=-1)
preprocessor = ColumnTransformer([
('categorical', categorical_preprocessor, categorical_columns)],
remainder="passthrough")
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
cv_results = cross_validate(model, data, target)
scores = cv_results["test_score"]
print(f"The different scores obtained are: \n{scores}")
print(f"The accuracy is: {scores.mean():.3f} +- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
Scaling numerical features. Let's write a similar pipeline that also scales the numerical features using `StandardScaler` (or similar):
###Code
# Write your code here.
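# One possible sketch (not the reference solution): keep the OrdinalEncoder defined
# above for the categorical columns and add a StandardScaler for the numerical ones.
from sklearn.preprocessing import StandardScaler
preprocessor = ColumnTransformer([
    ('categorical', categorical_preprocessor, categorical_columns),
    ('numerical', StandardScaler(), numerical_columns)])
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
cv_results = cross_validate(model, data, target)
scores = cv_results["test_score"]
print(f"The accuracy is: {scores.mean():.3f} +- {scores.std():.3f}")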
###Output
_____no_output_____
###Markdown
One-hot encoding of categorical variables. For linear models, we have observed that integer coding of categorical variables can be very detrimental. However, for `HistGradientBoostingClassifier` models, it does not seem to be the case, as the cross-validation of the reference pipeline with `OrdinalEncoder` is good. Let's see if we can get an even better accuracy with `OneHotEncoder`. Hint: `HistGradientBoostingClassifier` does not yet support sparse input data. You might want to use `OneHotEncoder(handle_unknown="ignore", sparse=False)` to force the use of a dense representation as a workaround.
###Code
# Write your code here.
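# One possible sketch (not the reference solution): use a dense one-hot encoding of the
# categorical columns, as suggested in the hint above, and pass the numerical columns through.
from sklearn.preprocessing import OneHotEncoder
categorical_preprocessor = OneHotEncoder(handle_unknown="ignore", sparse=False)
preprocessor = ColumnTransformer([
    ('categorical', categorical_preprocessor, categorical_columns)],
    remainder="passthrough")
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
cv_results = cross_validate(model, data, target)
scores = cv_results["test_score"]
print(f"The accuracy is: {scores.mean():.3f} +- {scores.std():.3f}")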
###Output
_____no_output_____
###Markdown
📝 Exercise M1.05
The goal of this exercise is to evaluate the impact of feature preprocessing on a pipeline that uses a decision-tree-based classifier instead of a logistic regression.
- The first question is to empirically evaluate whether scaling numerical features is helpful or not;
- The second question is to evaluate whether it is empirically better (both from a computational and a statistical perspective) to use integer coded or one-hot encoded categories.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
As in the previous notebooks, we use the utility `make_column_selector` to select only columns with a specific data type. Besides, we list in advance all categories for the categorical columns.
###Code
from sklearn.compose import make_column_selector as selector
numerical_columns_selector = selector(dtype_exclude=object)
categorical_columns_selector = selector(dtype_include=object)
numerical_columns = numerical_columns_selector(data)
categorical_columns = categorical_columns_selector(data)
print(f"Numerical features: {numerical_columns}")
print(f"Categorical features: {categorical_columns}")
###Output
Numerical features: ['age', 'capital-gain', 'capital-loss', 'hours-per-week']
Categorical features: ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country']
###Markdown
Reference pipeline (no numerical scaling and integer-coded categories). First let's time the pipeline we used in the main notebook to serve as a reference:
###Code
import time
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.ensemble import HistGradientBoostingClassifier
categorical_preprocessor = OrdinalEncoder(handle_unknown="use_encoded_value",
unknown_value=-1)
preprocessor = ColumnTransformer([
('categorical', categorical_preprocessor, categorical_columns)],
remainder="passthrough")
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
start = time.time()
cv_results = cross_validate(model, data, target)
elapsed_time = time.time() - start
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f} "
f"with a fitting time of {elapsed_time:.3f}")
###Output
The mean cross-validation accuracy is: 0.873 +/- 0.003 with a fitting time of 7.939
###Markdown
Scaling numerical features. Let's write a similar pipeline that also scales the numerical features using `StandardScaler` (or similar):
###Code
# Write your code here.
import time
from sklearn.preprocessing import StandardScaler, OrdinalEncoder
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import cross_validate
categorical_transormer = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1)
numerical_transformer = StandardScaler()
preprocess = ColumnTransformer([
('cat_transform', categorical_transormer, categorical_columns),
('num_transform', numerical_transformer, numerical_columns)
])
model = make_pipeline(preprocess, HistGradientBoostingClassifier())
start_time = time.time()
cv_results = cross_validate(model, data, target)
scores = cv_results["test_score"]
print(f"The accuracy is: {scores.mean():.3f} +/- {scores.std():.3f}")
###Output
The accuracy is: 0.873 +/- 0.003
###Markdown
One-hot encoding of categorical variables. We observed that integer coding of categorical variables can be very detrimental for linear models. However, it does not seem to be the case for `HistGradientBoostingClassifier` models, as the cross-validation score of the reference pipeline with `OrdinalEncoder` is reasonably good. Let's see if we can get an even better accuracy with `OneHotEncoder`. Hint: `HistGradientBoostingClassifier` does not yet support sparse input data. You might want to use `OneHotEncoder(handle_unknown="ignore", sparse=False)` to force the use of a dense representation as a workaround.
###Code
# Write your code here.
from sklearn.preprocessing import OneHotEncoder
categorical_transform = OneHotEncoder(handle_unknown="ignore", sparse=False)
preprocess = ColumnTransformer([
    ('cat_transform', categorical_transform, categorical_columns)  # use the OneHotEncoder defined above
], remainder="passthrough")
model = make_pipeline(preprocess, HistGradientBoostingClassifier())
scores = cross_validate(model, data, target)["test_score"]
print(f"The accuracy is: {scores.mean():.3f} +/- {scores.std():.3f}")
###Output
The accuracy is: 0.833 +/- 0.003
###Markdown
📝 Exercise M1.05
The goal of this exercise is to evaluate the impact of feature preprocessing on a pipeline that uses a decision-tree-based classifier instead of a logistic regression.
- The first question is to empirically evaluate whether scaling numerical features is helpful or not;
- The second question is to evaluate whether it is empirically better (both from a computational and a statistical perspective) to use integer coded or one-hot encoded categories.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
As in the previous notebooks, we use the utility `make_column_selector` to select only columns with a specific data type. Besides, we list in advance all categories for the categorical columns.
###Code
from sklearn.compose import make_column_selector as selector
numerical_columns_selector = selector(dtype_exclude=object)
categorical_columns_selector = selector(dtype_include=object)
numerical_columns = numerical_columns_selector(data)
categorical_columns = categorical_columns_selector(data)
###Output
_____no_output_____
###Markdown
Reference pipeline (no numerical scaling and integer-coded categories). First let's time the pipeline we used in the main notebook to serve as a reference:
###Code
import time
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.ensemble import HistGradientBoostingClassifier
categorical_preprocessor = OrdinalEncoder(handle_unknown="use_encoded_value",
unknown_value=-1)
preprocessor = ColumnTransformer([
('categorical', categorical_preprocessor, categorical_columns)],
remainder="passthrough")
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
start = time.time()
cv_results = cross_validate(model, data, target)
elapsed_time = time.time() - start
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f} "
f"with a fitting time of {elapsed_time:.3f}")
###Output
_____no_output_____
###Markdown
Scaling numerical features. Let's write a similar pipeline that also scales the numerical features using `StandardScaler` (or similar):
###Code
# Write your code here.
###Output
_____no_output_____
###Markdown
One-hot encoding of categorical variables. We observed that integer coding of categorical variables can be very detrimental for linear models. However, it does not seem to be the case for `HistGradientBoostingClassifier` models, as the cross-validation score of the reference pipeline with `OrdinalEncoder` is reasonably good. Let's see if we can get an even better accuracy with `OneHotEncoder`. Hint: `HistGradientBoostingClassifier` does not yet support sparse input data. You might want to use `OneHotEncoder(handle_unknown="ignore", sparse=False)` to force the use of a dense representation as a workaround.
###Code
# Write your code here.
###Output
_____no_output_____
###Markdown
📝 Exercise 02
The goal of this exercise is to evaluate the impact of feature preprocessing on a pipeline that uses a decision-tree-based classifier instead of logistic regression.
- The first question is to empirically evaluate whether scaling numerical features is helpful or not;
- The second question is to evaluate whether it is empirically better (both from a computational and a statistical perspective) to use integer coded or one-hot encoded categories.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "fnlwgt", "education-num"])
###Output
_____no_output_____
###Markdown
As in the previous notebooks, we use the utility `make_column_selector` to only select columns with a specific data type. Besides, we list in advance all categories for the categorical columns.
###Code
from sklearn.compose import make_column_selector as selector
numerical_columns_selector = selector(dtype_exclude=object)
categorical_columns_selector = selector(dtype_include=object)
numerical_columns = numerical_columns_selector(data)
categorical_columns = categorical_columns_selector(data)
###Output
_____no_output_____
###Markdown
Reference pipeline (no numerical scaling and integer-coded categories). First let's time the pipeline we used in the main notebook to serve as a reference:
###Code
%%time
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
categorical_preprocessor = OrdinalEncoder(handle_unknown="use_encoded_value",
unknown_value=-1)
preprocessor = ColumnTransformer([
('categorical', categorical_preprocessor, categorical_columns)],
remainder="passthrough")
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
cv_results = cross_validate(model, data, target)
scores = cv_results["test_score"]
print(f"The different scores obtained are: \n{scores}")
print(f"The accuracy is: {scores.mean():.3f} +- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
Scaling numerical features. Let's write a similar pipeline that also scales the numerical features using `StandardScaler` (or similar):
###Code
# Write your code here.
###Output
_____no_output_____
###Markdown
One-hot encoding of categorical variables. For linear models, we have observed that integer coding of categorical variables can be very detrimental. However, for `HistGradientBoostingClassifier` models, it does not seem to be the case, as the cross-validation of the reference pipeline with `OrdinalEncoder` is good. Let's see if we can get an even better accuracy with `OneHotEncoder`. Hint: `HistGradientBoostingClassifier` does not yet support sparse input data. You might want to use `OneHotEncoder(handle_unknown="ignore", sparse=False)` to force the use of a dense representation as a workaround.
###Code
# Write your code here.
###Output
_____no_output_____
###Markdown
📝 Exercise M1.05
The goal of this exercise is to evaluate the impact of feature preprocessing on a pipeline that uses a decision-tree-based classifier instead of logistic regression.
- The first question is to empirically evaluate whether scaling numerical features is helpful or not;
- The second question is to evaluate whether it is empirically better (both from a computational and a statistical perspective) to use integer coded or one-hot encoded categories.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
As in the previous notebooks, we use the utility `make_column_selector` to select only columns with a specific data type. Besides, we list in advance all categories for the categorical columns.
###Code
from sklearn.compose import make_column_selector as selector
numerical_columns_selector = selector(dtype_exclude=object)
categorical_columns_selector = selector(dtype_include=object)
numerical_columns = numerical_columns_selector(data)
categorical_columns = categorical_columns_selector(data)
###Output
_____no_output_____
###Markdown
Reference pipeline (no numerical scaling and integer-coded categories) First let's time the pipeline we used in the main notebook to serve as a reference:
###Code
%%time
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
categorical_preprocessor = OrdinalEncoder(handle_unknown="use_encoded_value",
unknown_value=-1)
preprocessor = ColumnTransformer([
('categorical', categorical_preprocessor, categorical_columns)],
remainder="passthrough")
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
cv_results = cross_validate(model, data, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
The mean cross-validation accuracy is: 0.874 +/- 0.003
Wall time: 8.86 s
###Markdown
Scaling numerical features Let's write a similar pipeline that also scales the numerical features using `StandardScaler` (or similar):
###Code
%%time
from sklearn.preprocessing import StandardScaler
numerical_preprocessor = StandardScaler()
preprocessor = ColumnTransformer([
('categorical', categorical_preprocessor, categorical_columns),
('numerical', numerical_preprocessor, numerical_columns)
],)
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
cv_results = cross_validate(model, data, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
The mean cross-validation accuracy is: 0.874 +/- 0.003
Wall time: 8.72 s
###Markdown
One-hot encoding of categorical variables For linear models, we have observed that integer coding of categorical variables can be very detrimental. However, for `HistGradientBoostingClassifier` models, it does not seem to be the case, as the cross-validation of the reference pipeline with `OrdinalEncoder` is good. Let's see if we can get an even better accuracy with `OneHotEncoder`. Hint: `HistGradientBoostingClassifier` does not yet support sparse input data. You might want to use `OneHotEncoder(handle_unknown="ignore", sparse=False)` to force the use of a dense representation as a workaround.
###Code
%%time
from sklearn.preprocessing import OneHotEncoder
categorical_preprocessor = OneHotEncoder(handle_unknown="ignore",
sparse=False)
preprocessor = ColumnTransformer([
('categorical', categorical_preprocessor, categorical_columns),
('numerical', numerical_preprocessor, numerical_columns)
],)
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
cv_results = cross_validate(model, data, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
The mean cross-validation accuracy is: 0.873 +/- 0.002
Wall time: 19.7 s
###Markdown
📝 Exercise M1.05 The goal of this exercise is to evaluate the impact of feature preprocessing on a pipeline that uses a decision-tree-based classifier instead of logistic regression. - The first question is to empirically evaluate whether scaling numerical features is helpful or not; - The second question is to evaluate whether it is empirically better (both from a computational and a statistical perspective) to use integer coded or one-hot encoded categories.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
As in the previous notebooks, we use the utility `make_column_selector` to select only columns with a specific data type. Besides, we list in advance all categories for the categorical columns.
###Code
from sklearn.compose import make_column_selector as selector
numerical_columns_selector = selector(dtype_exclude=object)
categorical_columns_selector = selector(dtype_include=object)
numerical_columns = numerical_columns_selector(data)
categorical_columns = categorical_columns_selector(data)
###Output
_____no_output_____
###Markdown
Reference pipeline (no numerical scaling and integer-coded categories) First let's time the pipeline we used in the main notebook to serve as a reference:
###Code
import time
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.ensemble import HistGradientBoostingClassifier
categorical_preprocessor = OrdinalEncoder(handle_unknown="use_encoded_value",
unknown_value=-1)
preprocessor = ColumnTransformer([
('categorical', categorical_preprocessor, categorical_columns)],
remainder="passthrough")
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
start = time.time()
cv_results = cross_validate(model, data, target)
elapsed_time = time.time() - start
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f} "
f"with a fitting time of {elapsed_time:.3f}")
###Output
The mean cross-validation accuracy is: 0.874 +/- 0.003 with a fitting time of 3.762
###Markdown
Scaling numerical features Let's write a similar pipeline that also scales the numerical features using `StandardScaler` (or similar):
###Code
# Write your code here.
from sklearn.preprocessing import StandardScaler
numerical_preprocessor = StandardScaler()
preprocessorScale = ColumnTransformer([
('categorical', categorical_preprocessor, categorical_columns),
('standard_scaler', numerical_preprocessor, numerical_columns)])
modelScale = make_pipeline(preprocessorScale, HistGradientBoostingClassifier())
start = time.time()
cv_scale_results = cross_validate(modelScale, data, target)
elapsed_time = time.time() - start
scores_scale = cv_scale_results["test_score"]
print("The mean cross_validation accuracy for scaled integers is: "
f"{scores_scale.mean():.3f} +/- {scores_scale.std():.3f} "
f"with a fitting time of {elapsed_time:.3f}")
###Output
The mean cross_validation accuracy for scaled integers is: 0.874 +/- 0.003 with a fitting time of 3.835
###Markdown
One-hot encoding of categorical variables We observed that integer coding of categorical variables can be very detrimental for linear models. However, it does not seem to be the case for `HistGradientBoostingClassifier` models, as the cross-validation score of the reference pipeline with `OrdinalEncoder` is reasonably good. Let's see if we can get an even better accuracy with `OneHotEncoder`. Hint: `HistGradientBoostingClassifier` does not yet support sparse input data. You might want to use `OneHotEncoder(handle_unknown="ignore", sparse=False)` to force the use of a dense representation as a workaround.
###Code
# Write your code here.
from sklearn.preprocessing import StandardScaler, OneHotEncoder
numerical_preprocessor = StandardScaler()
categorical_preprocessor = OneHotEncoder(handle_unknown="ignore", sparse=False)
preprocessorOneHot = ColumnTransformer([
    ('categorical', categorical_preprocessor, categorical_columns)], # uses the new categorical preprocessor
# first assumption was to also scale the numerical data ('standard_scaler', numerical_preprocessor, numerical_columns)])
remainder="passthrough")
modelOneHot = make_pipeline(preprocessorOneHot, HistGradientBoostingClassifier())
start = time.time()
cv_OneHot_results = cross_validate(modelOneHot, data, target)
elapsed_time = time.time() - start
scores_OneHot = cv_OneHot_results["test_score"]
print("The mean cross_validation accuracy for OneHot encoded Categoricals and scaled integers is: "
f"{scores_OneHot.mean():.3f} +/- {scores_OneHot.std():.3f} "
f"with a fitting time of {elapsed_time:.3f}")
###Output
The mean cross_validation accuracy for OneHot encoded Categoricals and scaled integers is: 0.873 +/- 0.003 with a fitting time of 12.344
###Markdown
📝 Exercise M1.05 The goal of this exercise is to evaluate the impact of feature preprocessing on a pipeline that uses a decision-tree-based classifier instead of logistic regression. - The first question is to empirically evaluate whether scaling numerical features is helpful or not; - The second question is to evaluate whether it is empirically better (both from a computational and a statistical perspective) to use integer coded or one-hot encoded categories.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
As in the previous notebooks, we use the utility `make_column_selector` to select only columns with a specific data type. Besides, we list in advance all categories for the categorical columns.
###Code
from sklearn.compose import make_column_selector as selector
numerical_columns_selector = selector(dtype_exclude=object)
categorical_columns_selector = selector(dtype_include=object)
numerical_columns = numerical_columns_selector(data)
categorical_columns = categorical_columns_selector(data)
###Output
_____no_output_____
###Markdown
Reference pipeline (no numerical scaling and integer-coded categories) First let's time the pipeline we used in the main notebook to serve as a reference:
###Code
%%time
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
categorical_preprocessor = OrdinalEncoder(handle_unknown="use_encoded_value",
unknown_value=-1)
preprocessor = ColumnTransformer([
('categorical', categorical_preprocessor, categorical_columns)],
remainder="passthrough")
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
cv_results = cross_validate(model, data, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
Scaling numerical features Let's write a similar pipeline that also scales the numerical features using `StandardScaler` (or similar):
###Code
# Write your code here.
###Output
_____no_output_____
###Markdown
One-hot encoding of categorical variables We observed that integer coding of categorical variables can be very detrimental for linear models. However, it does not seem to be the case for `HistGradientBoostingClassifier` models, as the cross-validation score of the reference pipeline with `OrdinalEncoder` is reasonably good. Let's see if we can get an even better accuracy with `OneHotEncoder`. Hint: `HistGradientBoostingClassifier` does not yet support sparse input data. You might want to use `OneHotEncoder(handle_unknown="ignore", sparse=False)` to force the use of a dense representation as a workaround.
###Code
# Write your code here.
###Output
_____no_output_____
###Markdown
📝 Exercise M1.05 The goal of this exercise is to evaluate the impact of feature preprocessing on a pipeline that uses a decision-tree-based classifier instead of logistic regression. - The first question is to empirically evaluate whether scaling numerical features is helpful or not; - The second question is to evaluate whether it is empirically better (both from a computational and a statistical perspective) to use integer coded or one-hot encoded categories.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
As in the previous notebooks, we use the utility `make_column_selector` to select only columns with a specific data type. Besides, we list in advance all categories for the categorical columns.
###Code
from sklearn.compose import make_column_selector as selector
numerical_columns_selector = selector(dtype_exclude=object)
categorical_columns_selector = selector(dtype_include=object)
numerical_columns = numerical_columns_selector(data)
categorical_columns = categorical_columns_selector(data)
###Output
_____no_output_____
###Markdown
Reference pipeline (no numerical scaling and integer-coded categories) First let's time the pipeline we used in the main notebook to serve as a reference:
###Code
import time
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
categorical_preprocessor = OrdinalEncoder(handle_unknown="use_encoded_value",
unknown_value=-1)
preprocessor = ColumnTransformer([
('categorical', categorical_preprocessor, categorical_columns)],
remainder="passthrough")
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
start = time.time()
cv_results = cross_validate(model, data, target)
elapsed_time = time.time() - start
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f} "
f"with a fitting time of {elapsed_time:.3f}")
###Output
_____no_output_____
###Markdown
Scaling numerical features Let's write a similar pipeline that also scales the numerical features using `StandardScaler` (or similar):
###Code
# Write your code here.
###Output
_____no_output_____
###Markdown
One-hot encoding of categorical variables We observed that integer coding of categorical variables can be very detrimental for linear models. However, it does not seem to be the case for `HistGradientBoostingClassifier` models, as the cross-validation score of the reference pipeline with `OrdinalEncoder` is reasonably good. Let's see if we can get an even better accuracy with `OneHotEncoder`. Hint: `HistGradientBoostingClassifier` does not yet support sparse input data. You might want to use `OneHotEncoder(handle_unknown="ignore", sparse=False)` to force the use of a dense representation as a workaround.
###Code
# Write your code here.
###Output
_____no_output_____
###Markdown
📝 Exercise M1.05 The goal of this exercise is to evaluate the impact of feature preprocessing on a pipeline that uses a decision-tree-based classifier instead of logistic regression. - The first question is to empirically evaluate whether scaling numerical features is helpful or not; - The second question is to evaluate whether it is empirically better (both from a computational and a statistical perspective) to use integer coded or one-hot encoded categories.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
As in the previous notebooks, we use the utility `make_column_selector` to select only columns with a specific data type. Besides, we list in advance all categories for the categorical columns.
###Code
from sklearn.compose import make_column_selector as selector
numerical_columns_selector = selector(dtype_exclude=object)
categorical_columns_selector = selector(dtype_include=object)
numerical_columns = numerical_columns_selector(data)
categorical_columns = categorical_columns_selector(data)
###Output
_____no_output_____
###Markdown
Reference pipeline (no numerical scaling and integer-coded categories) First let's time the pipeline we used in the main notebook to serve as a reference:
###Code
import time
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.ensemble import HistGradientBoostingClassifier
categorical_preprocessor = OrdinalEncoder(handle_unknown="use_encoded_value",
unknown_value=-1)
preprocessor = ColumnTransformer([
('categorical', categorical_preprocessor, categorical_columns)],
remainder="passthrough")
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
start = time.time()
cv_results = cross_validate(model, data, target)
elapsed_time = time.time() - start
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f} "
f"with a fitting time of {elapsed_time:.3f}")
###Output
_____no_output_____
###Markdown
Scaling numerical features Let's write a similar pipeline that also scales the numerical features using `StandardScaler` (or similar):
###Code
# Write your code here.
###Output
_____no_output_____
###Markdown
One-hot encoding of categorical variables We observed that integer coding of categorical variables can be very detrimental for linear models. However, it does not seem to be the case for `HistGradientBoostingClassifier` models, as the cross-validation score of the reference pipeline with `OrdinalEncoder` is reasonably good. Let's see if we can get an even better accuracy with `OneHotEncoder`. Hint: `HistGradientBoostingClassifier` does not yet support sparse input data. You might want to use `OneHotEncoder(handle_unknown="ignore", sparse=False)` to force the use of a dense representation as a workaround.
###Code
# Write your code here.
###Output
_____no_output_____
###Markdown
📝 Exercise M1.05 The goal of this exercise is to evaluate the impact of feature preprocessing on a pipeline that uses a decision-tree-based classifier instead of logistic regression. - The first question is to empirically evaluate whether scaling numerical features is helpful or not; - The second question is to evaluate whether it is empirically better (both from a computational and a statistical perspective) to use integer coded or one-hot encoded categories.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
As in the previous notebooks, we use the utility `make_column_selector` to select only columns with a specific data type. Besides, we list in advance all categories for the categorical columns.
###Code
from sklearn.compose import make_column_selector as selector
numerical_columns_selector = selector(dtype_exclude=object)
categorical_columns_selector = selector(dtype_include=object)
numerical_columns = numerical_columns_selector(data)
categorical_columns = categorical_columns_selector(data)
###Output
_____no_output_____
###Markdown
Reference pipeline (no numerical scaling and integer-coded categories) First let's time the pipeline we used in the main notebook to serve as a reference:
###Code
import time
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.ensemble import HistGradientBoostingClassifier
categorical_preprocessor = OrdinalEncoder(handle_unknown="use_encoded_value",
unknown_value=-1)
preprocessor = ColumnTransformer([
('categorical', categorical_preprocessor, categorical_columns)],
remainder="passthrough")
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
start = time.time()
cv_results = cross_validate(model, data, target)
elapsed_time = time.time() - start
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f} "
f"with a fitting time of {elapsed_time:.3f}")
###Output
_____no_output_____
###Markdown
Scaling numerical features Let's write a similar pipeline that also scales the numerical features using `StandardScaler` (or similar):
###Code
# Write your code here.
###Output
_____no_output_____
###Markdown
One-hot encoding of categorical variables We observed that integer coding of categorical variables can be very detrimental for linear models. However, it does not seem to be the case for `HistGradientBoostingClassifier` models, as the cross-validation score of the reference pipeline with `OrdinalEncoder` is reasonably good. Let's see if we can get an even better accuracy with `OneHotEncoder`. Hint: `HistGradientBoostingClassifier` does not yet support sparse input data. You might want to use `OneHotEncoder(handle_unknown="ignore", sparse=False)` to force the use of a dense representation as a workaround.
###Code
# Write your code here.
###Output
_____no_output_____
###Markdown
📝 Exercise M1.05 The goal of this exercise is to evaluate the impact of feature preprocessing on a pipeline that uses a decision-tree-based classifier instead of logistic regression. - The first question is to empirically evaluate whether scaling numerical features is helpful or not; - The second question is to evaluate whether it is empirically better (both from a computational and a statistical perspective) to use integer coded or one-hot encoded categories.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
As in the previous notebooks, we use the utility `make_column_selector` to select only columns with a specific data type. Besides, we list in advance all categories for the categorical columns.
###Code
from sklearn.compose import make_column_selector as selector
numerical_columns_selector = selector(dtype_exclude=object)
categorical_columns_selector = selector(dtype_include=object)
numerical_columns = numerical_columns_selector(data)
categorical_columns = categorical_columns_selector(data)
###Output
_____no_output_____
###Markdown
Reference pipeline (no numerical scaling and integer-coded categories) First let's time the pipeline we used in the main notebook to serve as a reference:
###Code
%%time
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
categorical_preprocessor = OrdinalEncoder(handle_unknown="use_encoded_value",
unknown_value=-1)
preprocessor = ColumnTransformer([
('categorical', categorical_preprocessor, categorical_columns)],
remainder="passthrough")
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
cv_results = cross_validate(model, data, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
Scaling numerical features Let's write a similar pipeline that also scales the numerical features using `StandardScaler` (or similar):
###Code
# Write your code here.
###Output
_____no_output_____
###Markdown
One-hot encoding of categorical variables For linear models, we have observed that integer coding of categorical variables can be very detrimental. However, for `HistGradientBoostingClassifier` models, it does not seem to be the case, as the cross-validation of the reference pipeline with `OrdinalEncoder` is good. Let's see if we can get an even better accuracy with `OneHotEncoder`. Hint: `HistGradientBoostingClassifier` does not yet support sparse input data. You might want to use `OneHotEncoder(handle_unknown="ignore", sparse=False)` to force the use of a dense representation as a workaround.
###Code
# Write your code here.
###Output
_____no_output_____
###Markdown
📝 Exercise M1.05 The goal of this exercise is to evaluate the impact of feature preprocessing on a pipeline that uses a decision-tree-based classifier instead of logistic regression. - The first question is to empirically evaluate whether scaling numerical features is helpful or not; - The second question is to evaluate whether it is empirically better (both from a computational and a statistical perspective) to use integer coded or one-hot encoded categories.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
As in the previous notebooks, we use the utility `make_column_selector` to select only columns with a specific data type. Besides, we list in advance all categories for the categorical columns.
###Code
from sklearn.compose import make_column_selector as selector
numerical_columns_selector = selector(dtype_exclude=object)
categorical_columns_selector = selector(dtype_include=object)
numerical_columns = numerical_columns_selector(data)
categorical_columns = categorical_columns_selector(data)
###Output
_____no_output_____
###Markdown
Reference pipeline (no numerical scaling and integer-coded categories) First let's time the pipeline we used in the main notebook to serve as a reference:
###Code
%%time
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
categorical_preprocessor = OrdinalEncoder(handle_unknown="use_encoded_value",
unknown_value=-1)
preprocessor = ColumnTransformer([
('categorical', categorical_preprocessor, categorical_columns)],
remainder="passthrough")
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
cv_results = cross_validate(model, data, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
The mean cross-validation accuracy is: 0.873 +/- 0.002
Wall time: 10.2 s
###Markdown
Scaling numerical features Let's write a similar pipeline that also scales the numerical features using `StandardScaler` (or similar):
###Code
%%time
# Write your code here.
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder, StandardScaler
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
categorical_preprocessor = OrdinalEncoder(
handle_unknown="use_encoded_value",
unknown_value=-1
)
numerical_preprocessor = StandardScaler()
preprocessor = ColumnTransformer([
('categorical', categorical_preprocessor, categorical_columns),
('numerical', numerical_preprocessor, numerical_columns)], remainder="passthrough")
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
cv_results = cross_validate(model, data, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
The mean cross-validation accuracy is: 0.874 +/- 0.002
Wall time: 9.97 s
###Markdown
One-hot encoding of categorical variables For linear models, we have observed that integer coding of categorical variables can be very detrimental. However, for `HistGradientBoostingClassifier` models, it does not seem to be the case, as the cross-validation of the reference pipeline with `OrdinalEncoder` is good. Let's see if we can get an even better accuracy with `OneHotEncoder`. Hint: `HistGradientBoostingClassifier` does not yet support sparse input data. You might want to use `OneHotEncoder(handle_unknown="ignore", sparse=False)` to force the use of a dense representation as a workaround.
###Code
%%time
# Write your code here.
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder, StandardScaler, OneHotEncoder
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
categorical_preprocessor = OneHotEncoder(
handle_unknown="ignore",
sparse=False
)
# numerical_preprocessor = StandardScaler()
preprocessor = ColumnTransformer([
('categorical', categorical_preprocessor, categorical_columns),
# ('numerical', numerical_preprocessor, numerical_columns)
], remainder="passthrough")
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
cv_results = cross_validate(model, data, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
The mean cross-validation accuracy is: 0.873 +/- 0.002
Wall time: 21.8 s
###Markdown
📝 Exercise M1.05 The goal of this exercise is to evaluate the impact of feature preprocessing on a pipeline that uses a decision-tree-based classifier instead of logistic regression. - The first question is to empirically evaluate whether scaling numerical features is helpful or not; - The second question is to evaluate whether it is empirically better (both from a computational and a statistical perspective) to use integer coded or one-hot encoded categories.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
As in the previous notebooks, we use the utility `make_column_selector` to select only columns with a specific data type. Besides, we list in advance all categories for the categorical columns.
###Code
from sklearn.compose import make_column_selector as selector
numerical_columns_selector = selector(dtype_exclude=object)
categorical_columns_selector = selector(dtype_include=object)
numerical_columns = numerical_columns_selector(data)
categorical_columns = categorical_columns_selector(data)
###Output
_____no_output_____
###Markdown
Reference pipeline (no numerical scaling and integer-coded categories) First let's time the pipeline we used in the main notebook to serve as a reference:
###Code
%%time
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
categorical_preprocessor = OrdinalEncoder(handle_unknown="use_encoded_value",
unknown_value=-1)
preprocessor = ColumnTransformer([
('categorical', categorical_preprocessor, categorical_columns)],
remainder="passthrough")
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
cv_results = cross_validate(model, data, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
Scaling numerical features Let's write a similar pipeline that also scales the numerical features using `StandardScaler` (or similar):
###Code
# Write your code here.
###Output
_____no_output_____
###Markdown
One-hot encoding of categorical variables For linear models, we have observed that integer coding of categorical variables can be very detrimental. However, for `HistGradientBoostingClassifier` models, it does not seem to be the case, as the cross-validation of the reference pipeline with `OrdinalEncoder` is good. Let's see if we can get an even better accuracy with `OneHotEncoder`. Hint: `HistGradientBoostingClassifier` does not yet support sparse input data. You might want to use `OneHotEncoder(handle_unknown="ignore", sparse=False)` to force the use of a dense representation as a workaround.
###Code
# Write your code here.
###Output
_____no_output_____
###Markdown
📝 Exercise 02 The goal of this exercise is to evaluate the impact of feature preprocessing on a pipeline that uses a decision-tree-based classifier instead of logistic regression. - The first question is to empirically evaluate whether scaling numerical features is helpful or not; - The second question is to evaluate whether it is empirically better (both from a computational and a statistical perspective) to use integer coded or one-hot encoded categories.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
As in the previous notebooks, we use the utility `make_column_selector` to select only columns with a specific data type. Besides, we list in advance all categories for the categorical columns.
###Code
from sklearn.compose import make_column_selector as selector
numerical_columns_selector = selector(dtype_exclude=object)
categorical_columns_selector = selector(dtype_include=object)
numerical_columns = numerical_columns_selector(data)
categorical_columns = categorical_columns_selector(data)
###Output
_____no_output_____
###Markdown
Reference pipeline (no numerical scaling and integer-coded categories) First let's time the pipeline we used in the main notebook to serve as a reference:
###Code
%%time
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
categorical_preprocessor = OrdinalEncoder(handle_unknown="use_encoded_value",
unknown_value=-1)
preprocessor = ColumnTransformer([
('categorical', categorical_preprocessor, categorical_columns)],
remainder="passthrough")
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
cv_results = cross_validate(model, data, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
Scaling numerical features Let's write a similar pipeline that also scales the numerical features using `StandardScaler` (or similar):
###Code
# Write your code here.
###Output
_____no_output_____
###Markdown
One-hot encoding of categorical variables For linear models, we have observed that integer coding of categorical variables can be very detrimental. However, for `HistGradientBoostingClassifier` models, it does not seem to be the case, as the cross-validation of the reference pipeline with `OrdinalEncoder` is good. Let's see if we can get an even better accuracy with `OneHotEncoder`. Hint: `HistGradientBoostingClassifier` does not yet support sparse input data. You might want to use `OneHotEncoder(handle_unknown="ignore", sparse=False)` to force the use of a dense representation as a workaround.
###Code
# Write your code here.
###Output
_____no_output_____ |
chemEPy_cookbook.ipynb | ###Markdown
ChemEPy cookbook Welcome to the chemEPy cookbook, an interactive Jupyter notebook environment designed to teach a Python toolchain for chemical engineering. As of now the modules we are going to work on are the IAPWS module for the properties of water/steam, thermo, and the chemEPy module currently under development in this github repo. The way we are currently building this is closely modeled on the scipy module. The current version is built on top of a variety of modules that are common in Python scientific computing; to see the dependencies please go to requirments.txt. In order to cut down on load times and keep the module lightweight, the modules that do not require reading in tables will load on initialization, but those that do require tables will need to be loaded separately, the same way you load the optimization or linalg packages in scipy.
###Code
import chemEPy
from chemEPy import eos
from chemEPy import equations
#ignore this cell. I am using it to reload the package after I rebuild it when I modify it
from importlib import reload
reload(chemEPy)
reload(chemEPy.eos)
reload(chemEPy.equations)
###Output
_____no_output_____
###Markdown
As of now there are two available equations of state, ideal gas and van der Waals. These functions are typical of the design thus far: they are supposed to be generalizable and intuitive, but do not rely on any computer algebra. As of now there is no use of sympy in the module, and for the foreseeable future we would like to keep it this way. This means that you, the user, have one important job: make sure your units line up correctly. The lack of computer algebra greatly simplifies this process, and in most cases it means that the only return from a function will be a float or collection of floats. In exchange for careful attention to units we are going to try and make this module easy to use and as flexible as possible. Let us begin by looking at some of the info functions.
###Code
eos.idealGasInfo()
eos.vdwInfo()
###Output
solves for any of 4 unknowns P,V,n,T units are agnostic
a = bar*L^2/mol^2 and b = L/mol, std R is 0.08314 L*Bar*K^-1*mol^-1
###Markdown
These equations are built using the Python kwargs pattern, which means that you are going to be able to put your arguments in any order that you like; just remember, the units are on you. Let us examine how the syntax for these functions works.
###Code
eos.idealGas(P = 1, R = 0.08205, n = 1, T = 273)
import numpy as np
from matplotlib import pyplot as plt
import math
Parrow = np.linspace(0.1,1.1,101)
volData1 = eos.idealGas(P = Parrow, R = 0.08205, n = 1, T = 273)
nData1 = eos.idealGas(P = Parrow, R = 0.08205, T = 273, V = 22.4)
Tarrow = np.linspace(100,400,301)
volData2 = eos.idealGas(P = 1, R = 0.08205, n = 1, T = Tarrow)
nData2 = eos.idealGas(P = 1, R = 0.08205, T = Tarrow, V = 22.4)
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2, figsize = (15,7.5))
ax1.plot(Parrow, volData1)
ax1.set(ylabel = 'Volume(L)')
ax2.plot(Tarrow, volData2, 'tab:green')
ax3.plot(Parrow, nData1, 'tab:red')
ax3.set(xlabel = 'Pressure(atm)', ylabel = '# of moles')
ax4.plot(Tarrow, nData2, 'tab:orange')
ax4.set(xlabel = 'Temperature(K)')
fig.suptitle('Ideal gas plots')
###Output
_____no_output_____
###Markdown
Let us dig into our example above a little bit. As you can see, there are several intuitive things about the way the ideal gas law works. You begin by stating your arguments in the function explicitly, which means you do not need to worry about the order you put them in. It also means that the function is going to figure out which of your arguments is missing and then return the correct one. You can also feed the function vectors in the form of a numpy array, which is what we did to build these graphs. Now we will move on to the van der Waals eos. If you scroll up you can see that this equation of state does specify units, because the correction terms a and b have units. First let us see which materials are available to this function.
###Code
eos.vdwNames()
###Output
Aluminum trichloride
Ammonia
Ammonium chloride
Argon
Boron trichloride
Boron trifluoride
Diborane
Bromine
Perchloryl fluoride
Chlorine pentafluoride
Phosphonium chloride
Chlorine
Trichlorofluorosilane
Fluorine
Germanium tetrachloride
Nitrogen trifluoride
Phosphorus trifluoride
Tetrafluorohydrazine
Germane
Helium
Hydrogen bromide
Hydrogen chloride
Hydrogen cyanide
Hydrogen fluoride
Hydrogen iodide
Hydrogen
Water
Hydrogen sulphide
Hydrogen selenide
Krypton
Silane
Silicon tetrachloride
Silicon tetrafluoride
Titanium(IV) chloride
Mercury
Nitric oxide
Nitrogen dioxide
Nitrogen
Nitrous oxide
Hydrazine
Neon
Oxygen
Ozone
Phosphorus
Phosphine
Radon
Stannic chloride
Sulphur
Sulphur dioxide
Sulphur hexafluoride
Selenium
Uranium(VI) fluoride
Tungsten(VI) fluoride
Xenon
Xenon difluoride
Xenon tetrafluoride
Chlorotrifluoromethane
Trichlorofluoromethane
Tetrachloromethane
Tetrafluoromethane
Carbon monoxide
Carbon oxysulphide
Carbon dioxide
Carbon disulphide
Trichloromethane
Trifluoromethane
Dichloromethane
Difluoromethane
Chloromethane
Fluoromethane
Nitromethane
Methane
Methanol
Methanethiol
Methylamine
1,1,2-Trichlorotrifluoroethane
Tetrafluoroethylene
Cyanogen
Acetylene
1,1-Difluoroethylene
1,1,1-Trichloroethane
Fluroethylene
1,1,1-Trifluoroethane
Acetonitrile
Ethylene
1,1-Dichloroethane
1,2-Dichloroethane
Ethylene oxide
Acetic acid
Methyl formate
Bromoethane
Chloroethane
Fluoroethane
Ethane
Dimethyl ether
Ethanol
Dimethyl sulphide
Ethanethiol
Dimethylamine
Ethylamine
Perfluoropropane
Propanenitrile
Propene
Cyclopropane
Acetone
Propanal
Ethyl formate
Methyl acetate
Propanoic acid
1-Chloropropane
Propane
1-Propanol
2-Propanol
Ethyl methyl ether
Ethyl methyl sulphide
Propylamine
Trimethylamine
Furan
Thiophene
Pyrrole
1,3-Butadiene
Acetic anhydride
Butanenitrile
1-Butene
Cyclobutane
2-Butanone
Tetrahydrofuran
1,4-Dioxane
Ethyl acetate
Methyl propanoate
Propyl formate
Butanoic acid
Pyrrolidine
Butane
Isobutane
1-Butanol
2-Methyl-2-propanol
2-Methyl-1-propanol
Diethyl ether
Diethyl sulphide
Butylamine
Diethylamine
Tetramethylsilane
Furfural
Pyridine
Cyclopentene
1-Pentene
2-Methyl-1-butene
2-Methyl-2-butene
Cyclopentane
Tetrahydropyran
Isobutyl formate
Propyl acetate
Ethyl propanoate
Methyl butanoate
Methyl isobutanoate
Piperidine
Pentane
Isopentane
Neopentane
1-Pentanol
Bromobenzene
Chlorobenzene
Fluorobenzene
Iodobenzene
Benzene
Phenol
Aniline
Cyclohexanone
Hexanenitrile
Cyclohexane
Cyclohexanol
Pentyl formate
Isobutyl acetate
Ethyl butanoate
Ethyl 2-methylpropanoate
Methyl pentanoate
Hexane
2,3-Dimethylbutane
1-Hexanol
Triethylamine
Dipropylamine
Benzonitrile
Benzaldehyde
Toluene
o-Cresol
m-Cresol
p-Cresol
Benzyl alcohol
Anisole
Heptane
Heptanol
Ethylbenzene
o-Xylene
m-Xylene
p-Xylene
Phenetole
N,N-Dimethylaniline
Octane
2,5-Dimethylhexane
1-Octanol
Quinoline
Cumene
Propylbenzene
1,2,4-Trimethylbenzene
Mesitylene
Nonane
1-Nonanol
Naphthalene
Butylbenzene
Isobutylbenzene
o-Cymene
p-Cymene
p-Diethylbenzene
1,2,4,5-Tetramethylbenzene
Decane
1-Decanol
Undecane
Biphenyl
Dodecane
1-Dodecanol
Diphenylmethane
Tridecane
1-Tridecanol
1-Tetradecanol
Pentadecane
###Markdown
Quite a nice variety! As of the time of writing this, the goal is to have materials use a common capitalization format, but when in doubt see if there is a helper function to make your life easier. In the info function we can also see that the gas constant R is given in the correct units.
###Code
temp1 = eos.vdw(name = 'Naphthalene', V = 20, P = 5, n = 1, R = 0.08314)
temp2 = eos.idealGas(V = 20, P = 5, n = 1, R = 0.08314)
v1 = eos.vdw(name = 'Naphthalene', T = 1200, P = 5, n = 1, R = 0.08314)
v2 = eos.idealGas(T = 1200, P = 5, n = 1, R = 0.08314)
print('temperature with Van der Waals eos is:', temp1, 'K')
print('volume with Van der Waals eos is:', v1, 'L \n')
print('temperature with ideal gas eos is:', temp2, 'K')
print('volume with ideal gas eos is:', v2, 'L')
###Output
temperature with Van der Waals eos is: 1671.5531392831367 K
volume with Van der Waals eos is: 19.741084727539914 L
temperature with ideal gas eos is: 1202.7904738994466 K
volume with ideal gas eos is: 19.9536 L
###Markdown
As of now the van der Waals solver for V and n uses a non-linear solver, Newton's method, with an initial guess supplied by the ideal gas equation (a standalone sketch of this iteration follows below). A convergence study is planned for this, but for now it means that convergence is not guaranteed. Next we will look at a sub-module that loaded on initialization, fluidNumbers, which provides functions for a variety of numbers in fluid dynamics, most of them dimensionless parameters.
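The promised sketch is below: Newton's method on the van der Waals equation, seeded with the ideal-gas volume. It is standalone code that does not call chemEPy internals, and the a and b constants are textbook values for CO2 quoted from memory, so treat the numbers as illustrative only.
###Code
a, b = 3.640, 0.04267        # assumed textbook CO2 constants: bar*L^2/mol^2 and L/mol
R = 0.08314                  # L*bar/(K*mol)
P, n, T = 5.0, 1.0, 300.0    # bar, mol, K
def f(V):
    # van der Waals equation rearranged to f(V) = 0
    return (P + a*n**2/V**2)*(V - n*b) - n*R*T
def fprime(V):
    return (P + a*n**2/V**2) - 2*a*n**2*(V - n*b)/V**3
V = n*R*T/P                  # ideal-gas volume as the initial guess
for _ in range(50):
    step = f(V)/fprime(V)
    V -= step
    if abs(step) < 1e-12:
        break
print('van der Waals volume:', V, 'L; ideal-gas guess was', n*R*T/P, 'L')
###Output
_____no_output_____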
###Code
chemEPy.fluidNumbers.reynoldsInfo()
chemEPy.fluidNumbers.rayleighInfo()
###Output
arguments are rho, u, L, mu OR u, L, nu
arguments are Gr, Pr OR g, beta, Ts, Tinf, L, nu, alpha
###Markdown
Again, these functions are designed to parse the information you give them and then determine whether it is a valid set of arguments, so oftentimes there are multiple combinations of arguments you can feed them.
###Code
print(chemEPy.fluidNumbers.reynolds(rho = 1, u = 2, L = 0.1, mu = 1e-2))
print(chemEPy.fluidNumbers.reynolds(u = 2, L = 0.1, nu = 1e-2))
print(chemEPy.fluidNumbers.rayleigh(g = 9.81, beta = 1/273, Ts = 295, Tinf = 273, L = 1, nu = 1.5e-5, alpha = \
chemEPy.fluidNumbers.thermalDiffusivity(k = 0.025, rho = 1.2, cp = 1000)))
###Output
2529758241.758241
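###Markdown
As a quick sanity check, the same numbers can be computed directly from the textbook definitions Re = rho*u*L/mu and Ra = g*beta*(Ts - Tinf)*L^3/(nu*alpha); the cell below is plain Python and does not use chemEPy.
###Code
rho, u, L, mu = 1, 2, 0.1, 1e-2
print('Re =', rho*u*L/mu)                    # equals u*L/nu when nu = mu/rho
g, beta, Ts, Tinf, Lc, nu = 9.81, 1/273, 295, 273, 1, 1.5e-5
alpha = 0.025/(1.2*1000)                     # thermal diffusivity k/(rho*cp)
print('Ra =', g*beta*(Ts - Tinf)*Lc**3/(nu*alpha))
###Output
_____no_output_____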
###Markdown
Available functions for the fluidNumbers module are currently: archimedes, biot, graetz, grashoff, nusselt, peclet, prandtl, rayleigh, reynolds, and thermalDiffusivity. Now we will explore the nusseltCor submodule, which is designed to work through the Nusselt number correlations for convective heat transfer.
###Code
chemEPy.nusseltCor.nuInfo()
###Output
argument combos are:
forced = True, shape = flatPlate, Re, Pr
forced = True, shape = sphere, general = True, Re, Pr, muS, muInf
forced = True, shape = sphere, general = False, Re, Pr
forced = True, shape = crossCylinder, Re, Pr
forced = True, shape = tube, general = True, uniform = Ts, Gz, Re
forced = True, shape = tube, general = False, uniform = Ts, Gz, Re
forced = True, shape = tube, general = False, uniform = q, Gz, Re, muB, muW
forced = True, shape = tube, general = False, Gz, Re, Pr, heating = T/F
forced = False, shape = verticalPlate, Ra
forced = False, shape = horizontalPlate, Ra
forced = False, shape = cylinder, Ra, Pr
forced = False, shape = sphere, Ra, Pr
###Markdown
Wow, that's a lot of possible arguments! But each line guides you through what you will need to gather before you proceed. This submodule, combined with fluidNumbers, makes for a powerful, quick workflow that can speed you through the process. Let's take a look at an example where we find the convective heat transfer for free convection from a cylinder.
###Code
ra = chemEPy.fluidNumbers.rayleigh(g = 9.81, beta = 1/273, Ts = 323, Tinf = 273, L = 0.1, nu = 1.5e-5, alpha = \
chemEPy.fluidNumbers.thermalDiffusivity(k = 0.025, rho = 1.2, cp = 1000))
#recall that L is the characteristic length, which in this case is the diameter of the cylinder
pr = 0.71 #physical constant lookup
area = math.pi*0.1*2 #this cylinder has a diameter of 0.1 and length 2
ts = 323
tinf = 273
h = chemEPy.nusseltCor.nu(forced = False, shape = 'cylinder', Ra = ra, Pr = pr) * 0.025/0.1
q = h*area*(ts-tinf)
print('The total convective heat transfer is:', q, 'watts')
###Output
The total convective heat transfer is: 188.58624137979706 watts
###Markdown
Now we will look at two modules which are designed to help with physical properties. First, iapws, which is particularly useful for the properties of water/steam and has some additional features such as heavy water and ammonia. Second, thermo, which is useful for a broader variety of materials but has a different design philosophy and uses a significant amount of computer algebra. Both packages are on PyPI and have good documentation, which can be found at https://pypi.org/project/iapws/ and https://pypi.org/project/thermo/
###Code
import iapws
import thermo
from iapws import IAPWS97 as ia
water = ia(T = 170+273.15, x = 0.5) #saturated water at 170 C with quality = 0.5
print(water.Liquid.cp, water.Vapor.cp) #heat capacities
print(water.Liquid.v, water.Vapor.v, water.v) #specific volumes
print(water.Liquid.f, water.Vapor.f) #fugacity should be equal for VLE
###Output
4.369498593233083 2.5985140134188485
0.0011142624283287058 0.24261579551606938 0.12186502897219904
0.31951664114714096 0.3195068052820659
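###Markdown
Since the object above sits on the saturation line, derived quantities follow directly; for example, the latent heat of vaporisation is just the difference between the saturated-vapour and saturated-liquid enthalpies (this assumes the enthalpy attribute `h`, in kJ/kg, as documented for IAPWS97 phase objects).
###Code
hfg = water.Vapor.h - water.Liquid.h  # latent heat of vaporisation at 170 C, kJ/kg
print('Latent heat at 170 C:', hfg, 'kJ/kg')
###Output
_____no_output_____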
###Markdown
The iapws module is designed more along the lines of the fluidNumbers submodule we looked at above. It does not take in positional arguments and instead lets the user specify a combination of arguments, which it then checks automatically to make sure that the system is appropriately specified. In the two-phase region you will be able to specify one free physical parameter and the quality of the water/steam, and in the one-phase region you will be able to specify two parameters.
###Code
water = ia(T = 170+273.15, P = 1) #pressure is in MPa so this is slightly less than 10 atm
water.v, water.rho, water.mu
###Output
_____no_output_____
###Markdown
There are other submodules in the iapws package that you can explore, and there are additional parameters included in the IAPWS97 data; for a full list please see: https://iapws.readthedocs.io/en/latest/iapws.iapws97.html#iapws.iapws97.IAPWS97 Now we will look at some of the functionality in the thermo module. Thermo is a large and impressive module with dozens of submodules, some of which overlap with the functionality of chemEPy. If you are interested in some of these other submodules you should look further into them, but they are different from chemEPy. First, the functions in thermo are primarily written with positional arguments, so they are not going to try and parse out the missing argument and solve for it. This means that some of the functions are more specific and less flexible. That said, thermo has a fantastic library that can speed up physical property calculation, called chemical. For detailed information on all the functionality please see: https://thermo.readthedocs.io/thermo.chemical.html
###Code
from thermo.chemical import Chemical
ip = Chemical('isopentane') #all chemicals are loaded by default to 298.15 K and 101325 Pa
print(ip.Tm, ip.Tb, ip.rho, ip.Cp, ip.mu) #melting, boiling, density, cp, and dynamic viscosity at current state
ip.calculate(T = 373.15, P = 1e5) #change temperature and pressure
print(ip.phase, ip.alpha) #for pure components we can see the phase, and find thermal diffusivity
ip.VaporPressure.solve_prop(1e5) #solve for a dependent properity
ip.VolumeLiquid.plot_isotherm(T = 250, Pmin = 1e5, Pmax = 1e7)
###Output
_____no_output_____
###Markdown
Now we will come back to the chemEPy module and look at the radiation and conduction submodules
###Code
chemEPy.radiation.qInfo()
###Output
arguments are body1(grey/black), body2(grey/black), area, t1, t2, epsilon1, epsilon2.
Optional args are imperial(T/F) default is False
viewFactor default is 1
epsilon1 default is 1 (black body)
epsilon2 default is 1
area2 if doing 2 grey bodies default is area2 = area
###Markdown
The function q will return the total heat exchanged between two black or grey bodies. You can set the units to imperial if you desire, and there are several optional arguments. The viewFactor argument will be used to compute the composite view factor if both bodies are grey. Let us look at an example where we find the energy exchanged between two grey bodies of unequal areas with a known view factor.
###Code
chemEPy.radiation.q(body1 = 'grey', body2 = 'grey', area = 0.5, area2 = 0.3, t1 = 300, t2 = 500, epsilon1 = 0.9,\
epsilon2 = 0.8, viewFactor = 0.8)
###Output
_____no_output_____
###Markdown
This result is negative because the function is expressing the net energy going from body 1 to body 2. In the future, functionality for computing different view factors will be added to the radiation submodule.
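For reference, the classical two-grey-surface enclosure relation gives a standalone check of this number; chemEPy's internal convention for combining the emissivities and the view factor may differ slightly, so treat this only as an order-of-magnitude comparison.
###Code
sigma = 5.670e-8                      # Stefan-Boltzmann constant, W/(m^2*K^4)
A1, A2, F12 = 0.5, 0.3, 0.8
T1, T2, e1, e2 = 300, 500, 0.9, 0.8
# q12 = sigma*(T1^4 - T2^4) / ((1-e1)/(e1*A1) + 1/(A1*F12) + (1-e2)/(e2*A2))
q12 = sigma*(T1**4 - T2**4) / ((1 - e1)/(e1*A1) + 1/(A1*F12) + (1 - e2)/(e2*A2))
print(q12, 'W')                       # negative: net exchange flows from body 2 to body 1
###Output
_____no_output_____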
###Code
equations.antoine(name = 'Water', P = 1)
###Output
_____no_output_____ |
deep-learning-school/[13]object_detection/[homework]yolov3_detection.ipynb | ###Markdown
Phystech School of Applied Mathematics and Informatics (FPMI), MIPT --- Object detection with YOLOv3 Author: Ilya Zakharkin (DIHT MIPT, NeurusLab). For any questions, Telegram: @ilyazakharkin In the seminar we ran SSD and Mask-RCNN from the Tensorflow Object Detection API. In the lecture the YOLOv3 algorithm was analysed in detail, so now let's try to apply this very detector in practice. YOLOv3 **The idea behind detectors:** use a strong convolutional neural network, trained on classification, to extract features from the image, then use convolutional layers to regress the box coordinates and classify the objects inside them. Recall that the YOLOv3 architecture is as follows. In words: 1. An image is fed to the input. 2. It is resized to 300x300x3. 3. It is passed through a backbone network that extracts features, *Darknet53*. 4. Several convolutional layers with 1x1 and 3x3 convolutions follow. 5. After them comes a yolo layer: a 1x1x(1 + 4 + NUM_CLASSES) convolution. 6. Then upsampling (doubling the width and height) is performed, followed by concatenation with the feature maps that existed before the upsampling (to improve quality). 7. Steps 4-6 are repeated 2 more times to improve the detection quality for small objects. During training, additionally: 8. The final feature map is fed, in a special way, into the Loss to compute the error. 9. Gradients are propagated as in ordinary backpropagation, and the network weights are updated. LeakyReLU activations are used in the layers. Linear activations (i.e. no nonlinearity) are used right before the YOLO layers. You can see what the whole architecture looks like in code in this file: https://github.com/akozd/tensorflow_yolo_v3/blob/master/models/yolo_v3.py The original paper on arxiv.org: https://arxiv.org/abs/1804.02767 ***Note:*** You might ask: "Why YOLOv3, when there are many other good detectors?". Yes, but at the moment YOLOv3 has the best speed/quality trade-off among widely used neural network detectors. In that sense it is state of the art. Assignment (10 points) ***It is assumed that you are familiar with TensorFlow and convolutional neural networks.*** It is better to run this notebook locally with TensorFlow installed: `pip install tensorflow` (the CPU version; it will not run for too long, since the assignment involves no training, only prediction). If you are working in Google Colab, be ready to actively move between subfolders (`os.chdir(*path*)`), as in the seminar. Writing your own neural network detector from scratch is quite a hard task, so for now we simply use the code of someone who managed it: https://github.com/akozd/tensorflow_yolo_v3 As a reminder, you can download a whole repository from Github with the command `git clone *repository address*`. For example, the repository needed in this assignment is downloaded like this: `git clone https://github.com/akozd/tensorflow_yolo_v3` Stage 1 (2 points): getting acquainted with the repository Read the README of this repository: https://github.com/akozd/tensorflow_yolo_v3 ***Question on `README.md` (1 point)***: what does the repository author suggest doing in order to improve the quality of the predicted boxes when training on your own data?
###Code
<Answer>
...
###Output
_____no_output_____
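###Markdown
Before digging into the repository code, here is a tiny sanity check of the detection-head depth implied by the architecture description above; the assumption of 3 anchor boxes per scale comes from the original YOLOv3 paper, not from this notebook, and the variable names are illustrative only.
###Code
num_classes = 80              # COCO
anchors_per_scale = 3         # assumption, as in the YOLOv3 paper
depth = anchors_per_scale * (1 + 4 + num_classes)
print(depth)                  # 255 filters in each 1x1 detection convolution
###Output
_____no_output_____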
###Markdown
Read the file `train.py`. ***Question on `train.py` (1 point)***: what is the `train.py` script argument named `--test_model_overfit` responsible for?
###Code
<Detailed answer>
...
###Output
_____no_output_____
###Markdown
Stage 2 (3 points): reading the repository code Now you need to read the author's code and understand what happens in it. This repository was not chosen by accident: all the code is well documented and works properly. Your task is to understand how the files are related to each other, which files are used for training, which for prediction, and which are not used at all. A good strategy: starting from README.md, work out how `detect.py` works, i.e. what it takes as input and what it returns, and which other files it uses. ***Task (3 points)***: describe the structure of the repository in detail, explaining what each file is for. The more detail you give about what happens inside a file (you can write it literally as "..in lines 15-20 the boxes are predicted for the image.."), the more points you get.
###Code
<Detailed description of the repository structure>
...
###Output
_____no_output_____
###Markdown
Stage 3 (5 points): installing the required dependencies, downloading the weights (`.ckpt`), and running `detect.py` on your own images Let's warm up and run the code from the repository on your own images (any images, though preferably containing objects from [this list](https://github.com/nightrome/cocostuff/blob/master/labels.md), since the detector was originally trained on the COCO dataset). First make sure that you (or Colab) have all the required dependencies installed (the 5 links in the Dependencies section of README.md). Then, either with the `.sh` script or via the link given in the readme, download the weights of the YOLOv3 model trained on COCO into the `model_weights` folder. Points for this task are given as follows: * (1 point) predictions are obtained on any image of yours (this confirms that everything runs and that you managed to download and set up the repository locally or in Colab) * (1 point) an image is found where the network produces false positives * (1 point) an image is found where the network has missed detections (false negatives) * (1 point) an image is found where the network successfully detects all objects even though they strongly overlap * (1 point) the opposite of the previous item: an image where the network handles strongly overlapping objects poorly
###Code
<Your attempts here>
<and here>
...
###Output
_____no_output_____
###Markdown
* Optional stage 4 (10 points): training the detector on your own dataset In this task you may, if you wish, train your own detector. To simplify the task, here are examples of small datasets you can train and test on (**10 points are given for one of the two options; doing both will not earn double points**): ***1). Playing cards dataset: https://github.com/EdjeElectronics/TensorFlow-Object-Detection-API-Tutorial-Train-Multiple-Objects-Windows-10*** The repository consists of a tutorial on training a detector with the TF Object Detection API. You can either take the dataset from the `/images` folder of that repository and train the current YOLOv3 with `train.py` (be prepared to spend some time converting the data annotations into the required format), or go through that tutorial and train any model from the TF Object Detection API on this dataset. The main thing: demonstrate your detector working on test examples with cards.
###Code
...
<You can do it!>
...
###Output
_____no_output_____
###Markdown
**2). A dataset of images with the Golden Snitch from Harry Potter; link to an article with a detailed description of the task: https://apptractor.ru/develop/syigraem-v-kviddich-s-tensorflow-object-detection-api.html** As the result, you need to show test images on which the Snitch is correctly detected.
###Code
...
<I solemnly swear that I am up to no good>
...
###Output
_____no_output_____
###Markdown
There are also **two more approaches that should work** if what is described in the homework notebook does not: a). **Darkflow** -- a repository with different YOLO versions; its Readme explains how to train: https://github.com/thtrieu/darkflow b). **Darknet** -- a C++ framework with the author's original YOLOv3 (by Joseph Redmon). You can train a detector by following the instructions on its website: https://pjreddie.com/darknet/yolo/
###Code
...
###Output
_____no_output_____ |
topics/07-Advanced_Classification/02-Practical/files/python/LR_Ens.ipynb | ###Markdown
Optical Character Recognition for MNIST handwritten characters OCR (Optical Character Recognition) is another classification problem. In this example, we wish to recognise hand-written digits from the famous NIST dataset, each of which is presented as an $8\times 8$ array of pixel intensities.We are just going to focus on the problem of classifying each rasterised digit scan, and not on the other steps which include tokenising text, basic data cleaning etc.scikit-learn includes a built-in set of pre-formatted digits which we can use. The set is actually the MNIST (Modified NIST) set, but is functionally equivalent to the original NIST set, in relation to its classification challenges. Looking at the dataThe data is included in `scikit-learn` and so can be loaded easily.
###Code
from sklearn import datasets
digits = datasets.load_digits()
digits.images.shape
###Output
_____no_output_____
###Markdown
As usual, we review the training data before doing anything else. In this case, the data takes the form of rasterised images, so it makes sense to display them as such, overlaying each image with the label it was assigned by a human.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
fig, axes = plt.subplots(10, 10, figsize=(8, 8))
fig.subplots_adjust(hspace=0.1, wspace=0.1)
for i, ax in enumerate(axes.flat):
ax.imshow(digits.images[i], cmap='binary', interpolation='nearest')
ax.text(0.05, 0.05, str(digits.target[i]),
transform=ax.transAxes, color='green')
ax.set_xticks([])
ax.set_yticks([])
###Output
_____no_output_____
###Markdown
Here the data per digit is simply each pixel value within an 8x8 grid. The example grid below represents a zero.
###Code
# The images themselves
print(digits.images.shape)
print(digits.images[0])
###Output
_____no_output_____
###Markdown
While it is better to display each instance as an 8x8 grid, each instance needs to be flattened into a single row with 64 elements (columns), as below.
###Code
# The flattened data that is used to train the model.
print(digits.data.shape)
print(digits.data[0])
###Output
_____no_output_____
###Markdown
There are some nice facilities to count the number of different digits.
###Code
# The target label
from collections import Counter # https://stackoverflow.com/a/2392948
c = Counter(digits.target)
print(c.items())
###Output
_____no_output_____
###Markdown
Summarising, the data has 1797 samples in 64 dimensions and 10 ($0,\ldots,9$) levels. The number of instances per level varies from 174 to 183. Classifying the digits using logistic regressionLogistic regression is an extension of regression where a change of variable is used to map the continuous (numerical-valued) prediction into categorical values for classification purposes.
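Concretely, the "change of variable" is the logistic (sigmoid) function: for a two-class problem the model predicts $$P(y=1\mid x)=\sigma(x^\top\beta)=\frac{1}{1+e^{-x^\top\beta}},$$ and for the ten digit classes this generalises to the softmax $$P(y=k\mid x)=\frac{e^{x^\top\beta_k}}{\sum_{j=0}^{9}e^{x^\top\beta_j}},\qquad k=0,\ldots,9,$$ with one coefficient vector $\beta_k$ per class. (How scikit-learn handles the multiclass case depends on its solver settings, so treat this as the general idea rather than the exact implementation.)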
###Code
from sklearn.model_selection import train_test_split
seed=2
Xtrain, Xtest, ytrain, ytest = train_test_split(digits.data, digits.target,
random_state=seed, stratify=digits.target)
print(Xtrain.shape, Xtest.shape)
###Output
_____no_output_____
###Markdown
We use logistic regression with an $\ell_2$-based regularisation penalty (recall Week 5's discussion of regularisation).
###Code
from sklearn.linear_model import LogisticRegression
# Get and configure a LogisticRegression object, with an L2 regularisation penalty
clf = LogisticRegression(penalty='l2', max_iter=7600)
# Fit the training data
clf.fit(Xtrain, ytrain)
# Using the beta parameters that have just been learned and are in clf, predict (recognise) the test data
ypred = clf.predict(Xtest)
###Output
_____no_output_____
###Markdown
We check the classification accuracy score and confusion matrix as we did for the Iris Data:
###Code
from sklearn.metrics import accuracy_score, confusion_matrix
print(accuracy_score(ytest, ypred))
confusionMat = confusion_matrix(ytest, ypred)
print(confusionMat)
###Output
_____no_output_____
###Markdown
As can be seen, the confusion matrix has several off-diagonal nonzero terms. Because there are 10 labels, the confusion matrix is slightly harder to visualise than the Iris data, which had just 3 labels. We can get a better sense of its layout by plotting it as an image. Because all the values are nonnegative, but there is a large difference in size from the smallest (0) to the largest (45) with most values being at each end of the range, the square root of the values maps better into the Blue colour space used in the plot below: Make sure the directory exists beforehand to store the generated plots
###Code
import os
picDir = "output/pics"
if not os.path.exists(picDir):
os.makedirs(picDir)
import numpy as np
plt.imshow(np.sqrt(confusionMat),cmap='Blues', interpolation='nearest')
plt.grid(False)
plt.ylabel('true')
plt.xlabel('predicted');
plt.savefig(picDir+"/logreg_digits_l2_confusionMatrix.pdf")
###Output
_____no_output_____
###Markdown
We might also take a look at some of the outputs along with their predicted labels. Matching labels are green (as before) and unmatched labels are red:
###Code
fig, axes = plt.subplots(10, 10, figsize=(8, 8))
fig.subplots_adjust(hspace=0.1, wspace=0.1)
for i, ax in enumerate(axes.flat):
ax.imshow(Xtest[i].reshape(8, 8), cmap='binary')
ax.text(0.05, 0.05, str(ypred[i]),
transform=ax.transAxes,
color='green' if (ytest[i] == ypred[i]) else 'red')
ax.set_xticks([])
ax.set_yticks([])
fig.savefig(picDir+"/digitsAccuracyCheck.pdf")
###Output
_____no_output_____
###Markdown
Where they do not match, it is arguable what the original writing was meant to represent!
###Code
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
# Configure the bagging classifier
n_estimators=50
baggingClf = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators, random_state=0).fit(Xtrain, ytrain)
ypredBagging = baggingClf.predict(Xtest)  # use the bagging ensemble, not the earlier logistic regression model
print(accuracy_score(ytest, ypredBagging))
confusionMatBagging = confusion_matrix(ytest, ypredBagging)
print(confusionMatBagging)
import seaborn as sns
plt.figure(figsize=(10,7))
sns.set(font_scale=1.4) # for label size
sns.heatmap(confusionMatBagging, annot=True, annot_kws={"size": 16}) # font size
plt.show()
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
###Output
_____no_output_____ |
docs/refactoring/performance/multiprocessing.ipynb | ###Markdown
Multi-processing exampleWe’ll start with code that is clear, simple, and executed top-down. It’s easy to develop and incrementally testable:
###Code
import requests
from multiprocessing.pool import ThreadPool as Pool
sites = [
'https://github.com/veit/jupyter-tutorial/',
'https://jupyter-tutorial.readthedocs.io/en/latest/',
'https://github.com/veit/pyviz-tutorial/',
'https://pyviz-tutorial.readthedocs.io/de/latest/',
'https://cusy.io/en',
]
def sitesize(url):
with requests.get(url) as u:
return url, len(u.content)
pool = Pool(10)
for result in pool.imap_unordered(sitesize, sites):
print(result)
###Output
('https://jupyter-tutorial.readthedocs.io/en/latest/', 6374)
('https://pyviz-tutorial.readthedocs.io/de/latest/', 6556)
('https://github.com/veit/pyviz-tutorial/', 164082)
('https://github.com/veit/jupyter-tutorial/', 183345)
('https://cusy.io/en', 26974)
###Markdown
> **Note 1:** A good development strategy is to use [map](https://docs.python.org/3/library/functions.html#map) to test your code in a single process and thread before moving to multi-processing.> **Note 2:** In order to better assess when `ThreadPool` and when process `Pool` should be used, here are some rules of thumb:> > * For CPU-heavy jobs, `multiprocessing.pool.Pool` should be used. Usually we start here with twice the number of CPU cores for the pool size, but at least 4.> > * For I/O-heavy jobs, `multiprocessing.pool.ThreadPool` should be used. Usually we start here with five times the number of CPU cores for the pool size.> > * If we use Python 3 and do not need an interface identical to `Pool`, we use [concurrent.futures.Executor](https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.Executor) instead of `multiprocessing.pool.ThreadPool`; it has a simpler interface and was designed for threads from the start. Since it returns instances of `concurrent.futures.Future`, it is compatible with many other libraries, including `asyncio`.> > * For CPU- and I/O-heavy jobs, we prefer `multiprocessing.Pool` because it provides better process isolation.
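As a rough sketch only (it is not used in the rest of this notebook), the same fan-out with `concurrent.futures` could look like this, reusing the `sitesize` function and `sites` list defined above; note that `Executor.map` yields results in input order, unlike `imap_unordered`:

```python
from concurrent.futures import ThreadPoolExecutor

# sitesize() and sites are the objects defined in the cell above.
with ThreadPoolExecutor(max_workers=10) as executor:
    for result in executor.map(sitesize, sites):  # results come back in input order
        print(result)
```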
###Code
import requests
from multiprocessing.pool import ThreadPool as Pool
sites = [
'https://github.com/veit/jupyter-tutorial/',
'https://jupyter-tutorial.readthedocs.io/en/latest/',
'https://github.com/veit/pyviz-tutorial/',
'https://pyviz-tutorial.readthedocs.io/de/latest/',
'https://cusy.io/en',
]
def sitesize(url):
with requests.get(url) as u:
return url, len(u.content)
for result in map(sitesize, sites):
print(result)
###Output
('https://github.com/veit/jupyter-tutorial/', 183345)
('https://jupyter-tutorial.readthedocs.io/en/latest/', 6374)
('https://github.com/veit/pyviz-tutorial/', 164082)
('https://pyviz-tutorial.readthedocs.io/de/latest/', 6556)
('https://cusy.io/en', 26974)
###Markdown
What can be parallelised? Amdahl’s law> The increase in speed is mainly limited by the sequential part of the problem, since its execution time cannot be reduced by parallelisation. In addition, parallelisation creates additional costs, such as for communication and synchronisation of the processes.In our example, the following tasks can only be processed serially:* UDP DNS request request for the URL* UDP DNS response* Socket from the OS* TCP-Connection* Sending the HTTP request for the root resource* Waiting for the TCP response* Counting characters on the site
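Stated as a formula: if a fraction $p$ of the total work can be parallelised over $n$ workers and the remaining $1-p$ must run serially, the achievable speed-up is bounded by $$S(n)=\frac{1}{(1-p)+\dfrac{p}{n}},\qquad \lim_{n\to\infty}S(n)=\frac{1}{1-p},$$ so even with unlimited workers the serial part above caps the overall gain.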
###Code
import requests
from multiprocessing.pool import ThreadPool as Pool
sites = [
'https://github.com/veit/jupyter-tutorial/',
'https://jupyter-tutorial.readthedocs.io/en/latest/',
'https://github.com/veit/pyviz-tutorial/',
'https://pyviz-tutorial.readthedocs.io/de/latest/',
'https://cusy.io/en',
]
def sitesize(url):
with requests.get(url, stream=True) as u:
return url, len(u.content)
pool = Pool(4)
for result in pool.imap_unordered(sitesize, sites):
print(result)
###Output
('https://github.com/veit/pyviz-tutorial/', 164088)
('https://github.com/veit/jupyter-tutorial/', 183345)
('https://jupyter-tutorial.readthedocs.io/en/latest/', 6374)
('https://pyviz-tutorial.readthedocs.io/de/latest/', 6556)
('https://cusy.io/en', 26974)
###Markdown
Multi-processing exampleWe start here with code that is clear and simple and runs top-down. It is easy to develop and to test incrementally:
###Code
import requests
from multiprocessing.pool import ThreadPool as Pool
sites = [
'https://github.com/veit/jupyter-tutorial/',
'https://jupyter-tutorial.readthedocs.io/en/latest/',
'https://github.com/veit/pyviz-tutorial/',
'https://pyviz-tutorial.readthedocs.io/de/latest/',
'https://cusy.io/en',
]
def sitesize(url):
with requests.get(url) as u:
return url, len(u.content)
pool = Pool(4)
for result in pool.imap_unordered(sitesize, sites):
print(result)
###Output
('https://jupyter-tutorial.readthedocs.io/en/latest/', 6374)
('https://pyviz-tutorial.readthedocs.io/de/latest/', 6556)
('https://github.com/veit/pyviz-tutorial/', 164082)
('https://github.com/veit/jupyter-tutorial/', 183351)
('https://cusy.io/en', 26974)
###Markdown
> **Note 1:** A good development strategy is to use [map](https://docs.python.org/3/library/functions.html#map) to test the code in a single process and a single thread before switching to multi-processing.> **Note 2:** To better judge when `ThreadPool` and when `Pool` should be used, here are some rules of thumb:> > * For CPU-heavy jobs, `multiprocessing.pool.Pool` should be used. Usually we start here with twice the number of CPU cores for the pool size, but with at least 4.> > * For I/O-heavy jobs, `multiprocessing.pool.ThreadPool` should be used. Usually we start here with five times the number of CPU cores for the pool size.> > * If we use Python 3 and do not need an interface identical to `Pool`, we use [concurrent.futures.Executor](https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.Executor) instead of `multiprocessing.pool.ThreadPool`; it has a simpler interface and was designed for threads from the start. Since it returns instances of `concurrent.futures.Future`, it is compatible with many other libraries, including `asyncio`.> > * For CPU- and I/O-heavy jobs, we prefer `multiprocessing.Pool`, since this achieves better process isolation.
###Code
import requests
from multiprocessing.pool import ThreadPool as Pool
sites = [
'https://github.com/veit/jupyter-tutorial/',
'https://jupyter-tutorial.readthedocs.io/en/latest/',
'https://github.com/veit/pyviz-tutorial/',
'https://pyviz-tutorial.readthedocs.io/de/latest/',
'https://cusy.io/en',
]
def sitesize(url):
with requests.get(url) as u:
return url, len(u.content)
for result in map(sitesize, sites):
print(result)
###Output
('https://github.com/veit/jupyter-tutorial/', 183029)
('https://jupyter-tutorial.readthedocs.io/en/latest/', 6374)
('https://github.com/veit/jupyter-tutorial/', 183345)
('https://github.com/veit/pyviz-tutorial/', 164082)
('https://cusy.io/en', 26974)
###Markdown
What can be parallelised? Amdahl's law> The speed-up is limited above all by the sequential part of the problem, since its execution time cannot be reduced by parallelisation. In addition, parallelisation creates extra costs, for example for communication and for synchronising the processes.In our example, the following tasks can only be processed serially:* UDP DNS request for the URL* UDP DNS response* Socket from the OS* TCP connection* Sending the HTTP request for the root resource* Waiting for the TCP response* Counting the characters on the website
###Code
import requests
from multiprocessing.pool import ThreadPool as Pool
sites = [
'https://github.com/veit/jupyter-tutorial/',
'https://jupyter-tutorial.readthedocs.io/en/latest/',
'https://github.com/veit/pyviz-tutorial/',
'https://pyviz-tutorial.readthedocs.io/de/latest/',
'https://cusy.io/en',
]
def sitesize(url):
''' Determine the size of a website '''
with requests.get(url, stream=True) as u:
return url, len(u.content)
pool = Pool(4)
for result in pool.imap_unordered(sitesize, sites):
print(result)
###Output
('https://github.com/veit/pyviz-tutorial/', 164088)
('https://github.com/veit/jupyter-tutorial/', 183345)
('https://jupyter-tutorial.readthedocs.io/en/latest/', 6374)
('https://pyviz-tutorial.readthedocs.io/de/latest/', 6556)
('https://cusy.io/en', 26974)
###Markdown
Multi-processing exampleWe’ll start with code that is clear, simple, and executed top-down. It’s easy to develop and incrementally testable:
###Code
import urllib.request
from multiprocessing.pool import ThreadPool as Pool
sites = [
'https://jupyter-tutorial.readthedocs.io/en/latest/',
'https://github.com/veit/jupyter-tutorial/',
'https://cusy.io/en',
]
def sitesize(url):
''' Determine the size of a website '''
with urllib.request.urlopen(url) as u:
page = u.read()
return url, len(page)
pool = Pool(10)
for result in pool.imap_unordered(sitesize, sites):
print(result)
###Output
('https://cusy.io/en', 15655)
('https://jupyter-tutorial.readthedocs.io/en/latest/', 12630)
('https://github.com/veit/jupyter-tutorial/', 98527)
###Markdown
> **Note 1:** A good development strategy is to use [map](https://docs.python.org/3/library/functions.html#map) to test your code in a single process and thread before moving to multi-processing.> **Note 2:** In order to better assess when `ThreadPool` and when process `Pool` should be used, here are some rules of thumb:> > * `multiprocessing.pool.ThreadPool` should be used for IO-heavy jobs.> * `multiprocessing.Pool` should be used for CPU-heavy jobs.> * For jobs that are heavy on the CPU and IO, I usually prefer `multiprocessing.Pool`, as this achieves better process isolation.> * For Python 3, take a look at the pool implementation of [concurrent.futures.Executor](https://docs.python.org/3/library/concurrent.futures.html?highlight=concurrent%20futures#concurrent.futures.Executor).
###Code
import urllib.request
from multiprocessing.pool import ThreadPool as Pool
sites = [
'https://jupyter-tutorial.readthedocs.io/en/latest/',
'https://github.com/veit/jupyter-tutorial/',
'https://cusy.io/en',
]
def sitesize(url):
''' Determine the size of a website '''
with urllib.request.urlopen(url) as u:
page = u.read()
return url, len(page)
for result in map(sitesize, sites):
print(result)
###Output
('https://jupyter-tutorial.readthedocs.io/en/latest/', 12630)
('https://github.com/veit/jupyter-tutorial/', 98651)
('https://cusy.io/en', 15655)
###Markdown
What can be parallelised? Amdahl’s law> The increase in speed is mainly limited by the sequential part of the problem, since its execution time cannot be reduced by parallelisation. In addition, parallelisation creates additional costs, such as for communication and synchronisation of the processes.In our example, the following tasks can only be processed serially:* UDP DNS request request for the URL* UDP DNS response* Socket from the OS* TCP-Connection* Sending the HTTP request for the root resource* Waiting for the TCP response* Counting characters on the site
###Code
import urllib.request
from multiprocessing.pool import ThreadPool as Pool
sites = [
'https://jupyter-tutorial.readthedocs.io/en/latest/',
'https://github.com/veit/jupyter-tutorial/',
'https://cusy.io/en',
]
def sitesize(url):
''' Determine the size of a website '''
with urllib.request.urlopen(url) as u:
page = u.read()
return url, len(page)
pool = Pool(10)
for result in pool.imap_unordered(sitesize, sites):
print(result)
###Output
('https://cusy.io/en', 15655)
('https://jupyter-tutorial.readthedocs.io/en/latest/', 12630)
('https://github.com/veit/jupyter-tutorial/', 98526)
|
docs/source/example_mcmc.ipynb | ###Markdown
MCMC & why 3d mattersThis example (although quite artificial) shows that viewing a posterior (ok, I have flat priors) in 3d can be quite useful. While the 2d projection may look quite 'bad', the 3d volume rendering shows that much of the volume is empty, and the posterior is much better defined than it seems in 2d.
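For reference, the quantity explored below (with flat priors) is the Gaussian log-likelihood of the model $f(x;a,b,c)$ with known noise level $\sigma$ (the `error` variable in the code), up to an additive constant: $$\ln\mathcal{L}(a,b,c)=-\frac{1}{2}\sum_{i=1}^{N}\frac{\bigl(y_i-f(x_i;a,b,c)\bigr)^2}{\sigma^2}.$$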
###Code
import pylab
import scipy.optimize as op
import emcee
import numpy as np
%matplotlib inline
# our 'blackbox' 3 parameter model which is highly degenerate
def f_model(x, a, b, c):
return x * np.sqrt(a**2 +b**2 + c**2) + a*x**2 + b*x**3
N = 100
a_true, b_true, c_true = -1., 2., 1.5
# our input and output
x = np.random.rand(N)*0.5#+0.5
y = f_model(x, a_true, b_true, c_true)
# + some (known) gaussian noise
error = 0.2
y += np.random.normal(0, error, N)
# and plot our data
pylab.scatter(x, y);
pylab.xlabel("$x$")
pylab.ylabel("$y$")
# our likelihood
def lnlike(theta, x, y, error):
a, b, c = theta
model = f_model(x, a, b, c)
chisq = 0.5*(np.sum((y-model)**2/error**2))
return -chisq
result = op.minimize(lambda *args: -lnlike(*args), [a_true, b_true, c_true], args=(x, y, error))
# find the max likelihood
a_ml, b_ml, c_ml = result["x"]
print("estimates", a_ml, b_ml, c_ml)
print("true values", a_true, b_true, c_true)
result["message"]
# do the mcmc walk
ndim, nwalkers = 3, 100
pos = [result["x"] + np.random.randn(ndim)*0.1 for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnlike, args=(x, y, error))
sampler.run_mcmc(pos, 1500);
samples = sampler.chain[:, 50:, :].reshape((-1, ndim))
###Output
_____no_output_____
###Markdown
Posterior in 2d
###Code
# plot the 2d pdfs
import corner
fig = corner.corner(samples, labels=["$a$", "$b$", "$c$"],
truths=[a_true, b_true, c_true])
###Output
_____no_output_____
###Markdown
Posterior in 3d
###Code
import vaex
import scipy.ndimage
import ipyvolume
ds = vaex.from_arrays(a=samples[...,0], b=samples[...,1], c=samples[...,2])
# get 2d histogram
v = ds.count(binby=["a", "b", "c"], shape=64)
# smooth it for visual pleasure
v = scipy.ndimage.gaussian_filter(v, 2)
ipyvolume.volshow(v, lighting=True)
###Output
_____no_output_____
###Markdown
MCMC & why 3d mattersThis example (although quite artificial) shows that viewing a posterior (ok, I have flat priors) in 3d can be quite useful. While the 2d projection may look quite 'bad', the 3d volume rendering shows that much of the volume is empty, and the posterior is much better defined than it seems in 2d.
###Code
import pylab
import scipy.optimize as op
import emcee
import numpy as np
%matplotlib inline
# our 'blackbox' 3 parameter model which is highly degenerate
def f_model(x, a, b, c):
return x * np.sqrt(a**2 +b**2 + c**2) + a*x**2 + b*x**3
N = 100
a_true, b_true, c_true = -1., 2., 1.5
# our input and output
x = np.random.rand(N)*0.5#+0.5
y = f_model(x, a_true, b_true, c_true)
# + some (known) gaussian noise
error = 0.2
y += np.random.normal(0, error, N)
# and plot our data
pylab.scatter(x, y);
pylab.xlabel("$x$")
pylab.ylabel("$y$")
# our likelihood
def lnlike(theta, x, y, error):
a, b, c = theta
model = f_model(x, a, b, c)
chisq = 0.5*(np.sum((y-model)**2/error**2))
return -chisq
result = op.minimize(lambda *args: -lnlike(*args), [a_true, b_true, c_true], args=(x, y, error))
# find the max likelihood
a_ml, b_ml, c_ml = result["x"]
print("estimates", a_ml, b_ml, c_ml)
print("true values", a_true, b_true, c_true)
result["message"]
# do the mcmc walk
ndim, nwalkers = 3, 100
pos = [result["x"] + np.random.randn(ndim)*0.1 for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnlike, args=(x, y, error))
sampler.run_mcmc(pos, 1500);
samples = sampler.chain[:, 50:, :].reshape((-1, ndim))
###Output
_____no_output_____
###Markdown
Posterior in 2d
###Code
# plot the 2d pdfs
import corner
fig = corner.corner(samples, labels=["$a$", "$b$", "$c$"],
truths=[a_true, b_true, c_true])
###Output
_____no_output_____
###Markdown
Posterior in 3d
###Code
import vaex
import scipy.ndimage
import ipyvolume
ds = vaex.from_arrays(a=samples[...,0].copy(), b=samples[...,1].copy(), c=samples[...,2].copy())
# get 2d histogram
v = ds.count(binby=["a", "b", "c"], shape=64)
# smooth it for visual pleasure
v = scipy.ndimage.gaussian_filter(v, 2)
ipyvolume.quickvolshow(v, lighting=True)
###Output
_____no_output_____ |
samples/notebooks/polyglot/Azure logs.ipynb | ###Markdown
[this doc on github](https://github.com/dotnet/interactive/tree/master/samples/notebooks/polyglot)
###Code
Install-Module -Name Az.ApplicationInsights -RequiredVersion 1.0.3 -Scope CurrentUser
Get-PackageProvider
Connect-AzAccount
Set-AzContext -Subscription "64276bd9-d4bf-4fe3-8b77-36d696e84b26"
Install-Module -Name Az.Monitor -Scope CurrentUser
$logs = Get-AzLog -MaxRecord 200 | Select-Object -ExcludeProperty Authorization,Claims,EventTimestamp,HttpRequest,Level,Properties,SubmissionTimestamp
$logJson = ConvertTo-Json $logs -Depth 3
$logJson | Out-Display -MimeType "application/json"
$entryByResourceGroup = [Graph.Histogram]::new()
$entryByResourceGroup.name = "By ResourceGroup"
$entryByResourceGroup.x = $logs.ResourceGroupName
$entryByResourceProvider = [Graph.Histogram]::new()
$entryByResourceProvider.name = "By ResourceProvider"
$entryByResourceProvider.x = $logs.ResourceProviderName.value
$entryByStatus = [Graph.Histogram]::new()
$entryByStatus.name = "By Status"
$entryByStatus.x = $logs.Status.value
New-PlotlyChart -Trace @($entryByResourceGroup,$entryByResourceProvider,$entryByStatus) -Title "Events" | Out-Display
$entryByResourceProviderSuccess = [Graph.Histogram]::new()
$entryByResourceProviderSuccess.name = "Success By ResourceProvider"
$entryByResourceProviderSuccess.x = ($logs | where-object { $_.Status.value -eq "Succeeded"}).ResourceProviderName.value
$entryByResourceProviderFailure = [Graph.Histogram]::new()
$entryByResourceProviderFailure.name = "Failure By ResourceProvider"
$entryByResourceProviderFailure.x = ($logs | where-object { $_.Status.value -ne "Succeeded"}).ResourceProviderName.value
$layout = [Layout]::new()
$layout.barmode = "stack"
New-PlotlyChart -Layout $layout -Trace @($entryByResourceProviderSuccess, $entryByResourceProviderFailure) -Title "Events outcome by Resource Provider" | Out-Display
###Output
_____no_output_____
###Markdown
[this doc on github](https://github.com/dotnet/interactive/tree/main/samples/notebooks/polyglot)
###Code
Install-Module -Name Az.ApplicationInsights -RequiredVersion 1.0.3 -Scope CurrentUser
Get-PackageProvider
Connect-AzAccount
Set-AzContext -Subscription "64276bd9-d4bf-4fe3-8b77-36d696e84b26"
Install-Module -Name Az.Monitor -Scope CurrentUser
$logs = Get-AzLog -MaxRecord 200 | Select-Object -ExcludeProperty Authorization,Claims,EventTimestamp,HttpRequest,Level,Properties,SubmissionTimestamp
$logJson = ConvertTo-Json $logs -Depth 3
$logJson | Out-Display -MimeType "application/json"
$entryByResourceGroup = [Graph.Histogram]::new()
$entryByResourceGroup.name = "By ResourceGroup"
$entryByResourceGroup.x = $logs.ResourceGroupName
$entryByResourceProvider = [Graph.Histogram]::new()
$entryByResourceProvider.name = "By ResourceProvider"
$entryByResourceProvider.x = $logs.ResourceProviderName.value
$entryByStatus = [Graph.Histogram]::new()
$entryByStatus.name = "By Status"
$entryByStatus.x = $logs.Status.value
New-PlotlyChart -Trace @($entryByResourceGroup,$entryByResourceProvider,$entryByStatus) -Title "Events" | Out-Display
$entryByResourceProviderSuccess = [Graph.Histogram]::new()
$entryByResourceProviderSuccess.name = "Success By ResourceProvider"
$entryByResourceProviderSuccess.x = ($logs | where-object { $_.Status.value -eq "Succeeded"}).ResourceProviderName.value
$entryByResourceProviderFailure = [Graph.Histogram]::new()
$entryByResourceProviderFailure.name = "Failure By ResourceProvider"
$entryByResourceProviderFailure.x = ($logs | where-object { $_.Status.value -ne "Succeeded"}).ResourceProviderName.value
$layout = [Layout]::new()
$layout.barmode = "stack"
New-PlotlyChart -Layout $layout -Trace @($entryByResourceProviderSuccess, $entryByResourceProviderFailure) -Title "Events outcome by Resource Provider" | Out-Display
###Output
_____no_output_____
sound/simple_audio_working_vggish_clean_freeze_vggish_weights.ipynb | ###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Simple audio recognition: Recognizing keywords This tutorial will show you how to build a basic speech recognition network that recognizes ten different words. It's important to know that real speech and audio recognition systems are much more complex, but like MNIST for images, it should give you a basic understanding of the techniques involved. Once you've completed this tutorial, you'll have a model that tries to classify a one second audio clip as "down", "go", "left", "no", "right", "stop", "up" and "yes".
###Code
!ls
from google.colab import files
import os
if not os.path.exists('custom_dataset.zip'):
files.upload()
!unzip custom_dataset.zip
!ls
!git clone https://github.com/google-coral/project-keyword-spotter.git
!ls project-keyword-spotter/
!cp project-keyword-spotter/mel_features.py .
!ls
import mel_features
###Output
_____no_output_____
###Markdown
SetupImport necessary modules and dependencies.
###Code
import os
import pathlib
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow as tf
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras import layers
from tensorflow.keras import models
from IPython import display
# Set seed for experiment reproducibility
seed = 42
tf.random.set_seed(seed)
np.random.seed(seed)
###Output
_____no_output_____
###Markdown
Import the Speech Commands datasetYou'll write a script to download a portion of the [Speech Commands dataset](https://www.tensorflow.org/datasets/catalog/speech_commands). The original dataset consists of over 105,000 WAV audio files of people saying thirty different words. This data was collected by Google and released under a CC BY license, and you can help improve it by [contributing five minutes of your own voice](https://aiyprojects.withgoogle.com/open_speech_recording).You'll be using a portion of the dataset to save time with data loading. Extract the `mini_speech_commands.zip` and load it in using the `tf.data` API.
###Code
data_dir = pathlib.Path('data/mini_speech_commands')
if not data_dir.exists():
tf.keras.utils.get_file(
'mini_speech_commands.zip',
origin="http://storage.googleapis.com/download.tensorflow.org/data/mini_speech_commands.zip",
extract=True,
cache_dir='.', cache_subdir='data')
###Output
_____no_output_____
###Markdown
Check basic statistics about the dataset.
###Code
!ls data/mini_speech_commands
!mv data/mini_speech_commands data/mini_speech_commands.bak
!mkdir data/mini_speech_commands
!#cp -r data/mini_speech_commands.bak/left data/mini_speech_commands/left
!#cp -r data/mini_speech_commands.bak/stop data/mini_speech_commands/stop
!mkdir data/mini_speech_commands/unknown
!#cp data/mini_speech_commands.bak/up/*.wav data/mini_speech_commands/unknown
!#cp data/mini_speech_commands.bak/go/*.wav data/mini_speech_commands/unknown
!#cp data/mini_speech_commands.bak/stop/*.wav data/mini_speech_commands/unknown
!#cp data/mini_speech_commands.bak/no/*.wav data/mini_speech_commands/unknown
!#cp data/mini_speech_commands.bak/yes/*.wav data/mini_speech_commands/unknown
!#cp data/mini_speech_commands.bak/down/*.wav data/mini_speech_commands/unknown
!cp custom_dataset/background/*.wav data/mini_speech_commands/unknown
!mkdir data/mini_speech_commands/cough
!cp custom_dataset/cough/*.wav data/mini_speech_commands/cough
!ls data/mini_speech_commands/unknown
commands = np.array(tf.io.gfile.listdir(str(data_dir)))
commands = commands[commands != 'README.md']
print('Commands:', commands)
###Output
_____no_output_____
###Markdown
Extract the audio files into a list and shuffle it.
###Code
filenames = tf.io.gfile.glob(str(data_dir) + '/*/*')
filenames = tf.random.shuffle(filenames)
num_samples = len(filenames)
print('Number of total examples:', num_samples)
print('Number of examples per label:',
len(tf.io.gfile.listdir(str(data_dir/commands[0]))))
print('Example file tensor:', filenames[0])
###Output
_____no_output_____
###Markdown
Split the files into training, validation and test sets. Here, instead of the usual 80:10:10 ratio, the last 20 shuffled files are held out: 10 for validation and 10 for testing, with all remaining files used for training.
###Code
train_files = filenames[:-20]
val_files = filenames[-20: -10]
test_files = filenames[-10:]
print('Training set size', len(train_files))
print('Validation set size', len(val_files))
print('Test set size', len(test_files))
###Output
_____no_output_____
###Markdown
Reading audio files and their labels The audio file will initially be read as a binary file, which you'll want to convert into a numerical tensor.To load an audio file, you will use [`tf.audio.decode_wav`](https://www.tensorflow.org/api_docs/python/tf/audio/decode_wav), which returns the WAV-encoded audio as a Tensor and the sample rate.A WAV file contains time series data with a set number of samples per second. Each sample represents the amplitude of the audio signal at that specific time. In a 16-bit system, like the files in `mini_speech_commands`, the values range from -32768 to 32767. The sample rate for this dataset is 16kHz.Note that `tf.audio.decode_wav` will normalize the values to the range [-1.0, 1.0].
###Code
def decode_audio(audio_binary):
audio, _ = tf.audio.decode_wav(audio_binary)
return tf.squeeze(audio, axis=-1)
###Output
_____no_output_____
###Markdown
The label for each WAV file is its parent directory.
###Code
def get_label(file_path):
parts = tf.strings.split(file_path, os.path.sep)
# Note: You'll use indexing here instead of tuple unpacking to enable this
# to work in a TensorFlow graph.
return parts[-2]
###Output
_____no_output_____
###Markdown
Let's define a method that will take in the filename of the WAV file and output a tuple containing the audio and labels for supervised training.
###Code
def get_waveform_and_label(file_path):
label = get_label(file_path)
audio_binary = tf.io.read_file(file_path)
waveform = decode_audio(audio_binary)
return waveform, label
###Output
_____no_output_____
###Markdown
You will now apply `get_waveform_and_label` to build your training set of audio-label pairs and check the results. You'll build the validation and test sets using a similar procedure later on.
###Code
AUTOTUNE = tf.data.AUTOTUNE
files_ds = tf.data.Dataset.from_tensor_slices(train_files)
waveform_ds = files_ds.map(get_waveform_and_label, num_parallel_calls=AUTOTUNE)
###Output
_____no_output_____
###Markdown
Let's examine a few audio waveforms with their corresponding labels.
###Code
rows = 3
cols = 3
n = rows*cols
fig, axes = plt.subplots(rows, cols, figsize=(10, 12))
for i, (audio, label) in enumerate(waveform_ds.take(n)):
r = i // cols
c = i % cols
ax = axes[r][c]
ax.plot(audio.numpy())
ax.set_yticks(np.arange(-1.2, 1.2, 0.2))
label = label.numpy().decode('utf-8')
ax.set_title(label)
plt.show()
###Output
_____no_output_____
###Markdown
SpectrogramYou'll convert the waveform into a spectrogram, which shows frequency changes over time and can be represented as a 2D image. This can be done by applying the short-time Fourier transform (STFT) to convert the audio into the time-frequency domain.A Fourier transform ([`tf.signal.fft`](https://www.tensorflow.org/api_docs/python/tf/signal/fft)) converts a signal to its component frequencies, but loses all time information. The STFT ([`tf.signal.stft`](https://www.tensorflow.org/api_docs/python/tf/signal/stft)) splits the signal into windows of time and runs a Fourier transform on each window, preserving some time information, and returning a 2D tensor that you can run standard convolutions on.STFT produces an array of complex numbers representing magnitude and phase. However, you'll only need the magnitude for this tutorial, which can be derived by applying `tf.abs` on the output of `tf.signal.stft`. Choose `frame_length` and `frame_step` parameters such that the generated spectrogram "image" is almost square. For more information on STFT parameters choice, you can refer to [this video](https://www.coursera.org/lecture/audio-signal-processing/stft-2-tjEQe) on audio signal processing. You also want the waveforms to have the same length, so that when you convert it to a spectrogram image, the results will have similar dimensions. This can be done by simply zero padding the audio clips that are shorter than one second.
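For reference, with analysis window $w[n]$ of length $N$ (`frame_length`) and hop size $H$ (`frame_step`), the STFT and the magnitude spectrogram computed below are $$X[m,k]=\sum_{n=0}^{N-1}x[n+mH]\,w[n]\,e^{-i 2\pi k n/N},\qquad S[m,k]=\bigl|X[m,k]\bigr|,$$ where $m$ indexes the time frame and $k$ the frequency bin.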
###Code
def get_spectrogram(waveform):
# Padding for files with less than 16000 samples
zero_padding = tf.zeros([16000] - tf.shape(waveform), dtype=tf.float32)
# Concatenate audio with padding so that all audio clips will be of the
# same length
waveform = tf.cast(waveform, tf.float32)
equal_length = tf.concat([waveform, zero_padding], 0)
spectrogram = tf.signal.stft(
equal_length, frame_length=255, frame_step=128)
spectrogram = tf.abs(spectrogram)
return spectrogram
import numpy as np
class Uint8LogMelFeatureExtractor(object):
"""Provide uint8 log mel spectrogram slices from an AudioRecorder object.
This class provides one public method, get_next_spectrogram(), which gets
a specified number of spectral slices from an AudioRecorder.
"""
def __init__(self, num_frames_hop=48):
self.spectrogram_window_length_seconds = 0.025
self.spectrogram_hop_length_seconds = 0.010
self.num_mel_bins = 64 #32
self.frame_length_spectra = 96 #98
if self.frame_length_spectra % num_frames_hop:
raise ValueError('Invalid num_frames_hop value (%d), '
'must devide %d' % (num_frames_hop,
self.frame_length_spectra))
self.frame_hop_spectra = num_frames_hop
self._norm_factor = 3
self._clear_buffers()
def _clear_buffers(self):
self._audio_buffer = np.array([], dtype=np.int16).reshape(0, 1)
self._spectrogram = np.zeros((self.frame_length_spectra, self.num_mel_bins),
dtype=np.float32)
def _spectrogram_underlap_samples(self, audio_sample_rate_hz):
return int((self.spectrogram_window_length_seconds -
self.spectrogram_hop_length_seconds) * audio_sample_rate_hz)
def _frame_duration_seconds(self, num_spectra):
return (self.spectrogram_window_length_seconds +
(num_spectra - 1) * self.spectrogram_hop_length_seconds)
def compute_spectrogram_and_normalize(self, audio_samples, audio_sample_rate_hz):
spectrogram = self._compute_spectrogram(audio_samples, audio_sample_rate_hz)
spectrogram -= np.mean(spectrogram, axis=0)
if self._norm_factor:
spectrogram /= self._norm_factor * np.std(spectrogram, axis=0)
spectrogram += 1
spectrogram *= 127.5
return np.maximum(0, np.minimum(255, spectrogram)).astype(np.float32)
def _compute_spectrogram(self, audio_samples, audio_sample_rate_hz):
"""Compute log-mel spectrogram and scale it to uint8."""
samples = audio_samples.flatten() / float(2**15)
spectrogram = 30 * (
mel_features.log_mel_spectrogram(
samples,
audio_sample_rate_hz,
log_offset=0.001,
window_length_secs=self.spectrogram_window_length_seconds,
hop_length_secs=self.spectrogram_hop_length_seconds,
num_mel_bins=self.num_mel_bins,
lower_edge_hertz=60,
upper_edge_hertz=3800) - np.log(1e-3))
return spectrogram
def _get_next_spectra(self, recorder, num_spectra):
"""Returns the next spectrogram.
Compute num_spectra spectrogram samples from an AudioRecorder.
Blocks until num_spectra spectrogram slices are available.
Args:
recorder: an AudioRecorder object from which to get raw audio samples.
num_spectra: the number of spectrogram slices to return.
Returns:
num_spectra spectrogram slices computed from the samples.
"""
required_audio_duration_seconds = self._frame_duration_seconds(num_spectra)
logger.info("required_audio_duration_seconds %f",
required_audio_duration_seconds)
required_num_samples = int(
np.ceil(required_audio_duration_seconds *
recorder.audio_sample_rate_hz))
logger.info("required_num_samples %d, %s", required_num_samples,
str(self._audio_buffer.shape))
audio_samples = np.concatenate(
(self._audio_buffer,
recorder.get_audio(required_num_samples - len(self._audio_buffer))[0]))
self._audio_buffer = audio_samples[
required_num_samples -
self._spectrogram_underlap_samples(recorder.audio_sample_rate_hz):]
spectrogram = self._compute_spectrogram(
audio_samples[:required_num_samples], recorder.audio_sample_rate_hz)
assert len(spectrogram) == num_spectra
return spectrogram
def get_next_spectrogram(self, recorder):
"""Get the most recent spectrogram frame.
Blocks until the frame is available.
Args:
recorder: an AudioRecorder instance which provides the audio samples.
Returns:
The next spectrogram frame as a uint8 numpy array.
"""
assert recorder.is_active
logger.info("self._spectrogram shape %s", str(self._spectrogram.shape))
self._spectrogram[:-self.frame_hop_spectra] = (
self._spectrogram[self.frame_hop_spectra:])
self._spectrogram[-self.frame_hop_spectra:] = (
self._get_next_spectra(recorder, self.frame_hop_spectra))
# Return a copy of the internal state that's safe to persist and won't
# change the next time we call this function.
logger.info("self._spectrogram shape %s", str(self._spectrogram.shape))
spectrogram = self._spectrogram.copy()
spectrogram -= np.mean(spectrogram, axis=0)
if self._norm_factor:
spectrogram /= self._norm_factor * np.std(spectrogram, axis=0)
spectrogram += 1
spectrogram *= 127.5
return np.maximum(0, np.minimum(255, spectrogram)).astype(np.uint8)
feature_extractor = Uint8LogMelFeatureExtractor()
def get_spectrogram2(waveform):
"""
# Padding for files with less than 16000 samples
zero_padding = tf.zeros([16000] - tf.shape(waveform), dtype=tf.float32)
# Concatenate audio with padding so that all audio clips will be of the
# same length
waveform = tf.cast(waveform, tf.float32)
equal_length = tf.concat([waveform, zero_padding], 0)
spectrogram = tf.signal.stft(
equal_length, frame_length=255, frame_step=128)
spectrogram = tf.abs(spectrogram)
return spectrogram
"""
waveform = waveform.numpy()
#print(waveform.shape)
#print(type(waveform))
spectrogram = feature_extractor.compute_spectrogram_and_normalize(waveform[:15680], 16000)
return spectrogram
for waveform, label in waveform_ds.take(1):
label2 = label.numpy().decode('utf-8')
spectrogram2 = get_spectrogram2(waveform)
print('Label:', label2)
print('Waveform shape:', waveform.shape)
print('Spectrogram shape:', spectrogram2.shape)
print('Spectrogram type:', spectrogram2.dtype)
###Output
_____no_output_____
###Markdown
Next, you will explore the data. Compare the waveform, the spectrogram and the actual audio of one example from the dataset.
###Code
for waveform, label in waveform_ds.take(1):
label = label.numpy().decode('utf-8')
print(waveform.shape)
spectrogram = get_spectrogram(waveform)
print('Label:', label)
print('Waveform shape:', waveform.shape)
print('Spectrogram shape:', spectrogram.shape)
print('Audio playback')
print('Spectrogram type:', spectrogram.dtype)
display.display(display.Audio(waveform, rate=16000))
def plot_spectrogram(spectrogram, ax):
# Convert to frequencies to log scale and transpose so that the time is
# represented in the x-axis (columns).
log_spec = np.log(spectrogram.T)
height = log_spec.shape[0]
X = np.arange(16000, step=height + 1)
Y = range(height)
ax.pcolormesh(X, Y, log_spec)
fig, axes = plt.subplots(2, figsize=(12, 8))
timescale = np.arange(waveform.shape[0])
axes[0].plot(timescale, waveform.numpy())
axes[0].set_title('Waveform')
axes[0].set_xlim([0, 16000])
plot_spectrogram(spectrogram.numpy(), axes[1])
axes[1].set_title('Spectrogram')
plt.show()
###Output
_____no_output_____
###Markdown
Now transform the waveform dataset to have spectrogram images and their corresponding labels as integer IDs.
###Code
def get_spectrogram_and_label_id(audio, label):
spectrogram = get_spectrogram(audio)
spectrogram = tf.expand_dims(spectrogram, -1)
label_id = tf.argmax(label == commands)
return spectrogram, label_id
spectrogram_ds = waveform_ds.map(
get_spectrogram_and_label_id, num_parallel_calls=AUTOTUNE)
###Output
_____no_output_____
###Markdown
Examine the spectrogram "images" for different samples of the dataset.
###Code
rows = 3
cols = 3
n = rows*cols
fig, axes = plt.subplots(rows, cols, figsize=(10, 10))
for i, (spectrogram, label_id) in enumerate(spectrogram_ds.take(n)):
r = i // cols
c = i % cols
ax = axes[r][c]
plot_spectrogram(np.squeeze(spectrogram.numpy()), ax)
ax.set_title(commands[label_id.numpy()])
ax.axis('off')
plt.show()
###Output
_____no_output_____
###Markdown
Build and train the modelNow you can build and train your model. But before you do that, you'll need to repeat the training set preprocessing on the validation and test sets.
###Code
def preprocess_dataset(files):
files_ds = tf.data.Dataset.from_tensor_slices(files)
output_ds = files_ds.map(get_waveform_and_label, num_parallel_calls=AUTOTUNE)
output_ds = output_ds.map(
get_spectrogram_and_label_id, num_parallel_calls=AUTOTUNE)
return output_ds
train_ds = spectrogram_ds
val_ds = preprocess_dataset(val_files)
test_ds = preprocess_dataset(test_files)
def only_load_dataset(files):
files_ds = tf.data.Dataset.from_tensor_slices(files)
output_ds = files_ds.map(get_waveform_and_label, num_parallel_calls=AUTOTUNE)
return output_ds
train_waveform_data = only_load_dataset(train_files)
val_waveform_data = only_load_dataset(val_files)
test_waveform_data = only_load_dataset(test_files)
###Output
_____no_output_____
###Markdown
Batch the training and validation sets for model training.
###Code
batch_size = 64
train_ds = train_ds.batch(batch_size)
val_ds = val_ds.batch(batch_size)
###Output
_____no_output_____
###Markdown
Add dataset [`cache()`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#cache) and [`prefetch()`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#prefetch) operations to reduce read latency while training the model.
###Code
train_ds = train_ds.cache().prefetch(AUTOTUNE)
val_ds = val_ds.cache().prefetch(AUTOTUNE)
###Output
_____no_output_____
###Markdown
For the model, you'll use a simple convolutional neural network (CNN), since you have transformed the audio files into spectrogram images.The model also has the following additional preprocessing layers:- A [`Resizing`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing/Resizing) layer to downsample the input to enable the model to train faster.- A [`Normalization`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing/Normalization) layer to normalize each pixel in the image based on its mean and standard deviation.For the `Normalization` layer, its `adapt` method would first need to be called on the training data in order to compute aggregate statistics (i.e. mean and standard deviation).
###Code
#for spectrogram, _ in spectrogram_ds.take(1):
# input_shape = spectrogram.shape
for data_item, label in train_waveform_data.take(10):
spectrogram = feature_extractor.compute_spectrogram_and_normalize(data_item.numpy()[:15680], 16000)
print(spectrogram.shape)
if spectrogram.shape[0] != 96:
continue
input_shape = (spectrogram.shape[0], spectrogram.shape[1], 1)
print('Input shape:', input_shape)
num_labels = len(commands)
norm_layer = preprocessing.Normalization()
norm_layer.adapt(spectrogram_ds.map(lambda x, _: x))
#preprocessing.Resizing(32, 32),
model = models.Sequential([
layers.Input(shape=input_shape),
norm_layer,
layers.Conv2D(32, 3, activation='relu'),
layers.Conv2D(64, 3, activation='relu'),
layers.MaxPooling2D(),
layers.Dropout(0.25),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dropout(0.5),
layers.Dense(num_labels),
])
model.summary()
# https://github.com/antoinemrcr/vggish2Keras
from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten
from tensorflow.keras.models import Model
def get_vggish_keras():
NUM_FRAMES = 96 # Frames in input mel-spectrogram patch
NUM_BANDS = 64 # Frequency bands in input mel-spectrogram patch
EMBEDDING_SIZE = 128 # Size of embedding layer
input_shape = (NUM_FRAMES,NUM_BANDS,1)
img_input = Input( shape=input_shape)
# Block 1
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='conv1')(img_input)
x = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(x)
# Block 2
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(x)
# Block 3
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_1')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='pool3')(x)
# Block 4
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='pool4')(x)
# Block fc
x = Flatten(name='flatten')(x)
x = Dense(4096, activation='relu', name='fc1_1')(x)
x = Dense(4096, activation='relu', name='fc1_2')(x)
x = Dense(EMBEDDING_SIZE, activation='relu', name='fc2')(x)
model = Model(img_input, x, name='vggish')
return model
model_vggish = get_vggish_keras()
model_vggish.summary()
!ls
!du -sh vggish_weights.ckpt
# The file should be around 275M
checkpoint_path = 'vggish_weights.ckpt'
if os.path.exists(checkpoint_path):
print('Loading VGGish Checkpoint Path')
model_vggish.load_weights(checkpoint_path)
else:
print('{} not detected, weights not loaded'.format(checkpoint_path))
new_model = tf.keras.Sequential()
model_vggish.trainable = False
new_model.add(model_vggish)
new_model.add(layers.Dense(2, name='last'))
new_model.summary()
model = new_model
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'],
)
new_train_data = []
new_train_labels = []
new_val_data = []
new_val_labels = []
new_test_data = []
new_test_labels = []
for data_item, label in train_waveform_data:
spectrogram = feature_extractor.compute_spectrogram_and_normalize(data_item.numpy()[:15680], 16000)
label = label.numpy().decode('utf-8')
label_id = tf.argmax(label == commands)
# NOTE: Spectrogram shape is not always the same
if spectrogram.shape[0] != 96:
continue
new_train_data.append(spectrogram)
new_train_labels.append(label_id)
for data_item, label in val_waveform_data:
spectrogram = feature_extractor.compute_spectrogram_and_normalize(data_item.numpy()[:15680], 16000)
label = label.numpy().decode('utf-8')
label_id = tf.argmax(label == commands)
if spectrogram.shape[0] != 96:
continue
new_val_data.append(spectrogram)
new_val_labels.append(label_id)
for data_item, label in test_waveform_data:
spectrogram = feature_extractor.compute_spectrogram_and_normalize(data_item.numpy()[:15680], 16000)
label = label.numpy().decode('utf-8')
label_id = tf.argmax(label == commands)
if spectrogram.shape[0] != 96:
continue
new_test_data.append(spectrogram)
new_test_labels.append(label_id)
new_train_data = np.array(new_train_data).astype('float32')
new_val_data = np.array(new_val_data).astype('float32')
new_test_data = np.array(new_test_data).astype('float32')
new_train_labels = np.array(new_train_labels)
new_val_labels = np.array(new_val_labels)
new_test_labels = np.array(new_test_labels)
# (1, 98, 32, 1)
new_train_data = np.expand_dims(new_train_data, axis=3)
new_val_data = np.expand_dims(new_val_data, axis=3)
new_test_data = np.expand_dims(new_test_data, axis=3)
print('--------')
print(new_train_data.shape)
print(new_val_data.shape)
print(new_test_data.shape)
print(new_train_labels.shape)
print(new_val_labels.shape)
print(new_test_labels.shape)
print('--------')
EPOCHS = 30
#history = model.fit(
# train_ds,
# validation_data=val_ds,
# epochs=EPOCHS,
# callbacks=tf.keras.callbacks.EarlyStopping(verbose=1, patience=2),
#)
history = model.fit(
new_train_data, new_train_labels,
validation_data=(new_val_data, new_val_labels),
epochs=EPOCHS,
#callbacks=tf.keras.callbacks.EarlyStopping(verbose=1, patience=2),
)
###Output
_____no_output_____
###Markdown
Let's check the training and validation loss curves to see how your model has improved during training.
###Code
metrics = history.history
plt.plot(history.epoch, metrics['loss'], metrics['val_loss'])
plt.legend(['loss', 'val_loss'])
plt.show()
###Output
_____no_output_____
###Markdown
Evaluate test set performanceLet's run the model on the test set and check performance.
###Code
#test_audio = []
#test_labels = []
#for audio, label in test_ds:
# test_audio.append(audio.numpy())
# test_labels.append(label.numpy())
#test_audio = np.array(test_audio)
#test_labels = np.array(test_labels)
test_audio = new_test_data
test_labels = new_test_labels
y_pred = np.argmax(model.predict(test_audio), axis=1)
y_true = test_labels
test_acc = sum(y_pred == y_true) / len(y_true)
print(f'Test set accuracy: {test_acc:.0%}')
###Output
_____no_output_____
###Markdown
Display a confusion matrixA confusion matrix is helpful to see how well the model did on each of the commands in the test set.
###Code
confusion_mtx = tf.math.confusion_matrix(y_true, y_pred)
plt.figure(figsize=(10, 8))
sns.heatmap(confusion_mtx, xticklabels=commands, yticklabels=commands,
annot=True, fmt='g')
plt.xlabel('Prediction')
plt.ylabel('Label')
plt.show()
###Output
_____no_output_____
###Markdown
Run inference on an audio fileFinally, verify the model's prediction output using an input audio file, in this case a cough sample from the custom dataset. How well does your model perform?
###Code
!ls data/mini_speech_commands/cough
#sample_file = data_dir/'no/01bb6a2a_nohash_0.wav'
#sample_file = data_dir/'left/b46e8153_nohash_0.wav'
#sample_file = data_dir/'no/ac7840d8_nohash_1.wav'
#sample_file = data_dir/'no/5588c7e6_nohash_0.wav'
#sample_file = data_dir/'up/52e228e9_nohash_0.wav'
sample_file = data_dir/'cough/pos-0422-096-cough-m-31-8.wav'
#sample_ds = preprocess_dataset([str(sample_file)])
X = only_load_dataset([str(sample_file)])
for waveform, label in X.take(1):
label = label.numpy().decode('utf-8')
print(waveform, label)
spectrogram = feature_extractor.compute_spectrogram_and_normalize(waveform.numpy()[:15680], 16000)
# NOTE: Dimensions need to be expanded
spectrogram = np.expand_dims(spectrogram, axis=-1)
spectrogram = np.expand_dims(spectrogram, axis=0)
print(spectrogram.shape)
prediction = model(spectrogram)
print(prediction.shape)
plt.bar(commands, tf.nn.softmax(prediction[0]))
plt.title(f'Predictions for "{label}"')
plt.show()
#for spectrogram, label in sample_ds.batch(1):
# prediction = model(spectrogram)
# plt.bar(commands, tf.nn.softmax(prediction[0]))
# plt.title(f'Predictions for "{commands[label[0]]}"')
# plt.show()
print(model)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
# Save the model.
with open('model.tflite', 'wb') as f:
f.write(tflite_model)
! curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
! echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list
! sudo apt-get update
! sudo apt-get install edgetpu-compiler
# Define representative dataset
print(new_test_data.shape)
def representative_dataset():
    # Yields calibration samples for post-training quantization; here the whole test set is yielded as a single batch
    yield [new_test_data]
# Add quantization in order to run on the EdgeTPU
converter2 = tf.lite.TFLiteConverter.from_keras_model(model)
converter2.optimizations = [tf.lite.Optimize.DEFAULT]
converter2.representative_dataset = representative_dataset
tflite_quant_model = converter2.convert()
with open('model_quantized.tflite', 'wb') as f:
f.write(tflite_quant_model)
!edgetpu_compiler model_quantized.tflite
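# Editor's sketch (not part of the original run): the conversion above uses
# default post-training quantization, which can leave float ops in the graph,
# so the Edge TPU compiler may map only part of the model to the TPU.
# Full integer quantization is usually needed to map everything; a typical
# configuration looks like the following. `converter3` and
# `representative_dataset_per_sample` are illustrative names.
def representative_dataset_per_sample():
    # Yield one calibration sample at a time, shaped like the model input (1, H, W, 1)
    for sample in new_test_data[:100]:
        yield [np.expand_dims(sample, axis=0).astype(np.float32)]
converter3 = tf.lite.TFLiteConverter.from_keras_model(model)
converter3.optimizations = [tf.lite.Optimize.DEFAULT]
converter3.representative_dataset = representative_dataset_per_sample
converter3.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter3.inference_input_type = tf.int8
converter3.inference_output_type = tf.int8
#tflite_full_int8_model = converter3.convert()
#with open('model_full_int8.tflite', 'wb') as f:
#    f.write(tflite_full_int8_model)
#!edgetpu_compiler model_full_int8.tflite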
!ls -l
!ls -l
# https://www.tensorflow.org/lite/guide/inference
interpreter = tf.lite.Interpreter(model_path="model.tflite")
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
print(input_details)
print(output_details)
# Test the model on random input data.
input_shape = input_details[0]['shape']
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
# The function `get_tensor()` returns a copy of the tensor data.
# Use `tensor()` in order to get a pointer to the tensor.
output_data = interpreter.get_tensor(output_details[0]['index'])
print(output_data)
#sample_file = data_dir/'no/01bb6a2a_nohash_0.wav'
#sample_file = data_dir/'left/b46e8153_nohash_0.wav'
sample_file = data_dir/'cough/pos-0422-096-cough-m-31-8.wav'
#sample_ds = preprocess_dataset([str(sample_file)])
#waveform, label = get_waveform_and_label(sample_file)
#spectrogram = feature_extractor._compute_spectrogram(waveform, 16000)
X = only_load_dataset([str(sample_file)])
for waveform, label in X.take(1):
label = label.numpy().decode('utf-8')
spectrogram = feature_extractor.compute_spectrogram_and_normalize(waveform.numpy()[:15680], 16000)
spectrogram = np.expand_dims(spectrogram, axis=-1)
spectrogram = np.expand_dims(spectrogram, axis=0)
print('Original--------------------')
print(spectrogram.shape)
prediction = model(spectrogram)
print(prediction)
print('TFLITE--------------------')
# NOTE: dtype needs to be np.float32
input_data = np.array(spectrogram, dtype=np.float32)
print(input_data.shape)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
prediction2 = interpreter.get_tensor(output_details[0]['index'])
print(prediction2)
print(np.argmax(np.array(prediction).flatten()))
print(np.argmax(np.array(prediction2).flatten()))
# NOTE: Remember to add softmax after the prediction
plt.bar(commands, tf.nn.softmax(prediction[0]))
plt.title(f'Predictions for "{label}"')
plt.show()
plt.imshow(np.squeeze(spectrogram).T)
plt.show()
###Output
_____no_output_____
###Markdown
If training went well, the model should clearly recognize the audio sample above as "cough" (the class of the sample file used).
###Code
from google.colab import files
files.download('model.tflite')
from google.colab import files
files.download('model_quantized_edgetpu.tflite')
###Output
_____no_output_____ |
module2-loadingdata/LS_DS_112_Loading_Data.ipynb | ###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - !curl method - CSV upload method- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot- "Clean" a dataset using common Python libraries - Removing NaN values "Interpolation" Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
?pd.read_csv
??pd.read_csv
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
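# One possible solution (an editor's sketch, not the original author's answer):
# build the name list from the codebook above and pass it via `names=`;
# with explicit names, read_csv treats every row as data.
col_names = ['name', 'landmass', 'zone', 'area', 'population', 'language',
             'religion', 'bars', 'stripes', 'colours', 'red', 'green', 'blue',
             'gold', 'white', 'black', 'orange', 'mainhue', 'circles',
             'crosses', 'saltires', 'quarters', 'sunstars', 'crescent',
             'triangle', 'icon', 'animate', 'text', 'topleft', 'botright']
flag_data = pd.read_csv(flag_data_url, header=None, names=col_names)
flag_data.head()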
###Output
_____no_output_____
###Markdown
Loading from a local CSV to Google Colab
###Code
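# Editor's sketch (assumes a Colab runtime): files.upload() opens a file
# picker in the browser and returns a dict mapping filename -> file bytes.
import io
from google.colab import files
uploaded = files.upload()
# Read whichever file was uploaded (first entry of the dict)
local_df = pd.read_csv(io.BytesIO(next(iter(uploaded.values()))))
local_df.head()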
###Output
_____no_output_____
###Markdown
Part 2 - Basic Visualizations Basic Data Visualizations Using Matplotlib
###Code
import matplotlib.pyplot as plt
# Scatter Plot
# Histogram
# Seaborn Density Plot
# Seaborn Pairplot
###Output
_____no_output_____
###Markdown
Create the same basic Visualizations using Pandas
###Code
# Pandas Histogram - Look familiar?
# Pandas Scatterplot
# Pandas Scatter Matrix - Usually doesn't look too great.
###Output
_____no_output_____
###Markdown
Part 3 - Deal with Missing Values Diagnose Missing Values. Let's use the Adult Dataset from UCI.
###Code
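# Editor's sketch: the adult data marks missing values with ' ?', so convert
# them to NaN on import (na_values) and strip the stray leading spaces
# (skipinitialspace). Column names follow the UCI description; the target
# column label 'income' is just illustrative.
adult_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'
adult_cols = ['age', 'workclass', 'fnlwgt', 'education', 'education-num',
              'marital-status', 'occupation', 'relationship', 'race', 'sex',
              'capital-gain', 'capital-loss', 'hours-per-week',
              'native-country', 'income']
adult = pd.read_csv(adult_url, header=None, names=adult_cols,
                    skipinitialspace=True, na_values='?')
adult.isna().sum()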
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
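# Editor's sketch (assumes the `adult` frame from the sketch above): the
# columns with missing values are categorical, so fill them with the most
# frequent value via .fillna() - nothing fancier is needed for today.
for col in ['workclass', 'occupation', 'native-country']:
    adult[col] = adult[col].fillna(adult[col].mode()[0])
adult.isna().sum()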
###Output
_____no_output_____
###Markdown
Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar semi-clean source. You don't want the data that you're working with for this assignment to have any bigger issues than maybe not having headers or including missing values, etc.After you have chosen your dataset, do the following:- Import the dataset using the method that you are least comfortable with (!curl or CSV upload). - Make sure that your dataset has the number of rows and columns that you expect. - Make sure that your dataset has appropriate column names, rename them if necessary. - If your dataset uses markers like "?" to indicate missing values, replace them with NaNs during import.- Identify and fill missing values in your dataset (if any) - Don't worry about using methods more advanced than the `.fillna()` function for today.- Create one of each of the following plots using your dataset - Scatterplot - Histogram - Density Plot - Pairplot (note that pairplots will take a long time to load with large datasets or datasets with many columns)If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck!).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
from google.colab import files
uploaded = files.upload()
columnDes= ['buying', 'maint', 'doors', 'persons', 'lug_boot', 'safety', 'class value']
#put csv in pandas dataframe
df = pd.read_csv('car.data', header= None, names=columnDes)
#displayed data
df.head(100)
#checked for any null values
df.isnull().sum()
###Output
_____no_output_____
###Markdown
Stretch Goals - Other types and sources of dataNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.One last major source of data is APIs: https://github.com/toddmotto/public-apisAPI stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. Image, text, or (public) APIs are probably more tractable - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
###Code
space= pd.read_csv('https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=exoplanets')
space.head(20)
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - !curl method - CSV upload method- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot- "Clean" a dataset using common Python libraries - Removing NaN values "Interpolation" Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
?pd.read_csv
??pd.read_csv
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
###Output
_____no_output_____
###Markdown
Loading from a local CSV to Google Colab
###Code
#df = pd.read_csv()  # incomplete call - needs the path of a CSV file (e.g. one on the Drive mounted below)
from google.colab import drive
drive.mount('/content/drive')
###Output
Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=email%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdocs.test%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive.photos.readonly%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fpeopleapi.readonly&response_type=code
Enter your authorization code:
··········
Mounted at /content/drive
###Markdown
Part 2 - Basic Visualizations Basic Data Visualizations Using Matplotlib
###Code
import matplotlib.pyplot as plt
# Scatter Plot
# The columns aren't labeled
#flag_data.plot.scatter('population', 'area')
# Histogram
flag_data.area.hist(bins=50);
# Seaborn Density Plot
flag_data.plot.kde()
# Seaborn Pairplot
import seaborn as sns;
sns.set(style='ticks', color_codes=True)
g = sns.pairplot(flag_data)
###Output
_____no_output_____
###Markdown
Create the same basic Visualizations using Pandas
###Code
# Pandas Histogram - Look familiar?
# Pandas Scatterplot
# Pandas Scatter Matrix - Usually doesn't look too great.
###Output
_____no_output_____
###Markdown
Part 3 - Deal with Missing Values Diagnose Missing Values. Let's use the Adult Dataset from UCI.
###Code
df = pd.read_csv('example')
df.head()
df.isnull().sum()
#This shows no missing values, but UCI says the dataset does contain them.
#df = pd.read_csv('example', na_values='?')
#na_values='?' should turn the question marks into NaN on import, but it didn't work here
# import numpy as np
# df.replace('?', np.NaN, inplace=True)
#STILL didn't work...
#Let's inspect an exact question mark: row 14, country column
#df.county.iloc[14]
#returns ' ?' - the issue is the leading space; all of the string values have a space in front
#df = df.apply(lambda x: x.str.strip() if x.dtype == 'object' else x)
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
###Output
_____no_output_____
###Markdown
Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar semi-clean source. You don't want the data that you're working with for this assignment to have any bigger issues than maybe not having headers or including missing values, etc.After you have chosen your dataset, do the following:- Import the dataset using the method that you are least comfortable with (!curl or CSV upload). - Make sure that your dataset has the number of rows and columns that you expect. - Make sure that your dataset has appropriate column names, rename them if necessary. - If your dataset uses markers like "?" to indicate missing values, replace them with NaNs during import.- Identify and fill missing values in your dataset (if any) - Don't worry about using methods more advanced than the `.fillna()` function for today.- Create one of each of the following plots using your dataset - Scatterplot - Histogram - Density Plot - Pairplot (note that pairplots will take a long time to load with large datasets or datasets with many columns)If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck!).If you have loaded a few traditional datasets, see the following section for suggested stretch goals. **WINE_FRAME**
###Code
#Import the wine database through pd read csv with a URL
import pandas as pd
wine_frame = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data')
wine_frame.head()
#Head() showed there was no column headers, create columns
columns = ['Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium', 'Total phenols', 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins', 'Color intensity', 'Hue', 'OD280/OD315', 'Proline']
#Re-create the dataframe to include headers
wine_frame = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', header=None, names=columns)
wine_frame.head()
#Dataframe has '1' index for every case - that "index" is actually the wine class label: wine.data has 14 columns but only 13 names were given, so pandas used the extra leading column as the index
wine_frame = wine_frame.reset_index()
#Drop the index column (note: this discards the class label)
del wine_frame['index']
wine_frame.head()
#Everything looks good, double check for missing values
wine_frame.isnull().sum()
wine_frame.plot.scatter('Alcohol', 'Total phenols');
wine_frame.plot.scatter('Ash', 'Alcalinity of ash');
wine_frame.hist(column='Total phenols');
wine_frame.plot.kde();
import seaborn as sns;
sns.set(style='ticks', color_codes=True)
g = sns.pairplot(wine_frame)
###Output
_____no_output_____
###Markdown
**ADULT DATA SET**
###Code
from google.colab import files
uploaded = files.upload()
adult_df = pd.read_csv('adult.data')
adult_df.head(20)
#column headers are absent
columns = ['age', 'workclass', 'fnlwgt', 'ed', 'ed-num', 'marital', 'occ', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours', 'native', '50k']
adult_df = pd.read_csv('adult.data', names=columns)
adult_df.head()
adult_df.shape
adult_df.isnull().sum()
#No nulls shown, but dataset said missing data
adult_df.head(20)
#Question marks are in place of null values, previous exercise showed this issue is due to ' ?'
#Let's approach it a different way
len(adult_df[adult_df['native'] == ' ?'])
import random
countries = []
for x in adult_df['native']:
if x == ' ?':
pass
else:
countries.append(x)
#NOTE: random.choice is evaluated once, so every ' ?' gets the same randomly drawn country (drawn in proportion to how often each country appears in `countries`)
adult_df['native'] = adult_df['native'].replace(' ?', random.choice(countries))
adult_df.head(20)
len(adult_df[adult_df['native'] == ' ?'])
print(countries)
###Output
_____no_output_____
###Markdown
Stretch Goals - Other types and sources of dataNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.One last major source of data is APIs: https://github.com/toddmotto/public-apisAPI stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. Image, text, or (public) APIs are probably more tractable - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
###Code
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - !curl method - CSV upload method- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot- "Clean" a dataset using common Python libraries - Removing NaN values "Interpolation" Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.php- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url, header=None)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
?pd.read_csv
??pd.read_csv
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
columns = """1. name: Name of the country concerned
2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania
3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW
4. area: in thousands of square km
5. population: in round millions
6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others
7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others
8. bars: Number of vertical bars in the flag
9. stripes: Number of horizontal stripes in the flag
10. colours: Number of different colours in the flag
11. red: 0 if red absent, 1 if red present in the flag
12. green: same for green
13. blue: same for blue
14. gold: same for gold (also yellow)
15. white: same for white
16. black: same for black
17. orange: same for orange (also brown)
18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)
19. circles: Number of circles in the flag
20. crosses: Number of (upright) crosses
21. saltires: Number of diagonal crosses
22. quarters: Number of quartered sections
23. sunstars: Number of sun or star symbols
24. crescent: 1 if a crescent moon symbol present, else 0
25. triangle: 1 if any triangles present, 0 otherwise
26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 0
27. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise
28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise
29. topleft: colour in the top-left corner (moving right to decide tie-breaks)
30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)"""
columns = columns.splitlines()
columns = [name.split(':')[0].split('.')[1].strip() for name in columns ]
print(columns)
languages = "1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others".split(', ')
languages = [language.split("=")[1] for language in languages]
languages = { i + 1 : languages[i] for i in range(0, len(languages) ) }
print(languages)
flag_data.columns = columns
flag_data.head()
flag_data['language'] = flag_data['language'].map(languages)
flag_data.head()
link1 = 'https://raw.githubusercontent.com/BJanota11/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module2-loadingdata/drinks_with_regions.csv'
link2 = 'https://raw.githubusercontent.com/BJanota11/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module2-loadingdata/drinks_with_regions_index.csv'
link3 = 'https://raw.githubusercontent.com/BJanota11/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module2-loadingdata/drinks_with_regions_header.csv'
df3 = pd.read_csv(link3, header=None)
df3.head()
columns = df3.iloc[3].values
columns
df3 = df3.iloc[4:]
df3.columns = columns
df3.head()
# unnecessary?
df3 = df3.reset_index(drop=True)
df3 = pd.read_csv(link3, skiprows=3)
df3.head()
###Output
_____no_output_____
###Markdown
Loading from a local CSV to Google Colab
###Code
from google.colab import files
upload = files.upload()
###Output
_____no_output_____
###Markdown
Part 2 - Basic Visualizations Basic Data Visualizations Using Matplotlib
###Code
import matplotlib.pyplot as plt
# Scatter Plot
plt.scatter(df3.beer_servings, df3.wine_servings)
plt.xlabel('beer_servings')
plt.ylabel('wine_servings')
plt.show()
df3.plot.scatter('beer_servings', 'wine_servings');
# Histogram
plt.hist(df3['total_litres_of_pure_alcohol'], bins=50);
df3['total_litres_of_pure_alcohol'].hist(bins=60);
import seaborn as sns
# Seaborn Density Plot
sns.distplot(df3['total_litres_of_pure_alcohol'], bins=60, )
# Seaborn Pairplot
sns.pairplot(df3)
###Output
_____no_output_____
###Markdown
Create the same basic Visualizations using Pandas
###Code
# Pandas Histogram - Look familiar?
# Pandas Scatterplot
# Pandas Scatter Matrix - Usually doesn't look too great.
###Output
_____no_output_____
###Markdown
Part 3 - Deal with Missing Values Diagnose Missing Values. Let's use the Adult Dataset from UCI.
###Code
adult = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/adult.csv', skipinitialspace=True, na_values="?")
print(adult.shape)
adult.head()
###Output
(32561, 15)
###Markdown
Fill Missing Values
###Code
adult.isna().sum()
adult.describe(include='all')
adult_old = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data', header=None)
print(adult_old.shape)
adult_old.head()
###Output
_____no_output_____
###Markdown
Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar semi-clean source. You don't want the data that you're working with for this assignment to have any bigger issues than maybe not having headers or including missing values, etc.After you have chosen your dataset, do the following:- Import the dataset using the method that you are least comfortable with (!curl or CSV upload). - Make sure that your dataset has the number of rows and columns that you expect. - Make sure that your dataset has appropriate column names, rename them if necessary. - If your dataset uses markers like "?" to indicate missing values, replace them with NaNs during import.- Identify and fill missing values in your dataset (if any) - Don't worry about using methods more advanced than the `.fillna()` function for today.- Create one of each of the following plots using your dataset - Scatterplot - Histogram - Density Plot - Pairplot (note that pairplots will take a long time to load with large datasets or datasets with many columns)If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck!).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
###Output
_____no_output_____
###Markdown
Stretch Goals - Other types and sources of dataNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.One last major source of data is APIs: https://github.com/toddmotto/public-apisAPI stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. Image, text, or (public) APIs are probably more tractable - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
###Code
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - From a URL (github or otherwise) - CSV upload method - !wget method- "Clean" a dataset using common Python libraries - Removing NaN values "Data Imputation"- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot (if we have time) Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
###Output
_____no_output_____
###Markdown
Steps of Loading and Exploring a Dataset:- Find a dataset that looks interesting- Learn what you can about it - What's in it? - How many rows and columns? - What types of variables?- Look at the raw contents of the file- Load it into your workspace (notebook) - Handle any challenges with headers - Handle any problems with missing values- Then you can start to explore the data - Look at the summary statistics - Look at counts of different categories - Make some plots to look at the distribution of the data 3 ways of loading a dataset From its URL
###Code
###Output
_____no_output_____
###Markdown
From a local file
###Code
###Output
_____no_output_____
###Markdown
Using the `!wget` command
###Code
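# Editor's sketch: !wget downloads the file into the notebook's working
# directory (-O sets the local filename); then read the local copy as usual.
!wget -O flag.data https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
flag_data = pd.read_csv('flag.data', header=None)
flag_data.head()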
###Output
_____no_output_____
###Markdown
Part 2 - Deal with Missing Values Diagnose Missing Values. Let's use the Adult Dataset from UCI.
###Code
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
###Output
_____no_output_____
###Markdown
Part 3 - Explore the Dataset: Look at "Summary Statistics Numeric
###Code
###Output
_____no_output_____
###Markdown
Non-Numeric
###Code
###Output
_____no_output_____
###Markdown
Look at Categorical Values Part 4 - Basic Visualizations (using the Pandas Library) Histogram
###Code
# Pandas Histogram
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - From a URL (github or otherwise) - CSV upload method - !wget method- "Clean" a dataset using common Python libraries - Removing NaN values "Data Imputation"- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot (if we have time) Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Change.
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
###Output
_____no_output_____
###Markdown
Steps of Loading and Exploring a Dataset:- Find a dataset that looks interesting- Learn what you can about it - What's in it? - How many rows and columns? - What types of variables?- Look at the raw contents of the file- Load it into your workspace (notebook) - Handle any challenges with headers - Handle any problems with missing values- Then you can start to explore the data - Look at the summary statistics - Look at counts of different categories - Make some plots to look at the distribution of the data 3 ways of loading a dataset From its URL
###Code
###Output
_____no_output_____
###Markdown
From a local file
###Code
###Output
_____no_output_____
###Markdown
Using the `!wget` command
###Code
###Output
_____no_output_____
###Markdown
Part 2 - Deal with Missing Values Diagnose Missing Values. Let's use the Adult Dataset from UCI.
###Code
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
###Output
_____no_output_____
###Markdown
Part 3 - Explore the Dataset: Look at "Summary Statistics Numeric
###Code
###Output
_____no_output_____
###Markdown
Non-Numeric
###Code
###Output
_____no_output_____
###Markdown
Look at Categorical Values Part 4 - Basic Visualizations (using the Pandas Library) Histogram
###Code
# Pandas Histogram
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - From a URL (github or otherwise) - CSV upload method - !wget method- "Clean" a dataset using common Python libraries - Removing NaN values "Data Imputation"- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot (if we have time) Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
#Nick Flannery makes a change
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
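# One possible answer to the exercise (a sketch, not the official solution): pass the
# 30 codebook names through `names=` so pandas doesn't label the columns 0..29.
# `flag_data_url` is the variable defined in the lecture cells above.
import pandas as pd

flag_columns = ['name', 'landmass', 'zone', 'area', 'population', 'language',
                'religion', 'bars', 'stripes', 'colours', 'red', 'green', 'blue',
                'gold', 'white', 'black', 'orange', 'mainhue', 'circles', 'crosses',
                'saltires', 'quarters', 'sunstars', 'crescent', 'triangle', 'icon',
                'animate', 'text', 'topleft', 'botright']

flag_data = pd.read_csv(flag_data_url, header=None, names=flag_columns)
flag_data.head()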
###Output
_____no_output_____
###Markdown
Steps of Loading and Exploring a Dataset:- Find a dataset that looks interesting- Learn what you can about it - What's in it? - How many rows and columns? - What types of variables?- Look at the raw contents of the file- Load it into your workspace (notebook) - Handle any challenges with headers - Handle any problems with missing values- Then you can start to explore the data - Look at the summary statistics - Look at counts of different categories - Make some plots to look at the distribution of the data 3 ways of loading a dataset From its URL
###Code
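# A minimal sketch of loading a CSV straight from its URL. The UCI Adult data - used
# again in Part 2 below - is a handy example; like the flag data it ships without a
# header row, and the column names here are taken from its UCI description page.
import pandas as pd

adult_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'
adult_columns = ['age', 'workclass', 'fnlwgt', 'education', 'education-num',
                 'marital-status', 'occupation', 'relationship', 'race', 'sex',
                 'capital-gain', 'capital-loss', 'hours-per-week',
                 'native-country', 'income']

adult = pd.read_csv(adult_url, header=None, names=adult_columns)
adult.head()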
###Output
_____no_output_____
###Markdown
From a local file
###Code
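# A minimal sketch of reading a local file, assuming 'adult.data' has already been
# downloaded next to the notebook (e.g. via the !wget cell below or a Colab upload);
# `adult_columns` is reused from the URL example above.
adult_local = pd.read_csv('adult.data', header=None, names=adult_columns)
adult_local.head()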
###Output
_____no_output_____
###Markdown
Using the `!wget` command
###Code
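# A minimal sketch of the shell-command route, assuming wget is available (it is on
# Google Colab). The file lands in the working directory and is then read locally.
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data
!ls
adult = pd.read_csv('adult.data', header=None, names=adult_columns)
adult.shape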
###Output
_____no_output_____
###Markdown
Part 2 - Deal with Missing Values Diagnose Missing Values Let's use the Adult Dataset from UCI.
###Code
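# A minimal sketch of diagnosing missing values in the Adult data. The catch is that
# the file marks gaps with ' ?' (note the leading space) rather than leaving them
# blank, so isna() sees nothing until those markers are converted to NaN.
# `adult_url` and `adult_columns` come from the loading cells above.
import numpy as np

adult = pd.read_csv(adult_url, header=None, names=adult_columns)
adult.isna().sum()                      # all zeros - looks suspiciously clean
adult['workclass'].value_counts()[:5]   # ...but ' ?' shows up as its own category
adult = adult.replace(' ?', np.nan)
adult.isna().sum()                      # now the real gaps are visible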
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
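# A minimal sketch of simple imputation - .fillna() is all that's needed for now.
# Categorical gaps get a placeholder label; a numeric column could take its median.
adult_filled = adult.fillna('Unknown')
adult_filled.isna().sum()

# numeric equivalent (none of the numeric columns here are actually missing):
# adult['age'] = adult['age'].fillna(adult['age'].median())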
###Output
_____no_output_____
###Markdown
Part 3 - Explore the Dataset: Look at "Summary Statistics" Numeric
###Code
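# A minimal sketch, assuming the adult DataFrame from Part 2 is still in memory:
# describe() summarises the numeric columns (count, mean, std, min, quartiles, max).
adult.describe()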
###Output
_____no_output_____
###Markdown
Non-Numeric
###Code
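# A minimal sketch: exclude='number' flips describe() to the object columns
# (count, unique, top, freq), and value_counts() breaks a single category down.
adult.describe(exclude='number')
adult['marital-status'].value_counts(normalize=True)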
###Output
_____no_output_____
###Markdown
Look at Categorical Values Part 4 - Basic Visualizations (using the Pandas Library) Histogram
###Code
# Pandas Histogram
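# A minimal sketch, assuming the adult DataFrame from Part 2 is still loaded:
adult['age'].hist(bins=20);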
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
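# A minimal sketch - a kernel density estimate is essentially a smoothed histogram:
adult['age'].plot.density();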
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
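# A minimal sketch - plot two numeric columns against each other:
adult.plot.scatter('age', 'hours-per-week');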
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - From a URL (github or otherwise) - CSV upload method - !wget method- "Clean" a dataset using common Python libraries - Removing NaN values "Data Imputation"- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot (if we have time) Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
#hum dum dee
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
###Output
_____no_output_____
###Markdown
Steps of Loading and Exploring a Dataset:- Find a dataset that looks interesting- Learn what you can about it - What's in it? - How many rows and columns? - What types of variables?- Look at the raw contents of the file- Load it into your workspace (notebook) - Handle any challenges with headers - Handle any problems with missing values- Then you can start to explore the data - Look at the summary statistics - Look at counts of different categories - Make some plots to look at the distribution of the data 3 ways of loading a dataset From its URL
###Code
###Output
_____no_output_____
###Markdown
From a local file
###Code
###Output
_____no_output_____
###Markdown
Using the `!wget` command
###Code
###Output
_____no_output_____
###Markdown
Part 2 - Deal with Missing Values Diagnose Missing Values Let's use the Adult Dataset from UCI.
###Code
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
###Output
_____no_output_____
###Markdown
Part 3 - Explore the Dataset: Look at "Summary Statistics" Numeric
###Code
###Output
_____no_output_____
###Markdown
Non-Numeric
###Code
###Output
_____no_output_____
###Markdown
Look at Categorical Values Part 4 - Basic Visualizations (using the Pandas Library) Histogram
###Code
# Pandas Histogram
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - !curl method - CSV upload method- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot- "Clean" a dataset using common Python libraries - Removing NaN values "Interpolation" Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
?pd.read_csv
??pd.read_csv
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
###Output
_____no_output_____
###Markdown
Loading from a local CSV to Google Colab
###Code
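# A minimal sketch of the Colab upload route, assuming you are running in Google
# Colab; the filename is whatever CSV you pick in the file dialog.
from google.colab import files
import pandas as pd

uploaded = files.upload()              # opens a file picker in the browser
filename = list(uploaded.keys())[0]    # files.upload() also saves the file locally
df = pd.read_csv(filename)
df.head()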
###Output
_____no_output_____
###Markdown
Part 2 - Basic Visualizations Basic Data Visualizations Using Matplotlib
###Code
import matplotlib.pyplot as plt
# Scatter Plot
# Histogram
# Seaborn Density Plot
# Seaborn Pairplot
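# A minimal sketch, assuming a DataFrame `df` is already loaded and that
# 'age' and 'hours-per-week' are two of its numeric columns (e.g. the Adult data).
import seaborn as sns

plt.scatter(df['age'], df['hours-per-week'], alpha=0.1)   # Scatter Plot
plt.show()

plt.hist(df['age'], bins=20)                              # Histogram
plt.show()

sns.kdeplot(df['age'])                                    # Seaborn Density Plot
plt.show()

sns.pairplot(df[['age', 'hours-per-week']])               # Seaborn Pairplot
plt.show()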
###Output
_____no_output_____
###Markdown
Create the same basic Visualizations using Pandas
###Code
# Pandas Histogram - Look familiar?
# Pandas Scatterplot
# Pandas Scatter Matrix - Usually doesn't look too great.
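# A minimal sketch of the pandas equivalents, again assuming an Adult-style `df`
# with numeric columns 'age', 'hours-per-week' and 'capital-gain':
df['age'].hist(bins=20);                                  # Pandas Histogram
df.plot.scatter('age', 'hours-per-week', alpha=0.1);      # Pandas Scatterplot

from pandas.plotting import scatter_matrix
scatter_matrix(df[['age', 'hours-per-week', 'capital-gain']], figsize=(10, 10));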
###Output
_____no_output_____
###Markdown
Part 3 - Deal with Missing Values Diagnose Missing Values Let's use the Adult Dataset from UCI.
###Code
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
###Output
_____no_output_____
###Markdown
Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar semi-clean source. You don't want the data that you're working with for this assignment to have any bigger issues than maybe not having headers or including missing values, etc.After you have chosen your dataset, do the following:- Import the dataset using the method that you are least comfortable with (!curl or CSV upload). - Make sure that your dataset has the number of rows and columns that you expect. - Make sure that your dataset has appropriate column names, rename them if necessary. - If your dataset uses markers like "?" to indicate missing values, replace them with NaNs during import.- Identify and fill missing values in your dataset (if any) - Don't worry about using methods more advanced than the `.fillna()` function for today.- Create one of each of the following plots using your dataset - Scatterplot - Histogram - Density Plot - Pairplot (note that pairplots will take a long time to load with large datasets or datasets with many columns)If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck!).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
###Output
_____no_output_____
###Markdown
Stretch Goals - Other types and sources of dataNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.One last major source of data is APIs: https://github.com/toddmotto/public-apisAPI stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. Image, text, or (public) APIs are probably more tractable - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
###Code
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
import pandas as pd
from google.colab import drive
drive.mount('/content/drive')
# data_url = ('https://archive.ics.uci.edu/ml/machine-learning-databases/00360/AirQualityUCI.zip')
# cols = [
# 'Date',
# 'Time',
# 'Avg. Concentration',
# 'Avg. Sensor Response',
# 'Non Metanic HydroCarbons Concentration',
# 'Benzene Concentration',
# ]
# df = pd.read_csv(data_url, header=None, names=cols)
# df.head()
data = ('/content/drive/My Drive/data/AirQualityUCI.xlsx')
#using read_excel because that sheet is cleaner
df = pd.read_excel(data, header=None)
df.head()
df.describe()
df.describe  # without the parentheses this only displays the bound method, not the summary table
df.shape     # shape is an attribute, not a method - calling df.shape() raises a TypeError
df.count()
df.isna().sum()
dir(df)
df.info()
df.head()
###Output
_____no_output_____
###Markdown
This data can seem to be quite confusing at first glance... But this is the attribute information:```date: date recordedtime: time recordedCO: measure of carbon monoxidePT08.S1: tin oxide measured with a sensor by the hour (all gases are measured by the hour)NMHC: acronym for 'non metanic hydrocarbons'C6H6: Benzene concentration (very flammable liquid) PT08.S2: Titania NOx: oxides of nitrogen as atmospheric pollutants PT08.S3: Tungsten oxide (NOx targeted) NO2: Nitrogen Dioxide PT08.S4: Tungsten oxide (NO2 targeted) PT08.S5: Indium Oxide T: Temperature in AtomicCelcius RH: Relative Humidity (%) AH: Absolute Humidity ```
###Code
#### Stretch goal content below
import requests
import json
api_url = ('https://deckofcardsapi.com/api/deck/new/shuffle/?deck_count=1')
response = requests.get(api_url)
response.text
!curl https://deckofcardsapi.com/api/deck/new/shuffle/?deck_count=1
def shuffle_cards():
response = requests.get('https://deckofcardsapi.com/api/deck/new/shuffle/?deck_count=1')
return json.loads(response.text)
# create a variable to store the shuffled cards so we can take features
deck = shuffle_cards()
deck
type(shuffle_cards())
### turning api dict into pandas dataframe using .from_dict
my_shuffled_deck = pd.DataFrame.from_dict(deck,orient='index')
my_shuffled_deck
#i think what we really need is that deck_id to continue with using the second
#API that lets us draw a card from said deck
deck_id = my_shuffled_deck.loc['deck_id']
### now we turn it into a string (a bit roundabout - indexing the original dict with deck['deck_id'] would give the id directly)
deck_id = deck_id.to_string()
### we have to remove that 0 and the white space
deck_id
deck_id_cut = deck_id[5:17]
deck_id_cut
## wonderful
#### API stretch goal continued below
###Output
_____no_output_____
###Markdown
Stretch Goals - Other types and sources of dataNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.One last major source of data is APIs: https://github.com/toddmotto/public-apisAPI stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. I suggest image, text, or (public) API - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
###Code
# api_url2 = ('https://deckofcardsapi.com/api/deck/1h43pn60ijj7/draw/?count=2')
# response2 = requests.get(api_url2)
# response2
## we are using
## https://deckofcardsapi.com/api/deck/<<deck_id>>/draw/?count=2
## where <<deck_id>> = deck_id from two cells above
def draw_card(deck_id):
response = requests.get('https://deckofcardsapi.com/api/deck/{}/draw/?count=2'.format(deck_id))
return json.loads(response.text)
draw_card_df = draw_card(deck_id_cut)
draw_card_df
# there we go
# time to turn the 'images' into processed images with pillows
# but first we should extract all of this data as a dataframe
# unsure if i have to reassign variables here or create new ones
df_draw_card = pd.DataFrame.from_dict(draw_card_df, orient='index')
cards_from_dict = df_draw_card.loc['cards']
# .loc['cards'] just wraps the raw list of card dicts in a Series; pd.DataFrame(draw_card_df['cards']) would build a proper one-row-per-card table
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
!ls
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
feature_map = {0: 'name',
1: 'landmass',
2: 'zone',
3: 'area',
4: 'population',
5: 'language',
6: 'religion',
7: 'bars',
8: 'stripes',
9: 'colours',
10: 'red',
11: 'green',
12: 'blue',
13: 'gold',
14: 'white',
15: 'black',
16: 'orange',
17: 'mainhue',
18: 'circles',
19: 'crosses',
20: 'saltires',
21: 'quarters',
22: 'sunstars',
23: 'crescent',
24: 'triangle',
25: 'icon',
26: 'animate',
27: 'text',
28: 'topleft',
29: 'botright'}
flag_data.rename(columns=feature_map, inplace=True)
flag_data.head()
###Output
_____no_output_____
###Markdown
Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data'
auto = pd.read_csv(url,names=['symboling','norm_loss','make','fuel','aspiration','doors',
'bod_style','drv_wheels','eng_loc','wheel_base','length','width',
'height','curb_weight','engine','cylinders','engine_size',
'fuel_system','bore','stroke','compression','hp','peak_rpm',
'city_mpg','hgwy_mpg','price'])
auto.head()
import numpy as np
auto.replace('?', np.NaN, inplace=True)
auto.head()
auto.isnull().sum()
auto.describe()
###Output
_____no_output_____
###Markdown
Project Time Comics Data Set[Source](https://www.kaggle.com/fivethirtyeight/fivethirtyeight-comic-characters-datasetdc-wikia-data.csv)
###Code
#importing from url is possible with this data set but practicing from desktop
from google.colab import files
upload = files.upload()
#reading in the marvel csv successfully
marvel_df = pd.read_csv('marvel-wikia-data.csv')
marvel_df.head()
#rerun the above file import cell to import DC csv
#reading in the DC csv successfully
dc_df = pd.read_csv('dc-wikia-data.csv')
dc_df.head()
#I want to explore the data as combined data frame so before I do I need to add a column indicating publisher(Marvel or DC)
marvel_df['publisher'] = 'Marvel'
marvel_df.head()
#Had to come back and rename 'Year' as it doesn't match the formatting of DC's and the append was creating 2 year columns
marvel_df = marvel_df.rename(columns = {'Year':'YEAR'})
marvel_df.head()
#success with both
dc_df['publisher'] = 'DC'
dc_df.head()
#now combine with append
comic_chars_df = marvel_df.append(dc_df)
comic_chars_df.head()
#looks like it worked but lets make sure all of Marvel and DC are there
comic_chars_df['publisher'].value_counts()
#compare entry counts to make sure nothing was lost
dc_df.count()
#it looks like our counts match
marvel_df.count()
#begin cleaning our new combined data set by exploring the data a bit
comic_chars_df.isnull().sum()
comic_chars_df.dtypes
#fast and dirty way to impute values
from pandas.api.types import is_numeric_dtype
for header in comic_chars_df:
if is_numeric_dtype(comic_chars_df[header]):
comic_chars_df[header] = comic_chars_df[header].fillna(-1)
else:
comic_chars_df[header] = comic_chars_df[header].fillna('unknown')
comic_chars_df.isnull().sum()
###Output
_____no_output_____
###Markdown
Comments on cleaning comics data set Many of the null values can be handled in different ways depending on the use of the data set. I did not continue cleaning and getting into the nitty gritty so I could continue with a new data set. name: The names have an item in parentheses that is sometimes the character's secret identity and sometimes their universe of origin (New Earth, 616, etc.) This could be parsed out into 'hero name', 'alt_name', and 'universe'. identity, align, eye, hair, alive: one-hot encoding or label encoding could work here depending on what we want to do with the data but I would still add a slot for 'unknown' to remove the nulls. sex, GSM: you could give a binary encoding (is_female or is_male) but since the dataset includes 'GSM' (gender or sexual minority) that might be too limiting. one-hot or categorical with a large variety of options including 'unknown' and 'other' would probably be better. appearances: this can vary wildly for characters so options range from -1 as a special indicator of unknown to using a minimum value or mode although this may be misleading. first appearance: This should be parsed into a time encoding. Unsure of whether time encoding can have only month and year, if day is required then encode with 0, or 32 to indicate the unknown value. year: might possibly be combined with first appearance, would have to comb the data and origin to be sure Graduate Admissions Data Set [Source](https://www.kaggle.com/mohansacharya/graduate-admissions)
###Code
upload = files.upload()
admission = pd.read_csv('Admission_Predict_Ver1.1.csv')
admission.head()
#mount my google drive and then re-read admissions into CSV
!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
from google.colab import drive
drive.mount('/content/gdrive')
admission = pd.read_csv('Admission_Predict_Ver1.1.csv')
admission.head()
admission.isnull().sum()
admission.count()
#double checking the data is actually clean before moving on
admission.head(500)
###Output
_____no_output_____
###Markdown
Looking at API's Resources:[swcarpentry - working with data on the web](http://swcarpentry.github.io/web-data-python/01-getdata/)[World Bank API](https://datahelpdesk.worldbank.org/knowledgebase/articles/902061-climate-data-api)
###Code
!pip install requests
#successful example
import requests
url = 'http://climatedataapi.worldbank.org/climateweb/rest/v1/country/cru/tas/year/CAN.csv'
response = requests.get(url)
if response.status_code != 200:
print('Failed to get data: ', response.status_code)
else:
print('First 100 characters of data are:')
print(response.text[:100])
###Output
First 100 characters of data are:
year,data
1901,-7.67241907119751
1902,-7.862711429595947
1903,-7.910782814025879
1904,-8.15572929382
###Markdown
The tutorial breaks down the url for us so lets try pulling down a couple countries and putting them in a single CSV[Country codes](https://unstats.un.org/unsd/tradekb/knowledgebase/country-code) to use: 1. Canada: CAN2. United States: USA3. Mexico: MEX
###Code
#Test we aren't an idiot and that our assumption is correct. Proof we can pull down csv's one at a time from this source.
url = 'http://climatedataapi.worldbank.org/climateweb/rest/v1/country/cru/tas/year/USA.csv'
response = requests.get(url)
if response.status_code != 200:
print('Failed to get data: ', response.status_code)
else:
print('First 100 characters of data are:')
print(response.text[:100])
#create a dictionary pairing country names to their ISO3 Code, this could be expanded to include all countries
countries = {'Canada':'CAN',
'UnitedStates':'USA',
'Mexico':'MEX'}
#going to loop through all countries and concat the ISO code and file type to the end of the URL
partial_url = 'http://climatedataapi.worldbank.org/climateweb/rest/v1/country/cru/tas/year/'
file_type = '.csv'
#testing and successful
for key in countries:
url = (partial_url + countries[key] + file_type)
print(url)
#create an empty data frame for each csv to merge into
north_amer_temps = pd.DataFrame()
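# note: the loop below assumes every country's CSV covers the same span of years,
# so overwriting the shared 'year' column on each pass is harmless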
for key in countries:
url = (partial_url + countries[key] + file_type)
response = pd.read_csv(url)
north_amer_temps['year'] = response['year']
north_amer_temps[key] = response['data']
north_amer_temps = north_amer_temps.set_index('year')
north_amer_temps.head(120)
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - From a URL (github or otherwise) - CSV upload method - !wget method- "Clean" a dataset using common Python libraries - Removing NaN values "Data Imputation"- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot (if we have time) Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Ryan Allred Makes a Change
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
###Output
_____no_output_____
###Markdown
Steps of Loading and Exploring a Dataset:- Find a dataset that looks interesting- Learn what you can about it - What's in it? - How many rows and columns? - What types of variables?- Look at the raw contents of the file- Load it into your workspace (notebook) - Handle any challenges with headers - Handle any problems with missing values- Then you can start to explore the data - Look at the summary statistics - Look at counts of different categories - Make some plots to look at the distribution of the data 3 ways of loading a dataset From its URL
###Code
###Output
_____no_output_____
###Markdown
From a local file
###Code
###Output
_____no_output_____
###Markdown
Using the `!wget` command
###Code
###Output
_____no_output_____
###Markdown
Part 2 - Deal with Missing Values Diagnose Missing Values Let's use the Adult Dataset from UCI.
###Code
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
###Output
_____no_output_____
###Markdown
Part 3 - Explore the Dataset: Look at "Summary Statistics" Numeric
###Code
###Output
_____no_output_____
###Markdown
Non-Numeric
###Code
###Output
_____no_output_____
###Markdown
Look at Categorical Values Part 4 - Basic Visualizations (using the Pandas Library) Histogram
###Code
# Pandas Histogram
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - From a URL (github or otherwise) - CSV upload method - !wget method- "Clean" a dataset using common Python libraries - Removing NaN values "Data Imputation"- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot (if we have time) Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Steps of Loading and Exploring a Dataset:- Find a dataset that looks interesting- Learn what you can about it - What's in it? - How many rows and columns? - What types of variables?- Look at the raw contents of the file- Load it into your workspace (notebook) - Handle any challenges with headers - Handle any problems with missing values- Then you can start to explore the data - Look at the summary statistics - Look at counts of different categories - Make some plots to look at the distribution of the data 3 ways of loading a dataset From its URL
###Code
import pandas as pd
pd.read_csv?
dataset_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'
df = pd.read_csv(dataset_url, header=None)
print(df.shape)
df.head()
# We don't want the generic 0-14 column labels that header=None produces - pass real names instead
column_headers = ['age', 'workclass', 'fnlwgt', 'education', 'education_num',
'marital-status', 'occupation', 'relationship', 'race', 'sex',
'capital-gain', 'capital-loss', 'hours-per-week',
'native-country', 'income']
# df = pd.read_csv(dataset_url, names=('age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country'))
# change it to
df = pd.read_csv(dataset_url, names=column_headers)
print(df.shape)
df.head()
# Look at the documentation of the read_csv method
?pd.read_csv()  # the trailing parentheses make IPython look for an object literally named "pd.read_csv()" - use ?pd.read_csv instead
###Output
Object `pd.read_csv()` not found.
###Markdown
From a local file
###Code
# Google: How to upload a csv to google colab
# uploading via google.colab's files.upload() worked for me instead of the direct upload command
# from google.colab import files
# uploaded = files.upload()
# column_headers = ['age', 'workclass', 'fnlwgt', 'education', 'education_num',
# 'marital-status', 'occupation', 'relationship', 'race', 'sex',
# 'capital-gain', 'capital-loss', 'hours-per-week',
# 'native-country', 'income']
# Upload from your hard drive to the google colab, then open it
# Change dataset_url to adult.data
df = pd.read_csv('adult.data', names=column_headers)
print(df.shape)
df.head()
###Output
(32561, 15)
###Markdown
Using the `!wget` command
###Code
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data
###Output
/bin/sh: wget: command not found
###Markdown
Part 2 - Deal with Missing Values Diagnose Missing Values Let's use the Adult Dataset from UCI.
###Code
import numpy as np
df = df.replace(' ?', np.NaN)
# ? has a space before it
# once the leading space is included in the pattern, each ' ?' entry is cleared to NaN
df.head(20)
# always inspect the raw data before trusting it - isna() originally reported 0 missing values even though ' ?' markers were present
df.isna().sum()
df['native-country'].iloc[14]
df['native-country'].iloc[17]
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
df = pd.read_csv('adult.data', names=column_headers, na_values='?', sep=', ', engine='python')
# One way to fix the leading-space issue in this data set:
# sep=', ' splits on comma-plus-space, so na_values='?' matches without the leading space
# (engine='python' handles the multi-character separator without a ParserWarning)
print(df.shape)
df.head(17)
# it might be wise to work on a copy before making any changes
# df_no_NaN = df.fillna('Unknown')
df = df.fillna('Unknown')
df[14:15]
###Output
_____no_output_____
###Markdown
Part 3 - Explore the Dataset: Look at "Summary Statistics"
###Code
df[df['native-country'] == ' India'].shape
len(df[df['native-country'] == ' India'])
###Output
_____no_output_____
###Markdown
Numeric
###Code
df.describe().T
df.describe(percentiles=[.25, .3, .5, .75, .95]).T
###Output
_____no_output_____
###Markdown
Non-Numeric
###Code
df.describe(exclude='number').T
###Output
_____no_output_____
###Markdown
Look at Categorical Values
###Code
df['marital-status'].value_counts()
# Make it in %
df['marital-status'].value_counts(normalize=True)
df['native-country'].value_counts(normalize=True, dropna=False)[:20]
###Output
_____no_output_____
###Markdown
Part 4 - Basic Visualizations (using the Pandas Library) Histogram
###Code
# Pandas Histogram
# df['age'].hist();
df['age'].hist(bins=20);  # bins=20 gives a finer-grained view of the distribution
df['hours-per-week'].hist(bins=20);
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
df['age'].plot.density();  # a smoothed alternative to the histogram
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
df.plot.scatter('age', 'hours-per-week');
###Output
_____no_output_____
###Markdown
Bonus
###Code
bike_data_url = "https://raw.githubusercontent.com/jvns/pandas-cookbook/master/data/bikes.csv"
df = pd.read_csv(bike_data_url,
encoding='latin1',
sep=';',
parse_dates=['Date'],
dayfirst=True,
index_col='Date'
)
df.head()
df[:3]
df.head(3)
!pip install Unidecode
import unidecode
df.columns
# Now use unidecode to strip the accents from the column names, then lowercase them with .lower()
unidecode.unidecode("Côte-Sainte-Catherine").lower()
# List comprehension example
list_1 = [10, 20, 30]
list_2 = [i//10 for i in list_1]
list_2
new_cols = [unidecode.unidecode(col).lower() for col in df.columns]
new_cols
df.columns = new_cols
df.head()
df.tail()
df['berri 1'].plot();
df.plot();
import matplotlib.pyplot as plt
plt.style.use('seaborn')
df.plot(figsize=(15, 10));
import requests
r = requests.get('https://cat-fact.herokuapp.com/facts')
json_data = r.json()
type(json_data)
json_data.keys()
len(json_data['all'])
res = json_data['all']
res[:3]
for i in res[:3]:
print(f"{i['user']['name']['first']} {i['user']['name']['last']}: {i['text']}")
###Output
Alex Wohlbruck: gato loves grace
Alex Wohlbruck: The Egyptian Mau’s name is derived from the Middle Egyptian word mjw, which means cat. But contrary to its name, it’s unclear whether the modern Egyptian Mau actually originated in Egypt.
Alex Wohlbruck: Cats aren’t the only animals that purr — squirrels, lemurs, elephants, and even gorillas purr too.
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
import pandas as pd
import numpy as np
# Shell commands to download and unzip the zip file (via Aaron Gallant - see Slack)
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/00296/dataset_diabetes.zip
!unzip dataset_diabetes.zip
df = pd.read_csv('dataset_diabetes/diabetic_data.csv')
print(df.shape)
df.head()
df1 = df.copy()
# Check for missing or nan data
print(df.isnull().sum().sum())
df.isna().sum().sum()
# However, there are many '?' placeholder values, e.g. in the weight column
df1 = df.copy()
print(df1.shape)
df1.weight.value_counts()
for column in df1.columns[:11]:
print(df1[column].value_counts(), '\n\n')
###Output
96210942 1
89943846 1
384306986 1
94650156 1
83156784 1
2674482 1
281345844 1
193616274 1
355508024 1
165973818 1
125278944 1
420873188 1
157241154 1
161161032 1
174855390 1
134950734 1
154128210 1
96993108 1
122064144 1
297770840 1
382612616 1
165134172 1
108244830 1
210578766 1
443842340 1
151469730 1
289146210 1
154590960 1
145948404 1
176328594 1
..
249722520 1
111830682 1
126506652 1
80193186 1
186881700 1
147162726 1
263120844 1
249665124 1
151295556 1
113303472 1
73909806 1
422050106 1
13655088 1
168523320 1
296140568 1
98784828 1
157333056 1
280536642 1
130655706 1
190162530 1
107017800 1
103828530 1
176744010 1
172279374 1
297285200 1
74454612 1
208073976 1
166229592 1
38340702 1
77856768 1
Name: encounter_id, Length: 101766, dtype: int64
88785891 40
43140906 28
23199021 23
1660293 23
88227540 23
23643405 22
84428613 22
92709351 21
23398488 20
90609804 20
88789707 20
37096866 20
89472402 20
29903877 20
88681950 19
88479036 19
97391007 19
24011577 18
3481272 18
91160280 18
84348792 18
3401055 18
91751121 18
106757478 17
90489195 17
41699412 17
84676248 16
384939 16
90164655 16
41617368 16
..
141459593 1
54207855 1
71579169 1
23406147 1
6348348 1
137952824 1
23234103 1
78943797 1
43683723 1
85241394 1
18267696 1
45161577 1
32417442 1
61105707 1
106231896 1
3397149 1
39734766 1
23850522 1
42977016 1
113160366 1
8105490 1
16600590 1
92990970 1
783198 1
105551478 1
71081460 1
30060018 1
67443444 1
141344240 1
93251151 1
Name: patient_nbr, Length: 71518, dtype: int64
Caucasian 76099
AfricanAmerican 19210
? 2273
Hispanic 2037
Other 1506
Asian 641
Name: race, dtype: int64
Female 54708
Male 47055
Unknown/Invalid 3
Name: gender, dtype: int64
[70-80) 26068
[60-70) 22483
[50-60) 17256
[80-90) 17197
[40-50) 9685
[30-40) 3775
[90-100) 2793
[20-30) 1657
[10-20) 691
[0-10) 161
Name: age, dtype: int64
? 98569
[75-100) 1336
[50-75) 897
[100-125) 625
[125-150) 145
[25-50) 97
[0-25) 48
[150-175) 35
[175-200) 11
>200 3
Name: weight, dtype: int64
1 53990
3 18869
2 18480
6 5291
5 4785
8 320
7 21
4 10
Name: admission_type_id, dtype: int64
1 60234
3 13954
6 12902
18 3691
2 2128
22 1993
11 1642
5 1184
25 989
4 815
7 623
23 412
13 399
14 372
28 139
8 108
15 63
24 48
9 21
17 14
16 11
19 8
10 6
27 5
12 3
20 2
Name: discharge_disposition_id, dtype: int64
7 57494
1 29565
17 6781
4 3187
6 2264
2 1104
5 855
3 187
20 161
9 125
8 16
22 12
10 8
11 2
14 2
25 2
13 1
Name: admission_source_id, dtype: int64
3 17756
2 17224
1 14208
4 13924
5 9966
6 7539
7 5859
8 4391
9 3002
10 2342
11 1855
12 1448
13 1210
14 1042
Name: time_in_hospital, dtype: int64
? 40256
MC 32439
HM 6274
SP 5007
BC 4655
MD 3532
CP 2533
UN 2448
CM 1937
OG 1033
PO 592
DM 549
CH 146
WC 135
OT 95
MP 79
SI 55
FR 1
Name: payer_code, dtype: int64
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
import pandas as pd
chess_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/chess/king-rook-vs-king/krkopt.data'
'''
Attribute Information:
1. White King file (column)
2. White King rank (row)
3. White Rook file
4. White Rook rank
5. Black King file
6. Black King rank
7. optimal depth-of-win for White in 0 to 16 moves, otherwise drawn
{draw, zero, one, two, ..., sixteen}.
'''
chess_col_names = ['WKF','WKR','WRF','WRR','BKF','BKR','Moves']
chess_data = pd.read_csv(chess_data_url, header=None, names=chess_col_names)
chess_data.head()
chess_data.isnull().sum().sum()
###Output
_____no_output_____
###Markdown
New data set, now audiology.URL: https://archive.ics.uci.edu/ml/datasets/Audiology+%28Standardized%29Folder: https://archive.ics.uci.edu/ml/machine-learning-databases/audiology/Data itself: https://archive.ics.uci.edu/ml/machine-learning-databases/audiology/audiology.standardized.data
###Code
audio_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/audiology/audiology.standardized.data'
audio_data = pd.read_csv(audio_url, header=None) #Fixed lack of headers
audio_data.head(10)
import numpy as np
# Replace the question marks with actual NaNs
audio_data.replace('?',np.nan, inplace=True)
audio_data.isnull().sum().sum()
audio_data.head()
# Replace the Ts and Fs with real booleans
audio_data.replace('t',True, inplace=True)
audio_data.replace('f',False, inplace=True)
audio_data.head()
audio_data_filled = audio_data.copy()
for column in audio_data.columns:
column_mode = audio_data[column].mode()[0]
audio_data_filled[column].fillna(column_mode, inplace=True)
audio_data_filled.isnull().sum().sum()
audio_data_filled.head(15)
###Output
_____no_output_____
###Markdown
Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
'''
Looking around the UCI database, I found this one that seems good.
Medium-sized dataset with missing values.
URL: https://archive.ics.uci.edu/ml/datasets/Cylinder+Bands
Names: https://archive.ics.uci.edu/ml/machine-learning-databases/cylinder-bands/bands.names
Data: https://archive.ics.uci.edu/ml/machine-learning-databases/cylinder-bands/bands.data
'''
# First of all, load the data. By opening it in a browser, I see from the start
# that there's no header, so I note that when reading the CSV.
# I also make sure that Pandas displays the full dataframe
pd.set_option('display.height', 1000)  # note: 'display.height' may not exist in newer pandas and can be dropped
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
bands_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/cylinder-bands/bands.data'
bands_raw = pd.read_csv(bands_url, header=None)
print(bands_raw.shape)
bands_raw.head(10)
# Alright, let's check the missing values.
bands_raw.isnull().sum()
'''
Hmm. This count of null values doesn't actually seem right. I notice there's
a '?' in the table above, and searching for that character in the browser view
of the whole dataset shows it's all over the place - far more than the roughly
two per column suggested here. The missing values must be encoded some other way.
I'll now re-load the dataset treating '?' as the NA marker, expecting that the
existing NaNs will simply be added to the totals. Rather than hope, I'll verify:
I'll count how many '?'s appear in each column and make sure that turning them
into NaNs gives the right total (the '?' count plus whatever small number,
usually 2, was already listed above).
'''
bands_raw.isin(['?']).sum()
'''
Alright, then. That's the list of numbers that should increase by 2 once I
turn all the question marks into NaNs and then count the NaNs.
'''
bands = bands_raw.replace('?', np.nan)
bands.isnull().sum()
'''
Success! Looking at the last few columns, they've increased by 2 as expected.
Whatever the source of those original NaNs, they've all been merged now.
'''
bands.head()
'''
Now let's look at what those columns are, so as to know how to fill in the
missing values. From the website:
6. Number of Attributes: 40 including the class attribute
-- 20 attributes are numeric, 20 are nominal
7. Attribute Information:
1. timestamp: numeric;19500101 - 21001231
2. cylinder number: nominal
3. customer: nominal;
4. job number: nominal;
5. grain screened: nominal; yes, no
6. ink color: nominal; key, type
7. proof on ctd ink: nominal; yes, no
8. blade mfg: nominal; benton, daetwyler, uddeholm
9. cylinder division: nominal; gallatin, warsaw, mattoon
10. paper type: nominal; uncoated, coated, super
11. ink type: nominal; uncoated, coated, cover
12. direct steam: nominal; use; yes, no *
13. solvent type: nominal; xylol, lactol, naptha, line, other
14. type on cylinder: nominal; yes, no
15. press type: nominal; use; 70 wood hoe, 70 motter, 70 albert, 94 motter
16. press: nominal; 821, 802, 813, 824, 815, 816, 827, 828
17. unit number: nominal; 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
18. cylinder size: nominal; catalog, spiegel, tabloid
19. paper mill location: nominal; north us, south us, canadian,
scandanavian, mid european
20. plating tank: nominal; 1910, 1911, other
21. proof cut: numeric; 0-100
22. viscosity: numeric; 0-100
23. caliper: numeric; 0-1.0
24. ink temperature: numeric; 5-30
25. humifity: numeric; 5-120
26. roughness: numeric; 0-2
27. blade pressure: numeric; 10-75
28. varnish pct: numeric; 0-100
29. press speed: numeric; 0-4000
30. ink pct: numeric; 0-100
31. solvent pct: numeric; 0-100
32. ESA Voltage: numeric; 0-16
33. ESA Amperage: numeric; 0-10
34. wax: numeric ; 0-4.0
35. hardener: numeric; 0-3.0
36. roller durometer: numeric; 15-120
37. current density: numeric; 20-50
38. anode space ratio: numeric; 70-130
39. chrome content: numeric; 80-120
40. band type: nominal; class; band, no band *
'''
# I used RegExr (and like 90 minutes) to parse the text above and extract the
# column names and a list of the data types (nominal or numeric). Learning
# Regex is totally worth my time, as I've run into this problem many times.
col_names = ['timestamp','cylinder number','customer','job number','grain screened','ink color','proof on ctd ink','blade mfg','cylinder division','paper type','ink type','direct steam','solvent type','type on cylinder','press type','press','unit number','cylinder size','paper mill location','plating tank','proof cut','viscosity','caliper','ink temperature','humifity','roughness','blade pressure','varnish pct','press speed','ink pct','solvent pct','ESA Voltage','ESA Amperage','wax','hardener','roller durometer','current density','anode space ratio','chrome content','band type']
data_types = (['numeric'] +        # attribute 1 (timestamp)
              ['nominal'] * 19 +   # attributes 2-20
              ['numeric'] * 19 +   # attributes 21-39
              ['nominal'])         # attribute 40 (band type)
# First, I properly name all the column headers and verify the change.
bands.columns = col_names
bands.head(20)
'''My initial plan was to use a loop to remove NaNs according to the data
type of that column. In nominal columns, I'd replace NaN with the mode for that column.
In numerical columns, I'd use interpolation or something. Looking at the data
more closely, though, it seems like it has timestamps but all the entries are
actually independent of each other. It looks like they're all individual sales
or something, so that the rows are uncorrelated. Therefore, interpolating
makes no sense. Instead, I'll just replace with the mode in all cases.
'''
bands_clean = bands.copy()
for col in bands.columns:
the_mode = bands_clean[col].mode()[0]
bands_clean[col].fillna(the_mode, inplace=True)
bands_clean.isna().sum()
# All set!
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - !curl method - CSV upload method- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot- "Clean" a dataset using common Python libraries - Removing NaN values "Interpolation" Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
?pd.read_csv
??pd.read_csv
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
col_headers = ['name','landmass','zone','area','population','language','religion','bars','stripes','colours','red',
'green','blue','gold','white','black','orange','mainhue','circles','crosses','saltires','quarters',
'sunstars','crescent','triangle','icon','animate','text','topleft','botright']
flag_data = pd.read_csv(flag_data_url, header=None, names=col_headers)
flag_data.head()
flag_data['language'] = flag_data['language'].map({1: 'English', 2:'Spanish', 3:'French', 4:'German', 5:'Slavic', 6:'Other Indo-European', 7:'Chinese', 8:'Arabic', 9:'Japanese/Turkish/Finnish/Magyar', 10:'Others'})
flag_data.head()
flag_data.language.value_counts()
###Output
_____no_output_____
###Markdown
Reading other CSVs
###Code
link1 = 'https://raw.githubusercontent.com/BJanota11/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module2-loadingdata/drinks_with_regions.csv'
link2 = 'https://raw.githubusercontent.com/BJanota11/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module2-loadingdata/drinks_with_regions_index.csv'
link3 = 'https://raw.githubusercontent.com/BJanota11/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module2-loadingdata/drinks_with_regions_header.csv'
df = pd.read_csv(link1)
print(df.shape)
df.head()
df.to_csv('test.csv')
df.columns
df = pd.read_csv(link2)
#df = pd.read_csv(link2, usecols=range(1,8))
df = df.drop(['country', 'beer_servings', 'spirit_servings'], axis=1)
print(df.shape)
df.head()
df = pd.read_csv(link3, skiprows=3)
print(df.shape)
df.head()
help(pd.read_csv)
###Output
Help on function read_csv in module pandas.io.parsers:
read_csv(filepath_or_buffer, sep=',', delimiter=None, header='infer', names=None, index_col=None, usecols=None, squeeze=False, prefix=None, mangle_dupe_cols=True, dtype=None, engine=None, converters=None, true_values=None, false_values=None, skipinitialspace=False, skiprows=None, skipfooter=0, nrows=None, na_values=None, keep_default_na=True, na_filter=True, verbose=False, skip_blank_lines=True, parse_dates=False, infer_datetime_format=False, keep_date_col=False, date_parser=None, dayfirst=False, iterator=False, chunksize=None, compression='infer', thousands=None, decimal=b'.', lineterminator=None, quotechar='"', quoting=0, doublequote=True, escapechar=None, comment=None, encoding=None, dialect=None, tupleize_cols=None, error_bad_lines=True, warn_bad_lines=True, delim_whitespace=False, low_memory=True, memory_map=False, float_precision=None)
Read a comma-separated values (csv) file into DataFrame.
Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the online docs for
`IO Tools <http://pandas.pydata.org/pandas-docs/stable/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object, or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: file://localhost/path/to/table.csv.
If you want to pass in a path object, pandas accepts either
``pathlib.Path`` or ``py._path.local.LocalPath``.
By file-like object, we refer to objects with a ``read()`` method, such as
a file handler (e.g. via builtin ``open`` function) or ``StringIO``.
sep : str, default ','
Delimiter to use. If sep is None, the C engine cannot automatically detect
the separator, but the Python parsing engine can, meaning the latter will
be used and automatically detect the separator by Python's builtin sniffer
tool, ``csv.Sniffer``. In addition, separators longer than 1 character and
different from ``'\s+'`` will be interpreted as regular expressions and
will also force the use of the Python parsing engine. Note that regex
delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``.
delimiter : str, default ``None``
Alias for sep.
header : int, list of int, default 'infer'
Row number(s) to use as the column names, and the start of the
data. Default behavior is to infer the column names: if no names
are passed the behavior is identical to ``header=0`` and column
names are inferred from the first line of the file, if column
names are passed explicitly then the behavior is identical to
``header=None``. Explicitly pass ``header=0`` to be able to
replace existing names. The header can be a list of integers that
specify row locations for a multi-index on the columns
e.g. [0,1,3]. Intervening rows that are not specified will be
skipped (e.g. 2 in this example is skipped). Note that this
parameter ignores commented lines and empty lines if
``skip_blank_lines=True``, so ``header=0`` denotes the first line of
data rather than the first line of the file.
names : array-like, optional
List of column names to use. If file contains no header row, then you
should explicitly pass ``header=None``. Duplicates in this list will cause
a ``UserWarning`` to be issued.
index_col : int, sequence or bool, optional
Column to use as the row labels of the DataFrame. If a sequence is given, a
MultiIndex is used. If you have a malformed file with delimiters at the end
of each line, you might consider ``index_col=False`` to force pandas to
not use the first column as the index (row names).
usecols : list-like or callable, optional
Return a subset of the columns. If list-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
inferred from the document header row(s). For example, a valid list-like
`usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
To instantiate a DataFrame from ``data`` with element order preserved use
``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns
in ``['foo', 'bar']`` order or
``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``
for ``['bar', 'foo']`` order.
If callable, the callable function will be evaluated against the column
names, returning names where the callable function evaluates to True. An
example of a valid callable argument would be ``lambda x: x.upper() in
['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
parsing time and lower memory usage.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
prefix : str, optional
Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
dtype : Type name or dict of column -> type, optional
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32,
'c': 'Int64'}
Use `str` or `object` together with suitable `na_values` settings
to preserve and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : {'c', 'python'}, optional
Parser engine to use. The C engine is faster while the python engine is
currently more feature-complete.
converters : dict, optional
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels.
true_values : list, optional
Values to consider as True.
false_values : list, optional
Values to consider as False.
skipinitialspace : bool, default False
Skip spaces after delimiter.
skiprows : list-like, int or callable, optional
Line numbers to skip (0-indexed) or number of lines to skip (int)
at the start of the file.
If callable, the callable function will be evaluated against the row
indices, returning True if the row should be skipped and False otherwise.
An example of a valid callable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
Number of lines at bottom of file to skip (Unsupported with engine='c').
nrows : int, optional
Number of rows of file to read. Useful for reading pieces of large files.
na_values : scalar, str, list-like, or dict, optional
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted as
NaN: '', '#N/A', '#N/A N/A', '#NA', '-1.#IND', '-1.#QNAN', '-NaN', '-nan',
'1.#IND', '1.#QNAN', 'N/A', 'NA', 'NULL', 'NaN', 'n/a', 'nan',
'null'.
keep_default_na : bool, default True
Whether or not to include the default NaN values when parsing the data.
Depending on whether `na_values` is passed in, the behavior is as follows:
* If `keep_default_na` is True, and `na_values` are specified, `na_values`
is appended to the default NaN values used for parsing.
* If `keep_default_na` is True, and `na_values` are not specified, only
the default NaN values are used for parsing.
* If `keep_default_na` is False, and `na_values` are specified, only
the NaN values specified `na_values` are used for parsing.
* If `keep_default_na` is False, and `na_values` are not specified, no
strings will be parsed as NaN.
Note that if `na_filter` is passed in as False, the `keep_default_na` and
`na_values` parameters will be ignored.
na_filter : bool, default True
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing na_filter=False can improve the performance
of reading a large file.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
skip_blank_lines : bool, default True
If True, skip over blank lines rather than interpreting as NaN values.
parse_dates : bool or list of int or names or list of lists or dict, default False
The behavior is as follows:
* boolean. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index cannot be represented as an array of datetimes,
say because of an unparseable value or a mixture of timezones, the column
or index will be returned unaltered as an object data type. For
non-standard datetime parsing, use ``pd.to_datetime`` after
``pd.read_csv``. To parse an index or column with a mixture of timezones,
specify ``date_parser`` to be a partially-applied
:func:`pandas.to_datetime` with ``utc=True``. See
:ref:`io.csv.mixed_timezones` for more.
Note: A fast-path exists for iso8601-formatted dates.
infer_datetime_format : bool, default False
If True and `parse_dates` is enabled, pandas will attempt to infer the
format of the datetime strings in the columns, and if it can be inferred,
switch to a faster method of parsing them. In some cases this can increase
the parsing speed by 5-10x.
keep_date_col : bool, default False
If True and `parse_dates` specifies combining multiple columns then
keep the original columns.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Pandas will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
dayfirst : bool, default False
DD/MM format dates, international and European format.
iterator : bool, default False
Return TextFileReader object for iteration or getting chunks with
``get_chunk()``.
chunksize : int, optional
Return TextFileReader object for iteration.
See the `IO Tools docs
<http://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_
for more information on ``iterator`` and ``chunksize``.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer' and
`filepath_or_buffer` is path-like, then detect compression from the
following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
decompression). If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
.. versionadded:: 0.18.1 support for 'zip' and 'xz' compression.
thousands : str, optional
Thousands separator.
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European data).
lineterminator : str (length 1), optional
Character to break file into lines. Only valid with C parser.
quotechar : str (length 1), optional
The character used to denote the start and end of a quoted item. Quoted
items can include the delimiter and it will be ignored.
quoting : int or csv.QUOTE_* instance, default 0
Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
doublequote : bool, default ``True``
When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate
whether or not to interpret two consecutive quotechar elements INSIDE a
field as a single ``quotechar`` element.
escapechar : str (length 1), optional
One-character string used to escape other characters.
comment : str, optional
Indicates remainder of line should not be parsed. If found at the beginning
of a line, the line will be ignored altogether. This parameter must be a
single character. Like empty lines (as long as ``skip_blank_lines=True``),
fully commented lines are ignored by the parameter `header` but not by
`skiprows`. For example, if ``comment='#'``, parsing
``#empty\na,b,c\n1,2,3`` with ``header=0`` will result in 'a,b,c' being
treated as the header.
encoding : str, optional
Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .
dialect : str or csv.Dialect, optional
If provided, this parameter will override values (default or not) for the
following parameters: `delimiter`, `doublequote`, `escapechar`,
`skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
override values, a ParserWarning will be issued. See csv.Dialect
documentation for more details.
tupleize_cols : bool, default False
Leave a list of tuples on columns as is (default is to convert to
a MultiIndex on the columns).
.. deprecated:: 0.21.0
This argument will be removed and will always convert to MultiIndex
error_bad_lines : bool, default True
Lines with too many fields (e.g. a csv line with too many commas) will by
default cause an exception to be raised, and no DataFrame will be returned.
If False, then these "bad lines" will dropped from the DataFrame that is
returned.
warn_bad_lines : bool, default True
If error_bad_lines is False, and warn_bad_lines is True, a warning for each
"bad line" will be output.
delim_whitespace : bool, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``' '``) will be
used as the sep. Equivalent to setting ``sep='\s+'``. If this option
is set to True, nothing should be passed in for the ``delimiter``
parameter.
.. versionadded:: 0.18.1 support for the Python parser.
low_memory : bool, default True
Internally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
types either set False, or specify the type with the `dtype` parameter.
Note that the entire file is read into a single DataFrame regardless,
use the `chunksize` or `iterator` parameter to return the data in chunks.
(Only valid with C parser).
memory_map : bool, default False
If a filepath is provided for `filepath_or_buffer`, map the file object
directly onto memory and access the data directly from there. Using this
option can improve performance because there is no longer any I/O overhead.
float_precision : str, optional
Specifies which converter the C engine should use for floating-point
values. The options are `None` for the ordinary converter,
`high` for the high-precision converter, and `round_trip` for the
round-trip converter.
Returns
-------
DataFrame or TextParser
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_fwf : Read a table of fixed-width formatted lines into DataFrame.
Examples
--------
>>> pd.read_csv('data.csv') # doctest: +SKIP
###Markdown
Loading from a local CSV to Google Colab
###Code
from google.colab import files
uploaded = files.upload()
df = pd.read_csv('drinks_with_regions_header.csv', skiprows=3)
df.head()
###Output
_____no_output_____
###Markdown
Part 2 - Basic Visualizations Basic Data Visualizations Using Matplotlib
###Code
import matplotlib.pyplot as plt
# Scatter Plot
plt.scatter(df.beer_servings, df.wine_servings)
plt.xlabel('beer_servings')
plt.ylabel('wine_servings')
plt.show()
df.plot.scatter('beer_servings','wine_servings');
plt.hist(df.total_litres_of_pure_alcohol, bins=20)
df.total_litres_of_pure_alcohol.hist(bins=20);
# Seaborn Density Plot
import seaborn as sns
sns.pairplot(df)
df.isna().sum()
###Output
_____no_output_____
###Markdown
Create the same basic Visualizations using Pandas
###Code
# Pandas Histogram - Look familiar?
# Pandas Scatterplot
# Pandas Scatter Matrix - Usually doesn't look too great.
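# A minimal sketch filling in the placeholders above (assumes the drinks df loaded
# earlier in this notebook is still in memory; the column names are the same ones
# used in the matplotlib cells above)
df['total_litres_of_pure_alcohol'].hist(bins=20);           # Pandas histogram
df.plot.scatter('beer_servings', 'wine_servings');          # Pandas scatterplot
pd.plotting.scatter_matrix(df[['beer_servings', 'spirit_servings', 'wine_servings']],
                           figsize=(8, 8));                 # Pandas scatter matrix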
###Output
_____no_output_____
###Markdown
Part 3 - Deal with Missing Values Diagnose Missing Values. Let's use the Adult Dataset from UCI.
###Code
df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/adult.csv', na_values=' ?')
print(df.shape)
df.head()
df.isna().sum()
df.country.value_counts()
df.workclass.value_counts()
df.dropna(subset=['country'],inplace=True)
df.shape
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
df.mode().iloc[0]
df = df.fillna(df.mode().iloc[0])
df.isna().sum()
###Output
_____no_output_____
###Markdown
Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar semi-clean source. You don't want the data that you're working with for this assignment to have any bigger issues than maybe not having headers or including missing values, etc.After you have chosen your dataset, do the following:- Import the dataset using the method that you are least comfortable with (!curl or CSV upload). - Make sure that your dataset has the number of rows and columns that you expect. - Make sure that your dataset has appropriate column names, rename them if necessary. - If your dataset uses markers like "?" to indicate missing values, replace them with NaNs during import.- Identify and fill missing values in your dataset (if any) - Don't worry about using methods more advanced than the `.fillna()` function for today.- Create one of each of the following plots using your dataset - Scatterplot - Histogram - Density Plot - Pairplot (note that pairplots will take a long time to load with large datasets or datasets with many columns)If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck!).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
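One possible outline, sketched with the UCI adult dataset from earlier in this document as a stand-in (the URL, column names, and the mode-fill trick all appear above; swap in your own dataset, and in practice each plot would go in its own cell):
```
import pandas as pd
import seaborn as sns

url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'
cols = ['age', 'workclass', 'fnlwgt', 'education', 'education_num',
        'marital-status', 'occupation', 'relationship', 'race', 'sex',
        'capital-gain', 'capital-loss', 'hours-per-week',
        'native-country', 'income']
df = pd.read_csv(url, names=cols, na_values='?', skipinitialspace=True)

df = df.fillna(df.mode().iloc[0])                  # fill NaNs with each column's mode
df['age'].hist(bins=20)                            # histogram
df['age'].plot.density()                           # density plot
df.plot.scatter('age', 'hours-per-week')           # scatterplot
sns.pairplot(df[['age', 'hours-per-week', 'capital-gain']])  # pairplot on a few columns
```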
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
import pandas as pd
import os
import matplotlib.pyplot as plt
print(os.listdir())
df = pd.read_csv('https://data.maryland.gov/api/views/is7h-kp6x/rows.csv?accessType=DOWNLOAD')
df.head()
df.columns
df.describe()
df['Percent Male'] = df['Male'] / (df['Male'] + df['Female'])
plt.scatter(df['Median Household Income ($)'],df['Percent Male'])
plt.scatter(df['Total Population'],df['Median Household Income ($)'])
###Output
_____no_output_____
###Markdown
Stretch Goals - Other types and sources of dataNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.One last major source of data is APIs: https://github.com/toddmotto/public-apisAPI stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. I suggset image, text, or (public) API - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
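As a hedged sketch of the API idea (the GitHub issues endpoint below is just an illustrative public JSON API, not part of the lecture; anything from the public-apis list works the same way):
```
import pandas as pd
import requests

resp = requests.get('https://api.github.com/repos/pandas-dev/pandas/issues')
issues = pd.DataFrame(resp.json())   # each JSON object becomes a row
print(issues.shape)
issues[['number', 'title', 'state']].head()
```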
###Code
!pip install python-congress
from congress import Congress
key = 'G6aXQNUGsIFV2LS6ZbKse3iW8L0EcgsAst6KJs5E'
congress = Congress(key)
introd = congress.bills.introduced(chamber='house')
df = pd.DataFrame.from_dict(introd)
df.head()
# !unzip expects a local file, not a URL, and Kaggle downloads require authentication.
# A working pattern (sketch) would be to download the archive first, e.g. with the kaggle CLI, then:
# !unzip collegefootballstatistics.zip
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - From a URL (github or otherwise) - CSV upload method - !wget method- "Clean" a dataset using common Python libraries - Removing NaN values "Data Imputation"- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot (if we have time) Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 0 - Make a Change
# Step .5 - save to Drive
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
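One hedged sketch of that exercise (the column names are transcribed from the codebook above; pandas just takes them as an ordered list, so the codebook's 1-based numbering is simply dropped):
```
import pandas as pd

flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
col_names = ['name', 'landmass', 'zone', 'area', 'population', 'language',
             'religion', 'bars', 'stripes', 'colours', 'red', 'green', 'blue',
             'gold', 'white', 'black', 'orange', 'mainhue', 'circles', 'crosses',
             'saltires', 'quarters', 'sunstars', 'crescent', 'triangle', 'icon',
             'animate', 'text', 'topleft', 'botright']
flag_data = pd.read_csv(flag_data_url, header=None, names=col_names)
flag_data.head()
```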
###Code
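# One possible sketch for the exercise above (not the only solution): pass the
# 30 codebook names to read_csv via the `names` parameter. The shortened column
# names below are taken from the codebook; header=None keeps row 0 as data.
import pandas as pd

flag_columns = ['name', 'landmass', 'zone', 'area', 'population', 'language',
                'religion', 'bars', 'stripes', 'colours', 'red', 'green', 'blue',
                'gold', 'white', 'black', 'orange', 'mainhue', 'circles', 'crosses',
                'saltires', 'quarters', 'sunstars', 'crescent', 'triangle', 'icon',
                'animate', 'text', 'topleft', 'botright']

flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
flag_data = pd.read_csv(flag_data_url, header=None, names=flag_columns)
flag_data.head()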
###Output
_____no_output_____
###Markdown
Steps of Loading and Exploring a Dataset:
- Find a dataset that looks interesting
- Learn what you can about it
  - What's in it?
  - How many rows and columns?
  - What types of variables?
- Look at the raw contents of the file
- Load it into your workspace (notebook)
  - Handle any challenges with headers
  - Handle any problems with missing values
- Then you can start to explore the data
  - Look at the summary statistics
  - Look at counts of different categories
  - Make some plots to look at the distribution of the data

3 ways of loading a dataset

From its URL
###Code
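# Sketch of loading directly from a URL - pandas can read a remote csv without
# a separate download step. Reuses the UCI flag data from the lecture above.
import pandas as pd

url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
df_from_url = pd.read_csv(url, header=None)
df_from_url.shape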
###Output
_____no_output_____
###Markdown
From a local file
###Code
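# Sketch of loading from a local file - this assumes a copy of the data named
# 'flag.data' is already sitting in the notebook's working directory.
import pandas as pd

df_local = pd.read_csv('flag.data', header=None)
df_local.head()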
###Output
_____no_output_____
###Markdown
Using the `!wget` command
###Code
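# Sketch using !wget - download the file into the working directory first,
# then read the downloaded copy with pandas.
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data

import pandas as pd
df_wget = pd.read_csv('flag.data', header=None)
df_wget.head()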
###Output
_____no_output_____
###Markdown
Part 2 - Deal with Missing Values

Diagnose Missing Values

Let's use the Adult Dataset from UCI.
###Code
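# Sketch for diagnosing missing values in the UCI Adult dataset. The raw file
# marks missing entries with ' ?' instead of leaving cells empty, so we tell
# read_csv to treat that marker as NaN. Column names follow the adult.names file.
import pandas as pd

adult_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'
adult_columns = ['age', 'workclass', 'fnlwgt', 'education', 'education-num',
                 'marital-status', 'occupation', 'relationship', 'race', 'sex',
                 'capital-gain', 'capital-loss', 'hours-per-week',
                 'native-country', 'income']
adult = pd.read_csv(adult_url, header=None, names=adult_columns, na_values=' ?')
adult.isna().sum()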
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
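# Sketch of filling the gaps diagnosed above with .fillna(). A simple placeholder
# label is used for the categorical columns; assumes the `adult` DataFrame from
# the previous cell.
adult_filled = adult.fillna('Unknown')
adult_filled.isna().sum()  # should be all zeros now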
###Output
_____no_output_____
###Markdown
Part 3 - Explore the Dataset: Look at "Summary Statistics"

Numeric
###Code
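# Sketch: .describe() summarises the numeric columns (count, mean, std, quartiles).
# Assumes the flag_data DataFrame loaded earlier in this notebook.
flag_data.describe()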
###Output
_____no_output_____
###Markdown
Non-Numeric
###Code
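# Sketch: describe the non-numeric (object) columns instead - count, number of
# unique values, top value and its frequency. Again assumes flag_data from above.
flag_data.describe(include='object')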
###Output
_____no_output_____
###Markdown
Look at Categorical Values

Part 4 - Basic Visualizations (using the Pandas Library)

Histogram
###Code
# Pandas Histogram
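# Sketch: histogram of one numeric column straight from pandas. Assumes the
# codebook column names from the exercise sketch above have been applied.
flag_data['colours'].hist();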
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
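# Sketch: kernel density estimate of the same column via pandas (requires scipy).
# Assumes the codebook column names have been applied to flag_data.
flag_data['colours'].plot.kde();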
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
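# Sketch: scatter plot of two numeric columns with pandas. Assumes the codebook
# column names have been applied to flag_data.
flag_data.plot.scatter(x='area', y='population');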
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing Data

Objectives for today:
- Load data from multiple sources into a Python notebook
  - !curl method
  - CSV upload method
- Create basic plots appropriate for different data types
  - Scatter Plot
  - Histogram
  - Density Plot
  - Pairplot
- "Clean" a dataset using common Python libraries
  - Removing NaN values "Interpolation"

Part 1 - Loading Data

Data comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.

Data set sources:
- https://archive.ics.uci.edu/ml/datasets.html
- https://github.com/awesomedata/awesome-public-datasets
- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)

Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags).

Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
?pd.read_csv
??pd.read_csv
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?

This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).

```
1. name: Name of the country concerned
2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 5=Asia, 6=Oceania
3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW
4. area: in thousands of square km
5. population: in round millions
6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others
7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others
8. bars: Number of vertical bars in the flag
9. stripes: Number of horizontal stripes in the flag
10. colours: Number of different colours in the flag
11. red: 0 if red absent, 1 if red present in the flag
12. green: same for green
13. blue: same for blue
14. gold: same for gold (also yellow)
15. white: same for white
16. black: same for black
17. orange: same for orange (also brown)
18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)
19. circles: Number of circles in the flag
20. crosses: Number of (upright) crosses
21. saltires: Number of diagonal crosses
22. quarters: Number of quartered sections
23. sunstars: Number of sun or star symbols
24. crescent: 1 if a crescent moon symbol present, else 0
25. triangle: 1 if any triangles present, 0 otherwise
26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 0
27. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise
28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise
29. topleft: colour in the top-left corner (moving right to decide tie-breaks)
30. botright: colour in the bottom-right corner (moving left to decide tie-breaks)
```

Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
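# Alternative sketch for the exercise: load with header=None, then assign the 30
# codebook names to .columns afterwards (rather than passing names= to read_csv).
import pandas as pd

flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
flag_columns = ['name', 'landmass', 'zone', 'area', 'population', 'language',
                'religion', 'bars', 'stripes', 'colours', 'red', 'green', 'blue',
                'gold', 'white', 'black', 'orange', 'mainhue', 'circles', 'crosses',
                'saltires', 'quarters', 'sunstars', 'crescent', 'triangle', 'icon',
                'animate', 'text', 'topleft', 'botright']

flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.columns = flag_columns
flag_data.head()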
###Output
_____no_output_____
###Markdown
Loading from a local CSV to Google Colab
###Code
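# Sketch of the CSV upload route in Google Colab: files.upload() opens a file
# picker and returns a dict keyed by filename; the uploaded copy is then read
# from disk. The filename below is an assumption - use whatever you uploaded.
from google.colab import files
import pandas as pd

uploaded = files.upload()
df_uploaded = pd.read_csv('flag.data', header=None)
df_uploaded.head()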
###Output
_____no_output_____
###Markdown
Part 2 - Basic Visualizations

Basic Data Visualizations Using Matplotlib
###Code
import matplotlib.pyplot as plt
# Scatter Plot
# Histogram
# Seaborn Density Plot
# Seaborn Pairplot
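# Sketch filling in the stubs above. Assumes the flag data with codebook column
# names is loaded as flag_data; the pairplot is limited to a few columns for speed.
import seaborn as sns

plt.scatter(flag_data['area'], flag_data['population'])  # scatter plot
plt.show()

plt.hist(flag_data['colours'])  # histogram
plt.show()

sns.kdeplot(flag_data['colours'])  # seaborn density plot
plt.show()

sns.pairplot(flag_data[['area', 'population', 'colours']])  # seaborn pairplot
plt.show()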
###Output
_____no_output_____
###Markdown
Create the same basic Visualizations using Pandas
###Code
# Pandas Histogram - Look familiar?
# Pandas Scatterplot
# Pandas Scatter Matrix - Usually doesn't look too great.
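# Sketch of the pandas equivalents. scatter_matrix lives in pandas.plotting;
# again assumes flag_data with the codebook column names.
from pandas.plotting import scatter_matrix

flag_data['colours'].hist()                                    # pandas histogram
flag_data.plot.scatter(x='area', y='population')               # pandas scatterplot
scatter_matrix(flag_data[['area', 'population', 'colours']]);  # pandas scatter matrix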
###Output
_____no_output_____
###Markdown
Part 3 - Deal with Missing Values

Diagnose Missing Values

Let's use the Adult Dataset from UCI.
###Code
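# Sketch of another way to diagnose missing values in the Adult data: load it
# as-is, count how often the '?' marker shows up, then convert it to NaN.
import numpy as np
import pandas as pd

adult_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'
adult = pd.read_csv(adult_url, header=None, skipinitialspace=True)
print((adult == '?').sum())      # how many '?' markers per column
adult = adult.replace('?', np.nan)
adult.isna().sum()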
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
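# Sketch: fill numeric gaps with the column median and categorical gaps with the
# most frequent value (mode). Assumes the `adult` DataFrame from the cell above.
for col in adult.columns:
    if adult[col].dtype == 'object':
        adult[col] = adult[col].fillna(adult[col].mode()[0])
    else:
        adult[col] = adult[col].fillna(adult[col].median())
adult.isna().sum()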
###Output
_____no_output_____
###Markdown
Your assignment - pick a dataset and do something like the above

This is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar semi-clean source. You don't want the data that you're working with for this assignment to have any bigger issues than maybe not having headers or including missing values, etc.

After you have chosen your dataset, do the following:
- Import the dataset using the method that you are least comfortable with (!curl or CSV upload).
  - Make sure that your dataset has the number of rows and columns that you expect.
  - Make sure that your dataset has appropriate column names, rename them if necessary.
  - If your dataset uses markers like "?" to indicate missing values, replace them with NaNs during import.
- Identify and fill missing values in your dataset (if any)
  - Don't worry about using methods more advanced than the `.fillna()` function for today.
- Create one of each of the following plots using your dataset
  - Scatterplot
  - Histogram
  - Density Plot
  - Pairplot (note that pairplots will take a long time to load with large datasets or datasets with many columns)

If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck!).

If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# import drug consumption dataset
from google.colab import files
drug_data = files.upload()
import pandas as pd
import numpy as np
headers = ['ID', 'Age', 'Gender', 'Education', 'Country', 'Ethnicity', 'Neuroticism-Score', 'Extraversion-Score', 'Open-Score', 'Agreeable-Score', 'Conscientious-Score', 'Impulsive-Score', 'Sensation-Seeing-Score', 'Alcohol', 'Amphetamine', 'Amyl', 'Benzos', 'Caffeine', 'Cannabis', 'Chocolate', 'Cocaine', 'Crack', 'Ecstasy', 'Heroin', 'Ketamine', 'LegalH', 'LSD', 'Methadone', 'Mushrooms', 'Nicotine', 'Semeron', 'VSA']
df = pd.read_csv('drug_consumption.data', names=headers)
print(df.shape)
df.head()
df.describe()
df.isna().sum()
# confirms that there are no null values. I also looked at the data directly.
# Too bad! I need to find a messier dataset.
drugs = ['Alcohol', 'Amphetamine', 'Amyl', 'Benzos', 'Caffeine', 'Cannabis', 'Chocolate', 'Cocaine', 'Crack', 'Ecstasy', 'Heroin', 'Ketamine', 'LegalH', 'LSD', 'Methadone', 'Mushrooms', 'Nicotine', 'Semeron', 'VSA']
df1 = df.copy()
df1['Alcohol']
# Remove the first two letters of the values.
# df1['drugs'] = df1['drugs'].map(lambda x: str(x)[2:])
# This did not work.
for column in df1[drugs]:
df1[column] = df1[column].map(lambda x: str(x)[2:])
df1[drugs].head()
df1['Alcohol']
# Convert str to int
for column in df1[drugs]:
df1[column] = df1[column].astype(str).astype(int)
# I had a lot of trouble with this.
# Problem 1: to_numeric() failed to convert my str to int. Switched to the above.
# Problem 2: I had to split up the above two lines into different cells.
# Upon re-running the cell, map() would remove the remaining numerical characters.
# Problem 3: Once I got it working for Alcohol, I made for loops encoding all the drug columns.
# But stripping it all made an empty Alcohol column.
# Scratch that. It was another problem like the above. RERUN ALL CELLS
df1[drugs].head()
df1.plot(x='Alcohol',y='Caffeine')
# Wow that looks odd. But it confirms the intuition that usage of these drugs are correlated
import seaborn as sb
import matplotlib.pyplot as plt
heat_map = sb.heatmap(df1[drugs])
plt.show()
# This is more interesting. You see that darker columns indicate less popular drugs.
# Eventually I want to find some correlations in here between various levels of drug use.
user_keys = []
for key, column in df1[drugs].iteritems():
users = 0
for i, j in column.iteritems():
if j > 0:
users += 1
user_keys.append(users)
fig, ax = plt.subplots()
ax.scatter(user_keys, drugs, label="# users by drug")
ax.legend()
plt.show()
# Still working on getting a horizontal bar plot. Scatter() or plot() work in a pinch.
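# A minimal sketch of the horizontal bar plot mentioned above - barh() takes the
# category labels and their counts directly. (The same counts could also be
# computed without the loops via (df1[drugs] > 0).sum().)
fig, ax = plt.subplots(figsize=(8, 6))
ax.barh(drugs, user_keys)
ax.set_xlabel('number of users (score > 0)')
ax.set_title('Users by drug')
plt.show()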
###Output
_____no_output_____
###Markdown
Stretch Goals - Other types and sources of data

Not all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.

If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.

Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.

How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.

One last major source of data is APIs: https://github.com/toddmotto/public-apis

API stands for Application Programming Interface, and while it originally meant, e.g., the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.

*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. Image, text, or (public) APIs are probably more tractable - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
###Code
###Output
_____no_output_____
###Markdown
Your assignment - pick a dataset and do something like the above

This is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar semi-clean source. You don't want the data that you're working with for this assignment to have any bigger issues than maybe not having headers or including missing values, etc.

After you have chosen your dataset, do the following:
- Import the dataset using the method that you are least comfortable with (!curl or CSV upload).
  - Make sure that your dataset has the number of rows and columns that you expect.
  - Make sure that your dataset has appropriate column names, rename them if necessary.
  - If your dataset uses markers like "?" to indicate missing values, replace them with NaNs during import.
- Identify and fill missing values in your dataset (if any)
  - Don't worry about using methods more advanced than the `.fillna()` function for today.
- Create one of each of the following plots using your dataset
  - Scatterplot
  - Histogram
  - Density Plot
  - Pairplot (note that pairplots will take a long time to load with large datasets or datasets with many columns)

If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck!).

If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
dataset_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00397/LasVegasTripAdvisorReviews-Dataset.csv'
import pandas as pd
vegas_data = pd.read_csv(dataset_url, delimiter=';')
vegas_data.head(78)
#this data set used semicolons as the delimiter but had some values with commas which confused pandas
#check number of colums and rows (20x504 according to UCI)
vegas_data.shape
#find any missing data. UCI doesn't provide a value
vegas_data.isna().sum()
#scatter plot
vegas_data.plot.scatter('Nr. hotel reviews', 'Member years');
# we can see above anomalous data which turns out to be row 76 so we drop that row and try again
new_data=vegas_data.drop(vegas_data.index[75])
new_data.plot.scatter('Nr. hotel reviews', 'Member years');
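# Alternative sketch: instead of dropping by position, filter out the anomalous
# row by condition (its 'Member years' value is a large negative number), which
# keeps working even if the row order changes.
new_data = vegas_data[vegas_data['Member years'] >= 0]
new_data.plot.scatter('Nr. hotel reviews', 'Member years');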
new_data.Score.hist();
#density plot
new_data.Score.plot.kde();
#pair plot
import seaborn as sns
sns.set(style='ticks', color_codes=True)
pplt = sns.pairplot(new_data)
###Output
_____no_output_____
###Markdown
Stretch Goals - Other types and sources of data

Not all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.

If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.

Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.

How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.

One last major source of data is APIs: https://github.com/toddmotto/public-apis

API stands for Application Programming Interface, and while it originally meant, e.g., the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.

*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. Image, text, or (public) APIs are probably more tractable - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
###Code
#Search github for job openings mentioning words: data, science
jobs_url='https://jobs.github.com/positions.json?page=1&search=data%20science'
job_search=pd.read_json(jobs_url)
job_search.head(10)
#cell with raw data to scan for errors or incomplete data
!curl "https://archive.ics.uci.edu/ml/machine-learning-databases/00397/LasVegasTripAdvisorReviews-Dataset.csv"
###Output
User country;Nr. reviews;Nr. hotel reviews;Helpful votes;Score;Period of stay;Traveler type;Pool;Gym;Tennis court;Spa;Casino;Free internet;Hotel name;Hotel stars;Nr. rooms;User continent;Member years;Review month;Review weekday
USA;11;4;13;5;Dec-Feb;Friends;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;North America;9;January;Thursday
USA;119;21;75;3;Dec-Feb;Business;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;North America;3;January;Friday
USA;36;9;25;5;Mar-May;Families;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;North America;2;February;Saturday
UK;14;7;14;4;Mar-May;Friends;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;Europe;6;February;Friday
Canada;5;5;2;4;Mar-May;Solo;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;North America;7;March;Tuesday
Canada;31;8;27;3;Mar-May;Couples;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;North America;2;March;Tuesday
UK;45;12;46;4;Mar-May;Couples;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;Europe;4;April;Friday
USA;2;1;4;4;Mar-May;Families;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;North America;0;April;Tuesday
India;24;3;8;4;Mar-May;Friends;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;Asia;3;May;Saturday
Canada;12;7;11;3;Mar-May;Families;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;North America;5;May;Tuesday
USA;102;24;58;2;Jun-Aug;Families;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;North America;9;June;Friday
Australia;20;9;24;3;Jun-Aug;Friends;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;Oceania;4;June;Saturday
USA;7;6;9;2;Jun-Aug;Friends;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;North America;1;July;Wednesday
USA;22;5;13;3;Jun-Aug;Friends;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;North America;1;July;Thursday
UK;3;3;0;3;Jun-Aug;Friends;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;Europe;1;August;Sunday
New Zeland;146;17;33;4;Jun-Aug;Families;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;Oceania;2;August;Saturday
Canada;8;8;9;1;Sep-Nov;Families;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;North America;4;September;Wednesday
USA;9;3;1;4;Sep-Nov;Families;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;North America;2;September;Saturday
Canada;41;9;19;3;Sep-Nov;Couples;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;North America;2;October;Tuesday
USA;8;7;26;2;Sep-Nov;Couples;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;North America;10;October;Monday
UK;10;5;2;4;Sep-Nov;Couples;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;Europe;7;November;Saturday
New Zeland;4;3;3;1;Sep-Nov;Couples;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;Oceania;3;November;Monday
UK;18;7;19;4;Dec-Feb;Families;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;Europe;0;December;Saturday
USA;4;4;3;2;Dec-Feb;Couples;NO;YES;NO;NO;YES;YES;Circus Circus Hotel & Casino Las Vegas;3;3773;North America;5;December;Sunday
Ireland;29;11;15;4;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;Europe;3;January;Monday
USA;114;42;52;4;Dec-Feb;Business;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;North America;11;January;Saturday
Canada;30;12;17;5;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;North America;8;February;Friday
UK;87;18;36;3;Dec-Feb;Business;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;Europe;3;February;Thursday
USA;26;10;28;5;Mar-May;Solo;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;North America;1;March;Wednesday
Ireland;8;7;9;5;Mar-May;Couples;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;Europe;8;March;Wednesday
Canada;11;8;13;5;Mar-May;Couples;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;North America;7;April;Friday
Australia;4;3;2;3;Mar-May;Couples;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;Oceania;0;April;Friday
Canada;56;8;7;3;Mar-May;Solo;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;North America;1;May;Tuesday
Egypt;13;12;8;3;Mar-May;Friends;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;Africa;4;May;Wednesday
Australia;58;9;15;4;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;Oceania;0;June;Saturday
Finland;20;7;4;3;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;Europe;6;June;Saturday
USA;70;27;24;4;Jun-Aug;Friends;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;North America;6;July;Friday
Kenya;6;3;7;4;Jun-Aug;Friends;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;Africa;1;July;Thursday
USA;290;263;299;4;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;North America;10;August;Monday
USA;24;6;9;2;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;North America;2;August;Wednesday
Jordan;29;8;21;3;Sep-Nov;Business;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;Europe;2;September;Wednesday
Canada;20;5;59;3;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;North America;8;September;Saturday
Netherlands;3;3;3;3;Sep-Nov;Families;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;Europe;4;October;Saturday
Ireland;47;6;27;4;Sep-Nov;Friends;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;Europe;5;October;Monday
USA;35;8;19;4;Sep-Nov;Families;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;North America;6;November;Sunday
UK;6;0;4;4;Sep-Nov;Friends;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;Europe;1;November;Sunday
UK;74;47;54;4;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;Europe;7;December;Wednesday
Syria;34;8;30;3;Dec-Feb;Solo;YES;YES;NO;YES;YES;YES;Excalibur Hotel & Casino;3;3981;Asia;4;December;Tuesday
UK;576;43;340;3;Dec-Feb;Couples;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;Europe;3;January;Monday
USA;20;8;11;4;Dec-Feb;Solo;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;North America;5;January;Saturday
USA;418;32;132;2;Dec-Feb;Couples;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;North America;3;February;Tuesday
USA;73;13;22;4;Dec-Feb;Couples;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;North America;2;February;Tuesday
Canada;30;10;32;3;Mar-May;Couples;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;North America;2;March;Wednesday
USA;63;15;17;4;Mar-May;Business;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;North America;2;March;Tuesday
Scotland;24;10;13;5;Mar-May;Couples;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;Europe;5;April;Wednesday
South Africa;54;18;16;2;Mar-May;Business;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;Africa;5;April;Tuesday
Australia;20;7;11;4;Mar-May;Couples;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;Oceania;2;May;Friday
UK;41;7;24;4;Mar-May;Couples;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;Europe;3;May;Monday
Ireland;7;5;7;4;Jun-Aug;Couples;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;Europe;9;June;Tuesday
Canada;13;9;15;4;Jun-Aug;Friends;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;North America;1;June;Monday
UK;10;5;4;4;Jun-Aug;Families;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;Europe;1;July;Friday
New Zeland;9;6;19;3;Jun-Aug;Couples;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;Oceania;7;July;Wednesday
Swiss;36;19;36;2;Jun-Aug;Solo;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;Europe;5;August;Thursday
UK;33;12;19;3;Jun-Aug;Families;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;Europe;5;August;Saturday
United Arab Emirates;156;126;142;3;Sep-Nov;Friends;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;Asia;8;September;Friday
Ireland;19;17;16;4;Sep-Nov;Couples;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;Europe;5;September;Wednesday
USA;23;17;11;3;Sep-Nov;Families;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;North America;1;October;Friday
USA;13;3;3;2;Sep-Nov;Couples;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;North America;1;October;Sunday
Hungary;8;5;8;4;Sep-Nov;Couples;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;Europe;8;November;Friday
China;1;0;2;1;Sep-Nov;Business;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;Asia;0;November;Wednesday
Greece;21;18;6;2;Dec-Feb;Business;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;Europe;0;December;Sunday
Mexico;56;14;36;5;Dec-Feb;Couples;YES;YES;NO;YES;YES;NO;Monte Carlo Resort&Casino;4;3003;North America;3;December;Monday
Croatia;29;11;14;3;Dec-Feb;Business;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;Europe;6;January;Sunday
Australia;11;5;8;4;Dec-Feb;Couples;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;Oceania;1;January;Thursday
Canada;19;12;167;4;Dec-Feb;Friends;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;North America;9;February;Monday
USA;17;9;16;5;Dec-Feb;Solo;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;North America;-1806;February;Monday
USA;43;8;20;4;Mar-May;Couples;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;North America;1;March;Friday
Canada;12;8;3;4;Mar-May;Friends;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;North America;2;March;Saturday
USA;15;14;7;4;Mar-May;Friends;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;North America;9;April;Tuesday
Australia;16;13;16;3;Mar-May;Families;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;Oceania;6;April;Wednesday
India;12;4;25;3;Mar-May;Families;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;Asia;1;May;Friday
Germany;10;0;5;4;Mar-May;Friends;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;Europe;3;May;Sunday
USA;27;17;16;3;Jun-Aug;Families;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;North America;6;June;Thursday
Canada;6;5;5;4;Jun-Aug;Couples;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;North America;2;June;Tuesday
Australia;21;20;14;5;Jun-Aug;Couples;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;Oceania;3;July;Wednesday
Malaysia;43;14;27;4;Jun-Aug;Solo;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;Asia;5;July;Thursday
Mexico;97;31;37;4;Jun-Aug;Couples;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;South America;8;August;Saturday
UK;7;3;4;4;Jun-Aug;Couples;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;Europe;5;August;Friday
UK;11;9;6;3;Sep-Nov;Couples;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;Europe;0;September;Friday
USA;78;11;30;4;Sep-Nov;Business;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;North America;2;September;Tuesday
Australia;12;7;4;5;Sep-Nov;Families;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;Oceania;5;October;Monday
USA;27;11;5;3;Sep-Nov;Couples;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;North America;2;October;Monday
Thailand;4;3;1;5;Sep-Nov;Couples;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;Asia;7;November;Friday
Australia;27;9;8;4;Sep-Nov;Families;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;Oceania;2;November;Friday
Canada;12;3;7;4;Dec-Feb;Friends;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;North America;4;December;Saturday
Canada;21;16;48;5;Dec-Feb;Couples;YES;YES;YES;YES;YES;YES;Treasure Island- TI Hotel & Casino;4;2884;North America;12;December;Thursday
UK;34;17;30;4;Dec-Feb;Families;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;Europe;8;January;Sunday
USA;12;6;1;5;Dec-Feb;Friends;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;North America;2;January;Sunday
Phillippines;79;39;51;3;Dec-Feb;Couples;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;Asia;5;February;Wednesday
Israel;18;10;16;3;Dec-Feb;Business;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;Asia;2;February;Thursday
Canada;13;3;8;4;Mar-May;Business;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;North America;3;March;Wednesday
UK;14;4;9;4;Mar-May;Couples;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;Europe;1;March;Friday
Ireland;19;9;28;5;Mar-May;Couples;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;Europe;3;April;Friday
India ;88;15;103;4;Mar-May;Business;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;Asia;3;April;Monday
New Zeland;8;4;0;4;Mar-May;Couples;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;Oceania;1;May;Tuesday
Belgium;39;15;31;3;Mar-May;Friends;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;Europe;1;May;Sunday
UK;130;41;61;4;Jun-Aug;Business;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;Europe;3;June;Tuesday
Australia;79;78;105;4;Jun-Aug;Friends;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;Oceania;7;June;Wednesday
UK;13;10;16;4;Jun-Aug;Solo;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;Europe;10;July;Wednesday
UK;18;6;17;4;Jun-Aug;Couples;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;Europe;1;July;Monday
New Zeland;8;7;3;5;Jun-Aug;Friends;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;Oceania;1;August;Saturday
Australia;15;4;6;5;Jun-Aug;Couples;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;Oceania;0;August;Friday
Canada;27;0;9;1;Sep-Nov;Couples;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;North America;0;September;Tuesday
Australia;5;3;2;5;Sep-Nov;Couples;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;Oceania;5;September;Wednesday
UK;22;6;6;5;Sep-Nov;Friends;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;Europe;1;October;Tuesday
USA;5;3;11;2;Sep-Nov;Friends;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;North America;1;October;Thursday
India;31;8;11;4;Sep-Nov;Solo;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;Asia;1;November;Sunday
Netherlands;33;8;8;5;Sep-Nov;Friends;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;Europe;7;November;Saturday
UK;10;6;11;5;Dec-Feb;Couples;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;Europe;4;December;Saturday
UK;12;3;3;5;Dec-Feb;Families;YES;YES;YES;YES;YES;YES;Tropicana Las Vegas - A Double Tree by Hilton Hotel;4;1467;Europe;4;December;Thursday
USA;50;14;24;5;Dec-Feb;Friends;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;North America;7;January;Wednesday
USA;161;33;85;4;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;North America;3;January;Friday
Puerto Rico;153;38;81;5;Dec-Feb;Families;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;North America;10;February;Tuesday
Canada;14;3;12;5;Dec-Feb;Families;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;North America;2;February;Thursday
USA;31;12;10;5;Mar-May;Families;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;North America;4;March;Friday
USA;15;10;29;3;Mar-May;Families;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;North America;9;March;Friday
Canada;5;5;4;5;Mar-May;Couples;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;North America;3;April;Thursday
USA;46;4;21;4;Mar-May;Couples;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;North America;2;April;Saturday
UK;15;6;39;1;Mar-May;Couples;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;Europe;4;May;Sunday
UK;63;27;35;4;Mar-May;Couples;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;Europe;5;May;Wednesday
USA;17;15;13;4;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;North America;2;June;Sunday
USA;25;22;36;1;Jun-Aug;Solo;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;North America;9;June;Tuesday
USA;14;8;4;5;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;North America;1;July;Monday
UK;59;56;37;3;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;Europe;6;July;Tuesday
UK;11;10;17;5;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;Europe;3;August;Friday
Canada;54;18;31;4;Jun-Aug;Friends;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;North America;5;August;Wednesday
USA;4;3;4;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;North America;3;September;Monday
USA;10;7;7;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;North America;3;September;Tuesday
USA;14;6;7;5;Sep-Nov;Business;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;North America;11;October;Sunday
USA;39;12;16;5;Sep-Nov;Friends;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;North America;2;October;Thursday
Australia;35;24;43;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;Oceania;4;November;Sunday
Australia;6;6;9;3;Sep-Nov;Families;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;Oceania;0;November;Sunday
USA;23;7;2;4;Dec-Feb;Families;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;North America;3;December;Friday
USA;48;21;67;4;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;Caesars Palace;5;3348;North America;13;December;Sunday
Australia;16;9;14;4;Dec-Feb;Friends;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;Oceania;3;January;Thursday
Australia;12;6;3;5;Dec-Feb;Friends;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;Oceania;1;January;Thursday
Switzerland;36;9;15;4;Dec-Feb;Friends;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;Europe;2;February;Saturday
UK;127;40;80;5;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;Europe;8;February;Wednesday
USA;12;5;1;5;Mar-May;Couples;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;North America;1;March;Monday
USA;17;17;74;5;Mar-May;Couples;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;North America;10;March;Saturday
Ireland;23;7;21;4;Mar-May;Friends;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;Europe;7;April;Thursday
USA;102;17;45;5;Mar-May;Business;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;North America;2;April;Monday
Germany;113;15;55;5;Mar-May;Families;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;Europe;8;May;Thursday
USA;30;8;10;2;Mar-May;Business;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;North America;1;May;Wednesday
Germany;24;24;24;2;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;Europe;7;June;Wednesday
Ireland;15;5;18;5;Jun-Aug;Friends;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;Europe;3;June;Wednesday
USA;6;4;14;1;Jun-Aug;Business;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;North America;1;July;Wednesday
Australia;3;3;0;4;Jun-Aug;Friends;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;Oceania;7;July;Tuesday
Canada;41;10;56;5;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;North America;4;August;Saturday
USA;19;8;56;4;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;North America;10;August;Sunday
UK;18;9;10;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;Europe;1;September;Wednesday
Norway;6;6;7;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;Europe;7;September;Tuesday
USA;103;5;98;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;North America;7;October;Monday
UK;18;4;16;5;Sep-Nov;Friends;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;Europe;0;October;Friday
Egypt;9;7;7;5;Sep-Nov;Business;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;Africa;5;November;Wednesday
USA;50;19;45;5;Sep-Nov;Business;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;North America;0;November;Friday
Canada;4;3;8;2;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;North America;8;December;Tuesday
USA;7;3;8;5;Dec-Feb;Families;YES;YES;NO;YES;YES;YES;The Cosmopolitan Las Vegas;5;2959;North America;5;December;Sunday
Mexico;28;23;31;3;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;North America;4;January;Friday
USA;32;6;11;5;Dec-Feb;Families;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;North America;10;January;Friday
Canada;4;0;4;5;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;North America;9;February;Friday
USA;37;8;9;4;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;North America;3;February;Tuesday
Canada;10;3;7;5;Mar-May;Families;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;North America;6;March;Wednesday
Canada;36;30;80;5;Mar-May;Families;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;North America;8;March;Friday
USA;31;10;14;4;Mar-May;Couples;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;North America;5;April;Sunday
Canada;66;33;60;5;Mar-May;Couples;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;North America;10;April;Saturday
France;17;4;8;5;Mar-May;Couples;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;Europe;0;May;Saturday
UK;57;20;71;5;Mar-May;Business;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;Europe;0;May;Friday
USA;42;8;22;4;Jun-Aug;Friends;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;North America;6;June;Monday
USA;28;20;14;3;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;North America;8;June;Friday
Spain;19;8;10;4;Jun-Aug;Friends;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;Europe;3;July;Wednesday
USA;20;11;21;5;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;North America;9;July;Friday
UK;75;19;93;4;Jun-Aug;Friends;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;Europe;8;August;Sunday
USA;91;16;26;4;Jun-Aug;Business;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;North America;3;August;Wednesday
UK;52;48;78;3;Sep-Nov;Friends;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;Europe;8;September;Saturday
USA;93;4;8;5;Sep-Nov;Families;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;North America;2;September;Saturday
Canada;9;3;15;5;Sep-Nov;Families;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;North America;1;October;Wednesday
Australia;5;3;9;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;Oceania;2;October;Thursday
Singapore;37;7;10;5;Sep-Nov;Friends;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;Asia;0;November;Sunday
Brazil;31;25;35;3;Sep-Nov;Families;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;South America;7;November;Sunday
USA;6;4;4;5;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;North America;7;December;Thursday
USA;252;55;113;4;Dec-Feb;Families;YES;YES;NO;YES;YES;YES;The Palazzo Resort Hotel Casino;5;3025;North America;8;December;Monday
USA;164;48;82;5;Dec-Feb;Couples;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;North America;4;January;Saturday
USA;5;3;9;5;Dec-Feb;Families;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;North America;4;January;Tuesday
Canada;7;5;20;4;Dec-Feb;Couples;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;North America;3;February;Saturday
USA;18;11;15;5;Dec-Feb;Couples;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;North America;7;February;Tuesday
USA;20;13;16;5;Mar-May;Friends;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;North America;6;March;Friday
USA;21;5;12;4;Mar-May;Business;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;North America;8;March;Monday
USA;125;35;48;5;Mar-May;Business;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;North America;4;April;Sunday
USA;11;7;10;4;Mar-May;Couples;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;North America;1;April;Monday
Singapore;16;10;16;3;Mar-May;Solo;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;Asia;7;May;Monday
UK;19;5;5;5;Mar-May;Couples;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;Europe;3;May;Monday
USA;73;27;365;5;Jun-Aug;Couples;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;North America;6;June;Wednesday
USA;5;4;4;5;Jun-Aug;Couples;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;North America;11;June;Wednesday
USA;30;10;17;5;Jun-Aug;Couples;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;North America;3;July;Monday
Canada;7;4;10;5;Jun-Aug;Couples;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;North America;3;July;Sunday
Canada;67;46;99;5;Jun-Aug;Couples;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;North America;6;August;Sunday
Finland;18;13;25;5;Jun-Aug;Solo;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;Europe;9;August;Saturday
Canada;12;7;11;2;Sep-Nov;Business;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;North America;4;September;Monday
Canada;40;12;26;5;Sep-Nov;Friends;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;North America;7;September;Saturday
USA;142;17;31;4;Sep-Nov;Couples;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;North America;3;October;Monday
UK;8;4;16;5;Sep-Nov;Couples;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;Europe;7;October;Saturday
Costa Rica;15;9;3;5;Sep-Nov;Business;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;North America;2;November;Monday
UK;2;18;14;5;Sep-Nov;Couples;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;Europe;8;November;Wednesday
UK;10;6;10;5;Dec-Feb;Couples;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;Europe;5;December;Tuesday
USA;16;13;55;5;Dec-Feb;Couples;YES;YES;YES;YES;YES;YES;Wynn Las Vegas;5;2700;North America;9;December;Tuesday
UK;21;7;13;5;Dec-Feb;Solo;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;Europe;10;January;Tuesday
India;69;58;41;5;Dec-Feb;Business;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;Asia;5;January;Saturday
USA;33;13;8;5;Dec-Feb;Business;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;North America;5;February;Wednesday
USA;1;0;2;5;Dec-Feb;Families;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;North America;0;February;Tuesday
USA;49;8;23;5;Mar-May;Couples;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;North America;5;March;Monday
USA;775;52;255;3;Mar-May;Couples;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;North America;2;March;Wednesday
United Arab Emirates;16;7;6;4;Mar-May;Couples;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;Asia;7;April;Monday
USA;1;0;1;5;Mar-May;Couples;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;North America;0;April;Wednesday
Singapore;19;11;17;4;Mar-May;Families;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;Asia;7;May;Friday
Brazil;7;4;8;5;Mar-May;Families;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;South America;7;May;Sunday
USA;38;18;16;5;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;North America;8;June;Sunday
USA;48;14;30;5;Jun-Aug;Business;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;North America;9;June;Wednesday
Canada;136;20;55;5;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;North America;3;July;Sunday
USA;26;8;1;4;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;North America;6;July;Monday
Iran;3;3;2;4;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;Asia;4;August;Sunday
USA;78;17;24;5;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;North America;5;August;Monday
UK;24;18;12;5;Sep-Nov;Families;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;Europe;0;September;Thursday
Egypt;2;0;15;1;Sep-Nov;Families;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;Africa;1;September;Monday
Ireland;4;3;3;5;Sep-Nov;Friends;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;Europe;11;October;Thursday
Australia;11;7;17;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;Oceania;2;October;Saturday
Germany;1;0;1;2;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;Europe;0;November;Wednesday
Australia;35;16;17;4;Sep-Nov;Families;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;Oceania;2;November;Tuesday
USA;110;19;76;4;Dec-Feb;Business;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;North America;2;December;Tuesday
USA;53;25;24;5;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;Trump International Hotel Las Vegas;5;1282;North America;3;December;Sunday
Saudi Arabia;320;45;102;3;Dec-Feb;Business;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;Asia;6;January;Tuesday
USA;2;0;2;4;Dec-Feb;Business;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;North America;1;January;Saturday
USA;26;6;12;5;Dec-Feb;Families;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;North America;1;February;Sunday
USA;4;4;0;5;Dec-Feb;Couples;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;North America;2;February;Monday
USA;9;9;6;2;Mar-May;Solo;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;North America;1;March;Wednesday
USA;17;14;70;5;Mar-May;Friends;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;North America;10;March;Sunday
Canada;3;0;1;5;Mar-May;Couples;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;North America;1;April;Tuesday
Canada;14;0;4;2;Mar-May;Families;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;North America;4;April;Tuesday
USA;235;111;267;5;Mar-May;Friends;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;North America;9;May;Friday
USA;59;9;21;5;Mar-May;Couples;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;North America;3;May;Monday
USA;2;0;2;5;Jun-Aug;Friends;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;North America;0;June;Wednesday
USA;2;0;1;5;Jun-Aug;Couples;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;North America;0;June;Wednesday
USA;21;18;53;5;Jun-Aug;Couples;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;North America;11;July;Wednesday
USA;4;0;16;1;Jun-Aug;Friends;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;North America;0;July;Thursday
Netherlands;184;52;70;5;Jun-Aug;Couples;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;Europe;3;August;Wednesday
UK;262;75;150;4;Jun-Aug;Families;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;Europe;2;August;Monday
USA;69;11;17;5;Sep-Nov;Couples;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;North America;0;September;Sunday
Australia;92;12;27;4;Sep-Nov;Business;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;Oceania;2;September;Saturday
USA;131;61;116;3;Sep-Nov;Families;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;North America;8;October;Thursday
Norway;25;10;16;5;Sep-Nov;Friends;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;Europe;8;October;Tuesday
Canada;49;22;53;3;Sep-Nov;Couples;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;North America;5;November;Sunday
USA;148;33;55;4;Sep-Nov;Business;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;North America;3;November;Wednesday
Canada;26;7;22;4;Dec-Feb;Couples;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;North America;4;December;Wednesday
USA;13;4;1;4;Dec-Feb;Couples;YES;NO;NO;NO;YES;YES;The Cromwell;4,5;188;North America;2;December;Wednesday
Thailand;3;3;20;1;Dec-Feb;Business;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;Asia;5;January;Tuesday
USA;46;12;87;5;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;North America;3;January;Tuesday
UK;127;40;81;4;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;Europe;8;February;Thursday
USA;15;5;12;5;Dec-Feb;Business;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;North America;4;February;Sunday
USA;16;14;23;5;Mar-May;Couples;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;North America;7;March;Friday
USA;113;38;27;4;Mar-May;Friends;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;North America;1;March;Tuesday
UK;7;5;5;3;Mar-May;Families;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;Europe;7;April;Monday
Honduras;73;32;99;4;Mar-May;Friends;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;South America;10;April;Thursday
USA;35;12;3;5;Mar-May;Business;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;North America;2;May;Friday
UK;139;20;86;5;Mar-May;Friends;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;Europe;5;May;Monday
UK;28;12;30;5;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;Europe;2;June;Saturday
USA;34;8;26;4;Jun-Aug;Business;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;North America;4;June;Tuesday
USA;4;3;5;5;Jun-Aug;Friends;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;North America;3;July;Thursday
USA;32;11;14;5;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;North America;3;July;Sunday
UK;9;3;4;5;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;Europe;1;August;Thursday
USA;333;58;200;5;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;North America;4;August;Sunday
USA;29;11;15;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;North America;6;September;Monday
UK;85;43;68;5;Sep-Nov;Friends;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;Europe;5;September;Wednesday
USA;12;5;1;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;North America;5;October;Friday
UK;34;10;7;4;Sep-Nov;Business;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;Europe;4;October;Thursday
India;13;7;8;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;Asia;5;November;Sunday
USA;5;5;14;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;North America;5;November;Sunday
USA;169;25;41;5;Dec-Feb;Friends;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;North America;8;December;Tuesday
USA;11;10;6;5;Dec-Feb;Business;YES;YES;NO;YES;YES;YES;Encore at wynn Las Vegas;5;2034;North America;7;December;Tuesday
USA;4;3;2;5;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;North America;6;January;Friday
USA;50;39;48;2;Dec-Feb;Families;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;North America;10;January;Thursday
Mexico;10;3;1;5;Dec-Feb;Business;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;North America;4;February;Tuesday
Canada;7;4;4;5;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;North America;1;February;Friday
UK;60;7;42;5;Mar-May;Couples;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;Europe;8;March;Tuesday
Netherlands;92;49;18;4;Mar-May;Couples;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;Europe;5;March;Thursday
Australia;106;37;58;4;Mar-May;Business;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;Oceania;3;April;Wednesday
Canada;7;5;45;4;Mar-May;Families;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;North America;10;April;Monday
Brazil;37;31;38;5;Mar-May;Families;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;South America;9;May;Friday
USA;70;25;19;1;Mar-May;Couples;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;North America;3;May;Friday
USA;9;3;4;5;Jun-Aug;Friends;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;North America;1;June;Monday
USA;11;6;7;5;Jun-Aug;Solo;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;North America;1;June;Monday
Finland;39;18;9;4;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;Europe;4;July;Thursday
UK;10;0;9;5;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;Europe;1;July;Wednesday
USA;78;17;24;4;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;North America;5;August;Monday
UK;48;21;19;5;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;Europe;4;August;Friday
Canada;112;34;41;5;Sep-Nov;Families;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;North America;7;September;Sunday
USA;28;17;14;5;Sep-Nov;Solo;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;North America;2;September;Wednesday
UK;11;3;16;2;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;Europe;2;October;Wednesday
USA;9;5;11;4;Sep-Nov;Business;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;North America;9;October;Wednesday
USA;4;4;1;3;Sep-Nov;Families;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;North America;0;November;Wednesday
Mexico;22;15;25;4;Sep-Nov;Families;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;North America;6;November;Monday
USA;42;11;36;5;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;North America;7;December;Monday
Australia;20;16;9;4;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;Hilton Grand Vacations on the Boulevard;3,5;1228;Oceania;7;December;Tuesday
Canada;160;34;88;4;Dec-Feb;Business;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;North America;6;January;Saturday
USA;372;78;169;4;Dec-Feb;Business;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;North America;4;January;Sunday
USA;84;18;36;5;Dec-Feb;Families;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;North America;4;February;Wednesday
USA;21;13;20;5;Dec-Feb;Business;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;North America;5;February;Thursday
UK;95;37;55;4;Mar-May;Friends;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;Europe;6;March;Tuesday
Germany;118;107;131;5;Mar-May;Business;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;Europe;10;March;Monday
Canada;6;0;8;5;Mar-May;Couples;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;North America;10;April;Wednesday
USA;60;4;31;4;Mar-May;Couples;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;North America;2;April;Tuesday
Australia;275;79;339;4;Mar-May;Couples;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;Oceania;7;May;Sunday
Canada;46;39;42;4;Mar-May;Business;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;North America;6;May;Wednesday
USA;6;5;6;5;Jun-Aug;Families;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;North America;4;June;Tuesday
Australia;11;0;5;5;Jun-Aug;Families;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;Oceania;0;June;Thursday
USA;75;37;48;5;Jun-Aug;Couples;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;North America;6;July;Saturday
Denmark;240;76;115;5;Jun-Aug;Couples;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;Europe;3;July;Monday
Taiwan;20;12;7;5;Jun-Aug;Families;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;Asia;1;August;Wednesday
Israel;62;40;25;5;Jun-Aug;Families;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;Asia;5;August;Sunday
Hawaii;88;19;58;5;Sep-Nov;Friends;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;North America;1;September;Wednesday
USA;6;3;5;5;Sep-Nov;Friends;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;North America;0;September;Tuesday
Thailand;18;9;8;3;Sep-Nov;Business;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;Asia;7;October;Monday
Mexico;159;43;78;5;Sep-Nov;Families;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;North America;4;October;Wednesday
Kuwait;47;23;20;4;Sep-Nov;Couples;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;Asia;1;November;Thursday
Norway;86;12;46;4;Sep-Nov;Couples;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;Europe;5;November;Sunday
USA;42;9;13;5;Dec-Feb;Couples;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;North America;1;December;Tuesday
Germany;64;33;23;4;Dec-Feb;Families;YES;YES;NO;NO;YES;YES;Marriott's Grand Chateau;3,5;732;Europe;7;December;Thursday
USA;167;71;119;5;Dec-Feb;Couples;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;North America;8;January;Saturday
USA;25;5;12;5;Dec-Feb;Friends;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;North America;8;January;Thursday
UK;12;12;6;3;Dec-Feb;Business;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;Europe;4;February;Friday
USA;608;117;319;5;Dec-Feb;Business;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;North America;5;February;Saturday
USA;5;4;3;5;Mar-May;Friends;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;North America;8;March;Sunday
USA;26;17;34;5;Mar-May;Couples;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;North America;7;March;Thursday
Brazil;11;5;9;5;Mar-May;Couples;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;South America;7;April;Monday
USA;39;10;13;4;Mar-May;Couples;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;North America;3;April;Wednesday
India;1;0;3;2;Mar-May;Families;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;Asia;0;May;Thursday
USA;108;20;22;3;Mar-May;Business;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;North America;5;May;Thursday
Malaysia;119;20;46;5;Jun-Aug;Friends;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;Asia;1;June;Sunday
USA;27;15;17;3;Jun-Aug;Couples;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;North America;6;June;Tuesday
Germany;55;55;134;5;Jun-Aug;Couples;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;Europe;5;July;Wednesday
Canada;7;3;7;4;Jun-Aug;Business;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;North America;3;July;Friday
Canada;3;3;2;3;Jun-Aug;Business;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;North America;0;August;Wednesday
USA;11;3;19;4;Jun-Aug;Couples;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;North America;6;August;Monday
Australia;27;15;16;5;Sep-Nov;Couples;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;Oceania;5;September;Sunday
USA;13;4;6;5;Sep-Nov;Couples;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;North America;0;September;Wednesday
UK;19;11;14;3;Sep-Nov;Couples;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;Europe;5;October;Sunday
Canada;38;12;17;5;Sep-Nov;Business;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;North America;7;October;Friday
USA;167;71;119;4;Sep-Nov;Couples;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;North America;8;November;Sunday
USA;28;6;9;3;Sep-Nov;Business;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;North America;3;November;Tuesday
Canada;9;8;5;5;Dec-Feb;Families;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;North America;4;December;Sunday
USA;13;4;4;5;Dec-Feb;Friends;YES;YES;YES;YES;YES;YES;Tuscany Las Vegas Suites & Casino;3;716;North America;5;December;Tuesday
USA;65;24;72;3;Dec-Feb;Business;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;North America;6;January;Monday
USA;8;3;3;5;Dec-Feb;Couples;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;North America;3;January;Thursday
UK;61;13;26;2;Dec-Feb;Couples;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;Europe;4;February;Sunday
USA;3;3;8;2;Dec-Feb;Families;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;North America;1;February;Saturday
USA;66;13;21;5;Mar-May;Friends;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;North America;9;March;Friday
Hawaii;9;7;4;5;Mar-May;Couples;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;North America;2;March;Tuesday
Hawaii;82;14;61;3;Mar-May;Families;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;North America;1;April;Wednesday
Canada;113;48;33;3;Mar-May;Families;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;North America;4;April;Sunday
USA;11;8;12;3;Mar-May;Friends;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;North America;6;May;Sunday
Canada;2;1;1;3;Mar-May;Families;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;North America;3;May;Tuesday
USA;31;11;24;5;Jun-Aug;Couples;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;North America;2;June;Wednesday
Australia;32;12;22;4;Jun-Aug;Couples;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;Oceania;5;June;Wednesday
USA;56;7;25;3;Jun-Aug;Families;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;North America;7;July;Sunday
Canada;45;6;22;5;Jun-Aug;Couples;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;North America;3;July;Thursday
USA;17;4;4;4;Jun-Aug;Families;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;North America;0;August;Sunday
USA;23;23;11;5;Jun-Aug;Families;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;North America;1;August;Sunday
Canada;56;18;24;5;Sep-Nov;Friends;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;North America;4;September;Saturday
USA;290;263;300;4;Sep-Nov;Business;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;North America;10;September;Saturday
Australia;26;7;9;4;Sep-Nov;Families;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;Oceania;0;October;Friday
India;116;44;53;4;Sep-Nov;Couples;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;Asia;5;October;Saturday
Czech Republic;14;5;15;4;Sep-Nov;Business;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;Europe;5;November;Thursday
USA;13;7;1;5;Sep-Nov;Business;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;North America;4;November;Sunday
USA;20;9;25;4;Dec-Feb;Couples;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;North America;8;December;Tuesday
Mexico;3;3;3;5;Dec-Feb;Families;YES;YES;NO;NO;NO;YES;Hilton Grand Vacations at the Flamingo;3;315;North America;2;December;Wednesday
USA;17;3;12;4;Dec-Feb;Friends;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;North America;2;January;Monday
Australia;21;11;13;5;Dec-Feb;Families;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;Oceania;4;January;Sunday
USA;60;11;33;5;Dec-Feb;Couples;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;North America;3;February;Thursday
Canada;12;4;15;5;Dec-Feb;Couples;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;North America;11;February;Monday
USA;27;12;51;5;Mar-May;Friends;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;North America;4;March;Tuesday
USA;34;12;35;4;Mar-May;Couples;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;North America;3;March;Tuesday
Canada;69;22;38;5;Mar-May;Families;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;North America;6;April;Thursday
USA;16;4;7;5;Mar-May;Couples;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;North America;4;April;Saturday
Australia;26;5;5;4;Mar-May;Families;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;Oceania;1;May;Thursday
USA;54;10;15;4;Mar-May;Business;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;North America;5;May;Tuesday
UK;35;17;27;5;Jun-Aug;Couples;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;Europe;6;June;Tuesday
India;23;12;7;5;Jun-Aug;Families;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;Asia;7;June;Saturday
UK;6;3;14;5;Jun-Aug;Families;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;Europe;3;July;Sunday
USA;27;8;16;4;Jun-Aug;Friends;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;North America;4;July;Thursday
USA;41;8;14;3;Jun-Aug;Families;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;North America;4;August;Wednesday
Canada;12;4;6;5;Jun-Aug;Couples;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;North America;2;August;Monday
USA;14;3;14;5;Sep-Nov;Friends;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;North America;6;September;Tuesday
USA;415;162;265;4;Sep-Nov;Business;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;North America;8;September;Sunday
Ireland;76;27;32;3;Sep-Nov;Couples;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;Europe;6;October;Thursday
UK;24;12;30;4;Sep-Nov;Couples;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;Europe;4;October;Monday
Japan;23;9;23;3;Sep-Nov;Families;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;Asia;4;November;Monday
USA;182;24;47;4;Sep-Nov;Families;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;North America;2;November;Sunday
USA;289;28;133;4;Dec-Feb;Families;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;North America;5;December;Tuesday
Canada;101;35;46;5;Dec-Feb;Friends;YES;YES;YES;NO;NO;YES;Wyndham Grand Desert;3,5;787;North America;5;December;Saturday
Ireland;31;11;20;4;Dec-Feb;Business;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;Europe;2;January;Friday
USA;20;6;9;5;Dec-Feb;Solo;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;North America;0;January;Wednesday
USA;60;16;15;4;Dec-Feb;Families;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;North America;5;February;Saturday
Mexico;1;0;2;5;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;North America;0;February;Sunday
USA;9;3;13;3;Mar-May;Couples;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;North America;2;March;Thursday
USA;19;7;5;5;Mar-May;Couples;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;North America;5;March;Wednesday
Israel;116;78;206;5;Mar-May;Couples;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;Asia;9;April;Sunday
Australia;22;13;26;4;Mar-May;Couples;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;Oceania;2;April;Friday
Australia;83;10;26;4;Mar-May;Families;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;Oceania;1;May;Tuesday
UK;20;3;9;5;Mar-May;Couples;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;Europe;0;May;Monday
USA;7;5;6;5;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;North America;1;June;Monday
USA;95;34;46;5;Jun-Aug;Business;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;North America;6;June;Sunday
Australia;15;6;19;4;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;Oceania;4;July;Wednesday
UK;13;7;12;4;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;Europe;5;July;Sunday
Canada;19;8;24;5;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;North America;10;August;Wednesday
UK;21;6;15;5;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;Europe;3;August;Saturday
UK;56;11;14;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;Europe;5;September;Friday
UK;29;7;15;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;Europe;7;September;Thursday
Brazil;20;0;8;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;South America;2;October;Friday
UK;39;20;31;4;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;Europe;8;October;Thursday
Switzerland;9;6;3;5;Sep-Nov;Friends;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;Europe;5;November;Saturday
Australia;43;38;29;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;Oceania;4;November;Saturday
Singapore;16;10;11;5;Dec-Feb;Families;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;Asia;9;December;Thursday
USA;6;6;12;4;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;The Venetian Las Vegas Hotel;5;4027;North America;10;December;Wednesday
Australia;16;6;3;4;Dec-Feb;Solo;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;Oceania;3;January;Saturday
USA;15;3;13;5;Dec-Feb;Families;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;North America;5;January;Thursday
USA;30;6;26;4;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;North America;3;February;Tuesday
USA;41;9;111;5;Dec-Feb;Business;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;North America;1;February;Friday
India;44;35;53;5;Mar-May;Couples;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;Asia;6;March;Tuesday
USA;30;9;17;2;Mar-May;Business;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;North America;0;March;Tuesday
Ireland;23;10;18;5;Mar-May;Business;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;Europe;4;April;Monday
Spain;3;0;8;2;Mar-May;Families;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;Europe;3;April;Tuesday
UK;12;6;16;3;Mar-May;Friends;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;Europe;3;May;Friday
UK;15;6;17;5;Mar-May;Couples;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;Europe;1;May;Thursday
UK;5;0;5;5;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;Europe;0;June;Wednesday
USA;32;5;11;4;Jun-Aug;Business;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;North America;6;June;Sunday
USA;28;5;23;5;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;North America;5;July;Friday
Korea;77;18;48;4;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;Asia;10;July;Saturday
USA;11;8;2;4;Jun-Aug;Solo;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;North America;0;August;Monday
Scotland;102;25;37;5;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;Europe;6;August;Sunday
Ireland;13;3;2;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;Europe;6;September;Tuesday
USA;62;13;31;4;Sep-Nov;Business;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;North America;2;September;Tuesday
UK;26;22;42;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;Europe;5;October;Sunday
UK;13;6;12;4;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;Europe;1;October;Saturday
UK;26;18;43;5;Sep-Nov;Friends;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;Europe;4;November;Monday
Canada;17;10;39;5;Sep-Nov;Friends;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;North America;3;November;Saturday
India;4;4;1;4;Dec-Feb;Business;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;Asia;1;December;Monday
Malaysia;20;18;19;2;Dec-Feb;Families;YES;YES;NO;YES;YES;YES;Bellagio Las Vegas;5;3933;Asia;4;December;Friday
USA;14;3;7;4;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;7;January;Saturday
Canada;19;3;12;5;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;1;January;Wednesday
USA;4;4;2;2;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;1;February;Wednesday
USA;116;12;43;4;Dec-Feb;Friends;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;4;February;Tuesday
USA;121;13;59;2;Mar-May;Couples;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;9;March;Tuesday
USA;14;7;6;5;Mar-May;Couples;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;2;March;Friday
USA;56;16;54;4;Mar-May;Business;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;9;April;Tuesday
Canada;73;4;39;3;Mar-May;Friends;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;3;April;Monday
USA;9;5;4;5;Mar-May;Friends;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;6;May;Thursday
USA;137;42;51;3;Mar-May;Couples;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;5;May;Sunday
Australia;30;12;38;5;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;Oceania;4;June;Saturday
Costa Rica;130;19;49;3;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;6;June;Sunday
USA;50;15;23;4;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;3;July;Friday
UK;22;9;11;4;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;Europe;3;July;Wednesday
Egypt;85;37;65;5;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;Africa;4;August;Friday
USA;73;7;26;5;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;3;August;Thursday
USA;20;10;27;4;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;2;September;Thursday
Canada;13;6;13;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;2;September;Tuesday
Canada;18;3;10;2;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;2;October;Monday
USA;6;4;6;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;0;October;Saturday
USA;48;8;25;4;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;6;November;Monday
USA;24;3;5;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;0;November;Sunday
Canada;123;26;63;5;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;4;December;Saturday
USA;24;7;6;4;Dec-Feb;Friends;YES;YES;NO;YES;YES;YES;Paris Las Vegas;4;2916;North America;1;December;Thursday
Canada;20;13;17;4;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;North America;4;January;Monday
USA;15;3;4;3;Dec-Feb;Families;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;North America;7;January;Tuesday
Italy;189;72;129;4;Dec-Feb;Solo;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;Europe;3;February;Friday
USA;25;19;27;3;Dec-Feb;Couples;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;North America;8;February;Monday
USA;33;11;12;5;Mar-May;Business;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;North America;4;March;Wednesday
USA;14;7;13;5;Mar-May;Friends;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;North America;4;March;Thursday
USA;38;22;47;4;Mar-May;Business;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;North America;4;April;Monday
Egypt;169;43;85;4;Mar-May;Solo;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;Africa;12;April;Sunday
USA;20;17;21;3;Mar-May;Friends;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;North America;4;May;Sunday
USA;17;10;18;4;Mar-May;Couples;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;North America;3;May;Tuesday
USA;96;47;161;4;Jun-Aug;Solo;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;North America;11;June;Monday
USA;38;13;11;3;Jun-Aug;Business;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;North America;5;June;Sunday
India;12;11;9;4;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;Asia;6;July;Monday
USA;9;8;1;5;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;North America;6;July;Monday
USA;23;21;22;3;Jun-Aug;Families;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;North America;7;August;Wednesday
USA;21;6;3;4;Jun-Aug;Couples;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;North America;5;August;Tuesday
USA;30;27;19;5;Sep-Nov;Solo;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;North America;6;September;Friday
USA;63;16;59;3;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;North America;11;September;Wednesday
UK;35;3;25;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;Europe;1;October;Monday
UK;15;4;8;5;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;Europe;1;October;Sunday
Canada;50;13;29;4;Sep-Nov;Couples;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;North America;8;November;Thursday
USA;154;23;31;4;Sep-Nov;Friends;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;North America;4;November;Thursday
USA;9;6;5;2;Dec-Feb;Families;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;North America;9;December;Wednesday
USA;20;19;112;4;Dec-Feb;Families;YES;YES;NO;YES;YES;YES;The Westin las Vegas Hotel Casino & Spa;4;826;North America;5;December;Tuesday
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing Data

Objectives for today:

- Load data from multiple sources into a Python notebook
  - From a URL (github or otherwise)
  - CSV upload method
  - !wget method
- "Clean" a dataset using common Python libraries
  - Removing NaN values "Data Imputation"
- Create basic plots appropriate for different data types
  - Scatter Plot
  - Histogram
  - Density Plot
  - Pairplot (if we have time)

Part 1 - Loading Data

Data comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.

Data set sources:

- https://archive.ics.uci.edu/ml/datasets.html
- https://github.com/awesomedata/awesome-public-datasets
- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)

Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags).

Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?

This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).

```
1. name: Name of the country concerned
2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 5=Asia, 6=Oceania
3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW
4. area: in thousands of square km
5. population: in round millions
6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others
7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others
8. bars: Number of vertical bars in the flag
9. stripes: Number of horizontal stripes in the flag
10. colours: Number of different colours in the flag
11. red: 0 if red absent, 1 if red present in the flag
12. green: same for green
13. blue: same for blue
14. gold: same for gold (also yellow)
15. white: same for white
16. black: same for black
17. orange: same for orange (also brown)
18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)
19. circles: Number of circles in the flag
20. crosses: Number of (upright) crosses
21. saltires: Number of diagonal crosses
22. quarters: Number of quartered sections
23. sunstars: Number of sun or star symbols
24. crescent: 1 if a crescent moon symbol present, else 0
25. triangle: 1 if any triangles present, 0 otherwise
26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 0
27. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise
28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise
29. topleft: colour in the top-left corner (moving right to decide tie-breaks)
30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)
```

Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
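# A possible solution sketch for the exercise above (not the official answer):
# pass the codebook's variable names to read_csv via the names= parameter.
# The column names below are copied from the codebook in the previous cell.
col_names = ['name', 'landmass', 'zone', 'area', 'population', 'language',
             'religion', 'bars', 'stripes', 'colours', 'red', 'green', 'blue',
             'gold', 'white', 'black', 'orange', 'mainhue', 'circles',
             'crosses', 'saltires', 'quarters', 'sunstars', 'crescent',
             'triangle', 'icon', 'animate', 'text', 'topleft', 'botright']

flag_data = pd.read_csv(flag_data_url, header=None, names=col_names)
flag_data.head()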
###Output
_____no_output_____
###Markdown
Steps of Loading and Exploring a Dataset:

- Find a dataset that looks interesting
- Learn what you can about it
  - What's in it?
  - How many rows and columns?
  - What types of variables?
- Look at the raw contents of the file
- Load it into your workspace (notebook)
  - Handle any challenges with headers
  - Handle any problems with missing values
- Then you can start to explore the data
  - Look at the summary statistics
  - Look at counts of different categories
  - Make some plots to look at the distribution of the data

3 ways of loading a dataset

From its URL
###Code
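# Sketch of loading a dataset straight from its URL. It assumes the UCI Adult
# dataset URL below is reachable; the column names are transcribed by hand
# from the dataset's adult.names description, so verify them against the source.
import pandas as pd

adult_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'
adult_columns = ['age', 'workclass', 'fnlwgt', 'education', 'education-num',
                 'marital-status', 'occupation', 'relationship', 'race', 'sex',
                 'capital-gain', 'capital-loss', 'hours-per-week',
                 'native-country', 'income']

# The file has no header row, so supply the names explicitly
adult = pd.read_csv(adult_url, header=None, names=adult_columns)
adult.head()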
###Output
_____no_output_____
###Markdown
From a local file
###Code
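# Sketch of the local-file / upload approach (assumes you are running in
# Google Colab; the filename 'adult.data' is just an example and must match
# whatever file you actually upload). Reuses adult_columns from the cell above.
from google.colab import files  # only available inside Google Colab

uploaded = files.upload()  # opens an upload dialog in the notebook

adult_local = pd.read_csv('adult.data', header=None, names=adult_columns)
adult_local.head()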
###Output
_____no_output_____
###Markdown
Using the `!wget` command
###Code
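# Sketch of the !wget approach: download the file into the notebook's working
# directory, then read it as a local file. Same URL and column names assumed
# as in the cells above.
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data

adult_wget = pd.read_csv('adult.data', header=None, names=adult_columns)
adult_wget.shape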
###Output
_____no_output_____
###Markdown
Part 2 - Deal with Missing Values

Diagnose Missing Values

Let's use the Adult Dataset from UCI.
###Code
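# Sketch, using the adult DataFrame loaded above. The Adult data marks missing
# values with the string ' ?' rather than NaN - that convention is an assumption
# worth checking against the raw file before relying on it.
import numpy as np

adult = adult.replace(' ?', np.nan)
adult.isnull().sum()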
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
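# Sketch of one simple imputation strategy: fill missing categorical values
# with each column's most frequent value. The column list is an assumption
# about where ' ?' typically appears in this dataset - adjust to what the
# diagnosis above actually shows.
for col in ['workclass', 'occupation', 'native-country']:
    adult[col] = adult[col].fillna(adult[col].mode()[0])

adult.isnull().sum()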
###Output
_____no_output_____
###Markdown
Part 3 - Explore the Dataset: Look at Summary Statistics

Numeric
###Code
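# Sketch: describe() summarizes the numeric columns by default
# (using the adult DataFrame from Part 2 as an example).
adult.describe()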
###Output
_____no_output_____
###Markdown
Non-Numeric
###Code
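# Sketch: exclude the numeric columns to summarize the object/categorical ones.
import numpy as np

adult.describe(exclude=[np.number])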
###Output
_____no_output_____
###Markdown
Look at Categorical Values

Part 4 - Basic Visualizations (using the Pandas Library)

Histogram
###Code
# Pandas Histogram
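# Sketch: histogram of a numeric column via the pandas plotting API,
# again assuming the adult DataFrame from the cells above.
adult['age'].hist(bins=20);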
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
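# Sketch: kernel density estimate of the same column (pandas delegates the
# KDE to scipy, which Colab normally has installed).
adult['age'].plot.density();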
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
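# Sketch: scatter plot of two numeric columns from the adult DataFrame.
adult.plot.scatter(x='age', y='hours-per-week');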
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing Data

Objectives for today:

- Load data from multiple sources into a Python notebook
  - From a URL (github or otherwise)
  - CSV upload method
  - !wget method
- "Clean" a dataset using common Python libraries
  - Removing NaN values "Data Imputation"
- Create basic plots appropriate for different data types
  - Scatter Plot
  - Histogram
  - Density Plot
  - Pairplot (if we have time)

Part 1 - Loading Data

Data comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.

Data set sources:

- https://archive.ics.uci.edu/ml/datasets.html
- https://github.com/awesomedata/awesome-public-datasets
- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)

Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags).

Lecture example - flag data
###Code
# Bhavani makes changes
# blah blah blah
# another blah blah blah
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?

This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).

```
1. name: Name of the country concerned
2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 5=Asia, 6=Oceania
3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW
4. area: in thousands of square km
5. population: in round millions
6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others
7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others
8. bars: Number of vertical bars in the flag
9. stripes: Number of horizontal stripes in the flag
10. colours: Number of different colours in the flag
11. red: 0 if red absent, 1 if red present in the flag
12. green: same for green
13. blue: same for blue
14. gold: same for gold (also yellow)
15. white: same for white
16. black: same for black
17. orange: same for orange (also brown)
18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)
19. circles: Number of circles in the flag
20. crosses: Number of (upright) crosses
21. saltires: Number of diagonal crosses
22. quarters: Number of quartered sections
23. sunstars: Number of sun or star symbols
24. crescent: 1 if a crescent moon symbol present, else 0
25. triangle: 1 if any triangles present, 0 otherwise
26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 0
27. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise
28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise
29. topleft: colour in the top-left corner (moving right to decide tie-breaks)
30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)
```

Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
###Output
_____no_output_____
###Markdown
Steps of Loading and Exploring a Dataset:

- Find a dataset that looks interesting
- Learn what you can about it
  - What's in it?
  - How many rows and columns?
  - What types of variables?
- Look at the raw contents of the file
- Load it into your workspace (notebook)
  - Handle any challenges with headers
  - Handle any problems with missing values
- Then you can start to explore the data
  - Look at the summary statistics
  - Look at counts of different categories
  - Make some plots to look at the distribution of the data

3 ways of loading a dataset

From its URL
###Code
###Output
_____no_output_____
###Markdown
From a local file
###Code
###Output
_____no_output_____
###Markdown
Using the `!wget` command
###Code
###Output
_____no_output_____
###Markdown
Part 2 - Deal with Missing Values

Diagnose Missing Values

Let's use the Adult Dataset from UCI.
###Code
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
###Output
_____no_output_____
###Markdown
Part 3 - Explore the Dataset: Look at Summary Statistics

Numeric
###Code
###Output
_____no_output_____
###Markdown
Non-Numeric
###Code
###Output
_____no_output_____
###Markdown
Look at Categorical Values

Part 4 - Basic Visualizations (using the Pandas Library)

Histogram
###Code
# Pandas Histogram
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading Data

Data comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.

Data set sources:

- https://archive.ics.uci.edu/ml/datasets.html
- https://github.com/awesomedata/awesome-public-datasets
- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)

Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags).

Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?

This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).

```
1. name: Name of the country concerned
2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 5=Asia, 6=Oceania
3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW
4. area: in thousands of square km
5. population: in round millions
6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others
7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others
8. bars: Number of vertical bars in the flag
9. stripes: Number of horizontal stripes in the flag
10. colours: Number of different colours in the flag
11. red: 0 if red absent, 1 if red present in the flag
12. green: same for green
13. blue: same for blue
14. gold: same for gold (also yellow)
15. white: same for white
16. black: same for black
17. orange: same for orange (also brown)
18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)
19. circles: Number of circles in the flag
20. crosses: Number of (upright) crosses
21. saltires: Number of diagonal crosses
22. quarters: Number of quartered sections
23. sunstars: Number of sun or star symbols
24. crescent: 1 if a crescent moon symbol present, else 0
25. triangle: 1 if any triangles present, 0 otherwise
26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 0
27. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise
28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise
29. topleft: colour in the top-left corner (moving right to decide tie-breaks)
30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)
```

Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...

Your assignment - pick a dataset and do something like the above

This is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.

If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).

If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
car_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/car/car.data'
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/car/car.data
import pandas as pd
car_data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/car/car.data')
car_data.head()
car_data.count()
car_data = pd.read_csv(car_data_url, header=None, names = ['Cost','Maintenance','# Of Doors','# Of People','Lug Boot Size','Safety Rating','Class Distribution'])
car_data.head()
car_data.count()
car_data.isna().sum()
###Output
_____no_output_____
###Markdown
Stretch Goals - Other types and sources of data

Not all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.

If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.

Overall, in the course of learning data science you will deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.

How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.

One last major source of data is APIs: https://github.com/toddmotto/public-apis

API stands for Application Programming Interface, and while it originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.

*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. Image, text, or (public) APIs are probably more tractable - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
###Code
#I was trying to import a csv file into Google Drive and import the data.
#I found this code at: https://www.geeksforgeeks.org/working-csv-files-python/
#I couldn't get it to work in CoLab or Geany.
#Same error message both times: it did NOT recognize the file named in filename = "forestfires.csv"
# importing csv module
import csv
# csv file name
filename = "forestfires.csv"
# initializing the titles and rows list
fields = []
rows = []
# reading csv file
with open(filename, 'r') as csvfile:
    # creating a csv reader object
    csvreader = csv.reader(csvfile)

    # extracting field names through first row
    # (in Python 3 use the built-in next(); csvreader.next() only works in Python 2)
    fields = next(csvreader)

    # extracting each data row one by one
    for row in csvreader:
        rows.append(row)

    # get total number of rows
    print("Total no. of rows: %d" % (csvreader.line_num))

# printing the field names
print('Field names: ' + ', '.join(field for field in fields))

# printing first 5 rows
print('\nFirst 5 rows are:\n')
for row in rows[:5]:
    # parsing each column of a row
    for col in row:
        print("%10s" % col, end=' ')
    print('\n')
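# The same file can be read in one line with pandas (a sketch - this assumes
# forestfires.csv has actually been uploaded to the notebook's working directory):
import pandas as pd
fires = pd.read_csv(filename)
fires.head()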
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
#imported a test dataset from UCI showing the odds of different poker hands being dealt.
#I opted to import the dataset using the pd.read_csv Pandas function shown in class today.
#Initially, the first instance of data was being shown as attribute headers, using "header=None" solved this small issue.
import pandas as pd
card_data_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/poker/poker-hand-testing.data"
card_data = pd.read_csv(card_data_url, header=None)
card_data.head(5)
#calculating the sum of NAN values in our data; our data does not seem to contain any missing values - this will make our life easier.
card_data.isna().sum()
#The card suit corresponds to one of the four standard suits in a poker deck; likewise for rank.
#Converted the column headers into categorical labels in order to improve readability.
#The attributes alternate between "card suit" and "card rank"; the 11th attribute is the "hand" the player was dealt.
card_data = card_data.rename({0: "suit", 1: "rank", 2: "suit", 3: "rank", 4: "suit", 5: "rank", 6: "suit", 7: "rank", 8: "suit", 9: "rank", 10: "hand"}, axis = "columns")
card_data.head()
#I converted the numeric data under the attribute "hand" into categorical labels to illustrate the mapping; in practice it would be preferable to keep the numeric codes, since they are easier for a machine learning model to work with.
import numpy as np
hand_names = {"hand": {0: "nothing", 1: "1 pair", 2: "2 pairs", 3: "3 of a kind", 4: "Straight", 5: "flush", 6: "full house", 7: "four of a kind", 8: "straight flush", 9: "royal flush"}}
card_data.replace(hand_names, inplace=True)
card_data.head()
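# Note: the rename above gives several columns the same label ("suit"/"rank"),
# which makes selecting a single column awkward.  A sketch of an alternative with
# unique names (assuming the same suit/rank column order as the raw file):
unique_names = [f'{kind}{i}' for i in range(1, 6) for kind in ('suit', 'rank')] + ['hand']
card_data_unique = pd.read_csv(card_data_url, header=None, names=unique_names)
card_data_unique.head()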
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - From a URL (github or otherwise) - CSV upload method - !wget method- "Clean" a dataset using common Python libraries - Removing NaN values "Data Imputation"- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot (if we have time) Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
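# One possible answer to the exercise above (a sketch - the names below are
# lowercased versions of the 30 codebook entries, and flag_data_url comes from
# the lecture cells earlier in this notebook):
flag_columns = ['name', 'landmass', 'zone', 'area', 'population', 'language',
                'religion', 'bars', 'stripes', 'colours', 'red', 'green', 'blue',
                'gold', 'white', 'black', 'orange', 'mainhue', 'circles', 'crosses',
                'saltires', 'quarters', 'sunstars', 'crescent', 'triangle', 'icon',
                'animate', 'text', 'topleft', 'botright']
flag_data = pd.read_csv(flag_data_url, header=None, names=flag_columns)
flag_data.head()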
###Output
_____no_output_____
###Markdown
Steps of Loading and Exploring a Dataset:- Find a dataset that looks interesting- Learn what you can about it - What's in it? - How many rows and columns? - What types of variables?- Look at the raw contents of the file- Load it into your workspace (notebook) - Handle any challenges with headers - Handle any problems with missing values- Then you can start to explore the data - Look at the summary statistics - Look at counts of different categories - Make some plots to look at the distribution of the data 3 ways of loading a dataset From its URL
###Code
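# Sketch: load a dataset straight from its URL (here the flag data used above).
import pandas as pd
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
df_from_url = pd.read_csv(url, header=None)
df_from_url.head()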
###Output
_____no_output_____
###Markdown
From a local file
###Code
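# Sketch: load from a local file.  In Colab you would first upload it, e.g.:
#   from google.colab import files
#   uploaded = files.upload()
# and then read it by the name it was uploaded under (assumed here to be flag.data).
import pandas as pd
df_local = pd.read_csv('flag.data', header=None)
df_local.head()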
###Output
_____no_output_____
###Markdown
Using the `!wget` command
###Code
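# Sketch: download the file with wget, then read the local copy it leaves behind.
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
import pandas as pd
df_wget = pd.read_csv('flag.data', header=None)
df_wget.head()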
###Output
_____no_output_____
###Markdown
Part 2 - Deal with Missing Values Diagnose Missing Values. Let's use the Adult Dataset from UCI.
###Code
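# Sketch: the Adult dataset has no header row and codes missing values as ' ?',
# so tell read_csv about both up front (column names from the UCI description).
import pandas as pd
adult_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'
adult_cols = ['age', 'workclass', 'fnlwgt', 'education', 'education-num',
              'marital-status', 'occupation', 'relationship', 'race', 'sex',
              'capital-gain', 'capital-loss', 'hours-per-week', 'native-country',
              'income']
adult = pd.read_csv(adult_url, header=None, names=adult_cols, na_values=[' ?'])
adult.isna().sum()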
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
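# Sketch: a simple imputation on the adult frame loaded above - fill missing
# categorical values with the column mode and missing numeric values with the median.
for col in adult.columns:
    if adult[col].dtype == object:
        adult[col] = adult[col].fillna(adult[col].mode()[0])
    else:
        adult[col] = adult[col].fillna(adult[col].median())
adult.isna().sum().sum()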
###Output
_____no_output_____
###Markdown
Part 3 - Explore the Dataset: Look at "Summary Statistics" Numeric
###Code
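# Sketch: describe() summarises the numeric columns of the adult frame by default.
adult.describe()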
###Output
_____no_output_____
###Markdown
Non-Numeric
###Code
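# Sketch: ask describe() for the object (string) columns instead.
adult.describe(include=['object'])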
###Output
_____no_output_____
###Markdown
Look at Categorical Values Part 4 - Basic Visualizations (using the Pandas Library) Histogram
###Code
# Pandas Histogram
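# Sketch: histogram of a numeric column (adult comes from the cells above).
adult['age'].hist(bins=20)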
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
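# Sketch: kernel density estimate of the same column (uses scipy under the hood).
adult['age'].plot.kde()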
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
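# Sketch: scatter plot of two numeric columns from the adult frame.
adult.plot.scatter(x='age', y='hours-per-week', alpha=0.3)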
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
#Get the data url
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/car/car.data"
#Read the data url
data = pd.read_csv(url)
print(data)
#Let's modify the row indexes
datarow = data.rename({0:"zero", 1:"one", 2:"two", 3:"three"}, axis=0)
print(datarow.head())
#Let's modify the column indexes to test the function
datacolumn= data.rename({"vhigh": 0, "vhigh.1":1, "small":3, "low":4, "unacc":5}, axis = 1)
print(datacolumn.head())
data.count()
#Let's give proper tags to the header based on the additional database file information
data2 = pd.read_csv(url, header=None, names=['Buying','Maintenance','Doors','Persons','Lug_Boot','Safety', "Class_Value"])
data2.head()
#First, replace any '?' placeholders with NaN so missing values are easy to count
data3 = data2.replace('?', np.NaN)
data3.head()
data3.isnull().sum()
#The data is pretty clean
#Now let's code the text values into numerical values
#class values: unacc:0, acc:1, good:2, vgood:3
#buying: vhigh:3, high:2, med:1, low:0.
#maintenance: vhigh:3, high:2, med:1, low:0.
#doors: 2, 3, 4, 5more:5.
#persons: 2, 4, more:5.
#lug_boot: small:0, med:1, big:2.
#safety: low:0, med:1, high:2.
#Swap the string values with numerical values using the map function
newclass = {'unacc':0, 'acc':1, 'good':2, 'vgood':3};
newbuying= {'vhigh':3, 'high':2, 'med':1, 'low':0};
newmaintenance={'vhigh':3, 'high':2, 'med':1, 'low':0};
newdoors={'5more':5};
newpersons={2:2, 4:4,'more':5};
newlugboot={'small':0, "med":1, 'big':2};
newsafety={"low":0, "med":1, "high":2}
#Using the map function
data3['Class_Value']= data3['Class_Value'].map(newclass);
data3['Buying']= data3['Buying'].map(newbuying);
data3['Maintenance']= data3['Maintenance'].map(newmaintenance);
#data3['Doors']= data2['Doors'].map(newdoors);
#data3['Persons']= data3['Persons'].map(newpersons);
data3['Safety']= data3['Safety'].map(newsafety);
data3['Lug_Boot']= data3['Lug_Boot'].map(newlugboot);
#Using an if condition for the Doors and Persons columns
#(.loc avoids pandas' chained-assignment pitfall)
for x in range(0, len(data2["Doors"])):
    if data2['Doors'][x] == '5more':
        data3.loc[x, 'Doors'] = 5
    if data2['Persons'][x] == 'more':
        data3.loc[x, "Persons"] = 5
#Print the data3 dataframe
print(data3)
#Count the NaN Values
data3.isnull().sum()
data3['Doors'].head()
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
import pandas as pd
import numpy as np
# Loading CSV File here
df = pd.read_csv('https://raw.githubusercontent.com/TheJoys2019/DS-Unit-1-Sprint-1-Dealing-With-Data/master/LA%20Citations%2010k.csv')
# Using head to test if file uploaded properly, looks good here
df.head()
# Finding out how many rows/columns
df.shape
# Trying to find any NaN values
df.isna().sum()
# Trying to see if we have any null values.
df.isnull().sum()
# Based on the analysis above, several columns contain an extremely high number of NaN values,
# so it's in our best interest to remove them from the dataset.
df = df.drop(["Meter Id", "Marked Time", "VIN", "Plate Expiry Date", "Make", "Body Style", "Color", "Route", "Fine amount"], axis=1)
df.isnull().sum()
df.head(15)
# Drop any remaining rows that still contain a null value
df = df[~(df.isnull().any(axis=1))]
print(df.shape)
df.head()
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 9994 entries, 0 to 9998
Data columns (total 10 columns):
Ticket number 9994 non-null int64
Issue Date 9994 non-null object
Issue time 9994 non-null float64
RP State Plate 9994 non-null object
Location 9994 non-null object
Agency 9994 non-null int64
Violation code 9994 non-null object
Violation Description 9994 non-null object
Latitude 9994 non-null float64
Longitude 9994 non-null float64
dtypes: float64(3), int64(2), object(5)
memory usage: 858.9+ KB
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
b_cancer = 'https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data'
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data
import pandas as pd
bc_data = pd.read_csv(b_cancer, header=None)
bc_data.head()
bc_data.count()
bc_data.isna().sum() # isna() reports nothing missing - but note this dataset codes missing Bare Nuclei values as '?', so they don't show up here.
'''
Attribute Information:
1. Sample code number id number
2. Clump Thickness 1 - 10
3. Uniformity of Cell Size 1 - 10
4. Uniformity of Cell Shape 1 - 10
5. Marginal Adhesion 1 - 10
6. Single Epithelial Cell Size 1 - 10
7. Bare Nuclei 1 - 10
8. Bland Chromatin 1 - 10
9. Normal Nucleoli 1 - 10
10. Mitoses 1 - 10
11. Class: 2 for benign, 4 for malignant
'''
col_names = ['code_number', 'clump_thickness',
'cell_size_uniformity', 'cell_shape_uniformity',
'marginal_adhesion', 'single_ep',
'bare_nuclei', 'bland_chromatin',
'normal_nucleoli', 'mitoses',
'class']
bc_data = pd.read_csv(b_cancer, header=None, names=col_names)
bc_data.head()
bc_data.isna().sum().sum() # Verifying that no data is missing.
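# Follow-up to the note above (a sketch): count the '?' placeholders in bare_nuclei.
(bc_data['bare_nuclei'] == '?').sum()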
income_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'
income_data = pd.read_csv(income_url, header=None)
income_data.head()
'''
Attribute Information:
age: continuous.
workclass: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
fnlwgt: continuous.
education: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
education-num: continuous.
marital-status: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse.
occupation: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces.
relationship: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
race: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.
sex: Female, Male.
capital-gain: continuous.
capital-loss: continuous.
hours-per-week: continuous.
native-country: United-States, Cambodia, England, Puerto-Rico, Canada, Germany,
Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras,
Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France,
Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua,
Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
'''
income_col_names = ['age', 'work_class', 'fnlwgt', 'education', 'education_num',
'marital_status', 'occupation', 'relationship', 'race', 'sex',
'capital_gain', 'capital_loss', 'hours_per_week', 'native_country',
'income']
income_data = pd.read_csv(income_url, header=None, names=income_col_names)
income_data.head()
income_data.isna().sum()
income_data.iloc[14]
income_data = pd.read_csv(income_url, header=None, names=income_col_names, na_values=[' ?'])
income_data.isna().sum().sum()
income_data.head(15)
import numpy as np
income_data = pd.read_csv(income_url, header=None, names=income_col_names)
income_data.replace(' ?', np.nan, inplace=True)
income_data.isna().sum().sum()
income_data.isna().sum()
income_data.info()
income_data.shape
income_data_dropped = income_data.dropna()
income_data_dropped.shape
###Output
_____no_output_____
###Markdown
Stretch Goals - Other types and sources of dataNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.One last major source of data is APIs: https://github.com/toddmotto/public-apisAPI stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. I suggset image, text, or (public) API - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
###Code
import requests
dog_api_response = requests.get('https://dog.ceo/api/breeds/image/random')
print(dog_api_response.status_code)
print(dog_api_response.content)
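# The response body is JSON, so requests can decode it and hand back the image URL.
dog_api_response.json()['message']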
###Output
b'{"status":"success","message":"https:\\/\\/images.dog.ceo\\/breeds\\/pembroke\\/n02113023_1380.jpg"}'
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - From a URL (github or otherwise) - CSV upload method - !wget method- "Clean" a dataset using common Python libraries - Removing NaN values "Data Imputation"- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot (if we have time) Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/index.php- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
###Output
_____no_output_____
###Markdown
Steps of Loading and Exploring a Dataset:- Find a dataset that looks interesting- Learn what you can about it - What's in it? - How many rows and columns? - What types of variables?- Look at the raw contents of the file- Load it into your workspace (notebook) - Handle any challenges with headers - Handle any problems with missing values- Then you can start to explore the data - Look at the summary statistics - Look at counts of different categories - Make some plots to look at the distribution of the data 3 ways of loading a dataset From its URL
###Code
###Output
_____no_output_____
###Markdown
From a local file
###Code
###Output
_____no_output_____
###Markdown
Using the `!wget` command
###Code
###Output
_____no_output_____
###Markdown
Part 2 - Deal with Missing Values Diagnose Missing Values. Let's use the Adult Dataset from UCI.
###Code
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
###Output
_____no_output_____
###Markdown
Part 3 - Explore the Dataset: Look at "Summary Statistics" Numeric
###Code
###Output
_____no_output_____
###Markdown
Non-Numeric
###Code
###Output
_____no_output_____
###Markdown
Look at Categorical Values Part 4 - Basic Visualizations (using the Pandas Library) Histogram
###Code
# Pandas Histogram
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/soybean/soybean-large.names
# Use attribute information to get class names
soy_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/soybean/soybean-large.data'
soy = pd.read_csv(soy_data_url, header = None)
columns = [ 'class-name','date','plant-stand','precip','temp','hail','crop-hist','area-damaged','severity','seed-tmt',
'germination','plant-growth','leaves','leafspots-halo','leafspots-marg','leafspot-size','leaf-shread',
'leaf-malf','leaf-mild','stem','lodging','stem-cankers','canker-lesion','fruiting-bodies','external decay',
'mycelium','int-discolor','sclerotia','fruit-pods','fruit spots','seed','mold-growth','seed-discolor',
'seed-size','shriveling','roots' ]
soy.columns = columns
print(soy.head())
print(soy.shape)
soy.isna().sum()
# From looking at the data, we see that NA values are coded as '?'.
# Replace these values with NaN
soy_with_nas = soy.replace('?', np.nan)
soy_with_nas.isna().sum()
# To see more rows and columns when you print dataframes
# (the old 'display.height' option has been removed from recent pandas versions, so it is not set here)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
print(soy_with_nas.head(400))
soy_with_nas.dtypes
# Looks like everything here is an object (categorical), except for leaves which is a number.
# Let's look at the data of leaves to see if it is lopsided.
# If it's lopsided, we can use median to replace missing values, if not, we can use mean.
print('Values unique in leaves column:', soy_with_nas.leaves.unique()) # Print all unique values of a column
plt.hist(soy_with_nas.leaves, 2)
soy_with_nas.leaves
# Since there are only two unique values in leaves, that column is categorical as well.
# Maximum missing values are 41 out of 307 rows (13%).
# Since every column is categorical, we will replace missing values with the mode (most frequent value) of each column.
soy_temp = soy_with_nas.copy()
print('Shape of dataframe before dropping rows:', soy_temp.shape)
soy_after_dropna_rows = soy_temp.dropna()
print('Shape of dataframe after dropping rows:', soy_after_dropna_rows.shape)
soy_temp = soy_with_nas.copy()
print('Shape of dataframe before dropping cols:', soy_temp.shape)
soy_after_dropna_cols = soy_temp.dropna(axis = 1)
print('Shape of dataframe after dropping cols:', soy_after_dropna_cols.shape)
# So we only have 2 columns left after dropping columns - won't work !!
# With rows, we still lost 13% of rows.
# Let's replace missing values with the median of each column.
def replace_missing(df):
    """Fill every column's missing values with that column's mode."""
    df_copy = df.copy()
    for col in df.columns.values:
        df_copy[col].fillna(df[col].mode()[0], inplace=True)
    return df_copy
soy_after_replacement = replace_missing(soy_with_nas)
print('Number of NAs after replacement:', soy_after_replacement.isna().sum().sum())
print(soy_after_replacement.head(400))
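# Quick sanity check on the cleaned frame (a sketch): rows per disease class.
soy_after_replacement['class-name'].value_counts()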
###Output
Number of NAs after replacement: 0
class-name date plant-stand precip temp hail crop-hist area-damaged severity seed-tmt germination plant-growth leaves leafspots-halo leafspots-marg leafspot-size leaf-shread leaf-malf leaf-mild stem lodging stem-cankers canker-lesion fruiting-bodies external decay mycelium int-discolor sclerotia fruit-pods fruit spots seed mold-growth seed-discolor seed-size shriveling roots
0 diaporthe-stem-canker 6 0 2 1 0 1 1 1 0 0 1 1 0 2 2 0 0 0 1 1 3 1 1 1 0 0 0 0 4 0 0 0 0 0 0
1 diaporthe-stem-canker 4 0 2 1 0 2 0 2 1 1 1 1 0 2 2 0 0 0 1 0 3 1 1 1 0 0 0 0 4 0 0 0 0 0 0
2 diaporthe-stem-canker 3 0 2 1 0 1 0 2 1 2 1 1 0 2 2 0 0 0 1 0 3 0 1 1 0 0 0 0 4 0 0 0 0 0 0
3 diaporthe-stem-canker 3 0 2 1 0 1 0 2 0 1 1 1 0 2 2 0 0 0 1 0 3 0 1 1 0 0 0 0 4 0 0 0 0 0 0
4 diaporthe-stem-canker 6 0 2 1 0 2 0 1 0 2 1 1 0 2 2 0 0 0 1 0 3 1 1 1 0 0 0 0 4 0 0 0 0 0 0
5 diaporthe-stem-canker 5 0 2 1 0 3 0 1 0 1 1 1 0 2 2 0 0 0 1 0 3 0 1 1 0 0 0 0 4 0 0 0 0 0 0
6 diaporthe-stem-canker 5 0 2 1 0 2 0 1 1 0 1 1 0 2 2 0 0 0 1 1 3 1 1 1 0 0 0 0 4 0 0 0 0 0 0
7 diaporthe-stem-canker 4 0 2 1 1 1 0 1 0 2 1 1 0 2 2 0 0 0 1 0 3 1 1 1 0 0 0 0 4 0 0 0 0 0 0
8 diaporthe-stem-canker 6 0 2 1 0 3 0 1 1 1 1 1 0 2 2 0 0 0 1 0 3 1 1 1 0 0 0 0 4 0 0 0 0 0 0
9 diaporthe-stem-canker 4 0 2 1 0 2 0 2 0 2 1 1 0 2 2 0 0 0 1 0 3 1 1 1 0 0 0 0 4 0 0 0 0 0 0
10 charcoal-rot 6 0 0 2 0 1 3 1 1 0 1 1 0 2 2 0 0 0 1 0 0 3 0 0 0 2 1 0 4 0 0 0 0 0 0
11 charcoal-rot 4 0 0 1 1 1 3 1 1 1 1 1 0 2 2 0 0 0 1 1 0 3 0 0 0 2 1 0 4 0 0 0 0 0 0
12 charcoal-rot 3 0 0 1 0 1 2 1 0 0 1 1 0 2 2 0 0 0 1 0 0 3 0 0 0 2 1 0 4 0 0 0 0 0 0
13 charcoal-rot 6 0 0 1 1 3 3 1 1 0 1 1 0 2 2 0 0 0 1 0 0 3 0 0 0 2 1 0 4 0 0 0 0 0 0
14 charcoal-rot 6 0 0 2 0 1 3 1 1 1 1 1 0 2 2 0 0 0 1 0 0 3 0 0 0 2 1 0 4 0 0 0 0 0 0
15 charcoal-rot 5 0 0 2 1 3 3 1 1 2 1 1 0 2 2 0 0 0 1 0 0 3 0 0 0 2 1 0 4 0 0 0 0 0 0
16 charcoal-rot 6 0 0 2 1 0 2 1 0 0 1 1 0 2 2 0 0 0 1 1 0 3 0 0 0 2 1 0 4 0 0 0 0 0 0
17 charcoal-rot 4 0 0 1 0 2 2 1 0 1 1 1 0 2 2 0 0 0 1 0 0 3 0 0 0 2 1 0 4 0 0 0 0 0 0
18 charcoal-rot 3 0 0 2 0 2 2 1 0 2 1 1 0 2 2 0 0 0 1 0 0 3 0 0 0 2 1 0 4 0 0 0 0 0 0
19 charcoal-rot 5 0 0 2 1 2 2 1 0 2 1 1 0 2 2 0 0 0 1 0 0 3 0 0 0 2 1 0 4 0 0 0 0 0 0
20 rhizoctonia-root-rot 1 1 2 0 0 2 1 2 0 2 1 0 0 2 2 0 0 0 1 0 1 1 0 1 1 0 0 3 4 0 0 0 0 0 0
21 rhizoctonia-root-rot 1 1 2 0 0 1 1 2 0 1 1 0 0 2 2 0 0 0 1 0 1 1 0 1 0 0 0 3 4 0 0 0 0 0 0
22 rhizoctonia-root-rot 3 0 2 0 1 3 1 2 0 1 1 0 0 2 2 0 0 0 1 1 1 1 0 1 1 0 0 3 4 0 0 0 0 0 0
23 rhizoctonia-root-rot 0 1 2 0 0 0 1 1 1 2 1 0 0 2 2 0 0 0 1 0 1 1 0 1 0 0 0 3 4 0 0 0 0 0 0
24 rhizoctonia-root-rot 0 1 2 0 0 1 1 2 1 2 1 0 0 2 2 0 0 0 1 0 1 1 0 1 0 0 0 3 4 0 0 0 0 0 0
25 rhizoctonia-root-rot 1 1 2 0 0 3 1 2 0 2 1 0 0 2 2 0 0 0 1 0 1 1 0 1 0 0 0 3 4 0 0 0 0 0 0
26 rhizoctonia-root-rot 1 1 2 0 0 0 1 1 0 1 1 0 0 2 2 0 0 0 1 0 1 1 0 1 0 0 0 3 4 0 0 0 0 0 0
27 rhizoctonia-root-rot 2 1 2 0 0 2 1 1 0 1 1 0 0 2 2 0 0 0 1 0 1 1 0 1 0 0 0 3 4 0 0 0 0 0 0
28 rhizoctonia-root-rot 1 1 2 0 0 1 1 2 0 2 1 0 0 2 2 0 0 0 1 0 1 1 0 1 0 0 0 3 4 0 0 0 0 0 0
29 rhizoctonia-root-rot 2 1 2 0 0 1 1 2 0 2 1 0 0 2 2 0 0 0 1 0 1 1 0 1 0 0 0 3 4 0 0 0 0 0 0
30 phytophthora-rot 0 1 2 1 1 1 1 1 0 0 1 1 0 2 2 0 0 0 1 0 1 2 0 1 0 0 0 3 4 0 0 0 0 0 0
31 phytophthora-rot 1 1 2 1 0 3 1 1 0 1 1 1 0 2 2 0 0 0 1 0 2 2 0 0 0 0 0 0 0 0 0 0 0 0 1
32 phytophthora-rot 2 1 2 2 0 2 1 1 0 1 1 1 2 0 1 0 0 0 1 0 3 2 0 0 0 0 0 0 0 0 0 0 0 0 1
33 phytophthora-rot 1 1 2 0 0 2 1 2 1 1 1 1 0 2 2 0 0 0 1 0 2 2 0 0 0 0 0 3 4 0 0 0 0 0 0
34 phytophthora-rot 2 1 2 2 0 2 1 1 0 1 1 1 2 0 1 0 0 0 1 0 2 2 0 0 0 0 0 0 0 0 0 0 0 0 1
35 phytophthora-rot 3 1 2 1 0 2 1 1 0 1 1 1 2 0 1 0 0 0 1 0 3 2 0 0 0 0 0 0 0 0 0 0 0 0 1
36 phytophthora-rot 0 1 1 1 0 1 1 1 0 0 1 1 0 2 2 0 0 0 1 0 1 2 0 0 0 0 0 3 4 0 0 0 0 0 0
37 phytophthora-rot 3 1 2 0 0 2 1 2 1 1 1 1 0 2 2 0 0 0 1 0 2 2 0 0 0 0 0 3 4 0 0 0 0 0 0
38 phytophthora-rot 2 1 1 1 0 0 1 1 0 1 1 1 0 2 2 0 0 0 1 0 2 2 0 0 0 0 0 0 0 0 0 0 0 0 1
39 phytophthora-rot 2 1 2 0 0 1 1 2 0 1 1 1 0 2 2 0 0 0 1 0 1 2 0 0 0 0 0 3 4 0 0 0 0 0 0
40 phytophthora-rot 2 1 2 1 0 1 1 1 0 1 1 1 0 2 2 0 0 0 1 0 2 2 0 0 0 0 0 0 0 0 0 0 0 0 1
41 phytophthora-rot 1 1 2 1 0 1 1 1 0 1 1 1 2 0 1 0 0 0 1 0 1 2 0 0 0 0 0 0 0 0 0 0 0 0 1
42 phytophthora-rot 0 1 2 1 0 3 1 1 0 0 1 1 0 2 2 0 0 0 1 0 1 2 0 0 0 0 0 3 4 0 0 0 0 0 0
43 phytophthora-rot 0 1 1 1 1 2 1 2 1 0 1 1 0 2 2 0 0 0 1 1 2 2 0 1 0 0 0 3 4 0 0 0 0 0 0
44 phytophthora-rot 3 1 2 0 0 1 1 2 1 0 1 1 0 2 2 0 0 0 1 0 2 2 0 0 0 0 0 3 4 0 0 0 0 0 0
45 phytophthora-rot 2 1 2 2 0 3 1 1 0 1 1 1 2 0 1 0 0 0 1 0 3 2 0 0 0 0 0 0 0 0 0 0 0 0 1
46 phytophthora-rot 0 1 2 1 0 2 1 1 0 1 1 1 0 2 2 0 0 0 1 0 1 2 0 0 0 0 0 3 4 0 0 0 0 0 0
47 phytophthora-rot 2 1 1 2 0 2 1 1 0 1 1 1 0 2 2 0 0 0 1 0 2 2 0 0 0 0 0 0 0 0 0 0 0 0 1
48 phytophthora-rot 2 1 2 1 1 1 1 2 0 2 1 1 0 2 2 0 0 0 1 0 1 2 0 1 0 0 0 3 4 0 0 0 0 0 0
49 phytophthora-rot 0 1 2 1 0 3 1 1 0 2 1 1 0 2 2 0 0 0 1 0 1 2 0 0 0 0 0 3 4 0 0 0 0 0 0
50 phytophthora-rot 1 1 2 1 0 0 1 2 1 1 1 1 0 2 2 0 0 0 1 0 2 2 0 0 0 0 0 3 4 0 0 0 0 0 0
51 phytophthora-rot 1 1 2 1 0 0 1 1 0 1 1 1 0 2 2 0 0 0 1 0 1 2 0 0 0 0 0 0 0 0 0 0 0 0 1
52 phytophthora-rot 3 1 2 1 0 1 1 1 0 1 1 1 2 0 1 0 0 0 1 0 3 2 0 0 0 0 0 0 0 0 0 0 0 0 1
53 phytophthora-rot 2 1 2 1 0 1 1 1 0 1 1 1 0 2 2 0 0 0 1 0 3 2 0 0 0 0 0 0 0 0 0 0 0 0 1
54 phytophthora-rot 3 1 2 2 0 2 1 1 0 1 1 1 2 0 1 0 0 0 1 0 2 2 0 0 0 0 0 0 0 0 0 0 0 0 1
55 phytophthora-rot 1 1 2 1 1 3 1 2 0 1 1 1 0 2 2 0 0 0 1 1 1 2 0 1 0 0 0 3 4 0 0 0 0 0 0
56 phytophthora-rot 3 1 1 1 0 3 1 1 0 1 1 1 0 2 2 0 0 0 1 0 2 2 0 0 0 0 0 0 0 0 0 0 0 0 1
57 phytophthora-rot 2 1 2 2 0 1 1 1 0 1 1 1 2 0 1 0 0 0 1 0 2 2 0 0 0 0 0 0 0 0 0 0 0 0 1
58 phytophthora-rot 3 1 1 2 0 2 1 1 0 1 1 1 0 2 2 0 0 0 1 0 2 2 0 0 0 0 0 0 0 0 0 0 0 0 1
59 phytophthora-rot 1 1 2 2 0 1 1 1 0 1 1 1 2 0 1 0 0 0 1 0 1 2 0 0 0 0 0 0 0 0 0 0 0 0 1
60 phytophthora-rot 2 1 2 2 0 3 1 1 0 1 1 1 0 2 2 0 0 0 1 0 2 2 0 0 0 0 0 0 0 0 0 0 0 0 1
61 phytophthora-rot 3 1 1 1 0 0 1 1 0 1 1 1 0 2 2 0 0 0 1 0 3 2 0 0 0 0 0 0 0 0 0 0 0 0 1
62 phytophthora-rot 2 1 2 0 0 1 1 2 0 0 1 1 0 2 2 0 0 0 1 0 1 2 0 0 0 0 0 3 4 0 0 0 0 0 0
63 phytophthora-rot 3 1 1 1 0 1 1 1 0 1 1 1 0 2 2 0 0 0 1 0 3 2 0 0 0 0 0 0 0 0 0 0 0 0 1
64 phytophthora-rot 2 1 2 2 0 1 1 1 0 1 1 1 2 0 1 0 0 0 1 0 1 2 0 0 0 0 0 0 0 0 0 0 0 0 1
65 phytophthora-rot 1 1 2 0 0 0 1 2 1 0 1 1 0 2 2 0 0 0 1 0 2 2 0 0 0 0 0 3 4 0 0 0 0 0 0
66 phytophthora-rot 3 1 2 1 0 2 1 1 0 1 1 1 2 0 1 0 0 0 1 0 2 2 0 0 0 0 0 0 0 0 0 0 0 0 1
67 phytophthora-rot 3 1 2 1 0 3 1 1 0 1 1 1 2 0 1 0 0 0 1 0 3 2 0 0 0 0 0 0 0 0 0 0 0 0 1
68 phytophthora-rot 3 1 1 0 0 2 1 2 1 2 1 1 0 2 2 0 0 0 1 0 2 2 0 0 0 0 0 3 4 0 0 0 0 0 0
69 phytophthora-rot 3 1 2 2 0 2 1 1 0 1 1 1 2 0 1 0 0 0 1 0 3 2 0 0 0 0 0 0 0 0 0 0 0 0 1
70 brown-stem-rot 4 0 0 1 0 1 3 1 1 2 0 1 0 2 2 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
71 brown-stem-rot 4 0 0 1 0 1 3 1 1 2 0 1 0 2 2 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
72 brown-stem-rot 3 1 0 0 0 3 0 1 1 2 1 0 0 2 2 0 0 0 1 0 0 3 0 0 0 1 0 0 4 0 0 0 0 0 0
73 brown-stem-rot 5 0 0 2 0 1 3 1 1 2 0 1 0 2 2 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
74 brown-stem-rot 5 0 0 2 0 2 3 1 1 1 0 1 0 2 2 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
75 brown-stem-rot 4 0 0 1 0 3 2 1 0 1 0 1 0 2 2 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
76 brown-stem-rot 5 0 0 1 1 3 3 1 0 2 1 1 2 0 1 0 0 0 1 1 0 3 0 0 0 1 0 0 4 0 0 0 0 0 0
77 brown-stem-rot 6 0 1 1 1 2 0 1 1 0 1 0 0 2 1 0 0 0 1 1 0 3 0 0 0 1 0 0 4 0 0 0 0 0 0
78 brown-stem-rot 5 1 0 0 0 3 2 1 0 0 1 1 0 2 2 0 0 0 1 1 0 3 0 0 0 1 0 0 4 0 0 0 0 0 0
79 brown-stem-rot 5 1 0 1 0 1 3 1 1 0 1 1 2 0 1 0 0 0 1 1 0 3 0 0 0 1 0 0 4 0 0 0 0 0 0
80 brown-stem-rot 4 0 1 0 1 2 3 1 1 2 1 0 0 2 2 0 0 0 1 1 0 3 0 0 0 1 0 0 4 0 0 0 0 0 0
81 brown-stem-rot 4 1 0 0 0 3 2 1 1 1 1 1 2 0 1 0 0 0 1 1 0 3 0 0 0 1 0 0 4 0 0 0 0 0 0
82 brown-stem-rot 4 0 0 1 0 2 0 1 1 0 0 1 0 2 2 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
83 brown-stem-rot 3 1 0 0 0 2 0 2 0 1 1 1 2 0 1 0 0 0 1 0 0 3 0 0 0 1 0 0 4 0 0 0 0 0 0
84 brown-stem-rot 5 0 0 1 0 3 2 1 0 1 0 1 0 2 2 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
85 brown-stem-rot 4 0 0 1 0 3 3 1 1 0 0 1 0 2 2 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
86 brown-stem-rot 4 0 0 1 0 3 2 1 0 2 0 1 0 2 2 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
87 brown-stem-rot 4 0 0 1 0 1 2 1 1 2 0 1 0 2 2 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
88 brown-stem-rot 4 0 0 1 0 1 2 1 0 0 0 1 0 2 2 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
89 brown-stem-rot 3 0 0 1 0 3 2 1 0 0 0 1 0 2 2 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
90 powdery-mildew 5 0 0 1 1 3 3 1 0 1 0 1 0 2 2 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
91 powdery-mildew 6 0 1 0 1 0 0 0 1 2 0 1 0 2 2 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
92 powdery-mildew 1 1 0 1 0 3 3 1 2 0 0 1 0 2 2 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
93 powdery-mildew 6 1 1 0 0 2 2 0 1 2 0 1 0 2 2 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
94 powdery-mildew 4 1 1 0 0 2 2 0 2 0 0 1 0 2 2 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
95 powdery-mildew 6 0 0 1 1 1 1 1 0 2 0 1 0 2 2 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
96 powdery-mildew 2 1 1 0 0 2 2 0 0 1 0 1 0 2 2 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
97 powdery-mildew 6 1 0 1 0 1 1 1 1 2 0 1 0 2 2 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
98 powdery-mildew 5 1 0 1 0 1 1 1 0 1 0 1 0 2 2 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
99 powdery-mildew 1 1 0 1 0 1 1 1 2 0 0 1 0 2 2 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
100 downy-mildew 6 0 2 0 1 2 1 0 1 2 0 1 2 0 1 0 1 2 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0
101 downy-mildew 2 0 2 1 1 1 1 1 1 2 0 1 2 0 1 0 1 2 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0
102 downy-mildew 1 0 2 1 1 3 2 1 0 1 0 1 1 0 1 0 0 2 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0
103 downy-mildew 4 1 2 2 0 2 2 1 0 1 0 1 1 0 1 0 1 2 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0
104 downy-mildew 1 0 2 0 1 0 0 1 0 1 0 1 1 0 1 0 0 2 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0
105 downy-mildew 2 1 2 0 0 3 0 1 0 1 0 1 1 0 1 0 0 2 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0
106 downy-mildew 2 1 2 1 0 2 0 1 0 1 0 1 2 0 1 0 0 2 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0
107 downy-mildew 4 1 2 2 0 2 1 0 1 2 0 1 1 0 1 0 0 2 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0
108 downy-mildew 4 1 2 0 0 1 2 1 0 1 0 1 2 0 1 0 0 2 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0
109 downy-mildew 5 1 2 1 0 3 2 1 0 1 0 1 1 0 1 0 0 2 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0
110 brown-spot 1 1 2 2 1 3 3 1 0 2 1 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
111 brown-spot 2 0 2 1 0 2 3 1 1 1 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
112 brown-spot 2 0 2 1 0 2 3 1 1 0 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
113 brown-spot 2 0 2 1 0 1 0 1 2 0 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
114 brown-spot 1 1 2 2 1 3 3 1 1 1 1 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
115 brown-spot 1 1 2 1 0 2 3 1 0 2 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
116 brown-spot 0 1 2 2 1 3 3 1 2 1 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
117 brown-spot 2 0 2 1 0 2 3 1 0 0 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
118 brown-spot 1 0 2 1 0 2 3 1 1 1 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
119 brown-spot 2 1 2 1 0 3 3 1 0 2 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
120 brown-spot 5 0 2 1 0 2 2 1 0 1 0 1 2 0 1 0 0 0 1 0 3 1 1 0 0 0 0 0 0 0 0 0 0 0 0
121 brown-spot 1 1 2 1 0 3 3 1 1 2 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
122 brown-spot 1 0 2 1 0 3 3 1 2 0 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
123 brown-spot 4 0 2 1 0 1 3 1 0 0 0 1 2 0 1 1 0 0 1 0 3 1 1 0 0 0 0 0 0 0 0 0 0 0 0
124 brown-spot 1 0 2 1 0 2 3 1 0 1 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
125 brown-spot 4 1 2 1 0 3 3 1 0 2 0 1 2 0 1 0 0 0 1 0 3 1 1 0 0 0 0 0 0 0 0 0 0 0 0
126 brown-spot 2 0 2 1 0 3 3 1 0 0 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
127 brown-spot 0 1 1 1 1 2 2 0 2 1 1 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
128 brown-spot 1 1 1 1 1 2 0 0 0 2 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
129 brown-spot 1 1 2 1 0 1 0 1 2 2 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
130 brown-spot 1 0 2 1 0 1 3 1 0 0 0 1 2 0 1 0 0 0 1 0 3 1 1 0 0 0 0 0 0 0 0 0 0 0 0
131 brown-spot 2 0 2 1 0 3 3 1 1 1 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
132 brown-spot 3 0 2 1 0 2 3 2 2 1 0 1 2 0 1 0 0 0 1 0 0 3 1 1 0 0 0 0 0 0 0 0 0 0 0
133 brown-spot 2 1 2 2 1 3 1 1 1 0 1 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
134 brown-spot 1 0 2 1 0 2 3 1 2 0 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
135 brown-spot 1 1 2 1 0 2 3 1 0 2 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
136 brown-spot 5 0 2 1 0 1 3 1 0 0 0 1 2 0 1 1 0 0 1 0 3 1 1 0 0 0 0 0 0 0 0 0 0 0 0
137 brown-spot 4 1 1 1 1 2 2 0 0 2 0 1 2 0 1 0 0 0 1 0 3 1 1 0 0 0 0 0 1 0 0 0 0 0 0
138 brown-spot 3 1 2 1 0 1 3 1 0 2 0 1 2 0 1 1 0 0 1 0 3 1 1 0 0 0 0 0 0 0 0 0 0 0 0
139 brown-spot 1 0 2 1 0 3 3 1 0 0 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
140 brown-spot 4 0 2 1 0 2 3 2 1 1 0 1 2 0 1 0 0 0 1 0 0 3 1 1 0 0 0 0 0 0 0 0 0 0 0
141 brown-spot 2 1 2 1 0 2 3 1 0 2 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
142 brown-spot 2 1 1 1 1 0 0 1 1 0 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
143 brown-spot 3 1 2 1 0 3 1 1 0 2 0 1 2 0 1 0 0 0 1 0 3 1 1 0 0 0 0 0 0 0 0 0 0 0 0
144 brown-spot 3 0 2 1 0 3 3 2 2 0 0 1 2 0 1 0 0 0 1 0 0 3 1 1 0 0 0 0 0 0 0 0 0 0 0
145 brown-spot 2 0 2 1 0 2 2 1 0 1 0 1 2 0 1 0 0 0 1 0 3 1 1 0 0 0 0 0 0 0 0 0 0 0 0
146 brown-spot 3 0 2 1 0 3 1 1 0 0 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
147 brown-spot 3 1 2 1 0 3 1 1 0 2 0 1 2 0 1 1 0 0 1 0 3 1 1 0 0 0 0 0 0 0 0 0 0 0 0
148 brown-spot 2 1 2 1 0 3 3 2 2 2 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
149 brown-spot 5 1 2 1 0 3 3 2 0 2 0 1 2 0 1 0 0 0 1 0 0 3 1 1 0 0 0 0 0 0 0 0 0 0 0
150 bacterial-blight 5 0 2 1 1 3 3 1 1 0 0 1 2 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
151 bacterial-blight 4 0 2 2 1 2 3 1 1 1 0 1 2 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
152 bacterial-blight 2 0 1 1 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
153 bacterial-blight 3 0 1 1 0 1 2 0 0 0 0 1 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
154 bacterial-blight 3 0 1 1 0 3 2 0 0 0 0 1 1 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
155 bacterial-blight 3 0 2 1 1 2 1 1 1 0 0 1 2 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
156 bacterial-blight 3 0 1 1 0 1 0 0 0 1 0 1 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
157 bacterial-blight 4 0 2 1 1 0 3 1 1 1 0 1 2 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
158 bacterial-blight 2 0 1 1 0 3 1 0 0 0 0 1 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
159 bacterial-blight 4 1 2 2 1 2 1 1 1 2 0 1 2 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
160 bacterial-pustule 2 1 1 2 0 2 2 0 0 2 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2
161 bacterial-pustule 3 0 2 0 1 2 3 1 1 1 1 1 2 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0
162 bacterial-pustule 2 0 1 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
163 bacterial-pustule 4 1 2 1 0 3 0 1 0 2 0 1 1 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1
164 bacterial-pustule 3 0 2 1 1 1 1 1 0 1 0 1 1 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 1
165 bacterial-pustule 3 1 1 0 0 2 0 0 0 2 0 1 2 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
166 bacterial-pustule 3 0 1 1 1 2 3 0 0 1 1 1 1 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0
167 bacterial-pustule 3 1 2 1 0 0 2 1 0 2 0 1 1 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1
168 bacterial-pustule 4 0 1 1 1 1 3 0 0 1 0 1 1 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0
169 bacterial-pustule 5 1 1 1 0 2 0 0 1 2 0 1 1 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
170 purple-seed-stain 6 0 2 0 1 2 2 0 0 0 0 0 0 2 2 0 0 0 1 1 0 3 0 0 0 0 0 1 1 1 0 1 0 0 0
171 purple-seed-stain 6 0 2 0 0 2 2 0 1 1 0 1 2 0 0 0 0 0 1 0 0 3 0 0 0 0 0 1 1 1 0 1 0 0 0
172 purple-seed-stain 4 0 2 1 1 1 1 0 1 2 0 0 0 2 2 0 0 0 0 0 0 3 0 0 0 0 0 0 0 1 0 1 0 0 0
173 purple-seed-stain 4 0 2 1 1 0 0 0 0 1 0 1 2 0 0 0 0 0 0 1 0 3 0 0 0 0 0 0 0 1 0 1 0 0 0
174 purple-seed-stain 4 0 2 0 0 0 0 0 0 2 0 1 2 0 0 0 0 0 0 0 0 3 0 0 0 0 0 0 0 1 0 1 0 0 0
175 purple-seed-stain 6 0 2 2 0 2 2 0 0 1 0 1 2 0 0 0 0 0 1 0 0 3 0 0 0 0 0 1 1 1 0 1 0 0 0
176 purple-seed-stain 3 0 2 0 1 0 0 0 0 1 0 0 0 2 2 0 0 0 0 1 0 3 0 0 0 0 0 0 0 1 0 1 0 0 0
177 purple-seed-stain 3 0 2 1 1 3 3 0 1 1 0 0 0 2 2 0 0 0 0 0 0 3 0 0 0 0 0 0 0 1 0 1 0 0 0
178 purple-seed-stain 5 0 2 1 0 1 1 0 0 0 0 1 2 0 0 0 0 0 1 0 0 3 0 0 0 0 0 1 1 1 0 1 0 0 0
179 purple-seed-stain 4 0 2 1 0 0 0 0 1 1 0 0 0 2 2 0 0 0 1 0 0 3 0 0 0 0 0 0 0 1 0 1 0 0 0
180 anthracnose 5 1 2 1 0 3 3 1 1 0 0 0 0 2 2 0 0 0 1 0 3 2 0 0 0 0 0 1 2 0 0 0 0 0 0
181 anthracnose 5 1 2 2 1 2 2 0 1 2 0 1 0 2 2 0 0 0 1 0 3 1 1 1 0 0 0 1 2 1 1 1 0 1 0
182 anthracnose 6 0 2 1 0 1 1 1 1 1 0 0 0 2 2 0 0 0 1 0 3 2 1 0 0 0 0 1 2 1 1 0 1 1 0
183 anthracnose 2 1 2 2 1 0 0 1 0 0 1 1 0 2 2 0 0 0 1 0 2 1 0 1 0 0 0 0 0 0 0 0 0 0 0
184 anthracnose 3 0 2 1 0 3 3 1 0 0 1 1 0 2 2 0 0 0 1 0 3 2 1 1 0 0 0 1 2 1 1 1 0 0 0
185 anthracnose 4 1 2 2 1 2 2 1 0 1 1 1 0 2 2 0 0 0 1 0 3 1 1 1 0 0 0 1 2 0 0 0 0 0 0
186 anthracnose 6 0 2 1 0 2 2 1 0 1 0 0 0 2 2 0 0 0 1 0 3 2 1 0 0 0 0 1 2 1 1 0 1 1 0
187 anthracnose 1 0 2 1 0 1 1 1 1 1 1 1 0 2 2 0 0 0 1 0 2 2 0 1 0 0 0 0 0 1 0 1 0 0 0
188 anthracnose 6 1 2 1 0 2 2 1 1 1 0 0 0 2 2 0 0 0 1 0 3 2 1 0 0 0 0 1 2 1 1 0 1 1 0
189 anthracnose 5 0 2 1 0 1 1 1 2 2 1 1 0 2 2 0 0 0 1 0 3 2 1 1 0 0 0 1 2 1 1 1 0 0 0
190 anthracnose 5 1 2 2 1 3 3 0 1 2 1 1 0 2 2 0 0 0 1 1 3 2 1 1 0 0 0 1 2 0 0 0 0 0 0
191 anthracnose 0 0 2 1 0 3 3 1 1 2 1 1 0 2 2 0 0 0 1 0 2 2 0 1 0 0 0 0 0 1 1 0 1 0 0
192 anthracnose 6 0 2 1 0 2 2 0 0 0 1 1 0 2 2 0 0 0 1 0 3 1 1 1 0 0 0 1 2 1 0 1 1 1 0
193 anthracnose 5 1 2 1 0 1 1 1 0 1 0 0 0 2 2 0 0 0 1 0 3 2 0 0 0 0 0 1 2 1 1 0 1 1 0
194 anthracnose 5 0 2 1 0 2 2 1 0 2 0 0 0 2 2 0 0 0 1 0 3 2 1 0 0 0 0 1 2 0 0 0 0 0 0
195 anthracnose 6 1 2 2 1 0 0 1 0 1 0 1 0 2 2 0 0 0 1 0 3 1 1 1 0 0 0 1 2 0 0 0 0 0 0
196 anthracnose 5 0 2 1 0 1 1 1 0 0 0 0 0 2 2 0 0 0 1 0 3 2 1 0 0 0 0 1 2 1 1 0 1 1 0
197 anthracnose 6 1 2 2 1 3 3 0 2 1 0 1 0 2 2 0 0 0 1 1 3 2 1 1 0 0 0 1 2 0 0 0 0 0 0
198 anthracnose 5 1 2 1 0 3 3 1 0 1 0 0 0 2 2 0 0 0 1 0 3 2 0 0 0 0 0 1 2 1 1 0 1 1 0
199 anthracnose 5 1 2 1 0 2 2 1 1 1 0 0 0 2 2 0 0 0 1 0 3 2 0 0 0 0 0 1 2 1 1 0 1 1 0
200 phyllosticta-leaf-spot 3 1 1 1 0 0 2 0 0 2 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
201 phyllosticta-leaf-spot 3 0 0 1 1 0 2 0 0 1 0 1 2 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
202 phyllosticta-leaf-spot 3 1 1 1 0 0 0 0 0 0 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
203 phyllosticta-leaf-spot 3 0 0 1 1 2 0 0 0 0 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
204 phyllosticta-leaf-spot 3 1 1 2 0 3 2 0 1 1 0 1 2 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
205 phyllosticta-leaf-spot 2 0 0 1 1 0 3 0 2 2 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
206 phyllosticta-leaf-spot 1 0 0 2 1 3 2 1 1 1 0 1 2 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
207 phyllosticta-leaf-spot 2 1 1 1 0 2 2 1 1 1 0 1 2 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
208 phyllosticta-leaf-spot 2 0 0 2 1 3 0 1 1 0 1 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
209 phyllosticta-leaf-spot 2 1 1 2 0 3 3 0 2 2 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
210 alternarialeaf-spot 4 1 2 1 0 1 1 1 1 2 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
211 alternarialeaf-spot 4 0 1 1 0 3 3 1 0 1 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
212 alternarialeaf-spot 3 0 2 1 0 0 0 1 0 1 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
213 alternarialeaf-spot 6 0 2 2 0 3 3 0 1 1 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
214 alternarialeaf-spot 6 0 1 1 1 2 2 0 2 2 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0
215 alternarialeaf-spot 5 0 2 2 0 3 3 1 0 1 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
216 alternarialeaf-spot 6 0 1 1 0 3 3 0 1 1 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
217 alternarialeaf-spot 5 1 2 2 0 3 1 0 0 2 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
218 alternarialeaf-spot 6 0 2 2 0 3 3 0 1 1 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
219 alternarialeaf-spot 6 0 2 2 0 3 2 1 1 0 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
220 alternarialeaf-spot 5 0 2 2 0 2 3 0 0 1 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
221 alternarialeaf-spot 4 1 2 1 0 3 0 1 1 2 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
222 alternarialeaf-spot 6 0 2 1 0 1 1 0 1 0 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
223 alternarialeaf-spot 5 0 2 2 0 2 2 1 0 0 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
224 alternarialeaf-spot 5 1 2 1 0 0 0 0 0 2 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
225 alternarialeaf-spot 4 0 2 1 0 2 2 0 0 1 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
226 alternarialeaf-spot 4 0 2 1 0 1 1 1 1 1 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
227 alternarialeaf-spot 5 0 2 1 0 2 1 0 0 0 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
228 alternarialeaf-spot 6 0 2 2 0 3 2 0 1 0 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
229 alternarialeaf-spot 6 1 2 2 0 1 1 0 1 2 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
230 alternarialeaf-spot 5 1 2 2 0 3 1 1 0 2 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
231 alternarialeaf-spot 5 1 2 2 0 3 3 1 1 1 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
232 alternarialeaf-spot 4 1 2 1 0 2 1 0 0 2 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
233 alternarialeaf-spot 6 1 1 2 0 2 2 0 2 0 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0
234 alternarialeaf-spot 4 1 2 1 0 1 2 1 1 2 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
235 alternarialeaf-spot 6 1 2 2 0 2 1 0 1 2 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
236 alternarialeaf-spot 4 1 2 1 0 0 3 0 0 2 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
237 alternarialeaf-spot 4 0 2 2 0 3 3 1 0 0 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
238 alternarialeaf-spot 5 0 2 2 0 2 3 1 0 1 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
239 alternarialeaf-spot 3 0 2 1 0 0 0 1 0 0 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
240 alternarialeaf-spot 5 0 2 1 0 1 2 0 1 0 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
241 alternarialeaf-spot 5 0 2 2 0 1 1 0 1 1 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
242 alternarialeaf-spot 4 0 2 2 0 1 1 1 0 2 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
243 alternarialeaf-spot 5 1 2 1 0 3 3 0 1 2 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
244 alternarialeaf-spot 6 0 2 1 0 2 1 0 0 1 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
245 alternarialeaf-spot 5 0 2 1 0 0 3 0 0 0 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
246 alternarialeaf-spot 6 0 2 1 0 0 3 0 0 1 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
247 alternarialeaf-spot 5 1 2 2 0 2 1 1 0 2 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
248 alternarialeaf-spot 5 0 2 1 0 3 0 0 1 0 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
249 alternarialeaf-spot 6 0 2 1 0 1 2 0 1 1 0 1 2 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
250 frog-eye-leaf-spot 6 0 1 2 0 3 3 0 0 0 0 1 2 0 1 0 0 0 1 0 3 2 1 1 0 0 0 1 2 1 0 0 0 0 0
251 frog-eye-leaf-spot 4 0 1 2 0 1 1 0 1 1 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
252 frog-eye-leaf-spot 5 0 1 1 0 2 1 0 0 0 0 1 2 0 1 0 0 0 1 0 3 1 0 1 0 0 0 0 0 0 0 0 0 0 0
253 frog-eye-leaf-spot 5 1 2 1 0 3 2 0 0 2 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
254 frog-eye-leaf-spot 6 1 2 2 0 3 3 0 1 2 0 1 2 0 1 0 0 0 1 0 3 2 0 1 0 0 0 1 1 0 0 0 0 0 0
255 frog-eye-leaf-spot 4 0 1 1 0 3 3 1 1 0 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
256 frog-eye-leaf-spot 3 0 2 1 0 2 3 0 1 1 0 1 2 0 1 0 0 0 1 0 3 2 0 1 0 0 0 1 1 0 0 0 0 0 0
257 frog-eye-leaf-spot 5 0 2 2 0 2 2 0 0 1 0 1 2 0 1 0 0 0 1 0 3 2 0 1 0 0 0 1 1 0 0 0 0 0 0
258 frog-eye-leaf-spot 5 0 2 1 0 1 1 1 1 0 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
259 frog-eye-leaf-spot 5 0 2 2 0 2 3 0 1 1 0 1 2 0 1 0 0 0 1 0 3 2 0 1 0 0 0 1 1 0 0 0 0 0 0
260 frog-eye-leaf-spot 5 0 2 1 0 0 1 0 1 1 0 1 2 0 1 0 0 0 1 0 3 2 0 1 0 0 0 1 1 0 0 0 0 0 0
261 frog-eye-leaf-spot 4 0 2 1 0 2 3 0 1 0 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
262 frog-eye-leaf-spot 4 0 2 2 0 1 1 1 1 0 0 1 2 0 1 0 0 0 1 0 3 2 0 1 0 0 0 1 1 0 0 0 0 0 0
263 frog-eye-leaf-spot 4 0 2 1 0 2 1 1 1 1 0 1 2 0 1 0 0 0 1 0 3 1 0 1 0 0 0 1 1 0 0 0 0 0 0
264 frog-eye-leaf-spot 3 1 2 1 0 3 2 1 0 2 0 1 2 0 1 0 0 0 1 0 3 2 0 1 0 0 0 1 1 0 0 0 0 0 0
265 frog-eye-leaf-spot 5 0 2 1 0 3 0 1 0 1 0 1 2 0 1 0 0 0 1 0 3 1 0 1 0 0 0 1 1 0 0 0 0 0 0
266 frog-eye-leaf-spot 5 0 2 2 0 1 1 0 1 0 0 1 2 0 1 0 0 0 1 0 3 2 0 1 0 0 0 1 1 0 0 0 0 0 0
267 frog-eye-leaf-spot 4 0 2 2 0 1 2 1 0 0 0 1 2 0 1 0 0 0 1 0 3 2 0 1 0 0 0 1 1 0 0 0 0 0 0
268 frog-eye-leaf-spot 5 0 2 2 0 2 1 0 1 1 0 1 2 0 1 0 0 0 1 0 3 2 0 1 0 0 0 1 1 0 0 0 0 0 0
269 frog-eye-leaf-spot 5 0 2 1 0 3 0 1 0 0 0 1 2 0 1 0 0 0 1 0 3 1 0 1 0 0 0 1 1 0 0 0 0 0 0
270 frog-eye-leaf-spot 3 0 2 1 0 1 2 1 0 1 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
271 frog-eye-leaf-spot 6 0 1 2 0 3 3 0 1 0 0 1 2 0 1 0 0 0 1 0 3 2 1 0 0 0 0 1 2 1 0 1 1 1 0
272 frog-eye-leaf-spot 5 0 1 1 0 1 3 1 2 0 0 1 2 0 1 0 0 0 1 0 3 0 1 0 0 0 0 1 1 0 0 0 0 0 0
273 frog-eye-leaf-spot 5 0 2 1 0 3 2 1 0 0 0 1 2 0 1 0 0 0 1 0 3 1 0 1 0 0 0 1 1 0 0 0 0 0 0
274 frog-eye-leaf-spot 5 1 2 1 0 3 3 0 1 2 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
275 frog-eye-leaf-spot 3 1 2 1 0 3 0 1 0 2 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
276 frog-eye-leaf-spot 6 1 2 2 0 3 1 0 1 2 0 1 2 0 1 0 0 0 1 0 3 2 0 1 0 0 0 1 1 0 0 0 0 0 0
277 frog-eye-leaf-spot 4 0 2 1 0 1 2 1 0 1 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
278 frog-eye-leaf-spot 4 0 2 2 0 1 0 1 0 0 0 1 2 0 1 0 0 0 1 0 3 2 0 1 0 0 0 1 1 0 0 0 0 0 0
279 frog-eye-leaf-spot 6 1 2 2 0 3 0 0 0 2 0 1 2 0 1 0 0 0 1 0 3 2 0 1 0 0 0 1 1 0 0 0 0 0 0
280 frog-eye-leaf-spot 5 1 2 2 0 3 3 0 1 2 0 1 2 0 1 0 0 0 1 0 3 2 0 1 0 0 0 1 1 0 0 0 0 0 0
281 frog-eye-leaf-spot 4 0 2 1 0 0 1 1 1 0 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
282 frog-eye-leaf-spot 4 0 2 1 0 2 3 1 1 1 0 1 2 0 1 0 0 0 1 0 3 1 0 1 0 0 0 1 1 0 0 0 0 0 0
283 frog-eye-leaf-spot 4 1 1 2 0 1 1 0 2 2 1 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
284 frog-eye-leaf-spot 4 0 2 1 0 2 0 0 0 1 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
285 frog-eye-leaf-spot 5 1 2 1 0 1 2 1 0 2 0 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
286 frog-eye-leaf-spot 4 0 2 2 0 1 3 1 1 0 0 1 2 0 1 0 0 0 1 0 3 2 0 1 0 0 0 1 1 0 0 0 0 0 0
287 frog-eye-leaf-spot 5 0 2 1 0 1 2 0 0 0 0 1 2 0 1 0 0 0 1 0 3 2 0 1 0 0 0 1 1 0 0 0 0 0 0
288 frog-eye-leaf-spot 5 0 2 2 0 2 0 0 0 1 0 1 2 0 1 0 0 0 1 0 3 2 0 1 0 0 0 1 1 0 0 0 0 0 0
289 frog-eye-leaf-spot 5 1 2 1 0 2 3 0 1 2 0 1 2 0 1 0 0 0 1 0 3 2 0 1 0 0 0 1 1 0 0 0 0 0 0
290 diaporthe-pod-&-stem-blight 5 0 2 2 0 3 3 1 0 0 0 0 2 0 1 0 0 0 1 0 0 0 1 0 0 0 0 1 2 0 1 1 1 1 0
291 diaporthe-pod-&-stem-blight 6 0 2 2 0 2 3 1 0 1 0 0 2 0 1 0 0 0 1 0 0 0 1 0 0 0 0 1 2 1 1 1 1 1 0
292 diaporthe-pod-&-stem-blight 5 0 2 2 0 3 3 1 0 0 0 0 2 0 1 0 0 0 1 0 0 0 1 0 0 0 0 1 2 1 1 1 1 1 0
293 diaporthe-pod-&-stem-blight 1 1 1 2 0 3 0 1 0 2 0 0 2 0 1 0 0 0 1 0 0 0 1 0 0 0 0 1 2 0 1 1 1 1 0
294 diaporthe-pod-&-stem-blight 5 0 2 2 0 2 3 1 0 1 0 0 2 0 1 0 0 0 1 0 0 0 1 0 0 0 0 1 2 1 1 1 1 1 0
295 diaporthe-pod-&-stem-blight 5 0 2 2 0 2 3 1 0 0 0 0 2 0 1 0 0 0 1 0 0 0 1 0 0 0 0 1 2 1 1 1 1 1 0
296 cyst-nematode 2 0 2 1 0 2 1 1 0 1 1 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 2 0 1 0 0 1 0 2
297 cyst-nematode 3 0 2 1 0 3 2 1 0 1 1 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 2 0 1 0 0 1 0 2
298 cyst-nematode 4 0 2 1 0 3 2 1 0 1 1 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 2 0 1 0 0 1 0 2
299 cyst-nematode 3 0 2 1 0 2 1 1 0 1 1 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 2 0 1 0 0 1 0 2
300 cyst-nematode 3 0 2 1 0 2 1 1 0 1 1 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 2 0 1 0 0 1 0 2
301 cyst-nematode 4 0 2 1 0 2 1 1 0 1 1 1 2 0 1 0 0 0 0 0 0 0 0 0 0 0 0 2 0 1 0 0 1 0 2
302 2-4-d-injury 5 0 2 1 0 2 1 1 0 1 0 1 0 2 2 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
303 herbicide-injury 1 1 2 0 0 1 0 1 0 1 1 1 2 1 1 0 1 0 1 0 0 0 0 0 0 0 0 3 0 0 0 0 0 0 1
304 herbicide-injury 0 1 2 0 0 0 3 1 0 1 1 1 0 2 2 0 1 0 1 0 0 0 0 0 0 0 0 3 0 0 0 0 0 0 1
305 herbicide-injury 1 1 2 0 0 0 0 1 0 1 1 1 0 2 2 0 1 0 1 0 0 0 0 0 0 0 0 3 0 0 0 0 0 0 1
306 herbicide-injury 1 1 2 0 0 1 3 1 0 1 1 1 2 1 1 0 1 0 1 0 0 0 0 0 0 0 0 3 0 0 0 0 0 0 1
###Markdown
Stretch Goals - Other types and sources of dataNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.Overall, in the course of learning data science you will deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files (a small sketch follows below). The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.One last major source of data is APIs: https://github.com/toddmotto/public-apisAPI stands for Application Programming Interface, and while it originally meant, e.g., the way an application interfaced with the GUI or other parts of an operating system, it now largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. I suggest image, text, or a (public) API - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
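Here is a minimal sketch of the log-file case in Python rather than Unix tools - the file name `app.log` and the `user=... action=...` line format are made up for the example - just to show that a pile of log lines can be turned back into tabular data:
```
import re
import pandas as pd

# Hypothetical log line: "2019-01-14 09:30:01 INFO user=alice action=login"
pattern = re.compile(r'user=(\w+) action=(\w+)')
records = []
with open('app.log') as f:  # made-up file name
    for line in f:
        match = pattern.search(line)
        if match:
            records.append({'user': match.group(1), 'action': match.group(2)})
log_df = pd.DataFrame(records)
log_df['action'].value_counts()
```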
###Code
!pip install --upgrade google-cloud-language
# Imports the Google Cloud client library
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
# Instantiates a client
client = language.LanguageServiceClient()
# The text to analyze
text = u'Hello, world!'
document = types.Document(
content=text,
type=enums.Document.Type.PLAIN_TEXT)
# Detects the sentiment of the text
sentiment = client.analyze_sentiment(document=document).document_sentiment
print('Text: {}'.format(text))
print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude))
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - From a URL (github or otherwise) - CSV upload method - !wget method- "Clean" a dataset using common Python libraries - Removing NaN values "Data Imputation"- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot (if we have time) Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header = None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Steps of Loading and Exploring a Dataset:- Find a dataset that looks interesting- Learn what you can about it - What's in it? - How many rows and columns? - What types of variables?- Look at the raw contents of the file- Load it into your workspace (notebook) - Handle any challenges with headers - Handle any problems with missing values- Then you can start to explore the data - Look at the summary statistics - Look at counts of different categories - Make some plots to look at the distribution of the data 3 ways of loading a dataset From its URL
###Code
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
flag_data = pd.read_csv(flag_data_url, header = None)
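# Exercise sketch: one possible way to attach the codebook's variable names
# while loading. The codebook numbers variables from 1, but the list below is
# purely positional, so only the order matters.
flag_columns = ['name', 'landmass', 'zone', 'area', 'population', 'language',
                'religion', 'bars', 'stripes', 'colours', 'red', 'green',
                'blue', 'gold', 'white', 'black', 'orange', 'mainhue',
                'circles', 'crosses', 'saltires', 'quarters', 'sunstars',
                'crescent', 'triangle', 'icon', 'animate', 'text',
                'topleft', 'botright']
flag_data_named = pd.read_csv(flag_data_url, header=None, names=flag_columns)
flag_data_named.head()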
###Output
_____no_output_____
###Markdown
From a local file
###Code
import os
os.getcwd()
df = pd.read_csv(r'/Users/admin/Documents/lambda_school/unit_1/DS-Unit-1-Sprint-1-Dealing-With-Data/module2-loadingdata/flag.data')
df
###Output
_____no_output_____
###Markdown
Using the `!wget` command
###Code
# Research this topic. May not need it.
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
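# The output below shows wget isn't installed on this machine. A possible
# alternative with the same effect (a sketch, using curl since it already
# worked above): the -O flag saves the file under its remote name in the
# current directory.
!curl -s -O https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
pd.read_csv('flag.data', header=None).head()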
###Output
/bin/sh: wget: command not found
###Markdown
Part 2 - Deal with Missing Values Diagnose Missing ValuesLet's use the Adult Dataset from UCI.
###Code
adult_url = 'https://raw.githubusercontent.com/ryanleeallred/datasets/master/adult.csv'
adult = pd.read_csv(adult_url)
adult.head(15)
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
adult.isnull().sum()
# isnull() reports no missing values, but the data encodes them as " ?" (note the leading space)
adult.iloc[14]
# Country for row 14 has '?'
import numpy as np
adult = adult.replace(" ?", np.NaN)
adult.iloc[14]
# Country for row 14 is now replaced with NaN
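# The heading above says "Fill Missing Values", but so far the " ?" strings
# have only been converted to NaN. One possible way to actually fill them
# (a sketch, not from the lecture): use each categorical column's most
# frequent value.
for col in adult.select_dtypes(include='object').columns:
    adult[col] = adult[col].fillna(adult[col].mode()[0])
adult.isnull().sum()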
###Output
_____no_output_____
###Markdown
Part 3 - Explore the Dataset: Look at "Summary Statistics" Numeric
###Code
adult.describe()
###Output
_____no_output_____
###Markdown
Non-Numeric
###Code
adult.describe(exclude = 'number')
###Output
_____no_output_____
###Markdown
Look at Categorical Values
###Code
adult['marital-status'].value_counts()
adult['marital-status'].value_counts(normalize = True) # normalize=True shows proportions (0-1) instead of raw counts
###Output
_____no_output_____
###Markdown
Part 4 - Basic Visualizations (using the Pandas Library) Histogram
###Code
adult['age'].hist();
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
adult['age'].plot.density();
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
adult.plot.scatter('age', 'hours-per-week');
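# The objectives also mention a pairplot; a minimal sketch (assuming seaborn
# is available, as it is on Colab). Sampling keeps it fast on ~32k rows.
import seaborn as sns
sns.pairplot(adult.select_dtypes(include='number').sample(500, random_state=0));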
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
# URL for dataset, horse-colic
colic_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/horse-colic/horse-colic.data'
# Download dataset
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/horse-colic/horse-colic.data
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
colic_data = pd.read_csv(colic_data_url, header=None)
colic_data.head()
# Exploring dataset some more, looking at shape
colic_data.shape
from google.colab import files
uploaded = files.upload()
df = pd.read_csv('horse-colic.data', header=None, names=['surgery?','Age',
'Hospital Number',
'rectal temperature',
'pulse',
'respiratory rate',
'temperature of extremities',
'peripheral pulse',
'mucous membranes',
'capillary refill time',
'pain',
'peristalsis',
'abdominal distension',
'nasogastric tube',
'nasogastric reflux',
'nasogastric reflux PH',
'rectal examination - feces',
'abdomen',
'packed cell volume',
'total protein',
'abdominocentesis appearance',
'abdomcentesis total protein',
'outcome',
'surgical lesion?',
'type of lesion ',
'type of lesion 2',
'type of lesion 3',
'cp_data'], sep=" ",
na_values=["?"])
# Below line of code was used for debugging purposes; I had some difficulty with the CSV format.
#df = pd.read_csv('horse-colic.data', sep=" ")
df.head()
# Lots of null values for nasogastric tube/reflux/reflux PH, rectal examination, abdomen, abdominocentesis appearance, and abdomcentesis total protein
df.isnull().sum()
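# To back up the comment above, the fraction of missing values per column
# (a quick sketch; isnull() booleans average to a proportion):
df.isnull().mean().sort_values(ascending=False).head(10)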
###Output
_____no_output_____
###Markdown
As I commented in the above code cell, certain columns are missing copious amounts of data. Imputing that much would be impractical, so the simplest option here is to drop the rows that contain NaNs.
###Code
df = df[~(df.isnull().any(axis=1))]
print(df.shape)
df.head()
# Testing to make sure all NaNs have been removed
df.isnull().any().sum()
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
from google.colab import files
uploaded = files.upload()
df = pd.read_csv('data.csv', header=None, names=['COMMUNITY_AREA_NUMBER', 'COMMUNITY_AREA_NAME', 'PERCENT_OF_HOUSING_CROWDED', 'PERCENT_HOUSEHOLDS_BELOW_POVERTY', 'PERCENT_AGED_16__UNEMPLOYED',
'PERCENT_AGED_25__WITHOUT_HIGH_SCHOOL_DIPLOMA', 'PERCENT_AGED_UNDER_18_OR_OVER_64', 'PER_CAPITA_INCOME', 'HARDSHIP_INDEX'], na_values=["?"])
df.head()
import numpy as np
df_cleaned = df.replace('?', np.NaN)
df_cleaned.head()
df.head()
df.isnull().sum()
df.dtypes
df = df[~(df.isnull().any(axis=1))]
print(df.shape)
df.head()
from pandas.api.types import is_numeric_dtype
for header in df:
if is_numeric_dtype(df[header]):
print("numeric", header)
else:
print("non-numeric", header)
df.info()
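# The info() output below shows every column stored as 'object' (strings).
# A possible next step (a sketch): coerce the numeric-looking columns, turning
# anything unparseable -- such as a stray header row -- into NaN.
numeric_cols = df.columns.drop('COMMUNITY_AREA_NAME')
df[numeric_cols] = df[numeric_cols].apply(pd.to_numeric, errors='coerce')
df.dtypes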
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 78 entries, 0 to 77
Data columns (total 9 columns):
COMMUNITY_AREA_NUMBER 78 non-null object
COMMUNITY_AREA_NAME 78 non-null object
PERCENT_OF_HOUSING_CROWDED 78 non-null object
PERCENT_HOUSEHOLDS_BELOW_POVERTY 78 non-null object
PERCENT_AGED_16__UNEMPLOYED 78 non-null object
PERCENT_AGED_25__WITHOUT_HIGH_SCHOOL_DIPLOMA 78 non-null object
PERCENT_AGED_UNDER_18_OR_OVER_64 78 non-null object
PER_CAPITA_INCOME 78 non-null object
HARDSHIP_INDEX 78 non-null object
dtypes: object(9)
memory usage: 6.1+ KB
###Markdown
Stretch Goals - Other types and sources of dataNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.One last major source of data is APIs: https://github.com/toddmotto/public-apisAPI stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. Image, text, or (public) APIs are probably more tractable - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
###Code
flag_data_url = 'https://api.thecatapi.com/v1/images/search'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://api.thecatapi.com/v1/images/search
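# A possible follow-up (a sketch, assuming the `requests` package is available
# and that the endpoint returns a JSON list of objects with a 'url' field, as
# the curl output suggests): grab the image URL in Python instead of by hand.
import requests
cat_image_url = requests.get('https://api.thecatapi.com/v1/images/search').json()[0]['url']
print(cat_image_url)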
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img=mpimg.imread('https://cdn2.thecatapi.com/images/42k.jpg')
imgplot = plt.imshow(img)
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
import pandas as pd
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
# loading data set from UCI
audiology_data = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/audiology/audiology.standardized.data", header=None)
audiology_data.head()
# Let's see what this column contains and whether we can
# learn anything more about the data in it
audiology_data[7].value_counts()
# No luck there. Maybe bringing in the test data will help
audiology_test = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/audiology/audiology.standardized.test", header=None)
audiology_test[7].value_counts()
# still unclear what the column 7 data is meant to show
# checking for number of "?" and null values
import numpy as np
audiology_cleaned = audiology_data.replace("?", np.NaN)
audiology_cleaned.isnull().sum().sum()
# LOADING NEW DATA SET FROM UCI
badge_data = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/badges/badges.data", header=None)
badge_data.head()
# no column titles here
# checking for null values
badge_data.isnull().sum()
# looks like the goal is to figure out what features
# in each name cause a + or - to be given.
# Is this NLP?
# Checking out both data sets from comic characters
from google.colab import files
files.upload()
# not working for large files, but I understand the concept
# loading both csvs the original way
dc_data = pd.read_csv("https://raw.githubusercontent.com/fivethirtyeight/data/master/comic-characters/dc-wikia-data.csv")
dc_data.head()
marvel_data = pd.read_csv("https://raw.githubusercontent.com/fivethirtyeight/data/master/comic-characters/marvel-wikia-data.csv")
marvel_data.head()
# combining both dataframes together
comic_df = dc_data.append(marvel_data)
comic_df.head()
dc_data.shape
marvel_data.shape
# checking if this adds up
comic_df.shape
# new dataset has added an extra "Year" column
comic_df.isnull().sum()
# Going to try beginning the cleaning of each column
comic_df["ALIGN"].value_counts()
comic_df["ALIGN"].fillna(method="ffill", inplace=True)
comic_df["ALIGN"].isnull().sum()
comic_df["ALIVE"].fillna(method="ffill", inplace=True)
comic_df["ALIVE"].isnull().sum()
comic_df["APPEARANCES"].describe()
comic_df["APPEARANCES"].fillna(19.009303, inplace=True)
comic_df["APPEARANCES"].isnull().sum()
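# A more reusable variant of the fill above (a sketch): compute the fill value
# from the column itself instead of hard-coding the mean.
# comic_df["APPEARANCES"] = comic_df["APPEARANCES"].fillna(comic_df["APPEARANCES"].mean())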
comic_df["EYE"].fillna(method="ffill", inplace=True)
comic_df["EYE"].isnull().sum()
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
names = """1. name: Name of the country concerned
2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania
3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW
4. area: in thousands of square km
5. population: in round millions
6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others
7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others
8. bars: Number of vertical bars in the flag
9. stripes: Number of horizontal stripes in the flag
10. colours: Number of different colours in the flag
11. red: 0 if red absent, 1 if red present in the flag
12. green: same for green
13. blue: same for blue
14. gold: same for gold (also yellow)
15. white: same for white
16. black: same for black
17. orange: same for orange (also brown)
18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)
19. circles: Number of circles in the flag
20. crosses: Number of (upright) crosses
21. saltires: Number of diagonal crosses
22. quarters: Number of quartered sections
23. sunstars: Number of sun or star symbols
24. crescent: 1 if a crescent moon symbol present, else 0
25. triangle: 1 if any triangles present, 0 otherwise
26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 0
27. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise
28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise
29. topleft: colour in the top-left corner (moving right to decide tie-breaks)
30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)"""
headers = []
for line in names.split("\n"):
if line != "":
header = line.split(":")[0]
headers.append(header.split(".")[1][1:])
flag_data_rev = pd.read_csv(flag_data_url, header=None, names=headers)
print(flag_data_rev.head())
###Output
name landmass zone area population language religion bars \
0 Afghanistan 5 1 648 16 10 2 0
1 Albania 3 1 29 3 6 6 0
2 Algeria 4 1 2388 20 8 2 2
3 American-Samoa 6 3 0 0 1 1 0
4 Andorra 3 1 0 0 6 0 3
stripes colours ... saltires quarters sunstars crescent \
0 3 5 ... 0 0 1 0
1 0 3 ... 0 0 1 0
2 0 3 ... 0 0 1 1
3 0 5 ... 0 0 0 0
4 0 3 ... 0 0 0 0
triangle icon animate text topleft botright
0 0 1 0 0 black green
1 0 0 1 0 red red
2 0 0 0 0 green white
3 1 1 1 0 blue red
4 0 0 0 0 blue red
[5 rows x 30 columns]
###Markdown
Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
# Gathering and reading data into pandas dataframe
glass_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/glass/glass.data'
glass_data = pd.read_csv(glass_url, header=None, names=['Id Number', 'RI', 'Na', 'Mg', 'Al', 'Si', 'K', 'Ca', 'Ba', 'Fe', 'Type of glass'])
# Mapping category to representative integer
glass_type = {
1: 'building_windows_float_processed',
2: 'building_windows_non_float_processed',
3: 'vehicle_windows_float_processed',
4: 'vehicle_windows_non_float_processed',
5: 'containers',
6: 'tableware',
7: 'headlamps'
}
glass_data['Type of glass'] = glass_data['Type of glass'].map(glass_type)
# First five data points and headers
print(glass_data.head())
# Confirming the number of data points are correct
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/glass/glass.data | wc
print(glass_data.count())
# Checking null
print(glass_data.isna().sum())
# Showing the information of the data set
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/glass/glass.names
# Gathering and reading data into pandas dataframe
glass_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/glass/glass.data'
import urllib.request
with urllib.request.urlopen('https://archive.ics.uci.edu/ml/machine-learning-databases/glass/glass.names') as response:
html = str(response.read(), 'utf-8')
var_lines = False
headers = []
maps = {}
for line in html.split("\n"):
if line != "":
if line[0] != " ":
var_lines = False
if var_lines:
if ":" in line:
prev = line.split(":")[0].split(".")[1][1:]
headers.append(prev)
else:
if line.lstrip()[0] == "-":
vals = line.lstrip().split(" ")
key = int(vals[1])
value = " ".join(vals[2:])
if prev not in maps:
maps[prev] = {}
maps[prev][key] = value
if line[3:] == "Attribute Information:":
var_lines = True
glass_data = pd.read_csv(glass_url, header=None, names=headers)
for key in maps:
glass_data[key] = glass_data[key].map(maps[key])
print(glass_data.head())
###Output
Id number RI Na Mg Al Si K Ca Ba Fe \
0 1 1.52101 13.64 4.49 1.10 71.78 0.06 8.75 0.0 0.0
1 2 1.51761 13.89 3.60 1.36 72.73 0.48 7.83 0.0 0.0
2 3 1.51618 13.53 3.55 1.54 72.99 0.39 7.78 0.0 0.0
3 4 1.51766 13.21 3.69 1.29 72.61 0.57 8.22 0.0 0.0
4 5 1.51742 13.27 3.62 1.24 73.08 0.55 8.07 0.0 0.0
Type of glass
0 building_windows_float_processed
1 building_windows_float_processed
2 building_windows_float_processed
3 building_windows_float_processed
4 building_windows_float_processed
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.php- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar semi-clean source. You don't want the data that you're working with for this assignment to have any bigger issues than maybe not having headers or including missing values, etc.After you have chosen your dataset, do the following:- Import the dataset using the method that you are least comfortable with (!curl or CSV upload). - Make sure that your dataset has the number of rows and columns that you expect. - Make sure that your dataset has appropriate column names, rename them if necessary. - If your dataset uses markers like "?" to indicate missing values, replace them with NaNs during import.- Identify and fill missing values in your dataset (if any) - Don't worry about using methods more advanced than the `.fillna()` function for today.- Create one of each of the following plots using your dataset - Scatterplot - Histogram - Density Plot - Pairplot (note that pairplots will take a long time to load with large datasets or datasets with many columns)If you get that done and want to try more challenging or exotic things, go for it! 
Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck!).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
# First we bring in our chosen dataset
# I chose Breast Cancer
breast_cancer_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer/breast-cancer.data'
# Let's use the Bash command
!curl $breast_cancer_url
# Now, let's use the wc command
!curl -s $breast_cancer_url | wc -l
# Let's load the data with pandas
import pandas as pd
bcancer_data = pd.read_csv(breast_cancer_url)
# We should take a quick look at our dataset
bcancer_data.head()
# We should use the shape attribute to confirm that we have header issues
bcancer_data.shape
# Yup, we're 1 row short, the 'header'. Let's fix this
# First we'll pass header=None
bcancer_data = pd.read_csv(breast_cancer_url, header=None)
# Let's have a look
bcancer_data.head()
# And the shape
bcancer_data.shape
# Now we have the correct number of rows.
# So let's fix our headers with the info in our metadata
col_headers = ['Class', 'age', 'menopause', 'tumor-size', 'inv-nodes', 'node-caps', 'deg-malig', 'breast', 'breast-quad', 'irradiat']
bcancer_data = pd.read_csv(breast_cancer_url, header=None, names=col_headers)
# Let's take another look
bcancer_data.head()
# Great, now let's see how clean our data is
bcancer_data.isna()
# Looks annoyingly clean, but let's make sure
bcancer_data.isna().sum()
# Yup, annoyingly clean.
# Alright, time for some visualizations.
# First, pandas
bcancer_data['deg-malig'].hist()
# I have a lot of categorical data in string form at the moment.
# I've been tasked with visualizations that require ints and floats.
# So, I'm going to replace some of them with integers
# First let's look at Age
print(bcancer_data['age'].value_counts())
# Also Tumor size
print(bcancer_data['tumor-size'].value_counts())
# I'll convert the string ranges to Integers in both these columns
conversions = {"age": {"20-29": 25, "30-39": 35, "40-49": 45, "50-59": 55, "60-69": 65, "70-79": 75},
"tumor-size": {"0-4": 0, "5-9": 5, "10-14": 10, "15-19": 15, "20-24": 20, "25-29": 25, "30-34": 30, "35-39": 35, "40-44": 40, "45-49": 45, "50-54": 50}}
# Now to replace the values in the dataset
bcancer_data.replace(conversions, inplace=True)
bcancer_data.head()
# Let's try to plot histograms with these now
# First "age"
print(bcancer_data["age"].hist(bins=10))
# Now tumor-size
print(bcancer_data['tumor-size'].hist())
# Alright, pandas visuals look boring. New library!
# Let's go with Seaborn
import seaborn as sns
#bcancer_data.age = bcancer_data.age.astype('category')
#bcancer_data.head()
# Histogram first
print(sns.distplot(bcancer_data['tumor-size']))
# Then the Density plot
print(sns.distplot(bcancer_data['tumor-size'], hist=False))
# Now a Scatterplot. This will look really uniform due to the replacements
sns.scatterplot(x="tumor-size", y="age", data=bcancer_data)
# And finally the Pairplot
sns.set(style='ticks', color_codes=True)
sns.pairplot(bcancer_data)
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - From a URL (github or otherwise) - CSV upload method - !wget method- "Clean" a dataset using common Python libraries - Removing NaN values "Data Imputation"- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot (if we have time) Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Steps of Loading and Exploring a Dataset:- Find a dataset that looks interesting- Learn what you can about it - What's in it? - How many rows and columns? - What types of variables?- Look at the raw contents of the file- Load it into your workspace (notebook) - Handle any challenges with headers - Handle any problems with missing values- Then you can start to explore the data - Look at the summary statistics - Look at counts of different categories - Make some plots to look at the distribution of the data 3 ways of loading a dataset From its URL From a local file Using the `!wget` command Part 2 - Deal with Missing Values Diagnose Missing ValuesLets use the Adult Dataset from UCI. Fill Missing Values Part 3 - Explore the Dataset: Look at "Summary Statistics Numeric Non-Numeric Look at Categorical Values Part 4 - Basic Visualizations (using the Pandas Library) Histogram
###Code
# Pandas Histogram
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url, header=None)
# Step 3 - verify we've got *something*
flag_data.shape
flag_data = flag_data.rename(columns={0:'country', 1:'landmass', 2:'zone'})
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
from google.colab import files
uploaded = files.upload()
###Output
_____no_output_____
###Markdown
1. symboling: -3, -2, -1, 0, 1, 2, 3. 2. normalized-losses: continuous from 65 to 256. 3. make: alfa-romero, audi, bmw, chevrolet, dodge, honda, isuzu, jaguar, mazda, mercedes-benz, mercury, mitsubishi, nissan, peugot, plymouth, porsche, renault, saab, subaru, toyota, volkswagen, volvo 4. fuel-type: diesel, gas. 5. aspiration: std, turbo. 6. num-of-doors: four, two. 7. body-style: hardtop, wagon, sedan, hatchback, convertible. 8. drive-wheels: 4wd, fwd, rwd. 9. engine-location: front, rear. 10. wheel-base: continuous from 86.6 120.9. 11. length: continuous from 141.1 to 208.1. 12. width: continuous from 60.3 to 72.3. 13. height: continuous from 47.8 to 59.8. 14. curb-weight: continuous from 1488 to 4066. 15. engine-type: dohc, dohcv, l, ohc, ohcf, ohcv, rotor. 16. num-of-cylinders: eight, five, four, six, three, twelve, two. 17. engine-size: continuous from 61 to 326. 18. fuel-system: 1bbl, 2bbl, 4bbl, idi, mfi, mpfi, spdi, spfi. 19. bore: continuous from 2.54 to 3.94. 20. stroke: continuous from 2.07 to 4.17. 21. compression-ratio: continuous from 7 to 23. 22. horsepower: continuous from 48 to 288. 23. peak-rpm: continuous from 4150 to 6600. 24. city-mpg: continuous from 13 to 49. 25. highway-mpg: continuous from 16 to 54. 26. price: continuous from 5118 to 45400.
###Code
df = pd.read_csv('imports-85.data', header=None, names=['symboling', 'norm_loss',
'make', 'fuel', 'aspiration', 'doors', 'body_style',
'drive_wheels', 'engine_location', 'wheel_base',
'length','width', 'height','curb_weight','engine',
'cylinders','engine_size', 'fuel_system','bore',
'stroke','compression','hp','peak_rpm','city_mpg',
'hgwy_mpg','price'])
# option for read_csv(xxx.data, na_values=['?']) to replace non standard na values
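# For illustration only (a quick sketch, not part of the original flow): the same
# local file loaded with na_values so the '?' markers become NaN at read time.
df_na = pd.read_csv('imports-85.data', header=None, na_values=['?'])
print(df_na.isna().sum().sum())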
df.head()
import numpy as np #get numpy for using NAN
df_fixna = df.replace('?', np.nan)  # replace '?' missing values with NaN from numpy
df_fixna.head()
df_fixna.dtypes
df_fixna.isnull().sum()
df_fltr_na = df_fixna[~df_fixna.isnull().any(axis=1)]
print(df_fltr_na.isnull().sum())
print('\n', df_fltr_na.shape, '\n')
df_fltr_na.head()
from pandas.api.types import is_numeric_dtype
for header in df_fltr_na:
if is_numeric_dtype(df_fltr_na[header]):
print('numeric: ', header)
else:
print('not numeric: ', header)
df_fltr_na.dtypes
df_fltr_na['make'].value_counts()
###Output
_____no_output_____
###Markdown
Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
#*** I downloaded this and opened it in libre office as a spreadsheet and used
# that to reformat it as a csv so I could at least get it into pandas as a
# dataframe that I could manipulate. Trying to sort it out from there
from google.colab import files
upload = files.upload()
# Attempting to sort out this dataset...
# Appears to be categorized by conditions, and then the conditions have their
# noted symptoms for each patient (p1, p2, etc.). Attempting to sort it out using
# str.contains, cycling through each item, removing the category (bells_palsy,
# acoustic_neuroma) and placing the symptoms into a new data frame by category and
# patient
audiology_data = pd.read_csv('audiology.csv', header=None, names=['patient','acoustic_neuroma',
'bells_palsy','cochlear_age','cochlear_age_and_noise','cochlear_age_plus_poss_menieres',
'cochlear_noise_and_heredity','cochlear_poss_noise','cochlear_unknown',
'conductive_discontinuity','conductive_fixation','mixed_cochlear_age_fixation',
'mixed_cochlear_age_otitis_media','mixed_cochlear_age_s_om',
'mixed_cochlear_unk_discontinuity','mixed_cochlear_unk_fixation',
'mixed_cochlear_unk_ser_om','mixed_poss_central_om','mixed_poss_noise_om',
'normal_ear','otitis_media','poss_central','possible_brainstem_disorder',
'possible_menieres','retrocochlear_unknown'] )
audiology_data.head()
print(len(audiology_data))
for item in audiology_data.columns:
    if item != 'patient':
        for d in range(len(audiology_data)):
            if 'cochlear' in str(audiology_data.loc[d, item]):
                pass  # TODO: copy these symptoms into a per-category frame
# I was under the impression that the categories that I set up were how this
# dataset was organized, and that there was some information for each
###Output
_____no_output_____
###Markdown
**I'm going to do some cleaner datasets to show I actually do get the basics. Probably shouldn't have started with audiology**
###Code
census_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'
adult = pd.read_csv(census_data_url, header=None, names=['age','work','fnlwgt',
'education','education-num','marital-status','occupation',
'relationship','race','sex','capital-gain','capital-loss',
'hours-per-week','native-country', 'income'], na_values=[' ?'])
print(adult.count())
print('\n', adult.shape, '\n')
adult.head()
print(adult.isna().sum())
print(adult.describe(), '\n\n') #display statistics about numerical data
# set up for loop to display statistics about columns with missing values to get
# a better idea of what to fill with.
for header in adult:
if adult[header].isnull().sum() != 0:
print('\n', header, ':')
print('\n', adult[header].value_counts(), '\n')
else:
print('\n', header, ' has no missing items\n')
# used forward fill for the first two as they had reasonable distributions
adult['work'] = adult['work'].fillna(method='ffill')
adult['occupation'] = adult['occupation'].fillna(method='ffill')
# Just filled in United States for native country as there were a lot of
# countries with only a few people from them. I figured this would mess up
# any results the least.
adult['native-country'] = adult['native-country'].fillna('United-States')
print(adult.isna().sum())
# Some categorical encoding next:
# starting with the income column, 0 is <= $50k; 1 is > $50k
income_dict = {' <=50K':0, ' >50K':1}
adult['income'].replace(income_dict, inplace=True)
adult.head()
# Marital status one hot encoding:
pd.get_dummies(adult, columns=["marital-status"], prefix=["marital-status"]).head()
###Output
_____no_output_____
###Markdown
Stretch Goals - Other types and sources of dataNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.One last major source of data is APIs: https://github.com/toddmotto/public-apisAPI stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. Image, text, or (public) APIs are probably more tractable - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
###Code
#if I get to this...
def load_a_csv_as(url, df_name, headers_bool, header_names):
    # Minimal sketch: use the file's own header row if present, else supply header_names.
    return pd.read_csv(url) if headers_bool else pd.read_csv(url, header=None, names=header_names)
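# A separate sketch of the "public API" stretch goal mentioned above: hit the
# GitHub REST API with requests and flatten the JSON reply into a DataFrame.
# The repo queried and the fields picked out below are just an example.
import requests
import pandas as pd

resp = requests.get('https://api.github.com/repos/pandas-dev/pandas')
repo_info = resp.json()
print(pd.json_normalize(repo_info)[['name', 'stargazers_count', 'forks_count']])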
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - From a URL (github or otherwise) - CSV upload method - !wget method- "Clean" a dataset using common Python libraries - Removing NaN values "Data Imputation"- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot (if we have time) Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Ryan Allred Makes a Change
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
###Output
_____no_output_____
###Markdown
Steps of Loading and Exploring a Dataset:- Find a dataset that looks interesting- Learn what you can about it - What's in it? - How many rows and columns? - What types of variables?- Look at the raw contents of the file- Load it into your workspace (notebook) - Handle any challenges with headers - Handle any problems with missing values- Then you can start to explore the data - Look at the summary statistics - Look at counts of different categories - Make some plots to look at the distribution of the data 3 ways of loading a dataset From its URL
###Code
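# One possible sketch for this blank cell (not the lecture's own solution):
# load the UCI flag data straight from its URL, using the codebook names above.
import pandas as pd

flag_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
flag_cols = ['name', 'landmass', 'zone', 'area', 'population', 'language',
             'religion', 'bars', 'stripes', 'colours', 'red', 'green', 'blue',
             'gold', 'white', 'black', 'orange', 'mainhue', 'circles', 'crosses',
             'saltires', 'quarters', 'sunstars', 'crescent', 'triangle', 'icon',
             'animate', 'text', 'topleft', 'botright']
flags = pd.read_csv(flag_url, header=None, names=flag_cols)
flags.head()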
###Output
_____no_output_____
###Markdown
From a local file
###Code
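# One possible sketch for the local-file route (Colab-specific): files.upload()
# prompts for a file, then read_csv reads it from the working directory.
# 'flag.data' is just the assumed name of the uploaded file.
from google.colab import files
import pandas as pd

uploaded = files.upload()
flags_local = pd.read_csv('flag.data', header=None)
flags_local.shape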
###Output
_____no_output_____
###Markdown
Using the `!wget` command
###Code
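# One possible sketch for the !wget route: download into the notebook's
# filesystem, then read the local copy like any other file.
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
import pandas as pd
flags_wget = pd.read_csv('flag.data', header=None)
flags_wget.count()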
###Output
_____no_output_____
###Markdown
Part 2 - Deal with Missing Values Diagnose Missing ValuesLets use the Adult Dataset from UCI.
###Code
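# One possible sketch for diagnosing missing values in the UCI Adult data.
# The column names and the ' ?' missing-value marker are assumptions based on
# the adult.names codebook, not something given in this cell.
import pandas as pd

adult_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'
adult_cols = ['age', 'workclass', 'fnlwgt', 'education', 'education-num',
              'marital-status', 'occupation', 'relationship', 'race', 'sex',
              'capital-gain', 'capital-loss', 'hours-per-week',
              'native-country', 'income']
adult = pd.read_csv(adult_url, header=None, names=adult_cols, na_values=[' ?'])
adult.isna().sum()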
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
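# One possible sketch, assuming the `adult` frame from the cell above: fill the
# categorical columns that actually have gaps with their most common value.
for col in ['workclass', 'occupation', 'native-country']:
    adult[col] = adult[col].fillna(adult[col].mode()[0])
adult.isna().sum()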
###Output
_____no_output_____
###Markdown
Part 3 - Explore the Dataset: Look at "Summary Statistics Numeric
###Code
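# One possible sketch, assuming `adult` from above: summary stats for the numeric columns.
adult.describe()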
###Output
_____no_output_____
###Markdown
Non-Numeric
###Code
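# One possible sketch, assuming `adult` from above: summary stats for the
# non-numeric (object) columns.
adult.describe(exclude='number')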
###Output
_____no_output_____
###Markdown
Look at Categorical Values Part 4 - Basic Visualizations (using the Pandas Library) Histogram
###Code
# Pandas Histogram
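# One possible fill-in, assuming the `adult` frame from the earlier sketch:
adult['age'].hist(bins=20)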
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
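# One possible fill-in, assuming `adult` from above (KDE needs scipy, which Colab has):
adult['age'].plot.kde()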
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
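# One possible fill-in, assuming `adult` from above:
adult.plot.scatter(x='age', y='hours-per-week', alpha=0.3)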
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - From a URL (github or otherwise) - CSV upload method - !wget method- "Clean" a dataset using common Python libraries - Removing NaN values "Data Imputation"- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot (if we have time) Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
#My Change
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
###Output
_____no_output_____
###Markdown
Steps of Loading and Exploring a Dataset:- Find a dataset that looks interesting- Learn what you can about it - What's in it? - How many rows and columns? - What types of variables?- Look at the raw contents of the file- Load it into your workspace (notebook) - Handle any challenges with headers - Handle any problems with missing values- Then you can start to explore the data - Look at the summary statistics - Look at counts of different categories - Make some plots to look at the distribution of the data 3 ways of loading a dataset From its URL
###Code
###Output
_____no_output_____
###Markdown
From a local file
###Code
###Output
_____no_output_____
###Markdown
Using the `!wget` command
###Code
###Output
_____no_output_____
###Markdown
Part 2 - Deal with Missing Values Diagnose Missing ValuesLets use the Adult Dataset from UCI.
###Code
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
###Output
_____no_output_____
###Markdown
Part 3 - Explore the Dataset: Look at "Summary Statistics Numeric
###Code
###Output
_____no_output_____
###Markdown
Non-Numeric
###Code
###Output
_____no_output_____
###Markdown
Look at Categorical Values Part 4 - Basic Visualizations (using the Pandas Library) Histogram
###Code
# Pandas Histogram
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - From a URL (github or otherwise) - CSV upload method - !wget method- "Clean" a dataset using common Python libraries - Removing NaN values "Data Imputation"- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot (if we have time) Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# PrinceYemen
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
###Output
_____no_output_____
###Markdown
Steps of Loading and Exploring a Dataset:- Find a dataset that looks interesting- Learn what you can about it - What's in it? - How many rows and columns? - What types of variables?- Look at the raw contents of the file- Load it into your workspace (notebook) - Handle any challenges with headers - Handle any problems with missing values- Then you can start to explore the data - Look at the summary statistics - Look at counts of different categories - Make some plots to look at the distribution of the data 3 ways of loading a dataset From its URL
###Code
###Output
_____no_output_____
###Markdown
From a local file
###Code
###Output
_____no_output_____
###Markdown
Using the `!wget` command
###Code
###Output
_____no_output_____
###Markdown
Part 2 - Deal with Missing Values Diagnose Missing ValuesLets use the Adult Dataset from UCI.
###Code
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
###Output
_____no_output_____
###Markdown
Part 3 - Explore the Dataset: Look at "Summary Statistics Numeric
###Code
###Output
_____no_output_____
###Markdown
Non-Numeric
###Code
###Output
_____no_output_____
###Markdown
Look at Categorical Values Part 4 - Basic Visualizations (using the Pandas Library) Histogram
###Code
# Pandas Histogram
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
# Importing the libraries that I am going to use.
import pandas as pd
import numpy as np
# Reading the Excel file directly from the UCI URL.
# A lot of the data sets I was looking at didn't come as plain data files -
# they were archives.
data_file_name = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00342/Data_Cortex_Nuclear.xls'
df = pd.read_excel(data_file_name)
print(df.head())
print(df.shape)
# I checked the dataset description to see how filling in missing values might skew the data;
# it doesn't look like imputation will skew it enough to make it unusable.
# To double-check the work, I would also rerun this after dropping the individual mice
# that are missing measurements, and I would resample random mice to build a larger
# dataset (that resampling technique has a name).
#print(df.count())
print(df.isna().sum())
print(df)
print(df.describe())
# Fill missing values with the column mean for each numeric feature (mean imputation).
df = df.fillna(df.mean(numeric_only=True))
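# --- A small comparison sketch (an illustration added here, not part of the original work) ---
# The alternative mentioned above: dropping rows with missing measurements instead of
# imputing them, to see how much data that approach would cost.
df_dropped = pd.read_excel(data_file_name).dropna()
print('shape after mean imputation:      ', df.shape)
print('shape after dropping incomplete rows:', df_dropped.shape)
print('NaNs remaining after imputation:  ', df.isna().sum().sum())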
# I found a uniquely encoded dataset on UCI, Lebowitz's Universities database.
# I will fetch the data and clean it so it can be used in a pandas DataFrame.
import requests
print('Beginning file download with requests...')
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/university/university.data'
universitydata = requests.get(url)
print(universitydata.content)
# This data set has some real formatting issues, so cleaning it will take a bit more time;
# a rough parsing sketch follows below.
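# --- A minimal parsing sketch (an illustration added here, not part of the original work) ---
# The file is a series of Lisp-style (def-instance ...) records. The rough sketch below uses
# the standard-library re module to turn each record into a dict of attribute -> raw value
# string. It is deliberately crude: repeated keys (e.g. academic-emphasis) are overwritten,
# and stray DEFPROP markers, %-commented fields, and the e-mail headers mixed into the file
# would still need further cleanup.
import re

uni_text = universitydata.content.decode('utf-8', errors='ignore')

uni_records = []
for chunk in re.split(r'\(def-instance\s+', uni_text, flags=re.IGNORECASE)[1:]:
    # First token of the chunk is the university name
    record = {'name': chunk.split()[0].rstrip(')')}
    # Each inner "(key value ...)" pair becomes an attribute
    for key, value in re.findall(r'\(([\w:$%-]+)\s+([^)]*)\)', chunk):
        record[key.lower()] = value.strip()
    uni_records.append(record)

uni_df = pd.DataFrame(uni_records)
print(uni_df.shape)
print(uni_df.head())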
###Output
Beginning file download with requests...
b'(def-instance Adelphi\n (state newyork)\n (control private)\n (no-of-students thous:5-10)\n (male:female ratio:30:70)\n (student:faculty ratio:15:1)\n (sat verbal 500)\n (sat math 475)\n (expenses thous$:7-10)\n (percent-financial-aid 60)\n (no-applicants thous:4-7)\n (percent-admittance 70)\n (percent-enrolled 40)\n (academics scale:1-5 2)\n (social scale:1-5 2)\n (quality-of-life scale:1-5 2)\n (academic-emphasis business-administration)\n (academic-emphasis biology))\n ...'
[output truncated: the file continues with similar Lisp-style def-instance records for the remaining universities, interleaved with %-commented fields, DEFPROP duplicate markers, and some stray forwarded e-mail headers]
RATIO:?)%\n (SAT VERBAL 450)\n (SAT MATH 500)\n (EXPENSES THOUS$:4-)\n% (PERCENT-FINANCIAL-AID N/A)%\n (NO-APPLICANTS THOUS:17+)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n)\n\n(DEFPROP NORTHWESTERN0 T DUPLICATE)\n(DEF-INSTANCE NORTHWESTERN0\n (STATE ILLINOIS)\n (LOCATION SMALL-CITY)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:55:45)\n% (STUDENT:FACULTY N/A)%\n (SAT VERBAL 600)\n (SAT MATH 660)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:7-10)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS ARTS-AND-HUMANITIES)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE EMORY\n (STATE GEORGIA)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:55:45)\n% (STUDENT:FACULTY RATIO:UNAVAILABLE)%\n (SAT VERBAL 550)\n (SAT MATH 600)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 35)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 35)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS COMPUTER-SCIENCE)\n)\n\n(DEFPROP YALE0 T DUPLICATE)\n(DEF-INSTANCE YALE0\n (STATE CONNECTICUT)\n (LOCATION SMALL-CITY)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:55:45)\n (STUDENT:FACULTY RATIO:17:1)\n (SAT VERBAL 615)\n (SAT MATH 660)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 25)\n (NO-APPLICANTS THOUS:7-10)\n (PERCENT-ADMITTANCE 20)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS ARTS-AND-HUMANITIES)\n (ACADEMIC-EMPHASIS FOREIGN-LANGUAGES)\n (MASCOT BULLDOGS)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-BRIDGEPORT\n (STATE CONNECTICUT)\n (LOCATION SMALL-CITY)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:53:47)\n (STUDENT:FACULTY RATIO:20:1)\n (SAT VERBAL 432)\n (SAT MATH 488)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 35)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 25)\n (ACADEMICS SCALE:1-5 1)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS HEALTH-MEDICINE)\n (ACADEMIC-EMPHASIS ARCHITECTURE)\n)\n\n(DEF-INSTANCE BARD\n (STATE NEWYORK)\n (LOCATION SMALL-TOWN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:45:55)\n (STUDENT:FACULTY RATIO:9:1)\n (SAT VERBAL 560)\n (SAT MATH 520)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 80)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE DREW\n (STATE NEWJERSEY)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:52:48)\n (STUDENT:FACULTY RATIO:15:1)\n (SAT VERBAL 535)\n (SAT MATH 553)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 
3)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEFPROP GEORGETOWN0 T DUPLICATE)\n(DEF-INSTANCE GEORGETOWN0\n (STATE DISTRICT-OF-COLUMBIA)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:56:44)\n (STUDENT:FACULTY RATIO:13:1)\n (SAT VERBAL 616)\n (SAT MATH 645)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:7-10)\n (PERCENT-ADMITTANCE 30)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE SWARTHMORE\n (STATE PENNSYLVANIA)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:56:44)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 620)\n (SAT MATH 660)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE WESLEYAN\n (STATE CONNECTICUT)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:9:1)\n (SAT VERBAL 635)\n (SAT MATH 660)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:17+)\n (PERCENT-ADMITTANCE 35)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n (ACADEMIC-EMPHASIS BIOLOGY)\n)\n\n(DEF-INSTANCE MOUNT-HOLYOKE\n (STATE MASSACHUSETTS)\n (LOCATION SMALL-CITY)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:0:100)\n (STUDENT:FACULTY RATIO:8:1)\n (SAT VERBAL 610)\n (SAT MATH 590)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n (ACADEMIC-EMPHASIS BIOLOGY)\n)\n\n(DEFPROP UNIVERSITY-OF-TEXAS1 T DUPLICATE)\n(DEF-INSTANCE UNIVERSITY-OF-TEXAS1\n (STATE TEXAS)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:12:1)\n (SAT VERBAL 485)\n (SAT MATH 540)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:17+)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE TULANE\n (STATE LOUISIANA)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:60:40)\n (STUDENT:FACULTY RATIO:11:1)\n (SAT VERBAL 552)\n (SAT MATH 594)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 30)\n (NO-APPLICANTS THOUS:17+)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 5)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE TEXAS-CHRISTIAN-UNIVERSITY\n (STATE TEXAS)\n (LOCATION SMALL-CITY)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:15:1)\n (SAT VERBAL 515)\n (SAT MATH 515)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 80)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS 
LIBERAL-ARTS)\n)\n-------\n-------\n\nFrom [email protected] Mon Feb 22 20:56:01 1988\nReceived: from zodiac by meridian (5.52/4.7)\nReceived: from Jessica.Stanford.EDU by ads.com (5.58/1.9)\n id AA04603; Mon, 22 Feb 88 21:02:43 PST\nReceived: from Portia.Stanford.EDU by jessica.Stanford.EDU with TCP; Mon, 22 Feb\n 88 21:01:04 PST\nReceived: from columbia.edu (COLUMBIA.EDU.ARPA) by Portia.STANFORD.EDU\n(1.2/Ultrix2.0-B)\n id AA11454; Mon, 22 Feb 88 20:49:18 pst\nReceived: from CS.COLUMBIA.EDU by columbia.edu (5.54/1.14)\n id AA10182; Mon, 22 Feb 88 23:48:06 EST\nMessage-Id: <[email protected]>\nDate: Fri 22 Jan 88 02:49:58-EST\nFrom: The Mailer Daemon <[email protected]>\nTo: [email protected]\nSubject: Message of 18-Jan-88 20:09:51\nResent-Date: Mon 22 Feb 88 23:44:01-EST\nResent-From: Michael Lebowitz <[email protected]>\nResent-To: [email protected]\nResent-Message-Id: <[email protected]>\nStatus: R\n\nMessage undeliverable and dequeued after 3 days:\nsouders%[email protected]: Cannot connect to host\n ------------\nDate: Mon 18 Jan 88 20:09:50-EST\nFrom: Michael Lebowitz <[email protected]>\nSubject: bigger file part 2\nTo: souders%[email protected]\nIn-Reply-To: <[email protected]>\nMessage-ID: <[email protected]>\n\n\n(DEF-INSTANCE OREGON-INSTITUTE-OF-TECHNOLOGY\n (STATE OREGON)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:3:1)\n (STUDENT:FACULTY RATIO:14:1)\n (SAT VERBAL 404)\n (SAT MATH 443)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 15)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS HEALTH-SCIENCE)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n)\n\n(DEF-INSTANCE SAN-JOSE-STATE\n (STATE CALIFORNIA)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:50:50)\n% (STUDENT:FACULTY N/A)%\n (SAT VERBAL 425)\n (SAT MATH 465)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 20)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ARCHITECTURE)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS PUBLIC-AFFAIRS-AND-SERVICES)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-CHICAGO\n (STATE ILLINOIS)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:60:40)\n% (STUDENT:FACULTY N/A)%\n (SAT VERBAL 620)\n (SAT MATH 640)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 40)\n (PERCENT-ENROLLED 20)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS MEDICINE)\n)\n\n(DEF-INSTANCE BRYN-MAWR\n (STATE PENNSYLVANIA)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:0:100)\n% (STUDENT:FACULTY N/A)%\n (SAT VERBAL 640)\n (SAT MATH 610)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 30)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ARTS-AND-SCIENCES)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n)\n\n(DEF-INSTANCE OBERLIN\n (STATE OHIO)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE 
RATIO:50:50)\n% (STUDENT:FACULTY N/A)%\n (SAT VERBAL 550)\n (SAT MATH 550)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS MUSIC)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n (ACADEMIC-EMPHASIS FINE-ARTS)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-TEXAS\n (STATE TEXAS)\n (LOCATION SMALL-CITY)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:15:1)\n (SAT VERBAL 300)\n (SAT MATH 300)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:17+)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 5)\n (QUALITY-OF-LIFE SCALE:1-5 5)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE CORPUS-CHRISTI-STATE-U\n (STATE TEXAS)\n (LOCATION SMALL-CITY)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:70:30)\n (STUDENT:FACULTY RATIO:12:1)\n (SAT VERBAL 250)\n (SAT MATH 250)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 70)\n (ACADEMICS SCALE:1-5 1)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n)\n\n(DEFPROP UNIVERSITY-OF-PENNSYLVANIA0 T DUPLICATE)\n(DEF-INSTANCE UNIVERSITY-OF-PENNSYLVANIA0\n (STATE PENNSYLVANIA)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:15-20)\n (MALE:FEMALE RATIO:5:4)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 610)\n (SAT MATH 660)\n (EXPENSES THOUS$:7-10)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 40)\n (PERCENT-ENROLLED 50)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n% (ACADEMIC-EMPHASIS WHARTON)%\n)\n\n(DEF-INSTANCE VILLANOVA\n (STATE PENNSYLVANIA)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:4:3)\n (STUDENT:FACULTY RATIO:14:1)\n (SAT VERBAL 528)\n (SAT MATH 585)\n (EXPENSES THOUS$:4-7)\n (NO-APPLICANTS THOUS:7-10)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 40)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS NURSING)\n (ACADEMIC-EMPHASIS COMMERCE)\n)\n\n(DEF-INSTANCE GLASSBORO-STATE-COLLEGE\n (STATE NEWJERSEY)\n (LOCATION SMALL-TOWN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:3:3)\n (STUDENT:FACULTY RATIO:18:1)\n (SAT VERBAL 440)\n (SAT MATH 470)\n (EXPENSES THOUS$:4-)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 50)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS PERFORMING-ARTS)\n)\n\n(DEF-INSTANCE SAINT-ELIZABETHS\n (STATE NEWJERSEY)\n (LOCATION SMALL-TOWN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:0:100)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 428)\n (SAT MATH 440)\n (EXPENSES THOUS$:4-7)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 90)\n (PERCENT-ENROLLED 50)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS BIOLOGY)\n)\n\n(DEF-INSTANCE JUILLIARD\n (STATE NEWYORK)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:40:60)\n (STUDENT:FACULTY RATIO:7:1)\n (SAT VERBAL 0)\n (SAT MATH 0)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 70)\n (NO-APPLICANTS THOUS:7-10)\n (PERCENT-ADMITTANCE 15)\n 
(PERCENT-ENROLLED 80)\n (ACADEMICS SCALE:1-5 1)\n (SOCIAL SCALE:1-5 1)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS MUSIC-PERFORMANCE)\n (ACADEMIC-EMPHASIS DANCE)\n (ACADEMIC-EMPHASIS DRAMA)\n)\n\n(DEF-INSTANCE EASTMAN-SCHOOL-OF-MUSIC\n (STATE NEWYORK)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:40:60)\n (STUDENT:FACULTY RATIO:7:1)\n (SAT VERBAL 400)\n (SAT MATH 400)\n (SAT VERBAL 0)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 15)\n (PERCENT-ENROLLED 70)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS MUSIC-EDUCATION)\n (ACADEMIC-EMPHASIS MUSIC-PERFORMANCE)\n (ACADEMIC-EMPHASIS MUSIC-COMPOSITION)\n (ACADEMIC-EMPHASIS MUSIC)\n)\n\n(DEF-INSTANCE BUTLER\n (STATE INDIANA)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:40:60)\n (STUDENT:FACULTY RATIO:15:1)\n (SAT VERBAL 500)\n (SAT MATH 530)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 75)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n (ACADEMIC-EMPHASIS EDUCATION)\n)\n\n(DEF-INSTANCE SYRACUSE\n (STATE NEWYORK)\n (LOCATION SMALL-CITY)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:14:1)\n (SAT VERBAL 535)\n (SAT MATH 560)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 60)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS COMMUNICATIONS)\n (ACADEMIC-EMPHASIS VISUAL-AND-PERFORMING-ARTS)\n)\n\n(DEF-INSTANCE RUTGERS\n (STATE NEWJERSEY)\n (LOCATION SMALL-TOWN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:47:53)\n (STUDENT:FACULTY RATIO:16:1)\n (SAT VERBAL 550)\n (SAT MATH 600)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS PHYSICAL-SCIENCES)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n (ACADEMIC-EMPHASIS COMPUTER-SCIENCE)\n (ACADEMIC-EMPHASIS POLITICAL-SCIENCE)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-MINNESOTA\n (STATE MINNESOTA)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:55:45)\n (STUDENT:FACULTY RATIO:16:1)\n (SAT VERBAL 490)\n (SAT MATH 557)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:7-10)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS AGRICULTURE)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-MARYLAND\n (STATE MARYLAND)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:55:45)\n (STUDENT:FACULTY RATIO:16:1)\n (SAT VERBAL 468)\n (SAT MATH 529)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:13-17)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ARTS-AND-HUMANITIES)\n)\n\n(DEF-INSTANCE NORTHWESTERN\n (STATE ILLINOIS)\n (LOCATION SMALL-CITY)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:9:1)\n (SAT VERBAL 590)\n (SAT 
MATH 630)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 65)\n (NO-APPLICANTS THOUS:7-10)\n (PERCENT-ADMITTANCE 45)\n (PERCENT-ENROLLED 45)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS JOURNALISM)\n (ACADEMIC-EMPHASIS MUSIC)\n (ACADEMIC-EMPHASIS TECHNOLOGY)\n (ACADEMIC-EMPHASIS EDUCATION)\n)\n\n(DEFPROP SYRACUSE0 T DUPLICATE)\n(DEF-INSTANCE SYRACUSE0\n (STATE NEWYORK)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:55:45)\n (STUDENT:FACULTY RATIO:14:1)\n (SAT VERBAL 535)\n (SAT MATH 560)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 55)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ARCHITECTURE)\n (ACADEMIC-EMPHASIS PUBLIC-COMMUNICATION)\n (ACADEMIC-EMPHASIS COMPUTER-SCIENCE)\n)\n\n(DEF-INSTANCE MICHIGAN-STATE\n (STATE MICHIGAN)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:25:1)\n (SAT VERBAL 450)\n (SAT MATH 500)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 10)\n (NO-APPLICANTS THOUS:17+)\n (PERCENT-ADMITTANCE 90)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 2)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS VETERINARY-MEDICINE)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS MEDICINE)\n (ACADEMIC-EMPHASIS AGRICULTURE)\n (ACADEMIC-EMPHASIS TEACHER-EDUCATION)\n)\n\n(DEFPROP UNIVERSITY-OF-MICHIGAN1 T DUPLICATE)\n(DEF-INSTANCE UNIVERSITY-OF-MICHIGAN1\n (STATE MICHIGAN)\n (LOCATION SMALL-CITY)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:20:1)\n (SAT VERBAL 580)\n (SAT MATH 660)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 30)\n (NO-APPLICANTS THOUS:13-17)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 70)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 5)\n (QUALITY-OF-LIFE SCALE:1-5 5)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n (ACADEMIC-EMPHASIS ENGLISH)\n (ACADEMIC-EMPHASIS FOREIGN-LANGUAGES)\n)\n\n(DEF-INSTANCE MONMOUTH-COLLEGE\n (STATE NEWJERSEY)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:48:52)\n (STUDENT:FACULTY RATIO:15:1)\n (SAT VERBAL 500)\n (SAT MATH 550)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS ACCOUNTING)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n (ACADEMIC-EMPHASIS COMPUTER-SCIENCE)\n (ACADEMIC-EMPHASIS MARKETING)\n)\n\n(DEF-INSTANCE CLARKSON-UNIVERSITY\n (STATE NEWYORK)\n (LOCATION SMALL-CITY)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:80:20)\n (STUDENT:FACULTY RATIO:20:1)\n (SAT VERBAL 535)\n (SAT MATH 640)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 80)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS MANAGEMENT)\n)\n\n(DEFPROP CLARK-UNIVERSITY0 T DUPLICATE)\n(DEF-INSTANCE CLARK-UNIVERSITY0\n (STATE MASSACHUSETTS)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:45:55)\n (STUDENT:FACULTY RATIO:14:1)\n (SAT VERBAL 550)\n 
(SAT MATH 580)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS PSYCHOLOGY)\n (ACADEMIC-EMPHASIS GEOGRAPHY)\n (ACADEMIC-EMPHASIS ARTS)\n)\n\n(DEF-INSTANCE COLORADO-COLLEGE\n (STATE COLORADO)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:1:9)\n (STUDENT:FACULTY RATIO:14:1)\n (SAT VERBAL 600)\n (SAT MATH 600)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 30)\n (PERCENT-ENROLLED 0)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS MINING)\n (ACADEMIC-EMPHASIS MANAGEMENT)\n)\n\n(DEF-INSTANCE SUNY-PLATTSBURGH\n (STATE NEWYORK)\n (LOCATION SMALL-TOWN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:2:3)\n (STUDENT:FACULTY RATIO:14:1)\n (SAT VERBAL 600)\n (SAT MATH 600)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 40)\n (PERCENT-ENROLLED 20)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS COMPUTER-SCIENCE)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE CHALMERS-UNIVERSITY-OF-TECHNOLOGY\n (STATE FOREIGN)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:20:80)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 0)\n (SAT MATH 0)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 100)\n (ACADEMIC-EMPHASIS LOANS)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 20)\n (PERCENT-ENROLLED 90)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 5)\n (QUALITY-OF-LIFE SCALE:1-5 5)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS ARCHITECTURE)\n)\n\n(DEF-INSTANCE GOTHENBURG-UNIVERSITY\n (STATE FOREIGN)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:15:1)\n (SAT VERBAL 0)\n (SAT MATH 0)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 100)\n (ACADEMIC-EMPHASIS LOANS)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 30)\n (PERCENT-ENROLLED 70)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS HUMANITIES)\n (ACADEMIC-EMPHASIS SCIENCE)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEFPROP UNIVERSITY-OF-TULSA0 T DUPLICATE)\n(DEF-INSTANCE UNIVERSITY-OF-TULSA0\n (STATE OKLAHOMA)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:60:40)\n (STUDENT:FACULTY RATIO:16:1)\n (SAT VERBAL 489)\n (SAT MATH 529)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-DENVER\n (STATE COLORADO)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:70:30)\n (STUDENT:FACULTY RATIO:14:1)\n (SAT VERBAL 510)\n (SAT MATH 540)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS MANAGEMENT)\n)\n\n(DEF-INSTANCE HOLY-CROSS\n (STATE MASSACHUSETTS)\n (LOCATION 
SMALL-CITY)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 600)\n (SAT MATH 575)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 0)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS CHEMISTRY)\n (ACADEMIC-EMPHASIS CLASSICS)\n (ACADEMIC-EMPHASIS ECONOMICS)\n (ACADEMIC-EMPHASIS ENGLISH)\n (ACADEMIC-EMPHASIS HISTORY)\n (ACADEMIC-EMPHASIS PHYSICS)\n (ACADEMIC-EMPHASIS POLITICAL-SCIENCE)\n (ACADEMIC-EMPHASIS PSYCHOLOGY)\n)\n\n(DEF-INSTANCE BUCKNELL\n (STATE PENNSYLVANIA)\n (LOCATION SMALL-TOWN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:60:40)\n (STUDENT:FACULTY RATIO:8:1)\n (SAT VERBAL 600)\n (SAT MATH 500)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 75)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 35)\n (PERCENT-ENROLLED 15)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS CHEMISTRY)\n (ACADEMIC-EMPHASIS CLASSICS)\n (ACADEMIC-EMPHASIS ECONOMICS)\n (ACADEMIC-EMPHASIS FOREIGN-LANGUAGES)\n (ACADEMIC-EMPHASIS MATHEMATICS)\n (ACADEMIC-EMPHASIS POLITICAL-SCIENCE)\n (ACADEMIC-EMPHASIS PSYCHOLOGY)\n (MASCOT BISON)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-THE-PACIFIC\n (STATE CALIFORNIA)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:48:52)\n (STUDENT:FACULTY RATIO:17:1)\n (SAT VERBAL 560)\n (SAT MATH 550)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 65)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS TEACHER-EDUCATION)\n (ACADEMIC-EMPHASIS MUSIC)\n (ACADEMIC-EMPHASIS PHARMACY)\n)\n\n(DEFPROP UNIVERSITY-OF-THE-SOUTH0 T DUPLICATE)\n(DEF-INSTANCE UNIVERSITY-OF-THE-SOUTH0\n (STATE TENNESSEE)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:40:60)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 540)\n (SAT MATH 560)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 75)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS AMERICAN-STUDIES)\n (ACADEMIC-EMPHASIS ASIAN//ORIENTAL-STUDIES)\n (ACADEMIC-EMPHASIS BIOLOGY)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-TOLEDO\n (STATE OHIO)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:15-20)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:20:1)\n (SAT VERBAL 450)\n (SAT MATH 450)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 75)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 95)\n (PERCENT-ENROLLED 70)\n (ACADEMICS SCALE:1-5 1)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS MUSIC)\n (ACADEMIC-EMPHASIS PHARMACY)\n (ACADEMIC-EMPHASIS TEACHER-EDUCATION)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-THE-DISTRICT-OF-COLUMBIA\n (STATE DISTRICT-OF-COLUMBIA)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:44:56)\n (STUDENT:FACULTY RATIO:14:1)\n (SAT VERBAL 500)\n (SAT MATH 510)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 30)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 95)\n (PERCENT-ENROLLED 80)\n (ACADEMICS SCALE:1-5 2)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n 
(ACADEMIC-EMPHASIS COMPUTER-SCIENCE)\n (ACADEMIC-EMPHASIS TEACHER-EDUCATION)\n (ACADEMIC-EMPHASIS HISTORY)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-SOUTHDAKOTA\n (STATE SOUTHDAKOTA)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:19:1)\n (SAT VERBAL ACT-21)\n (SAT MATH ACT-21)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 85)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 80)\n (ACADEMICS SCALE:1-5 2)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n (ACADEMIC-EMPHASIS MATH-AND-SCIENCE)\n)\n\n(DEF-INSTANCE YANKTOWN-COLLEGE\n (STATE SOUTHDAKOTA)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:70:30)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 450)\n (SAT MATH 400)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 95)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 95)\n (PERCENT-ENROLLED 90)\n (ACADEMICS SCALE:1-5 1)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n)\n\n(DEF-INSTANCE BAYLOR-UNIVERSITY\n (STATE TEXAS)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:45:559)\n (STUDENT:FACULTY RATIO:21:1)\n (SAT VERBAL 485)\n (SAT MATH 521)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 65)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 65)\n (PERCENT-ENROLLED 75)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS PHILOSOPHY)\n (ACADEMIC-EMPHASIS ENGLISH)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n)\n\n(DEF-INSTANCE DALLAS-BAPTIST-COLLEGE\n (STATE TEXAS)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:55:45)\n (STUDENT:FACULTY RATIO:19:1)\n (SAT VERBAL ACT-15)\n (SAT MATH ACT-15)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 65)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 100)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 1)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n (ACADEMIC-EMPHASIS ENGLISH)\n)\n\n(DEF-INSTANCE SUNY-BINGHAMTON\n (STATE NEWYORK)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:45:55)\n (STUDENT:FACULTY RATIO:15:1)\n (SAT VERBAL 575)\n (SAT MATH 525)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 80)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n)\n\n(DEF-INSTANCE SUNY-BUFFALO\n (STATE NEWYORK)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:60:40)\n (STUDENT:FACULTY RATIO:14:1)\n (SAT VERBAL 450)\n (SAT MATH 525)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 90)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS ARCHITECTURE-AND-ENVIROMENTAL-DESIGN)\n (ACADEMIC-EMPHASIS ARTS-AND-LETTERS)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS HEALTH-SCIENCE)\n (ACADEMIC-EMPHASIS MANAGEMENT)\n (ACADEMIC-EMPHASIS NATURAL-SCIENCES)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n)\n\n(DEF-INSTANCE SUNY-ALBANY\n (STATE NEWYORK)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:15-20)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:19:1)\n 
(SAT VERBAL 525)\n (SAT MATH 575)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 80)\n (NO-APPLICANTS THOUS:13-17)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ARTS-AND-SCIENCES)\n (ACADEMIC-EMPHASIS PROFESSIONAL-STUDIES)\n)\n\n(DEF-INSTANCE OHIO-STATE\n (STATE OHIO)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:55:45)\n (STUDENT:FACULTY RATIO:16:1)\n (SAT VERBAL 450)\n (SAT MATH 500)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 30)\n (NO-APPLICANTS THOUS:17+)\n (PERCENT-ADMITTANCE 90)\n (PERCENT-ENROLLED 65)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ADMINISTRATIVE-SCIENCE)\n (ACADEMIC-EMPHASIS AGRICULTURE)\n (ACADEMIC-EMPHASIS ARTS-AND-SCIENCES)\n (ACADEMIC-EMPHASIS DENTISTRY)\n (ACADEMIC-EMPHASIS EDUCATION)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS LAW)\n (ACADEMIC-EMPHASIS MEDICINE)\n (ACADEMIC-EMPHASIS OPTOMETRY)\n (ACADEMIC-EMPHASIS PHARMACY)\n (ACADEMIC-EMPHASIS SOCIAL-WORK)\n (ACADEMIC-EMPHASIS VETERINARY-MEDICINE)\n)\n\n(DEFPROP PENN-STATE1 T DUPLICATE)\n(DEF-INSTANCE PENN-STATE1\n (STATE PENNSYLVANIA)\n (LOCATION SMALL-TOWN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:60:40)\n (STUDENT:FACULTY RATIO:20:1)\n (SAT VERBAL 500)\n (SAT MATH 550)\n (EXPENSES THOUS$:4-7)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 55)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 5)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS AGRICULTURE)\n (ACADEMIC-EMPHASIS ARTS-AND-ARCHITECTURE)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS EARTH-AND-MINERAL-SCIENCE)\n (ACADEMIC-EMPHASIS EDUCATION)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS HEALTH)\n (ACADEMIC-EMPHASIS PHYSICAL-EDUCATION)\n (ACADEMIC-EMPHASIS RECREATION)\n (ACADEMIC-EMPHASIS HUMAN-DEVELOPMENT)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n (ACADEMIC-EMPHASIS SCIENCE)\n)\n\n(DEFPROP RUTGERS0 T DUPLICATE)\n(DEF-INSTANCE RUTGERS0\n (STATE NEWJERSEY)\n (LOCATION SMALL-CITY)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:55:45)\n (STUDENT:FACULTY RATIO:16:1)\n (SAT VERBAL 475)\n (SAT MATH 525)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:17+)\n (PERCENT-ADMITTANCE 55)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-WASHINGTON\n (STATE WASHINGTON)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:15:1)\n (SAT VERBAL 525)\n (SAT MATH 575)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 35)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 65)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n)\n\n(DEFPROP UNIVERSITY-OF-TEXAS0 T DUPLICATE)\n(DEF-INSTANCE UNIVERSITY-OF-TEXAS0\n (STATE TEXAS)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:55:45)\n (STUDENT:FACULTY RATIO:16:1)\n (SAT VERBAL 475)\n (SAT MATH 525)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 25)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 65)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n)\n\n(DEFPROP SUNY-STONY-BROOK0 T DUPLICATE)\n(DEF-INSTANCE SUNY-STONY-BROOK0\n (STATE 
NEWYORK)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:55:45)\n (STUDENT:FACULTY RATIO:15:1)\n (SAT VERBAL 500)\n (SAT MATH 575)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 75)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS ALLIED-HEALTH-PROFESSIONS)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-NORTHCAROLINA\n (STATE NORTHCAROLINA)\n (LOCATION SMALL-CITY)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:3:4)\n (STUDENT:FACULTY RATIO:14:1)\n (SAT VERBAL 510)\n (SAT MATH 552)\n (EXPENSES THOUS$:4-7)\n (NO-APPLICANTS THOUS:17+)\n (PERCENT-ENROLLED 80)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS ARTS-AND-SCIENCES)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS EDUCATION)\n (ACADEMIC-EMPHASIS JOURNALISM)\n (ACADEMIC-EMPHASIS MEDICINE)\n (ACADEMIC-EMPHASIS NURSING)\n (ACADEMIC-EMPHASIS PHARMACY)\n (ACADEMIC-EMPHASIS PUBLIC-HEALTH)\n)\n\n(DEF-INSTANCE NORTHCAROLINA-STATE-UNIVERSITY\n (STATE NORTHCAROLINA)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:9:4)\n (STUDENT:FACULTY RATIO:14:1)\n (SAT VERBAL 466)\n (SAT MATH 538)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 0)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 0)\n (PERCENT-ENROLLED 0)\n (ACADEMICS SCALE:1-5 2)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS AGRICULTURE)\n (ACADEMIC-EMPHASIS EDUCATION)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS HUMANITIES)\n (ACADEMIC-EMPHASIS MATHEMATICS)\n (ACADEMIC-EMPHASIS TEXTILES)\n)\n\n(DEFPROP BRYN-MAWR0 T DUPLICATE)\n(DEF-INSTANCE BRYN-MAWR0\n (STATE PENNSYLVANIA)\n (LOCATION SMALL-TOWN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:0:100)\n (STUDENT:FACULTY RATIO:8:1)\n (SAT VERBAL 600)\n (SAT MATH 600)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 70)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS CHEMISTRY)\n (ACADEMIC-EMPHASIS CLASSICS)\n (ACADEMIC-EMPHASIS POLITICAL-SCIENCE)\n (ACADEMIC-EMPHASIS GOVERNMENT)\n (ACADEMIC-EMPHASIS ROMANCE-LANGUAGES)\n)\n\n(DEF-INSTANCE WALLA-WALLA-COLLEGE\n (STATE WASHINGTON)\n (LOCATION SMALL-TOWN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:54:46)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 400)\n (SAT MATH 400)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 70)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 2)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ENGLISH)\n (ACADEMIC-EMPHASIS INDUSTRIAL-ARTS)\n (ACADEMIC-EMPHASIS BIBLICAL-LANGUAGES)\n)\n\n(DEF-INSTANCE VASSAR\n (STATE NEWYORK)\n (LOCATION SMALL-TOWN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:40:60)\n (STUDENT:FACULTY RATIO:5:1)\n (SAT VERBAL 600)\n (SAT MATH 600)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS FILM-STUDIES)\n (ACADEMIC-EMPHASIS FINE-ARTS)\n (ACADEMIC-EMPHASIS CLASSICS)\n (ACADEMIC-EMPHASIS ENGLISH)\n)\n\n(DEF-INSTANCE 
COLLEGE-OF-NEWROCHELLE\n (STATE NEWYORK)\n (LOCATION SMALL-TOWN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:0:100)\n (STUDENT:FACULTY RATIO:15:1)\n (SAT VERBAL 600)\n (SAT MATH 600)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 80)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 2)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS EDUCATION)\n (ACADEMIC-EMPHASIS SOCIAL-WORK)\n (ACADEMIC-EMPHASIS NURSING)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-MASSACHUSETTS-AMHERST\n (STATE MASSACHUSETTS)\n (LOCATION SMALL-TOWN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:18:1)\n (SAT VERBAL 475)\n (SAT MATH 525)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:13-17)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 45)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 5)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS CHEMISTRY)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n)\n\n(DEFPROP UNIVERSITY-OF-VIRGINIA0 T DUPLICATE)\n(DEF-INSTANCE UNIVERSITY-OF-VIRGINIA0\n (STATE VIRGINIA)\n (LOCATION SMALL-CITY)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:11:1)\n (SAT VERBAL 575)\n (SAT MATH 625)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 30)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 35)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 5)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n (ACADEMIC-EMPHASIS COMMERCE)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS ARCHITECTURE)\n)\n\n(DEFPROP SYRACUSE1 T DUPLICATE)\n(DEF-INSTANCE SYRACUSE1\n (STATE NEWYORK)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:52:48)\n (STUDENT:FACULTY RATIO:20:1)\n (SAT VERBAL 600)\n (SAT MATH 600)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-NOTRE-DAME\n (STATE INDIANA)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:70:30)\n (STUDENT:FACULTY RATIO:11:1)\n (SAT VERBAL 600)\n (SAT MATH 600)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS ACCOUNTING)\n (ACADEMIC-EMPHASIS PRE-MED)\n (ACADEMIC-EMPHASIS POLITICAL-SCIENCE)\n (ACADEMIC-EMPHASIS GOVERNMENT)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-COLORADO\n (STATE COLORADO)\n (LOCATION SMALL-CITY)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:60:40)\n (STUDENT:FACULTY RATIO:16:1)\n (SAT VERBAL 500)\n (SAT MATH 550)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:13-17)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 5)\n (QUALITY-OF-LIFE SCALE:1-5 5)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS MOLECULAR-BIOLOGY)\n (ACADEMIC-EMPHASIS PHYSICS)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE COLORADO-SCHOOL-OF-MINES\n (STATE COLORADO)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:80:20)\n (STUDENT:FACULTY 
RATIO:18:1)\n (SAT VERBAL 550)\n (SAT MATH 600)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEFPROP UNIVERSITY-OF-WASHINGTON0 T DUPLICATE)\n(DEF-INSTANCE UNIVERSITY-OF-WASHINGTON0\n (STATE WASHINGTON)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:15:1)\n (SAT VERBAL 525)\n (SAT MATH 575)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 35)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 65)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n)\n\n(DEFPROP SUNY-STONY-BROOK1 T DUPLICATE)\n(DEF-INSTANCE SUNY-STONY-BROOK1\n (STATE NEWYORK)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:55:45)\n (STUDENT:FACULTY RATIO:15:1)\n (SAT VERBAL 500)\n (SAT MATH 575)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 75)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS ALLIED-HEALTH-PROFESSIONS)\n)\n\n(DEF-INSTANCE WILLIAM-PATERSON-COLLEGE\n (STATE NEWJERSEY)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:1:1)\n% (STUDENT:FACULTY N/A)%\n (SAT VERBAL 475)\n (SAT MATH 500)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n (ACADEMIC-EMPHASIS FINE-ARTS)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n (ACADEMIC-EMPHASIS SCIENCE)\n (ACADEMIC-EMPHASIS COMPUTER-SCIENCE)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS EDUCATION)\n)\n\n(DEFPROP OREGON-INSTITUTE-OF-TECHNOLOGY0 T DUPLICATE)\n(DEF-INSTANCE OREGON-INSTITUTE-OF-TECHNOLOGY0\n (STATE OREGON)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:80:20)\n% (STUDENT:FACULTY N/A)%\n (SAT VERBAL 460)\n (SAT MATH 480)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 90)\n (PERCENT-ENROLLED 80)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS HEALTH-SERVICES)\n (ACADEMIC-EMPHASIS COMPUTER-SCIENCE)\n)\n\n(DEF-INSTANCE ECOLE-NATIONALE-SUPERIEURE-DE-TELECOMMUNICATION-DE-PARIS\n (STATE FOREIGN)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:80:20)\n (STUDENT:FACULTY RATIO:5:1)\n (SAT VERBAL 0)\n (SAT MATH 0)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 20)\n (NO-APPLICANTS THOUS:7-10)\n (PERCENT-ADMITTANCE 5)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n(DEF-INSTANCE ECOLE-POLYTECHNIQUE\n (STATE FOREIGN)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:90:10)\n (STUDENT:FACULTY RATIO:5:1)\n (SAT VERBAL 0)\n (SAT MATH 0)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 100)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 10)\n (PERCENT-ENROLLED 95)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 5)\n (QUALITY-OF-LIFE SCALE:1-5 
5)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS ECONOMICS)\n (ACADEMIC-EMPHASIS SCIENCE)\n)\n\n(DEF-INSTANCE UNIVERSITE-SAINT-JOSEPH\n (STATE FOREIGN)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:60:40)\n (STUDENT:FACULTY RATIO:6:1)\n (SAT VERBAL 0)\n (SAT MATH 0)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 10)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 80)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS MEDICAL-SCHOOL)\n (ACADEMIC-EMPHASIS ECONOMICS)\n (ACADEMIC-EMPHASIS HUMANITIES)\n)\n\n(DEF-INSTANCE AMERICAN-UNIVERSITY-OF-BEIRUT\n (STATE FOREIGN)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:60:40)\n (STUDENT:FACULTY RATIO:4:1)\n (SAT VERBAL 0)\n (SAT MATH 0)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 20)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 80)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS MEDICAL-SCHOOL)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS HUMANITIES)\n)\n\n(DEFPROP POLYTECHNIC-INSTITUTE-OF-NEWYORK0 T DUPLICATE)\n(DEF-INSTANCE POLYTECHNIC-INSTITUTE-OF-NEWYORK0\n (STATE NEWYORK)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:90:10)\n (SAT VERBAL 500)\n (SAT MATH 600)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 90)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n)\n\n(DEFPROP PENN-STATE0 T DUPLICATE)\n(DEF-INSTANCE PENN-STATE0\n (STATE PENNSYLVANIA)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:60:40)\n (SAT VERBAL 510)\n (SAT MATH 570)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:17+)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 2)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS MEDICINE)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-OREGON\n (STATE OREGON)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:51:49)\n (SAT VERBAL 475)\n (SAT MATH 515)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 30)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 70)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n (ACADEMIC-EMPHASIS LETTERS)\n (ACADEMIC-EMPHASIS PHYSICAL-SCIENCES)\n)\n\n(DEF-INSTANCE PURDUE\n (STATE INDIANA)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:58:42)\n (SAT VERBAL 475)\n (SAT MATH 525)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:13-17)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE UNIVERSITY-WEST-VIRGINIA\n (STATE WESTVIRGINIA)\n (LOCATION SMALL-CITY)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:55:45)\n% (STUDENT:FACULTY N/A)%\n% (SAT VERBAL N/A)%\n% (SAT MATH N/A)%\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 80)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 
95)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 2)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ARTS-AND-HUMANITIES)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS MOUNTAINEERS)\n)\n\n(DEF-INSTANCE DEPAUL-UNIVERSITY\n (STATE ILLINOIS)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:50:50)\n% (STUDENT:FACULTY N/A)%\n (SAT VERBAL 540)\n (SAT MATH 540)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 55)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 75)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 2)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS BUSINESS-AND-MANAGEMENT)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n (ACADEMIC-EMPHASIS HEALTH-PROFESSIONS)\n (ACADEMIC-EMPHASIS INTERDISCIPLINARY-STUDIES)\n (MASCOT BLUE-DEVILS)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-GEORGIA\n (STATE GEORGIA)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:50:50)\n% (STUDENT:FACULTY N/A)%\n (SAT VERBAL 492)\n (SAT MATH 534)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:7-10)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS BUSINESS-AND-MANAGEMENT)\n (ACADEMIC-EMPHASIS AGRICULTURE)\n (ACADEMIC-EMPHASIS NATURAL-RESOURCES)\n (ACADEMIC-EMPHASIS COMMUNICATIONS)\n (ACADEMIC-EMPHASIS HOME-ECONOMICS)\n)\n-------\n-------\n\nFrom [email protected] Mon Feb 22 20:56:39 1988\nReceived: from zodiac by meridian (5.52/4.7)\nReceived: from Jessica.Stanford.EDU by ads.com (5.58/1.9)\n id AA04605; Mon, 22 Feb 88 21:03:07 PST\nReceived: from Portia.Stanford.EDU by jessica.Stanford.EDU with TCP; Mon, 22 Feb\n 88 21:01:26 PST\nReceived: from columbia.edu (COLUMBIA.EDU.ARPA) by Portia.STANFORD.EDU\n(1.2/Ultrix2.0-B)\n id AA11448; Mon, 22 Feb 88 20:48:42 pst\nReceived: from CS.COLUMBIA.EDU by columbia.edu (5.54/1.14)\n id AA10178; Mon, 22 Feb 88 23:47:21 EST\nMessage-Id: <[email protected]>\nDate: Fri 22 Jan 88 02:49:55-EST\nFrom: The Mailer Daemon <[email protected]>\nTo: [email protected]\nSubject: Message of 18-Jan-88 20:07:53\nResent-Date: Mon 22 Feb 88 23:43:29-EST\nResent-From: Michael Lebowitz <[email protected]>\nResent-To: [email protected]\nResent-Message-Id: <[email protected]>\nStatus: R\n\nMessage undeliverable and dequeued after 3 days:\nsouders%[email protected]: Cannot connect to host\n ------------\nDate: Mon 18 Jan 88 20:07:53-EST\nFrom: Michael Lebowitz <[email protected]>\nSubject: bigger file part 1\nTo: souders%[email protected]\nIn-Reply-To: <[email protected]>\nMessage-ID: <[email protected]>\n\n% This file contains all of the colleges instances in a cleaned form. 
Although%\n% there may be errors in the values there shouldn\'t be any typos.%\n% Note that there are multiple instances for some colleges - they can be %\n% found in the list $multiple-instances$%\n\n% U.Wolz, October 17, 1985 %\n\n\n(SETQ $multiple-instances$ \'(SUNY-STONY-BROOK BOSTON-COLLEGE RENSSELAER RUTGERS\nGEORGETOWN RUTGERS BENNINGTON BRANDEIS UNIVERSITY-OF-WASHINGTON\nUNIVERSITY-OF-ROCHESTER UNIVERSITY-OF-PITTSBURGH CLARK-UNIVERSITY GEORGETOWN\nBRANDEIS UNIVERSITY-OF-TEXAS GEORGETOWN YALE NORTHWESTERN SUNY-BINGHAMTON\nSUNY-ALBANY VASSAR UNIVERSITY-OF-ROCHESTER FORDHAM UNIVERSITY-OF-CHICAGO\nUNIVERSITY-OF-MASSACHUSETTS-AMHERST UNIVERSITY-OF-PITTSBURGH RUTGERS TUFTS\nPOLYTECHNIC-INSTITUTE-OF-NEWYORK OREGON-STATE UNIVERSITY-OF-MICHIGAN PENN-STATE\nPOLYTECHNIC-INSTITUTE-OF-NEWYORK OREGON-INSTITUTE-OF-TECHNOLOGY\nSUNY-STONEWYORK-BROOK UNIVERSITY-OF-WASHINGTON SYRACUSE UNIVERSITY-OF-VIRGINIA\nBRYN-MAWR UNIVERSITY-OF-TEXAS RUTGERS UNIVERSITY-OF-THE-SOUTH0\nUNIVERSITY-OF-TULSA0 CLARK-UNIVERSITY0 SYRACUSE0))\n\n(DEF-INSTANCE ADELPHI\n (STATE NEWYORK)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:30:70)\n (STUDENT:FACULTY RATIO:15:1)\n (SAT VERBAL 500)\n (SAT MATH 475)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 2)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS BIOLOGY)\n)\n\n(DEF-INSTANCE ARIZONA-STATE\n (STATE ARIZONA)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:20:1)\n (SAT VERBAL 450)\n (SAT MATH 500)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:17+)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 5)\n (ACADEMIC-EMPHASIS BUSINESS-EDUCATION)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS ACCOUNTING)\n (ACADEMIC-EMPHASIS FINE-ARTS)\n)\n\n(DEF-INSTANCE BOSTON-COLLEGE\n (STATE MASSACHUSETTS)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (RELIGIOUS-BACKING CATHOLIC)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:40:60)\n (STUDENT:FACULTY RATIO:20:1)\n (SAT VERBAL 500)\n (SAT MATH 550)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 5)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ECONOMICS)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS ENGLISH)\n)\n\n(DEF-INSTANCE BOSTON-UNIVERSITY\n (STATE MASSACHUSETTS)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:45:55)\n (STUDENT:FACULTY RATIO:12:1)\n (SAT VERBAL 550)\n (SAT MATH 575)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:13-17)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS PSYCHOLOGY)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE BROWN\n (STATE RHODEISLAND)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:11:1)\n (SAT VERBAL 625)\n (SAT MATH 650)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 20)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL 
SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 5)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS HISTORY)\n (ACADEMIC-EMPHASIS ARTS-AND-SCIENCES)\n)\n\n(DEF-INSTANCE CAL-TECH\n (STATE CALIFORNIA)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:70:30)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 650)\n (SAT MATH 780)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 70)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 20)\n (PERCENT-ENROLLED 90)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 1)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE CARNEGIE-MELLON\n (STATE PENNSYLVANIA)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:60:40)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 600)\n (SAT MATH 650)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 70)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 40)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE CASE-WESTERN\n (STATE OHIO)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:70:30)\n (STUDENT:FACULTY RATIO:9:1)\n (SAT VERBAL 550)\n (SAT MATH 650)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 65)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 85)\n (PERCENT-ENROLLED 35)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS MANAGEMENT)\n (ACADEMIC-EMPHASIS ARTS-AND-SCIENCES)\n)\n\n(DEF-INSTANCE CCNY\n (STATE NEWYORK)\n (LOCATION URBAN)\n (CONTROL CITY)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:60:40)\n (STUDENT:FACULTY RATIO:15:1)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 80)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS ARTS-AND-SCIENCES)\n (ACADEMIC-EMPHASIS ELECTRICAL-ENGINEERING)\n (ACADEMIC-EMPHASIS ARCHITECTURE)\n (ACADEMIC-EMPHASIS BIOMED)\n (ACADEMIC-EMPHASIS EDUCATION)\n (ACADEMIC-EMPHASIS NURSING)\n (ACADEMIC-EMPHASIS PERFORMING-ARTS)\n)\n\n(DEF-INSTANCE COLGATE\n (STATE NEWYORK)\n (LOCATION SMALL-TOWN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:55:45)\n (STUDENT:FACULTY RATIO:13:1)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 40)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS ENGLISH)\n)\n\n(DEF-INSTANCE COLUMBIA\n (STATE NEWYORK)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:70:30)\n (STUDENT:FACULTY RATIO:9:1)\n (SAT VERBAL 625)\n (SAT MATH 650)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 30)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE COOPER-UNION\n (STATE NEWYORK)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:70:30)\n (STUDENT:FACULTY RATIO:6:1)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 35)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 20)\n (PERCENT-ENROLLED 65)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 1)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n 
(ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS ARCHITECTURE)\n (ACADEMIC-EMPHASIS FINE-ARTS)\n)\n\n(DEF-INSTANCE CORNELL\n (STATE NEWYORK)\n (LOCATION SMALL-CITY)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:55:45)\n (STUDENT:FACULTY RATIO:7:1)\n (SAT VERBAL 600)\n (SAT MATH 650)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:17+)\n (PERCENT-ADMITTANCE 30)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS AGRICULTURE)\n (ACADEMIC-EMPHASIS ARCHITECTURE)\n (ACADEMIC-EMPHASIS ARTS-AND-SCIENCES)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS HOTEL-ADMINISTRATION)\n (ACADEMIC-EMPHASIS HUMAN-ECOLOGY)\n (ACADEMIC-EMPHASIS INDUSTRIAL:LABOR-RELATIONS)\n)\n\n(DEF-INSTANCE DARTMOUTH\n (STATE NEWHAMPSHIRE)\n (LOCATION SMALL-TOWN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:60:40)\n (STUDENT:FACULTY RATIO:7:1)\n (SAT VERBAL 625)\n (SAT MATH 650)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:7-10)\n (PERCENT-ADMITTANCE 20)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 5)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE FLORIDA-TECH\n (STATE FLORIDA)\n (LOCATION SMALL-CITY)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:80:20)\n (STUDENT:FACULTY RATIO:20:1)\n (SAT VERBAL 500)\n (SAT MATH 550)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS MARINE-BIOLOGY)\n (ACADEMIC-EMPHASIS APPLIED-TECHNOLOGY)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE FLORIDA-STATE\n (STATE FLORIDA)\n (LOCATION SMALL-CITY)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:15-20)\n (MALE:FEMALE RATIO:45:55)\n (STUDENT:FACULTY RATIO:20:1)\n (SAT VERBAL 500)\n (SAT MATH 525)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:7-10)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n)\n\n(DEF-INSTANCE GEORGIA-TECH\n (STATE GEORGIA)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:80:20)\n (STUDENT:FACULTY RATIO:20:1)\n (SAT VERBAL 525)\n (SAT MATH 625)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 20)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE HARVARD\n (STATE MASSACHUSETTS)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:65:35)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 700)\n (SAT MATH 675)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:13-17)\n (PERCENT-ADMITTANCE 20)\n (PERCENT-ENROLLED 80)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS HISTORY)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS HISTORY)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE HOFSTRA\n (STATE NEWYORK)\n (LOCATION SUBURBAN)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:50:50)\n (SAT VERBAL 500)\n (SAT MATH 525)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 80)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 50)\n 
(ACADEMICS SCALE:1-5 2)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS ACCOUNTING)\n (ACADEMIC-EMPHASIS COMPUTER-SCIENCE)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE ILLINOIS-TECH\n (STATE ILLINOIS)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:90:10)\n (STUDENT:FACULTY RATIO:25:1)\n (SAT VERBAL 450)\n (SAT MATH 575)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 65)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 1)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ARCHITECTURE)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE JOHNS-HOPKINS\n (STATE MARYLAND)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:70:30)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 625)\n (SAT MATH 675)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 70)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ARTS-AND-SCIENCES)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS POLITICAL-SCIENCE)\n (ACADEMIC-EMPHASIS CHEMISTRY)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE MIT\n (STATE MASSACHUSETTS)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:75:25)\n (STUDENT:FACULTY RATIO:5:1)\n (SAT VERBAL 650)\n (SAT MATH 750)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 30)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS SCIENCE)\n (ACADEMIC-EMPHASIS ELECTRICAL-ENGINEERING)\n (ACADEMIC-EMPHASIS MECHANICAL-ENGINEERING)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-MONTANA\n (STATE MONTANA)\n (LOCATION SMALL-CITY)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:65:35)\n (STUDENT:FACULTY RATIO:21:1)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 65)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 90)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS MINERAL-ENGINEERING)\n)\n\n(DEF-INSTANCE MORGAN-STATE\n (STATE MARYLAND)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:40:60)\n (STUDENT:FACULTY RATIO:13:1)\n (SAT VERBAL 300)\n (SAT MATH 325)\n (EXPENSES THOUS$:4-)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 2)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS ACCOUNTING)\n)\n\n(DEF-INSTANCE NEWJERSEY-TECH\n (STATE NEWJERSEY)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:90:10)\n (STUDENT:FACULTY RATIO:25:1)\n (SAT VERBAL 450)\n (SAT MATH 575)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 65)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 1)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS ARCHITECTURE)\n)\n\n(DEF-INSTANCE NYU\n (STATE NEWYORK)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:7:1)\n (SAT VERBAL 550)\n (SAT MATH 575)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 
50)\n (NO-APPLICANTS THOUS:7-10)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS JOURNALISM)\n (ACADEMIC-EMPHASIS PSYCHOLOGY)\n)\n\n(DEF-INSTANCE PRATT\n (STATE NEWYORK)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:60:40)\n (STUDENT:FACULTY RATIO:7:1)\n (SAT VERBAL 425)\n (SAT MATH 475)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 80)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 1)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS ARCHITECTURE)\n (ACADEMIC-EMPHASIS ART-AND-DESIGN)\n (ACADEMIC-EMPHASIS ELECTRICAL-ENGINEERING)\n (ACADEMIC-EMPHASIS ARTS-AND-SCIENCES)\n)\n\n(DEF-INSTANCE PRINCETON\n (STATE NEWJERSEY)\n (LOCATION SMALL-TOWN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:65:35)\n (STUDENT:FACULTY RATIO:7:1)\n (SAT VERBAL 650)\n (SAT MATH 675)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 20)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS HISTORY)\n (ACADEMIC-EMPHASIS ECONOMICS)\n (ACADEMIC-EMPHASIS POLITICAL-SCIENCE)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE RENSSELAER\n (STATE NEWYORK)\n (LOCATION SMALL-CITY)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:80:20)\n (STUDENT:FACULTY RATIO:11:1)\n (SAT VERBAL 575)\n (SAT MATH 700)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:7-10)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS ARCHITECTURE)\n (ACADEMIC-EMPHASIS MANAGEMENT)\n)\n\n(DEF-INSTANCE ROCHESTER-TECH\n (STATE NEWYORK)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:65:35)\n (STUDENT:FACULTY RATIO:14:1)\n (SAT VERBAL 525)\n (SAT MATH 575)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:7-10)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS COMPUTER-SCIENCE)\n (ACADEMIC-EMPHASIS ARTS-AND-SCIENCES)\n)\n\n(DEF-INSTANCE STANFORD\n (STATE CALIFORNIA)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:55:45)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 625)\n (SAT MATH 675)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 45)\n (NO-APPLICANTS THOUS:13-17)\n (PERCENT-ADMITTANCE 20)\n (PERCENT-ENROLLED 70)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 5)\n (ACADEMIC-EMPHASIS ECONOMICS)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS ENGLISH)\n (ACADEMIC-EMPHASIS ARTS-AND-SCIENCES)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE STEVENS\n (STATE NEWJERSEY)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:80:20)\n (STUDENT:FACULTY RATIO:13:1)\n (SAT VERBAL 500)\n (SAT MATH 625)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 65)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n 
(ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE TEMPLE\n (STATE PENNSYLVANIA)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:15-20)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:11:1)\n (SAT VERBAL 475)\n (SAT MATH 500)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 2)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS ACCOUNTING)\n (ACADEMIC-EMPHASIS COMPUTER-SCIENCE)\n)\n\n(DEF-INSTANCE TEXAS-A&M\n (STATE TEXAS)\n (LOCATION SMALL-CITY)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:60:40)\n (STUDENT:FACULTY RATIO:12:1)\n (SAT VERBAL 475)\n (SAT MATH 550)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 20)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 70)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS MARINE-BIOLOGY)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-CALIFORNIA-BERKELEY\n (STATE CALIFORNIA)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:55:45)\n (STUDENT:FACULTY RATIO:11:1)\n (SAT VERBAL 530)\n (SAT MATH 600)\n (EXPENSES THOUS$:4-7)\n (NO-APPLICANTS THOUS:13-17)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 70)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS ENGLISH)\n (ACADEMIC-EMPHASIS GOVERNMENT)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-CALIFORNIA-DAVIS\n (STATE CALIFORNIA)\n (LOCATION SMALL-CITY)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:15:1)\n (SAT VERBAL 500)\n (SAT MATH 550)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:7-10)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 70)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS PSYCHOLOGY)\n (ACADEMIC-EMPHASIS ECONOMICS)\n)\n\n(DEF-INSTANCE UCLA\n (STATE CALIFORNIA)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:11:1)\n (SAT VERBAL 500)\n (SAT MATH 550)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 70)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS ECONOMICS)\n (ACADEMIC-EMPHASIS ENGLISH)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-CALIFORNIA-SAN-DIEGO\n (STATE CALIFORNIA)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:55:45)\n (STUDENT:FACULTY RATIO:15:1)\n (SAT VERBAL 550)\n (SAT MATH 600)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 25)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 65)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-CALIFORNIA-SANTA-CRUZ\n (STATE CALIFORNIA)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:18:1)\n (SAT VERBAL 525)\n (SAT MATH 550)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 65)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 
3)\n (QUALITY-OF-LIFE SCALE:1-5 5)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS PSYCHOLOGY)\n (ACADEMIC-EMPHASIS ARTS-AND-SCIENCES)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-MAINE\n (STATE MAINE)\n (LOCATION SMALL-TOWN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:55:45)\n (STUDENT:FACULTY RATIO:15:1)\n (SAT VERBAL 500)\n (SAT MATH 500)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 70)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 90)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 2)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-OKLAHOMA\n (STATE OKLAHOMA)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:60:40)\n (STUDENT:FACULTY RATIO:20:1)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 30)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 90)\n (PERCENT-ENROLLED 70)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ARTS-AND-SCIENCES)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-PENNSYLVANIA\n (STATE PENNSYLVANIA)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:60:40)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 600)\n (SAT MATH 650)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 40)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n (ACADEMIC-EMPHASIS MANAGEMENT)\n (ACADEMIC-EMPHASIS ECONOMICS)\n (ACADEMIC-EMPHASIS NURSING)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-SAN-FRANCISCO\n (STATE CALIFORNIA)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:13:1)\n (SAT VERBAL 450)\n (SAT MATH 525)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 70)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS ARTS-AND-SCIENCES)\n)\n\n(DEF-INSTANCE USC\n (STATE CALIFORNIA)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:60:40)\n (STUDENT:FACULTY RATIO:18:1)\n (SAT VERBAL 475)\n (SAT MATH 525)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS PSYCHOLOGY)\n)\n\n(DEF-INSTANCE WORCESTER\n (STATE MASSACHUSETTS)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:80:20)\n (STUDENT:FACULTY RATIO:12:1)\n (SAT VERBAL 550)\n (SAT MATH 650)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 70)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE YALE\n (STATE CONNECTICUT)\n (LOCATION SMALL-CITY)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:55:45)\n (STUDENT:FACULTY RATIO:5:1)\n (SAT VERBAL 675)\n (SAT MATH 675)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 20)\n (PERCENT-ENROLLED 60)\n (ACADEMICS 
SCALE:1-5 5)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS HISTORY)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS ENGLISH)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE PENN-STATE\n (STATE PENNSYLVANIA)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:60:40)\n (STUDENT:FACULTY RATIO:10:13)\n (SAT VERBAL 620)\n (SAT MATH 680)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 40)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS LAW)\n (ACADEMIC-EMPHASIS MEDICAL)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEFPROP UNIVERSITY-OF-PITTSBURGH2 T DUPLICATE)\n(DEF-INSTANCE UNIVERSITY-OF-PITTSBURGH2\n (STATE PENNSYLVANIA)\n (LOCATION URBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 480)\n (SAT MATH 530)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:17+)\n (PERCENT-ADMITTANCE 75)\n (PERCENT-ENROLLED 55)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 5)\n (ACADEMIC-EMPHASIS MANAGEMENT)\n (ACADEMIC-EMPHASIS LAW)\n (ACADEMIC-EMPHASIS CHEMISTRY)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-MICHIGAN\n (STATE MICHIGAN)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:10:8)\n (STUDENT:FACULTY RATIO:15:1)\n (SAT VERBAL 540)\n (SAT MATH 600)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:13-17)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 50)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS AEROSPACE)\n (ACADEMIC-EMPHASIS CHEMICAL-ENGIREERING)\n (ACADEMIC-EMPHASIS IEOR)\n (ACADEMIC-EMPHASIS HUMANITIES)\n)\n\n(DEF-INSTANCE NORTHEASTERN\n (STATE MASSACHUSETTS)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:2:1)\n (STUDENT:FACULTY RATIO:22:1)\n% (SAT VERBAL N/A) %\n% (SAT MATH N/A) %\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 55)\n (NO-APPLICANTS THOUS:7-10)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 100)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS HUMANITIES)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n)\n\n(DEF-INSTANCE RICE\n (STATE TEXAS)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:2:1)\n (STUDENT:FACULTY RATIO:9:1)\n (SAT VERBAL 621)\n (SAT MATH 671)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 80)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 35)\n (PERCENT-ENROLLED 55)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS ARCHITECTURE)\n (ACADEMIC-EMPHASIS HUMANITIES)\n)\n\n\n(DEF-INSTANCE NOTRE-DAME\n (STATE INDIANA)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:15-20)\n (MALE:FEMALE RATIO:5:2)\n (STUDENT:FACULTY RATIO:12:1)\n (SAT VERBAL 570)\n (SAT MATH 640)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:7-10)\n (PERCENT-ADMITTANCE 35)\n (PERCENT-ENROLLED 60)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS HUMANITIES)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE NEWENGLAND-COLLEGE\n (STATE NEWHAMPSHIRE)\n (LOCATION SMALL-TOWN)\n 
(CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:50:1)\n (STUDENT:FACULTY RATIO:13:1)\n (SAT VERBAL 590)\n (SAT MATH 590)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 95)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS SCIENCE )\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n\n(DEF-INSTANCE SUNY-STONY-BROOK\n (STATE NEWYORK)\n (LOCATION SMALL-TOWN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:20+)\n (MALE:FEMALE RATIO:1:1)\n (STUDENT:FACULTY RATION:30:1)\n (SAT VERBAL 0)\n (SAT MATH 0)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 15)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 90)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n)\n\n(DEF-INSTANCE SUFFOLK-COMMUNITY-COLLEGE\n (STATE NEWYORK)\n (LOCATION SMALL-TOWN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:1:100)\n (STUDENT:FACULTY RATIO:25:1)\n (SAT VERBAL 500)\n (SAT MATH 500)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 15)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 95)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n)\n\n(DEF-INSTANCE CLARK-UNIVERSITY\n (STATE MASSACHUSETTS)\n (LOCATION SMALL-CITY)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:13:1)\n (SAT VERBAL 550)\n (SAT MATH 576)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 35)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS PSYCHOLOGY)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-HARTFORD\n (STATE CONNECTICUT)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:45:55)\n (STUDENT:FACULTY RATIO:13:1)\n (SAT VERBAL 445)\n (SAT MATH 491)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 75)\n (PERCENT-ENROLLED 35)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE LEWIS-AND-CLARK\n (STATE OREGON)\n (LOCATION SMALL-CITY)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:50:50)\n% (STUDENT:FACULTY RATIO:?)%\n (SAT VERBAL 530)\n (SAT MATH 550)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n)\n\n(DEF-INSTANCE BENNINGTON\n (STATE VERMONT)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:30:70)\n (STUDENT:FACULTY RATIO:9:1)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 70)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (ACADEMIC-EMPHASIS NATURAL-SCIENCES)\n (ACADEMIC-EMPHASIS MATHEMATICS)\n (ACADEMIC-EMPHASIS MUSIC)\n)\n\n(DEFPROP RICE0 T DUPLICATE)\n(DEF-INSTANCE RICE0\n (STATE TEXAS)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:70:30)\n (STUDENT:FACULTY RATIO:9:1)\n (SAT VERBAL 650)\n (SAT MATH 650)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 70)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 40)\n 
(PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 5)\n (SOCIAL SCALE:1-5 4)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS PHILOSOPHY)\n (ACADEMIC-EMPHASIS MATH)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE SMITH\n (STATE MASSACHUSETTS)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:1:100)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 610)\n (SAT MATH 600)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 30)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 20)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (ACADEMIC-EMPHASIS EDUCATION)\n (ACADEMIC-EMPHASIS MATH-AND-SCIENCE)\n (ACADEMIC-EMPHASIS ENGLISH)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-THE-SOUTH\n (STATE TENNESSEE)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:60:40)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 550)\n (SAT MATH 600)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 50)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ENGLISH)\n (ACADEMIC-EMPHASIS PHILOSOPHY)\n (ACADEMIC-EMPHASIS PERFORMING-ARTS)\n (ACADEMIC-EMPHASIS PRE-PROFESSIONAL)\n)\n\n(DEF-INSTANCE POLYTECHNIC-INSTITUTE-OF-NEWYORK\n (STATE NEWYORK)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:87:13)\n (STUDENT:FACULTY RATIO:8:1)\n (SAT VERBAL 600)\n (SAT MATH 600)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 80)\n (NO-APPLICANTS THOUS:7-10)\n (PERCENT-ADMITTANCE 75)\n (PERCENT-ENROLLED 45)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS ARTS-AND-SCIENCES)\n (ACADEMIC-EMPHASIS MANAGEMENT)\n)\n\n(DEF-INSTANCE TUFTS\n (STATE MASSACHUSETTS)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:14:1)\n (SAT VERBAL 600)\n (SAT MATH 600)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 45)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 35)\n (PERCENT-ENROLLED 35)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n (ACADEMIC-EMPHASIS SCIENCE)\n (ACADEMIC-EMPHASIS ENGINEERING)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-VIRGINIA\n (STATE VIRGINIA)\n (LOCATION SMALL-CITY)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 585)\n (SAT MATH 625)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 30)\n (NO-APPLICANTS THOUS:10-13)\n (PERCENT-ADMITTANCE 40)\n (PERCENT-ENROLLED 20)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS ARTS-AND-SCIENCES)\n (ACADEMIC-EMPHASIS ARCHITECTURE)\n (ACADEMIC-EMPHASIS COMMERCE)\n (ACADEMIC-EMPHASIS EDUCATION)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS NURSING)\n)\n\n(DEF-INSTANCE WASHINGTON-AND-LEE\n (STATE VIRGINIA)\n (LOCATION SMALL-TOWN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:1:0)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 550)\n (SAT MATH 595)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 30)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS ART)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS 
CLASSICS)\n (ACADEMIC-EMPHASIS DRAMA)\n (ACADEMIC-EMPHASIS EAST-ASIAN-STUDIES)\n (ACADEMIC-EMPHASIS HUMANITIES)\n (ACADEMIC-EMPHASIS JOURNALISM)\n (ACADEMIC-EMPHASIS FOREIGN-LANGUAGES)\n (ACADEMIC-EMPHASIS MATHEMATICS)\n (ACADEMIC-EMPHASIS NATURAL-SCIENCES)\n (ACADEMIC-EMPHASIS RELIGION)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n)\n\n\n(DEF-INSTANCE UNIVERSITY-OF-ROCHESTER\n (STATE NEWYORK)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:60:40)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 550)\n (SAT MATH 550)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 70)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE SAM-HOUSTON-STATE-UNIVERSITY\n (STATE TEXAS)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:10-15)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:30:1)\n (SAT VERBAL 400)\n (SAT MATH 400)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 30)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 80)\n (PERCENT-ENROLLED 70)\n (ACADEMICS SCALE:1-5 2)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS APPLIED-SCIENCE)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS CRIMINAL-JUSTICE)\n (ACADEMIC-EMPHASIS EDUCATION)\n)\n\n(DEF-INSTANCE MANHATTANVILLE-COLLEGE\n (STATE NEWYORK)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:30:70)\n (STUDENT:FACULTY RATIO:11:1)\n (SAT VERBAL 500)\n (SAT MATH 530)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 60)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS MUSIC)\n (ACADEMIC-EMPHASIS FINE-ARTS)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE SUNY-PURCHASE\n (STATE NEWYORK)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:35:65)\n (STUDENT:FACULTY RATIO:17:1)\n (SAT VERBAL 525)\n (SAT MATH 525)\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 5)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 40)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS VISUAL-AND-PERFORMING-ARTS)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE BRANDEIS\n (STATE MASSACHUSETTS)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:48:52)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 580)\n (SAT MATH 630)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 40)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 60)\n (PERCENT-ENROLLED 45)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS ECONOMICS)\n (ACADEMIC-EMPHASIS BIOLOGY)\n (ACADEMIC-EMPHASIS CHEMISTRY)\n (ACADEMIC-EMPHASIS PRE-MED)\n (ACADEMIC-EMPHASIS PRE-LAW)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE GEORGE-WASHINGTON\n (STATE DISTRICT-OF-COLUMBIA)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:50:50)\n (STUDENT:FACULTY RATIO:14:1)\n (SAT VERBAL 530)\n (SAT MATH 560)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 45)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 75)\n (PERCENT-ENROLLED 30)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n 
(ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS POLITICAL-SCIENCE)\n (ACADEMIC-EMPHASIS INTERNATIONAL-AFFAIRS)\n)\n\n(DEF-INSTANCE ORAL-ROBERTS-UNIVERSITY\n (STATE OKLAHOMA)\n (LOCATION SUBURBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:1:1)\n (STUDENT:FACULTY RATIO:11:1)\n (SAT VERBAL 463)\n (SAT MATH 490)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 70)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 65)\n (PERCENT-ENROLLED 75)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 2)\n (QUALITY-OF-LIFE SCALE:1-5 3)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS EDUCATION)\n (ACADEMIC-EMPHASIS FINE-AND-PERFORMING-ARTS)\n (ACADEMIC-EMPHASIS HEALTH-SCIENCE)\n (ACADEMIC-EMPHASIS MATH)\n (ACADEMIC-EMPHASIS PHILOSOPHY)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n)\n\n(DEF-INSTANCE UNIVERSITY-OF-TULSA\n (STATE OKLAHOMA)\n (LOCATION URBAN)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:3:2)\n (STUDENT:FACULTY RATIO:16:1)\n (SAT VERBAL 471)\n (SAT MATH 520)\n (EXPENSES THOUS$:4-)\n (PERCENT-FINANCIAL-AID 75)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 85)\n (PERCENT-ENROLLED 65)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS AREA-STUDIES)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS EDUCATION)\n (ACADEMIC-EMPHASIS ENGLISH)\n (ACADEMIC-EMPHASIS FINE-AND-PERFORMING-ARTS)\n (ACADEMIC-EMPHASIS FOREIGN-LANGUAGES)\n (ACADEMIC-EMPHASIS MATH)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n)\n\n(DEF-INSTANCE CONNECTICUT-COLLEGE\n (STATE CONNECTICUT)\n (LOCATION SMALL-CITY)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-)\n (MALE:FEMALE RATIO:35:65)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 550)\n (SAT MATH 575)\n (EXPENSES THOUS$:10+)\n (PERCENT-FINANCIAL-AID 30)\n (NO-APPLICANTS THOUS:4-)\n (PERCENT-ADMITTANCE 40)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 3)\n (QUALITY-OF-LIFE SCALE:1-5 4)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n)\n\n(DEF-INSTANCE LEHIGH-UNIVERSITY\n (STATE PENNSYLVANIA)\n (LOCATION SMALL-CITY)\n (CONTROL PRIVATE)\n (NO-OF-STUDENTS THOUS:5-10)\n (MALE:FEMALE RATIO:75:25)\n (STUDENT:FACULTY RATIO:10:1)\n (SAT VERBAL 550)\n (SAT MATH 650)\n (EXPENSES THOUS$:7-10)\n (PERCENT-FINANCIAL-AID 45)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 50)\n (PERCENT-ENROLLED 40)\n (ACADEMICS SCALE:1-5 4)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS ENGINEERING)\n (ACADEMIC-EMPHASIS PHYSICAL)\n)\n\n(DEF-INSTANCE OREGON-STATE\n (STATE OREGON)\n (LOCATION SUBURBAN)\n (CONTROL STATE)\n (NO-OF-STUDENTS THOUS:15-20)\n (MALE:FEMALE RATIO:80:60)\n (STUDENT:FACULTY RATIO:17:1)\n% (SAT VERBAL NA) %\n% (SAT MATH NA) %\n (EXPENSES THOUS$:4-7)\n (PERCENT-FINANCIAL-AID 65)\n (NO-APPLICANTS THOUS:4-7)\n (PERCENT-ADMITTANCE 90)\n (PERCENT-ENROLLED 70)\n (ACADEMICS SCALE:1-5 3)\n (SOCIAL SCALE:1-5 4)\n (QUALITY-OF-LIFE SCALE:1-5 2)\n (ACADEMIC-EMPHASIS EDUCATION)\n (ACADEMIC-EMPHASIS SOCIAL-SCIENCE)\n (ACADEMIC-EMPHASIS LIBERAL-ARTS)\n (ACADEMIC-EMPHASIS BUSINESS-ADMINISTRATION)\n (ACADEMIC-EMPHASIS MATHEMATICS)\n)\n\n\n===================================================\n\n\n(dfx def-instance (l)\n (tlet (instance (car l) f-list (cdr l))\n (cond ((or (null instance) (consp instance))\n (msg t instance " is not a valid instance name (must be an atom)"))\n (t (make:event instance)\n (push instance !instances)\n (:= (get instance \'features)\n (tfor (f in f-list)\n (when (cond ((or (atom f) (null (cdr f)))\n (msg 
t f " is not a valid feature "\n "(must be a 2 or 3 item list)") nil)\n ((consp (car f))\n (msg t (car f) " is not a valid feature "\n "name (must be an atom)") nil)\n ((and (cddr f) (consp (cadr f)))\n (msg t (cadr f) " is not a valid feature "\n "role (must be an atom)") nil)\n (t t)))\n (save (cond ((equal (length f) 3)\n (make:feature (car f) (cadr f) (caddr f)))\n (t (make:feature (car f) \'value (cadr f)))))))\n instance))))\n\n(set-if !instances nil)\n\n\n\n(dex run-uniq-colleges (l n)\n (tfor (sc in l)\n (when (cond ((ge (length *events-added*) n))\n ((not (get sc \'duplicate))\n (run-instance sc)\n~ (remprop sc \'features)\n nil)\n (t (remprop sc \'features) nil)))\n (stop)))\n'
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - From a URL (github or otherwise) - CSV upload method - !wget method- "Clean" a dataset using common Python libraries - Removing NaN values "Data Imputation"- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot (if we have time) Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Steps of Loading and Exploring a Dataset:- Find a dataset that looks interesting- Learn what you can about it - What's in it? - How many rows and columns? - What types of variables?- Look at the raw contents of the file- Load it into your workspace (notebook) - Handle any challenges with headers - Handle any problems with missing values- Then you can start to explore the data - Look at the summary statistics - Look at counts of different categories - Make some plots to look at the distribution of the data 3 ways of loading a dataset From its URL From a local file Using the `!wget` command Part 2 - Deal with Missing Values Diagnose Missing ValuesLets use the Adult Dataset from UCI. Fill Missing Values Part 3 - Explore the Dataset: Look at "Summary Statistics Numeric Non-Numeric Look at Categorical Values Part 4 - Basic Visualizations (using the Pandas Library) Histogram
###Code
# Pandas Histogram
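# A minimal sketch (assumes the flag_data frame loaded above is still in scope;
# per the codebook, column 3 is area in thousands of sq km).
flag_data[3].plot.hist(bins=30, title='Country area (thousands of sq km)')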
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
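# A minimal sketch: kernel density estimate of the same area column
# (pandas relies on scipy under the hood for .plot.density()).
flag_data[3].plot.density(title='KDE of country area')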
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
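# A minimal sketch: area (column 3) against population (column 4).
flag_data.plot.scatter(x=3, y=4, title='Population vs. area')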
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - From a URL (github or otherwise) - CSV upload method - !wget method- "Clean" a dataset using common Python libraries - Removing NaN values "Data Imputation"- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot (if we have time) Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Wesley was here
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
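# One possible way to do the exercise (a sketch, not the official solution):
# build the 30 names from the codebook and pass them via `names=`. When `names`
# is given, read_csv no longer treats the first row as a header.
col_names = ['name', 'landmass', 'zone', 'area', 'population', 'language',
             'religion', 'bars', 'stripes', 'colours', 'red', 'green', 'blue',
             'gold', 'white', 'black', 'orange', 'mainhue', 'circles',
             'crosses', 'saltires', 'quarters', 'sunstars', 'crescent',
             'triangle', 'icon', 'animate', 'text', 'topleft', 'botright']
flag_data = pd.read_csv(flag_data_url, names=col_names)
flag_data.head()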
###Output
_____no_output_____
###Markdown
Steps of Loading and Exploring a Dataset:- Find a dataset that looks interesting- Learn what you can about it - What's in it? - How many rows and columns? - What types of variables?- Look at the raw contents of the file- Load it into your workspace (notebook) - Handle any challenges with headers - Handle any problems with missing values- Then you can start to explore the data - Look at the summary statistics - Look at counts of different categories - Make some plots to look at the distribution of the data 3 ways of loading a dataset From its URL
###Code
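# Sketch: load directly from a URL (reusing the UCI flag data URL from above).
df_from_url = pd.read_csv(flag_data_url, header=None)
df_from_url.shape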
###Output
_____no_output_____
###Markdown
From a local file
###Code
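# Sketch: load a file sitting next to the notebook. 'flag.data' is only a
# placeholder filename - point it at whatever you saved locally.
df_local = pd.read_csv('flag.data', header=None)
df_local.shape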
###Output
_____no_output_____
###Markdown
Using the `!wget` command
###Code
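# Sketch: download the file with wget first, then read the local copy.
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
df_wget = pd.read_csv('flag.data', header=None)
df_wget.shape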
###Output
_____no_output_____
###Markdown
 Part 2 - Deal with Missing Values Diagnose Missing Values Let's use the Adult Dataset from UCI.
###Code
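# Sketch: the UCI Adult data file has no header row and marks missing values
# with ' ?' (a question mark after the comma and space), so tell read_csv about
# both and then count the resulting NaNs per column.
adult_url = ('https://archive.ics.uci.edu/ml/machine-learning-databases/'
             'adult/adult.data')
adult = pd.read_csv(adult_url, header=None, na_values=' ?')
adult.isna().sum()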
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
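# Sketch (assumes the adult frame from the cell above): fill each column's
# missing values with that column's most common value via .fillna().
adult_filled = adult.copy()
for col in adult_filled.columns:
    if adult_filled[col].isna().any():
        adult_filled[col] = adult_filled[col].fillna(adult_filled[col].mode()[0])
adult_filled.isna().sum().sum()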
###Output
_____no_output_____
###Markdown
 Part 3 - Explore the Dataset: Look at Summary Statistics Numeric
###Code
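# Sketch: numeric summary statistics (count, mean, std, quartiles) for the
# adult frame loaded above.
adult.describe()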
###Output
_____no_output_____
###Markdown
Non-Numeric
###Code
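# Sketch: the same idea for the non-numeric (object) columns - counts, number
# of unique values, and the most frequent value.
adult.describe(exclude='number')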
###Output
_____no_output_____
###Markdown
Look at Categorical Values Part 4 - Basic Visualizations (using the Pandas Library) Histogram
###Code
# Pandas Histogram
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - !curl method - CSV upload method- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot- "Clean" a dataset using common Python libraries - Removing NaN values "Interpolation" Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
?pd.read_csv
??pd.read_csv
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
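# A hedged sketch of one possible solution to the exercise above. It assumes
# pandas and flag_data_url from the lecture cells are still in scope; the
# column names are copied from the codebook.
flag_columns = ['name', 'landmass', 'zone', 'area', 'population', 'language',
                'religion', 'bars', 'stripes', 'colours', 'red', 'green',
                'blue', 'gold', 'white', 'black', 'orange', 'mainhue',
                'circles', 'crosses', 'saltires', 'quarters', 'sunstars',
                'crescent', 'triangle', 'icon', 'animate', 'text',
                'topleft', 'botright']
flag_data = pd.read_csv(flag_data_url, header=None, names=flag_columns)
flag_data.head()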
###Output
_____no_output_____
###Markdown
Loading from a local CSV to Google Colab
###Code
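# A hedged sketch of the Colab upload flow. files.upload() opens a file picker
# and returns a dict of {filename: file bytes}; 'my_local_file.csv' below is a
# hypothetical name standing in for whatever you choose to upload.
from google.colab import files
import io

uploaded = files.upload()
local_df = pd.read_csv(io.BytesIO(uploaded['my_local_file.csv']))
local_df.head()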
###Output
_____no_output_____
###Markdown
Part 2 - Basic Visualizations Basic Data Visualizations Using Matplotlib
###Code
import matplotlib.pyplot as plt
# Scatter Plot
# Histogram
# Seaborn Density Plot
# Seaborn Pairplot
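# A minimal sketch filling in the placeholders above. It assumes flag_data was
# re-loaded with the codebook column names (as in the exercise sketch earlier),
# so columns like 'area', 'population', 'colours', 'bars' and 'stripes' exist.
import seaborn as sns

plt.scatter(flag_data['area'], flag_data['population'])   # scatter plot
plt.xlabel('area (thousands of square km)')
plt.ylabel('population (millions)')
plt.show()

plt.hist(flag_data['colours'], bins=8)                     # histogram
plt.xlabel('number of colours in the flag')
plt.show()

sns.kdeplot(flag_data['stripes'])                          # seaborn density plot
plt.show()

sns.pairplot(flag_data[['area', 'population', 'bars', 'stripes']])  # seaborn pairplot
plt.show()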
###Output
_____no_output_____
###Markdown
Create the same basic Visualizations using Pandas
###Code
# Pandas Histogram - Look familiar?
# Pandas Scatterplot
# Pandas Scatter Matrix - Usually doesn't look too great.
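# A rough sketch of the pandas equivalents, again assuming flag_data carries
# the codebook column names from the exercise sketch above.
from pandas.plotting import scatter_matrix

flag_data['colours'].hist(bins=8)                    # pandas histogram
flag_data.plot.scatter(x='area', y='population')     # pandas scatterplot
scatter_matrix(flag_data[['area', 'population', 'bars', 'stripes']], figsize=(8, 8));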
###Output
_____no_output_____
###Markdown
Part 3 - Deal with Missing Values Diagnose Missing ValuesLets use the Adult Dataset from UCI.
###Code
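# A sketch of one way to diagnose the missing values here, mirroring the adult
# csv URL and the ' ?' missing-value marker used later in this file.
adult_url = 'https://raw.githubusercontent.com/ryanleeallred/datasets/master/adult.csv'
adult = pd.read_csv(adult_url, na_values=' ?')
print(adult.shape)
adult.isna().sum()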
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
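# One hedged way to fill the missing (categorical) values: use each column's
# mode, the same df.mode().iloc[0] trick used further down in this file.
adult_filled = adult.fillna(adult.mode().iloc[0])
adult_filled.isna().sum()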
###Output
_____no_output_____
###Markdown
Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar semi-clean source. You don't want the data that you're working with for this assignment to have any bigger issues than maybe not having headers or including missing values, etc.After you have chosen your dataset, do the following:- Import the dataset using the method that you are least comfortable with (!curl or CSV upload). - Make sure that your dataset has the number of rows and columns that you expect. - Make sure that your dataset has appropriate column names, rename them if necessary. - If your dataset uses markers like "?" to indicate missing values, replace them with NaNs during import.- Identify and fill missing values in your dataset (if any) - Don't worry about using methods more advanced than the `.fillna()` function for today.- Create one of each of the following plots using your dataset - Scatterplot - Histogram - Density Plot - Pairplot (note that pairplots will take a long time to load with large datasets or datasets with many columns)If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck!).If you have loaded a few traditional datasets, see the following section for suggested stretch goals. --- ---Assignment------ Still TODO:1. Add **labels** to all graphs2. Add **conclusions** to all graphs3. **Comment** code to explain less obvious syntax4. How to decide between using **mean** or **median** Preparing Data for Visualization and/or Analysis
###Code
#Global Imports
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#link to database
car_mpg_db_address = 'https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
###Output
_____no_output_____
###Markdown
Bringing in dataset Read CSV from linkMost familiar method
###Code
#Read from link (note: column_headers is defined in the "Rows/Columns labeled correctly" cell further down; run that cell first)
df = pd.read_csv(car_mpg_db_address, names=column_headers)
print(df.shape)
df.head()
###Output
(398, 9)
###Markdown
Import dataframe using !curl
###Code
!curl -o auto-mpg.data https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data
df = pd.read_csv('auto-mpg.data')
print(df.shape)
df.head()
###Output
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
100 30286 100 30286 0 0 90676 0 --:--:-- --:--:-- --:--:-- 90676
(397, 1)
###Markdown
Upload data from local machine to Colab Definitely my least favorite method. You need to go select the file every time.
###Code
from google.colab import files
uploads = files.upload()
df = pd.read_csv('auto-mpg.data')
print(df.shape)
df.head()
###Output
_____no_output_____
###Markdown
Correcting Import Anomalies Expected number of rows/columns Number of Observations (rows): 398; Number of Features (columns): 8
###Code
#returns (rows, columns)
df.shape
###Output
_____no_output_____
###Markdown
The number of recognized columns is only 1 vs an expected 8. I'll look at the first few rows as an initial troubleshooting step.
###Code
#returns first 5(by default) rows of dataframe
df.head()
###Output
_____no_output_____
###Markdown
It appears that all the features were loaded into the first column. This happened because the values are not separated by commas. Commas are the default delimiter, but we can tell pandas to split on whitespace instead using **delim_whitespace=True**. Note: other separators can also be specified via the **sep** argument, but in this case the last feature, **car name**, has values that contain spaces (e.g. "buick skylark 320"). **delim_whitespace=True** respects the quotes and will parse those values correctly.
###Code
!curl -o auto-mpg.data https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data
df = pd.read_csv('auto-mpg.data', delim_whitespace=True)
print(df.shape)
df.head()
###Output
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 30286 100 30286 0 0 90136 0 --:--:-- --:--:-- --:--:-- 90136
(397, 9)
###Markdown
Now there are 9 features, 1 more than UCI's summary specifies; however, looking under UCI's [**Attribute Information**](https://archive.ics.uci.edu/ml/datasets/Auto+MPG) we can see that they do in fact list 9 attributes, meaning our features now parse correctly. Rows/Columns labeled correctly There is 1 row fewer than the expected 398. This is normal when the dataset does not include a column-label row, since pandas assumed the first row was the column headers. Fixing this is as simple as passing the **read_csv** function a **list** containing the proper column headers via the **names** argument.
###Code
column_headers = ['mpg', 'cylinders', 'displacement', 'horsepower', 'weight',
'acceleration', 'model year', 'origin', 'car name']
!curl -o auto-mpg.data https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data
df = pd.read_csv('auto-mpg.data', delim_whitespace=True, names=column_headers)
print(df.shape)
df.head()
###Output
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
54 30286 54 16384 0 0 47766 0 --:--:-- --:--:-- --:--:-- 47627
100 30286 100 30286 0 0 88297 0 --:--:-- --:--:-- --:--:-- 88040
(398, 9)
###Markdown
NaN values correctly identified According to the UCI metadata this dataset does have missing values. Finding some NaN values can't confirm that every NaN was parsed correctly, but finding none (when we expect some) indicates a parsing issue that needs to be tracked down.
###Code
df.isna().sum()
###Output
_____no_output_____
###Markdown
Since the check above found no NaNs, we can conclude there was an error parsing the missing values. Let's see if a glance at the dataset reveals any clues.
###Code
df
###Output
_____no_output_____
###Markdown
Scrolling through, I found a **'?'** in row 374, column '*horsepower*'. To remedy this we can tell pandas to treat '?' as a NaN value during import, using the **na_values** argument of **pd.read_csv**.
###Code
!curl -o auto-mpg.data https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data
df = pd.read_csv('auto-mpg.data', delim_whitespace=True, names=column_headers, na_values='?')
df.isna().sum()
###Output
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 30286 100 30286 0 0 90676 0 --:--:-- --:--:-- --:--:-- 90676
###Markdown
Now we can see that we have 6 NaNs, all in the '*horsepower*' column. In the next step, I'll work out an informed guess for what those values might have been. Note: in this specific case there are few enough NaNs, and the information is readily available, so depending on your purposes it could well be worth simply looking the values up. In the interest of the exercise, however, I will impute them. Cleaning Data
###Code
df.describe()
df.hist(column='horsepower')
###Output
_____no_output_____
###Markdown
**Mean or Median?** I am going to assign all NaN values the mean of the column.
###Code
df.fillna(np.mean(df['horsepower']), inplace=True)
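# A hedged aside on the mean-vs-median question above: compare the two before
# choosing; the median is less sensitive to the few very powerful cars visible
# in the histogram. An alternative fill would be:
# df['horsepower'] = df['horsepower'].fillna(df['horsepower'].median())
print('mean:', df['horsepower'].mean(), 'median:', df['horsepower'].median())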
###Output
_____no_output_____
###Markdown
Recheck for NaN values, this time expecting none.
###Code
df.isna().sum()
###Output
_____no_output_____
###Markdown
Now our dataframe is imported correctly, clean, and ready for analysis. Visualizing Data Scatterplot
###Code
df.plot.scatter('horsepower','mpg');
###Output
_____no_output_____
###Markdown
Histogram
###Code
df.hist(column='cylinders');
###Output
_____no_output_____
###Markdown
Density Plot
###Code
import seaborn as sns
sns.distplot(df['mpg']);
###Output
_____no_output_____
###Markdown
Pairplot
###Code
import seaborn as sns
sns.pairplot(df);
###Output
_____no_output_____
###Markdown
Stretch Goals - Other types and sources of dataNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.One last major source of data is APIs: https://github.com/toddmotto/public-apisAPI stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. Image, text, or (public) APIs are probably more tractable - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup. Using an API NTS: Try this API. Free. No key. (Thanks Connor Sanderford)
###Code
!pip install kaggle  # shell command, so it needs the ! prefix inside a notebook
from google.colab import files
uploads = files.upload()
!mkdir ~/.kaggle
!mv kaggle.json ~/.kaggle/
!kaggle competitions download boston-housing
!kaggle datasets list -s titanic
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - From a URL (github or otherwise) - CSV upload method - !wget method- "Clean" a dataset using common Python libraries - Removing NaN values "Data Imputation"- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot (if we have time) Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Vera makes a change
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
###Output
_____no_output_____
###Markdown
Steps of Loading and Exploring a Dataset:- Find a dataset that looks interesting- Learn what you can about it - What's in it? - How many rows and columns? - What types of variables?- Look at the raw contents of the file- Load it into your workspace (notebook) - Handle any challenges with headers - Handle any problems with missing values- Then you can start to explore the data - Look at the summary statistics - Look at counts of different categories - Make some plots to look at the distribution of the data 3 ways of loading a dataset From its URL
###Code
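# A minimal sketch: read straight from a raw URL. flag_data_url from the
# lecture cells above is reused here as the example.
df_from_url = pd.read_csv(flag_data_url, header=None)
df_from_url.head()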
###Output
_____no_output_____
###Markdown
From a local file
###Code
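# A hedged sketch for Colab: upload a file from your machine, then read it by
# name. files.upload() also saves the file to the working directory;
# 'my_local_file.csv' is a hypothetical name for whatever you upload.
from google.colab import files

uploaded = files.upload()
df_local = pd.read_csv('my_local_file.csv')
df_local.head()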
###Output
_____no_output_____
###Markdown
Using the `!wget` command
###Code
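# A sketch of the !wget route: download to the local filesystem first, then
# read the downloaded file. The adult csv URL used elsewhere in this file
# serves as the example.
!wget https://raw.githubusercontent.com/ryanleeallred/datasets/master/adult.csv
df_wget = pd.read_csv('adult.csv')
df_wget.head()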
###Output
_____no_output_____
###Markdown
Part 2 - Deal with Missing Values Diagnose Missing ValuesLets use the Adult Dataset from UCI.
###Code
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
###Output
_____no_output_____
###Markdown
Part 3 - Explore the Dataset: Look at "Summary Statistics" Numeric
###Code
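# A sketch of numeric summary statistics; the adult data is loaded here so the
# cell is self-contained (same csv and ' ?' marker as in Part 2).
adult_df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/adult.csv', na_values=' ?')
adult_df.describe()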
###Output
_____no_output_____
###Markdown
Non-Numeric
###Code
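# Non-numeric summary statistics: describe only the object (string) columns of
# the adult_df sketched in the previous cell.
adult_df.describe(exclude='number')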
###Output
_____no_output_____
###Markdown
Look at Categorical Values Part 4 - Basic Visualizations (using the Pandas Library) Histogram
###Code
# Pandas Histogram
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
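# A sketch of a pandas KDE plot, assuming the adult_df from the sketches above;
# 'age' is one of its numeric columns.
adult_df['age'].plot.kde();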
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - !curl method - CSV upload method- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot- "Clean" a dataset using common Python libraries - Removing NaN values "Interpolation" Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | head
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
# help(pd.read_csv)
?pd.read_csv
??pd.read_csv
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
import numpy as np
col_headers = ['name','landmass','zone','area','population','language','religion','bars','stripes','colours','red',
'green','blue','gold','white','black','orange','mainhue','circles','crosses','saltires','quarters',
'sunstars','crescent','triangle','icon','animate','text','topleft','botright']
flag_data = pd.read_csv(flag_data_url, header=None, names=col_headers)
flag_data.head()
flag_data['language'] = flag_data['language'].map({1: 'English', 2:'Spanish', 3:'French', 4:'German', 5:'Slavic', 6:'Other Indo-European', 7:'Chinese', 8:'Arabic', 9:'Japanese/Turkish/Finnish/Magyar', 10:'Others'})
flag_data.head()
# flag_data['language'] = np.where((flag_data['language'] == 1), 'English', 'No')
# flag_data.head()
link1 = 'https://raw.githubusercontent.com/BJanota11/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module2-loadingdata/drinks_with_regions.csv'
link2 = 'https://raw.githubusercontent.com/BJanota11/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module2-loadingdata/drinks_with_regions_index.csv'
link3 = 'https://raw.githubusercontent.com/BJanota11/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module2-loadingdata/drinks_with_regions_header.csv'
df = pd.read_csv(link1, index_col=0)
print(df.shape)
df.head()
df.to_csv('drink_test.csv')
df.loc['Afghanistan']
df = pd.read_csv(link3, header = 3)
print(df.shape)
df.head()
###Output
(193, 7)
###Markdown
Loading from a local CSV to Google Colab Part 2 - Basic Visualizations
###Code
from google.colab import files
uploaded = files.upload()
###Output
_____no_output_____
###Markdown
Basic Data Visualizations Using Matplotlib
###Code
# Use for more complex
import matplotlib.pyplot as plt
# Scatter Plot - the trailing semicolon suppresses the text output and just shows the graph
plt.scatter(df.beer_servings, df.wine_servings);
plt.xlabel('Beer Servings')
plt.ylabel('Wine Servings')
plt.show()
# Use for simple plots - uses matplotlib
df.plot.scatter('beer_servings','wine_servings');
# Histogram
plt.hist(df.total_litres_of_pure_alcohol, bins=20);
# Seaborn Pairplot
import seaborn as sns
sns.pairplot(df)
# Seaborn Pairplot
###Output
_____no_output_____
###Markdown
Create the same basic Visualizations using Pandas
###Code
# Pandas Histogram - Look familiar?
# Pandas Scatterplot
# Pandas Scatter Matrix - Usually doesn't look too great.
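# A rough sketch of the pandas versions, using the drinks dataframe (df) that
# was loaded and plotted with matplotlib above.
from pandas.plotting import scatter_matrix

df['total_litres_of_pure_alcohol'].hist(bins=20)      # pandas histogram
df.plot.scatter('beer_servings', 'wine_servings')     # pandas scatterplot
scatter_matrix(df[['beer_servings', 'wine_servings', 'total_litres_of_pure_alcohol']], figsize=(8, 8));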
###Output
_____no_output_____
###Markdown
Part 3 - Deal with Missing Values Diagnose Missing ValuesLets use the Adult Dataset from UCI.
###Code
df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/adult.csv', na_values = ' ?')
print(df.shape)
df.head()
###Output
(32561, 15)
###Markdown
Fill Missing Values
###Code
df.isna().sum()
df.country.value_counts()
df.dropna(subset=['country'], inplace=True)
print(df.shape)
df.head()
df.mode().iloc[0]
# Use fillna - fill the remaining NaNs with each column's mode (computed above)
df = df.fillna(df.mode().iloc[0])
###Output
_____no_output_____
###Markdown
Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar semi-clean source. You don't want the data that you're working with for this assignment to have any bigger issues than maybe not having headers or including missing values, etc.After you have chosen your dataset, do the following:- Import the dataset using the method that you are least comfortable with (!curl or CSV upload). - Make sure that your dataset has the number of rows and columns that you expect. - Make sure that your dataset has appropriate column names, rename them if necessary. - If your dataset uses markers like "?" to indicate missing values, replace them with NaNs during import.- Identify and fill missing values in your dataset (if any) - Don't worry about using methods more advanced than the `.fillna()` function for today.- Create one of each of the following plots using your dataset - Scatterplot - Histogram - Density Plot - Pairplot (note that pairplots will take a long time to load with large datasets or datasets with many columns)If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck!).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# Las Vegas hotel data
import numpy as np
import pandas as pd
# Load the Las Vegas data from UCI
vegas_data_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00397/LasVegasTripAdvisorReviews-Dataset.csv'
vegas_data = pd.read_csv(vegas_data_url, sep=';')
print(vegas_data.shape)
vegas_data.head()
# Check for null values
vegas_data.isna().sum()
# Check a representative sample of column data
vegas_data['User country'].value_counts()
# Check a representative sample of column data
vegas_data['Hotel name'].value_counts()
vegas_data['Score'].value_counts()
# Since the data is clean I will add a column of null values
vegas_data['Score Ranking'] = np.nan
vegas_data.head()
# Fill the Score Ranking column with appropriate values
vegas_data['Score Ranking'] = np.where(vegas_data['Score'] > 3, 'High', 'Low')
vegas_data.head()
# Create a histogram/horizontal bar graph of the number of scores
import matplotlib.pyplot as plt
vegas_data['Score'].value_counts().plot(kind='barh')
plt.title('Las Vegas Hotel Reviews - Distribution of 1-5 Scores')
plt.xlabel('Number of Scores')
plt.ylabel('Score Values')
plt.show()
###Output
_____no_output_____
###Markdown
Stretch Goals - Other types and sources of dataNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.One last major source of data is APIs: https://github.com/toddmotto/public-apisAPI stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. Image, text, or (public) APIs are probably more tractable - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
###Code
# Run this cell and select the kaggle.json file downloaded
# from the Kaggle account settings page.
from google.colab import files
files.upload()
# install the kaggle.json file
!pip install -q kaggle
# create the proper directory and move the kaggle.json file to it
!mkdir -p ~/.kaggle
!cp kaggle.json ~/.kaggle/
# change permissions
!chmod 600 /root/.kaggle/kaggle.json
# show available datasets
!kaggle datasets list
# download the selected dataset
!kaggle datasets download -d jealousleopard/goodreadsbooks
# unzip the selected zip file
from zipfile import ZipFile
file_name = 'goodreadsbooks.zip'
with ZipFile(file_name, 'r') as zip:
zip.printdir()
zip.extractall()
# load the books.csv data into the dataframe
import numpy as np
import pandas as pd
books_data = pd.read_csv('books.csv', error_bad_lines=False)
print(books_data.shape)
books_data.head()
###Output
(13714, 10)
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
import numpy as np
import pandas as pd
!pip install requests
import requests
api_key = 'ojND6ChEhU2t29F1DikvymV5n1SvL9zz'
tickers = ['AAPL']
sim_ids = []
for ticker in tickers:
request_url = f'https://simfin.com/api/v1/info/find-id/ticker/{ticker}?api-key={api_key}'
content = requests.get(request_url)
data = content.json()
print(data)
if "error" in data or len(data) < 1:
sim_ids.append(None)
else:
sim_ids.append(data[0]['simId'])
print(sim_ids)
# define time periods for financial statement data
statement_type = "bs"
time_periods = ["Q4"]
year_start = 2008
year_end = 2017
data = {}
for idx, sim_id in enumerate(sim_ids):
d = data[tickers[idx]] = {"Line Item": []}
if sim_id is not None:
for year in range(year_start, year_end + 1):
for time_period in time_periods:
period_identifier = time_period + "-" + str(year)
if period_identifier not in d:
d[period_identifier] = []
request_url = f'https://simfin.com/api/v1/companies/id/{sim_id}/statements/standardised?stype={statement_type}&fyear={year}&ptype={time_period}&api-key={api_key}'
content = requests.get(request_url)
statement_data = content.json()
# collect line item names once, they are the same for all companies with the standardised data
if len(d['Line Item']) == 0:
d['Line Item'] = [x['standardisedName'] for x in statement_data['values']]
if 'values' in statement_data:
for item in statement_data['values']:
d[period_identifier].append(item['valueChosen'])
else:
# no data found for time period
d[period_identifier] = [None for _ in d['Line Item']]
df = pd.DataFrame(data=d)
df.head()
df.isna().sum()
df.fillna(value=0)
df.describe()
df.dropna()
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
import numpy as np
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
import pandas as pd
auto_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data'
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data
!ls
###Output
--2018-11-06 20:43:02-- https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data
Resolving archive.ics.uci.edu (archive.ics.uci.edu)... 128.195.10.249
Connecting to archive.ics.uci.edu (archive.ics.uci.edu)|128.195.10.249|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 25936 (25K) [text/plain]
Saving to: ‘imports-85.data.2’
imports-85.data.2 100%[===================>] 25.33K --.-KB/s in 0.1s
2018-11-06 20:43:06 (182 KB/s) - ‘imports-85.data.2’ saved [25936/25936]
forestfires.csv forestfires.csv.2 imports-85.data.1 sample_data
forestfires.csv.1 imports-85.data imports-85.data.2
###Markdown
The data set came with no header, these are notes on the data set attributes to add column namesAttribute Information: Attribute: Attribute Range: ------------------ ----------------------------------------------- 1. symboling: -3, -2, -1, 0, 1, 2, 3. 2. normalized-losses: continuous from 65 to 256. 3. make: alfa-romero, audi, bmw, chevrolet, dodge, honda, isuzu, jaguar, mazda, mercedes-benz, mercury, mitsubishi, nissan, peugot, plymouth, porsche, renault, saab, subaru, toyota, volkswagen, volvo 4. fuel-type: diesel, gas. 5. aspiration: std, turbo. 6. num-of-doors: four, two. 7. body-style: hardtop, wagon, sedan, hatchback, convertible. 8. drive-wheels: 4wd, fwd, rwd. 9. engine-location: front, rear. 10. wheel-base: continuous from 86.6 120.9. 11. length: continuous from 141.1 to 208.1. 12. width: continuous from 60.3 to 72.3. 13. height: continuous from 47.8 to 59.8. 14. curb-weight: continuous from 1488 to 4066. 15. engine-type: dohc, dohcv, l, ohc, ohcf, ohcv, rotor. 16. num-of-cylinders: eight, five, four, six, three, twelve, two. 17. engine-size: continuous from 61 to 326. 18. fuel-system: 1bbl, 2bbl, 4bbl, idi, mfi, mpfi, spdi, spfi. 19. bore: continuous from 2.54 to 3.94. 20. stroke: continuous from 2.07 to 4.17. 21. compression-ratio: continuous from 7 to 23. 22. horsepower: continuous from 48 to 288. 23. peak-rpm: continuous from 4150 to 6600. 24. city-mpg: continuous from 13 to 49. 25. highway-mpg: continuous from 16 to 54. 26. price: continuous from 5118 to 45400.8. Missing Attribute Values: (denoted by "?") Attribute : Number of instances missing a value: 2. 41 6. 2 19. 4 20. 4 22. 2 23. 2 26. 4
###Code
#create a list of the data set column names
auto_col_names = ['symbol', 'normalized-losses', 'make', 'fuel-type',
'aspiration', 'num-of-doors', 'body-style', 'drive-wheels',
'engine-location', 'wheel-base','length', 'width','height',
'curb-weight', 'engine-type', 'num-of-cylinders',
'engine-size', 'fuel-system', 'bore', 'stroke',
'compression-ratio', 'horsepower', 'peak-rpm', 'city-mpg',
'highway-mpg', 'price']
#create a dataframe from the csv file, indicating that there is no header in
#the initial data and renaming the columns as in the list above
auto_data = pd.read_csv(auto_url, header=None, names=auto_col_names)
print(auto_data.shape)
auto_data.head()
auto_data.count()
#replace missing values '?' with 'NaN'
auto_data_replaced = auto_data.replace('?', np.nan)
auto_data_replaced.head()
#check NaN values in the dataframe
auto_data_replaced.isna().sum().sum()
auto_data_replaced.isna().sum()
#use forward fill to impute the NaN values (note: without inplace=True or
#reassignment, this only displays a filled copy; auto_data_replaced itself is unchanged)
auto_data_replaced.fillna(method='ffill')
#check the dataframe for NaN values again
auto_data_replaced.isna().sum()
#replace NaN values with 0
auto_data_replaced['normalized-losses'].replace(np.nan, 0, inplace=True)
auto_data_replaced['normalized-losses']
#replace all NaN values with 0 in numeric columns in the entire dataframe
numeric = auto_data_replaced.select_dtypes(include=[np.number])  # np.float is deprecated; np.number already covers floats
for column in numeric:
auto_data_replaced.replace(np.nan, 0, inplace=True)
print(auto_data_replaced.head())
auto_data_replaced.isna().sum()
#Extracting csv files from a zip directory and saving them locally
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/00317/grammatical_facial_expression.zip
!unzip grammatical_facial_expression.zip
#browsing the extracted files
!ls
#create a dataframe from one of the locally downloaded (and renamed) csv files
#and get its info
df = pd.read_csv('gfe')
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1062 entries, 0 to 1061
Data columns (total 1 columns):
0.0 0x 0y 0z 1x 1y 1z 2x 2y 2z 3x 3y 3z 4x 4y 4z 5x 5y 5z 6x 6y 6z 7x 7y 7z 8x 8y 8z 9x 9y 9z 10x 10y 10z 11x 11y 11z 12x 12y 12z 13x 13y 13z 14x 14y 14z 15x 15y 15z 16x 16y 16z 17x 17y 17z 18x 18y 18z 19x 19y 19z 20x 20y 20z 21x 21y 21z 22x 22y 22z 23x 23y 23z 24x 24y 24z 25x 25y 25z 26x 26y 26z 27x 27y 27z 28x 28y 28z 29x 29y 29z 30x 30y 30z 31x 31y 31z 32x 32y 32z 33x 33y 33z 34x 34y 34z 35x 35y 35z 36x 36y 36z 37x 37y 37z 38x 38y 38z 39x 39y 39z 40x 40y 40z 41x 41y 41z 42x 42y 42z 43x 43y 43z 44x 44y 44z 45x 45y 45z 46x 46y 46z 47x 47y 47z 48x 48y 48z 49x 49y 49z 50x 50y 50z 51x 51y 51z 52x 52y 52z 53x 53y 53z 54x 54y 54z 55x 55y 55z 56x 56y 56z 57x 57y 57z 58x 58y 58z 59x 59y 59z 60x 60y 60z 61x 61y 61z 62x 62y 62z 63x 63y 63z 64x 64y 64z 65x 65y 65z 66x 66y 66z 67x 67y 67z 68x 68y 68z 69x 69y 69z 70x 70y 70z 71x 71y 71z 72x 72y 72z 73x 73y 73z 74x 74y 74z 75x 75y 75z 76x 76y 76z 77x 77y 77z 78x 78y 78z 79x 79y 79z 80x 80y 80z 81x 81y 81z 82x 82y 82z 83x 83y 83z 84x 84y 84z 85x 85y 85z 86x 86y 86z 87x 87y 87z 88x 88y 88z 89x 89y 89z 90x 90y 90z 91x 91y 91z 92x 92y 92z 93x 93y 93z 94x 94y 94z 95x 95y 95z 96x 96y 96z 97x 97y 97z 98x 98y 98z 99x 99y 99z 1062 non-null object
dtypes: object(1)
memory usage: 8.4+ KB
###Markdown
Stretch Goals - Other types and sources of dataNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.One last major source of data is APIs: https://github.com/toddmotto/public-apisAPI stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. I suggest image, text, or (public) APIs - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
###Code
#Upcoming space launches API
#this specific endpoint returns all launches with locations in the United States:
launch_data = pd.read_json('https://launchlibrary.net/1.3/location?countryCode=USA')
print(launch_data.shape)
launch_data.head()
print(launch_data.iloc[1])
###Output
(30, 4)
count 30
locations {'id': 2, 'name': 'Taiyuan, People's Republic ...
offset 0
total 36
Name: 1, dtype: object
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - From a URL (github or otherwise) - CSV upload method - !wget method- "Clean" a dataset using common Python libraries - Removing NaN values "Data Imputation"- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot (if we have time) Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Timothy Hsu
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
###Output
_____no_output_____
###Markdown
Steps of Loading and Exploring a Dataset:- Find a dataset that looks interesting- Learn what you can about it - What's in it? - How many rows and columns? - What types of variables?- Look at the raw contents of the file- Load it into your workspace (notebook) - Handle any challenges with headers - Handle any problems with missing values- Then you can start to explore the data - Look at the summary statistics - Look at counts of different categories - Make some plots to look at the distribution of the data 3 ways of loading a dataset From its URL
###Code
###Output
_____no_output_____
###Markdown
From a local file
###Code
###Output
_____no_output_____
###Markdown
Using the `!wget` command
###Code
###Output
_____no_output_____
###Markdown
Part 2 - Deal with Missing Values Diagnose Missing ValuesLets use the Adult Dataset from UCI.
###Code
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
###Output
_____no_output_____
###Markdown
Part 3 - Explore the Dataset: Look at "Summary Statistics" Numeric
###Code
###Output
_____no_output_____
###Markdown
Non-Numeric
###Code
###Output
_____no_output_____
###Markdown
Look at Categorical Values Part 4 - Basic Visualizations (using the Pandas Library) Histogram
###Code
# Pandas Histogram
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
import pandas as pd
# Assign data url to variable
mpg_data_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
# load data
df = pd.read_csv(mpg_data_url)
# inspect data
df.head()
# Data is missing headers, reload data with column names
df = pd.read_csv(mpg_data_url, header=None, names=['mpg', 'cylinders', 'displacement',
'horsepower', 'weight', 'acceleration',
'model year', 'origin', 'car name'])
# Inspect data
df.head()
# Data is combined into 1 column, reload data
df = pd.read_csv(mpg_data_url, sep='\s+', header=None, names=['mpg', 'cylinders', 'displacement',
'horsepower', 'weight', 'acceleration',
'model year', 'origin', 'car name'])
# Inspect data
df.head(50)
import numpy as np
df_nan_values = df.replace('?', np.NaN)
df_nan_values.head(50)
# Check for missing data
df_nan_values.isnull().sum()
print(df_nan_values.dtypes)
# Fill forward to replace nan values
df_no_nan = df_nan_values.fillna(method='ffill')
df_no_nan.head(50)
print(df_no_nan.dtypes)
print(df_no_nan.isnull().sum())
# Horsepower still showing as type object, cast feature as float
df_no_nan['horsepower'] = df_no_nan['horsepower'].astype(float)
# Check data types
df_no_nan.dtypes
# Use one hot encoding on column 'car name' to convert categorical data to boolean
df_cleaned = pd.get_dummies(df_no_nan, columns=['car name'])
df_cleaned.head()
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - !curl method - CSV upload method- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot- "Clean" a dataset using common Python libraries - Removing NaN values "Interpolation" Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
?pd.read_csv
??pd.read_csv
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
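# A possible answer to the exercise above (a sketch, not the official solution):
# transcribe the 30 names from the codebook and pass them via `names`;
# header=None keeps the first data row from being consumed as a header.
flag_cols = ['name', 'landmass', 'zone', 'area', 'population', 'language', 'religion',
             'bars', 'stripes', 'colours', 'red', 'green', 'blue', 'gold', 'white',
             'black', 'orange', 'mainhue', 'circles', 'crosses', 'saltires', 'quarters',
             'sunstars', 'crescent', 'triangle', 'icon', 'animate', 'text', 'topleft', 'botright']
flag_data = pd.read_csv(flag_data_url, header=None, names=flag_cols)
flag_data.head()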
###Output
_____no_output_____
###Markdown
Loading from a local CSV to Google Colab
###Code
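# Minimal sketch of the Colab upload workflow (assumes this notebook is running in Google Colab):
# files.upload() opens a file picker and saves the chosen file(s) into the working directory,
# after which they can be read like any other local file.
from google.colab import files
uploaded = files.upload()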
###Output
_____no_output_____
###Markdown
Part 2 - Basic Visualizations Basic Data Visualizations Using Matplotlib
###Code
import matplotlib.pyplot as plt
# Scatter Plot
# Histogram
# Seaborn Density Plot
# Seaborn Pairplot
###Output
_____no_output_____
###Markdown
Create the same basic Visualizations using Pandas
###Code
# Pandas Histogram - Look familiar?
# Pandas Scatterplot
# Pandas Scatter Matrix - Usually doesn't look too great.
###Output
_____no_output_____
###Markdown
Part 3 - Deal with Missing Values Diagnose Missing Values. Let's use the Adult Dataset from UCI.
###Code
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
###Output
_____no_output_____
###Markdown
Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar semi-clean source. You don't want the data that you're working with for this assignment to have any bigger issues than maybe not having headers or including missing values, etc.After you have chosen your dataset, do the following:- Import the dataset using the method that you are least comfortable with (!curl or CSV upload). - Make sure that your dataset has the number of rows and columns that you expect. - Make sure that your dataset has appropriate column names, rename them if necessary. - If your dataset uses markers like "?" to indicate missing values, replace them with NaNs during import.- Identify and fill missing values in your dataset (if any) - Don't worry about using methods more advanced than the `.fillna()` function for today.- Create one of each of the following plots using your dataset - Scatterplot - Histogram - Density Plot - Pairplot (note that pairplots will take a long time to load with large datasets or datasets with many columns)If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck!).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
#Dataset selected from UCI.
#Find the file to download.
breastcancer_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer/breast-cancer.data'
#Create a dataframe.
import pandas as pd
breastcancer_df = pd.read_csv(breastcancer_data_url)
#Start to learn about the data we have.
print(breastcancer_df.shape)
breastcancer_df.head()
#Notice in looking at the shape of the dataframe that we only have 285 instances... The UCI website says we should have 286 instances.
#We can confirm that we should have 286 instances below.
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer/breast-cancer.data | wc
#In this case, we see that we don't have attribute/column names in our dataframe. So, let's find out what our column names should be and add those to our dataframe.
#What are the column names of the dataset? Take a look at the meta data below.
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer/breast-cancer.names
#Write the column names as a list.
column_names = ['class', 'age', 'menopause', 'tumor_size', 'inv_nodes', 'node_caps', 'deg_malig', 'breast', 'breast_quad', 'irradiat']
#Pass the column names into our dataframe. Be sure to save the updated dataframe.
breastcancer_df = pd.read_csv(breastcancer_data_url, names = column_names )
#Verify that column names are now in our dataframe.
breastcancer_df.head()
#Does our dataset have any missing values?
#We see that our dataset doesn't have any missing values.
breastcancer_df.isna().sum()
#Create basic visualizations of our dataset.
#Histogram example
breastcancer_df.deg_malig.hist(bins=5);
#Density plot example
breastcancer_df.deg_malig.plot.density();
#Seaborn Pairplot
import seaborn as sns
sns.set(style='ticks', color_codes=True)
graph = sns.pairplot(breastcancer_df)
#When trying to run some of these basic visualizations, I came to find that my data needs to be in more numeric form.
###Output
_____no_output_____
###Markdown
Stretch Goals - Other types and sources of dataNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.One last major source of data is APIs: https://github.com/toddmotto/public-apisAPI stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. Image, text, or (public) APIs are probably more tractable - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
###Code
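# Minimal sketch of pulling data from a public API with the requests library.
# The SpaceX launches endpoint is used as an example and is assumed to still be reachable.
import requests
import pandas as pd

response = requests.get('https://api.spacexdata.com/v3/launches/')
launches = response.json()            # a list of dicts, one per launch
launches_df = pd.DataFrame(launches)  # tabular view of the JSON records
launches_df.head()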
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
#!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
flag_names = ["name", "landmass", "zone", "area", "population", "language", "religion", "bars", "stripes", "colours", "red", "green", "blue", "gold", "white", "black", "orange", "mainhue", "circles", "crosses", "saltires", "quarters", "sunstars", "crescent", "triangle", "icon", "animate", "text", "topleft", "botright"]
import pandas as pd
flag_data = pd.read_csv(flag_data_url, header=None, names=flag_names )
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
computer_data_names =["vendor", "model", "myct", "mmin", "mmax", "cach", "chmin", "chmax", "prp", "erp"]
computer_data = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/cpu-performance/machine.data", header=None, names = computer_data_names)
computer_data.head(), computer_data.isna().sum()
###Output
_____no_output_____
###Markdown
Stretch Goals - Other types and sources of dataNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.One last major source of data is APIs: https://github.com/toddmotto/public-apisAPI stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. I suggset image, text, or (public) API - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
###Code
import requests
url = "https://api.spacexdata.com/v3/launches/"
r = requests.get(url)
data = r.json()
data[0]
#from pandas.io.json import json_normalize
#spacex = json_normalize(data[0])
df =pd.DataFrame(data)
df.head()
list_launch_site = df['launch_site'].tolist()
list_rocket = df['rocket'].tolist()
#did not help
def change_column_names(toplevel,dataframe_list):
names = dataframe_list.columns.values.tolist()
new = []
for i in names:
i = toplevel + i
new.append(i)
return new
df_launch_site = pd.DataFrame(list_launch_site)
df_rocket = pd.DataFrame(list_rocket)
df_1 = pd.concat([df, df_launch_site, df_rocket], axis=1).drop(columns = ['launch_site','rocket'])
df_1.head()
list_fairings = df_1['fairings'].tolist()
#df_fairings = pd.DataFrame(list_fairings)
len(list_fairings)
###Output
_____no_output_____
###Markdown
class notes
###Code
audio_data_url = "http://archive.ics.uci.edu/ml/machine-learning-databases/audiology/audiology.standardized.data"
audio_data = pd.read_csv(audio_data_url, header=None)
audio_data.head() , audio_data.tail()
audio_data = pd.read_csv(audio_data_url, header=None)
import numpy as np
audio_data.replace('?', np.nan, inplace=True)
audio_data.replace('f', False, inplace=True)
audio_data.replace('t', True, inplace=True)
audio_data.head()
audio_data_forward_fill = audio_data.fillna(method='ffill')
audio_data_forward_fill.head()
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - !curl method - CSV upload method- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot- "Clean" a dataset using common Python libraries - Removing NaN values "Interpolation" Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
?pd.read_csv
??pd.read_csv
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
col_headers = ['name','landmass','zone','area','population','language','religion','bars','stripes','colours','red',
'green','blue','gold','white','black','orange','mainhue','circles','crosses','saltires','quarters',
'sunstars','crescent','triangle','icon','animate','text','topleft','botright']
flag_data = pd.read_csv(flag_data_url, header=None, names=col_headers)
flag_data.head()
# Take a look at the raw numeric language codes before recoding them
flag_data['language'].value_counts()
# Note: .map() takes a dict (not keyword arguments) - the code-to-name mapping is built in the next cell
language = {
1 : 'English',
2 : 'Spanish',
3 : 'French',
4 : 'German',
5 : 'Slavic',
    6 : 'Other Indo-European',
7 : 'Chinese',
8 : 'Arabic',
9 : 'Japanese/Turkish/Finnish/Magyar',
10 : 'Others'
}
flag_data1 = flag_data.copy()
flag_data1['language'] = flag_data1['language'].map(language)  # columns are named, so index by name rather than position
flag_data1.head()
di = {1:"English", 2:"Spanish", 3:"French", 4:"German", 5:"Slavic", 6:"Other Indo-European",
7:"Chinese", 8:"Arabic", 9:"Japanese/Turkish/Finnish/Magyar", 10:"Others"}
flag_data['language'] = flag_data['language'].replace(di)  # replace the codes in just the language column
flag_data.head()
###Output
_____no_output_____
###Markdown
Reading other CSV's
###Code
link1 = 'https://raw.githubusercontent.com/BJanota11/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module2-loadingdata/drinks_with_regions.csv'
link2 = 'https://raw.githubusercontent.com/BJanota11/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module2-loadingdata/drinks_with_regions_index.csv'
link3 = 'https://raw.githubusercontent.com/BJanota11/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module2-loadingdata/drinks_with_regions_header.csv'
df = pd.read_csv(link1)
print(df.shape)
df.head()
df = pd.read_csv(link2, index_col=0)
print(df.shape)
df.head()
df = pd.read_csv(link3, header=3) # Could also use "skiprows" instead of "header"
print(df.shape)
df.head()
###Output
(193, 7)
###Markdown
Loading from a local CSV to Google Colab
###Code
from google.colab import files
uploaded = files.upload()
###Output
_____no_output_____
###Markdown
Part 2 - Basic Visualizations Basic Data Visualizations Using Matplotlib
###Code
import matplotlib.pyplot as plt
# Scatter Plot
plt.scatter(df.beer_servings, df.wine_servings)
plt.title('Wine Servings by Beer Servings')
plt.xlabel('Beer Servings')
plt.ylabel('Wine Servings')
plt.show()
# Histogram
plt.hist(df.total_litres_of_pure_alcohol, bins=20);
# Seaborn Density Plot
# Seaborn Pairplot
import seaborn as sns
sns.pairplot(df);
###Output
_____no_output_____
###Markdown
Create the same basic Visualizations using Pandas
###Code
# Pandas Histogram - Look familiar?
# Pandas Scatterplot
# Pandas Scatter Matrix - Usually doesn't look too great.
###Output
_____no_output_____
###Markdown
Part 3 - Deal with Missing Values Diagnose Missing Values. Let's use the Adult Dataset from UCI.
###Code
df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/adult.csv', na_values='?')
print(df.shape)
df.head()
df.isna().sum()
df.country.value_counts()
df.country.unique()
df.dropna(subset=['country'], inplace=True)
df.shape
df.head()
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
df.mode()
# .mode() returns a DataFrame, so take its first row and assign the result back
df = df.fillna(df.mode().iloc[0])
df.isna().sum()
df.head()
###Output
_____no_output_____
###Markdown
Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar semi-clean source. You don't want the data that you're working with for this assignment to have any bigger issues than maybe not having headers or including missing values, etc.After you have chosen your dataset, do the following:- Import the dataset using the method that you are least comfortable with (!curl or CSV upload). - Make sure that your dataset has the number of rows and columns that you expect. - Make sure that your dataset has appropriate column names, rename them if necessary. - If your dataset uses markers like "?" to indicate missing values, replace them with NaNs during import.- Identify and fill missing values in your dataset (if any) - Don't worry about using methods more advanced than the `.fillna()` function for today.- Create one of each of the following plots using your dataset - Scatterplot - Histogram - Density Plot - Pairplot (note that pairplots will take a long time to load with large datasets or datasets with many columns)If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck!).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.model_selection import train_test_split
from google.colab import files
uploaded = files.upload()
df = pd.read_csv('bridges.data.version1') # Storing the bridges CSV in a pandas dataframe
print(df.shape)
df.head()
# Changing the names of the columns
col_headers = ['Identifier','River','Location','Year Erected','Purpose','Length','# of Lanes','Clear-G','T or D','Material','Span','Rel-L','Type']
bridgedf = pd.read_csv('bridges.data.version1', header=None, names=col_headers)
bridgedf.head(20)
bridgedf.isna().sum() # Checking for NaN -- appears that all missing values were filled in with "?"
bridgedf['Length'].value_counts()['?']
bridgedf.describe() # Can't find median/mean for Length because NaN values are classified as "?"
# Turn values in Length to ints and turn ? to NaN
bridgedf['Length'] = pd.to_numeric(bridgedf['Length'], errors='coerce')
# Turn values in # of Lanes to ints and turn ? to NaN
bridgedf['# of Lanes'] = pd.to_numeric(bridgedf['# of Lanes'], errors='coerce')
bridgedf.head(20)
# Checking to make sure "?" values in # of Lanes were changed to NaN
bridgedf.tail()
# Replace all other "?" values with "NaN"
bridgedf = bridgedf.replace('?',np.NaN)
bridgedf.head(10)
bridgedf.describe()
bridgedf.isnull().sum()
bridgedf.tail()
# Length column: Fill in NaN values with mean -- mean and median appear to be the same
bridgedf['Length'] = bridgedf['Length'].fillna(bridgedf['Length'].mean())
# # of Lanes column: Fill in NaN values with mean
bridgedf['# of Lanes'] = bridgedf['# of Lanes'].fillna(bridgedf['# of Lanes'].mean())
# Other columns: Fill in NaN values with value above -- there are missing values at the bottom of the dataframe, but not at the top
bridgedf = bridgedf.fillna(method = 'ffill')
bridgedf.head(10)
# Check to make sure all NaN values were filled
bridgedf.isna().sum()
###Output
_____no_output_____
###Markdown
Scatterplot
###Code
# Since there are only 107 total values and 27 of those were filled with the mean, you can see a clear line where those values were filled in
plt.scatter(bridgedf['Year Erected'], bridgedf['Length'])
plt.title('Length of Bridges by Year Erected')
plt.xlabel('Year Erected')
plt.ylabel('Length')
plt.show()
###Output
_____no_output_____
###Markdown
Histogram
###Code
plt.hist(bridgedf['Type'], stacked=True)
plt.title('Bridge Types Erected')
plt.xlabel('Bridge Type')
plt.ylabel('Number of Bridges')
# bridgedf['Type'].hist is only a method reference, not a call - the plt.hist call above already draws the histogram
###Output
_____no_output_____
###Markdown
Density Plot
###Code
sns.distplot(bridgedf['Length'], hist=True, kde=True, bins=int(50), color = 'green', hist_kws={'edgecolor':'black'}, kde_kws={'linewidth': 3})
sns.set(rc={'figure.figsize':(20,10)});
###Output
_____no_output_____
###Markdown
Pairplot
###Code
??sns.pairplot  # introspect the function signature and source (no parentheses needed)
sns.pairplot(bridgedf)
# Columns can be selected with the `vars` parameter rather than passing Series positionally
sns.pairplot(bridgedf, vars=['Year Erected', 'Length', '# of Lanes'])
###Output
_____no_output_____
###Markdown
Stretch Goals - Other types and sources of dataNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.One last major source of data is APIs: https://github.com/toddmotto/public-apisAPI stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. Image, text, or (public) APIs are probably more tractable - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
###Code
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - From a URL (github or otherwise) - CSV upload method - !wget method- "Clean" a dataset using common Python libraries - Removing NaN values "Data Imputation"- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot (if we have time) Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
def drive_copy():
"""Just want to test out Drive-GitHub saving workflow."""
pass
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
###Output
_____no_output_____
###Markdown
Steps of Loading and Exploring a Dataset:- Find a dataset that looks interesting- Learn what you can about it - What's in it? - How many rows and columns? - What types of variables?- Look at the raw contents of the file- Load it into your workspace (notebook) - Handle any challenges with headers - Handle any problems with missing values- Then you can start to explore the data - Look at the summary statistics - Look at counts of different categories - Make some plots to look at the distribution of the data 3 ways of loading a dataset From its URL
###Code
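# Sketch: pandas can read a remote file directly from its URL
# (using the UCI flags file from the lecture; header=None because the file has no header row)
import pandas as pd

flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
df = pd.read_csv(flag_data_url, header=None)
df.head()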
###Output
_____no_output_____
###Markdown
From a local file
###Code
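# Sketch: reading a file that already lives on disk - pass a path instead of a URL.
# 'flag.data' is assumed to have been downloaded or uploaded into the working directory already.
df = pd.read_csv('flag.data', header=None)
df.head()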
###Output
_____no_output_____
###Markdown
Using the `!wget` command
###Code
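# Sketch: shell out with !wget to download the file into the notebook's working directory,
# then read it as a local file (wget is assumed to be available, as it is on Colab).
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
df = pd.read_csv('flag.data', header=None)
df.head()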
###Output
_____no_output_____
###Markdown
Part 2 - Deal with Missing Values Diagnose Missing Values. Let's use the Adult Dataset from UCI.
###Code
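# Sketch of diagnosing missing values in the Adult dataset
# (URL and the '?' missing-value marker as used in the lecture example)
df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/adult.csv',
                 na_values='?')  # treat '?' as NaN while reading
print(df.shape)
df.isna().sum()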
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
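# One simple strategy: fill each column's NaNs with that column's mode (most frequent value).
# df.mode() returns a DataFrame, so take its first row with .iloc[0].
df = df.fillna(df.mode().iloc[0])
df.isna().sum()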
###Output
_____no_output_____
###Markdown
Part 3 - Explore the Dataset: Look at Summary Statistics Numeric
###Code
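# Numeric summary statistics: count, mean, std, min/max and quartiles for the numeric columns
df.describe()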
###Output
_____no_output_____
###Markdown
Non-Numeric
###Code
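# Summary of the non-numeric columns: count, number of unique values, most common value and its frequency
df.describe(include='object')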
###Output
_____no_output_____
###Markdown
Look at Categorical Values Part 4 - Basic Visualizations (using the Pandas Library) Histogram
###Code
# Pandas Histogram
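# Sketch, assuming the Adult dataframe loaded above ('age' is one of its numeric columns)
df['age'].hist(bins=20);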
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
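# Density (KDE) plot of the same assumed 'age' column via pandas
df['age'].plot.density();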
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
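# Sketch - 'age' and 'hours-per-week' are assumed column names in the Adult data
df.plot.scatter(x='age', y='hours-per-week');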
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - !curl method - CSV upload method- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot- "Clean" a dataset using common Python libraries - Removing NaN values "Interpolation" Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
?pd.read_csv
??pd.read_csv
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Loading from a local CSV to Google Colab Part 2 - Basic Visualizations Basic Data Visualizations Using Matplotlib
###Code
import matplotlib.pyplot as plt
# Scatter Plot
# Histogram
# Seaborn Density Plot
# Seaborn Pairplot
###Output
_____no_output_____
###Markdown
Create the same basic Visualizations using Pandas
###Code
# Pandas Histogram - Look familiar?
# Pandas Scatterplot
# Pandas Scatter Matrix - Usually doesn't look too great.
###Output
_____no_output_____
###Markdown
Part 3 - Deal with Missing Values Diagnose Missing Values. Let's use the Adult Dataset from UCI. Fill Missing Values
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns  # needed for the seaborn plots further down
###Output
_____no_output_____
###Markdown
Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar semi-clean source. You don't want the data that you're working with for this assignment to have any bigger issues than maybe not having headers or including missing values, etc.After you have chosen your dataset, do the following:- Import the dataset using the method that you are least comfortable with (!curl or CSV upload). - Make sure that your dataset has the number of rows and columns that you expect. - Make sure that your dataset has appropriate column names, rename them if necessary. - If your dataset uses markers like "?" to indicate missing values, replace them with NaNs during import.- Identify and fill missing values in your dataset (if any) - Don't worry about using methods more advanced than the `.fillna()` function for today.- Create one of each of the following plots using your dataset - Scatterplot - Histogram - Density Plot - Pairplot (note that pairplots will take a long time to load with large datasets or datasets with many columns)If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck!).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# I used archive.ics.uci.edu/ml/datasets/Breast+Cancer data
path = '/Users/ridleyleisy/Documents/lambda/unit_one/DS-Unit-1-Sprint-1-Dealing-With-Data/module2-loadingdata/'
dataset_path = path + 'breast-cancer.data'
dataset_name_path = path + 'breast-cancer.names'
# was going to try to find a pythonic way to grab the column data but fastest way is to manually input cols
names = pd.read_csv(dataset_name_path, error_bad_lines=False).reset_index()
cols = ['class','age','menopause','tumor-size','inv-nodes','node-caps','deg-malig','breast','breast-quad','irradiat']
df = pd.read_csv(dataset_path, header=None, names=cols)
# replacing the dreaded ? with nan
df.replace('?', np.NaN, inplace=True)
# splitting series based on - and keeping the higher bound then casting to int
df['age'] = df['age'].str.split('-').str[1].astype(int)
df['tumor-size'] = df['tumor-size'].str.split('-').str[1].astype(int)
df['inv-nodes'] = df['inv-nodes'].str.split('-').str[1].astype(int)
df.head()
df.dtypes
sns.scatterplot(df['tumor-size'],df['breast'])
sns.scatterplot(df['tumor-size'],df['breast-quad'])
sns.barplot(df['age'],df['tumor-size'])
plt.hist(df['age'])
sns.distplot(df['age'])
sns.pairplot(df)
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - !curl method - CSV upload method- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot- "Clean" a dataset using common Python libraries - Removing NaN values "Interpolation" Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
?pd.read_csv
??pd.read_csv
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
###Output
_____no_output_____
###Markdown
Loading from a local CSV to Google Colab
###Code
###Output
_____no_output_____
###Markdown
Part 2 - Basic Visualizations Basic Data Visualizations Using Matplotlib
###Code
import matplotlib.pyplot as plt
# Scatter Plot
# Histogram
# Seaborn Density Plot
# Seaborn Pairplot
###Output
_____no_output_____
###Markdown
Create the same basic Visualizations using Pandas
###Code
# Pandas Histogram - Look familiar?
# Pandas Scatterplot
# Pandas Scatter Matrix - Usually doesn't look too great.
###Output
_____no_output_____
###Markdown
Part 3 - Deal with Missing Values Diagnose Missing Values. Let's use the Adult Dataset from UCI.
###Code
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
###Output
_____no_output_____
###Markdown
Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar semi-clean source. You don't want the data that you're working with for this assignment to have any bigger issues than maybe not having headers or including missing values, etc.After you have chosen your dataset, do the following:- Import the dataset using the method that you are least comfortable with (!curl or CSV upload). - Make sure that your dataset has the number of rows and columns that you expect. - Make sure that your dataset has appropriate column names, rename them if necessary. - If your dataset uses markers like "?" to indicate missing values, replace them with NaNs during import.- Identify and fill missing values in your dataset (if any) - Don't worry about using methods more advanced than the `.fillna()` function for today.- Create one of each of the following plots using your dataset - Scatterplot - Histogram - Density Plot - Pairplot (note that pairplots will take a long time to load with large datasets or datasets with many columns)If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck!).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
###Output
_____no_output_____
###Markdown
Stretch Goals - Other types and sources of dataNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.One last major source of data is APIs: https://github.com/toddmotto/public-apisAPI stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. Image, text, or (public) APIs are probably more tractable - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
###Code
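# A small hedged example of one public API from the list above: GitHub's REST API
# returns JSON describing a repository (endpoint and field names per the GitHub docs).
import requests

resp = requests.get('https://api.github.com/repos/pandas-dev/pandas')
repo = resp.json()
repo['full_name'], repo['stargazers_count']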
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# uploading abalone data from the databases and using the !curl
abalone_data = 'https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data'
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data
abalone = pd.read_csv(abalone_data)
# Making sure I have something
abalone.head()
# verifying the count
abalone.count()
abalone.isna().sum()
# the first read treated a data row as the header; re-read with header=None and supply column names from the data description
abalone = pd.read_csv(abalone_data, header=None, names=('Sex','Length','Diameter','Height','Whole Weight','Shucked Weight','Viscera weight','Shell Weight','Rings'))
abalone.head()
abalone.count()
abalone.isna().sum()
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
import pandas as pd
#I am reading from a car model - mpg dataset
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data', sep='\t')
print(df.shape)
df.head(5)
#The data isn't shaped correctly
#I noticed it wasn't all tab separated - I used stackoverflow to find the delim_whitespace=True argument. This counts both spaces and tabs as a delimiter
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data', delim_whitespace=True, header=None)
df.head()
df.columns
# I'd like to restructure the dataset so that car model is on the left hand side
# I'd also like to label the columns
labels1= ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight', 'Acceleration', 'Model Year', 'Origin', 'Car Model']
df.columns = labels1
df.head(50)
#lets clean the data
df.isnull().sum()
#The dataset said it had missing values where I got it. After examining the dataset, it uses '?' to denote missing values
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data', delim_whitespace=True, header=None, na_values=['?'])
df.columns = labels1
df.isnull().sum()
#Horsepower is the only column with missing values - it is numeric and only 6 samples need to be filled. Filling them with the median should be optimal.
import numpy as np
df.fillna(df.Horsepower.median(), inplace=True)
df.isnull().sum()
df.head(10)
#Reformatted dataset, it is loaded and ready!
df = df[['Car Model', 'MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight', 'Acceleration', 'Model Year', 'Origin']]
df.head(10)
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
import pandas as pd
data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
cols = ['mpg',
'cylinders',
'displacement',
'horsepower',
'weight',
'acceleration',
'model_year',
'origin',
'car_name']
df = pd.read_csv(data_url, header=None, delim_whitespace=True, names=cols) # Use delim_whitespace because dataset is not separated by commas
df.head(15)
#help(pd.read_csv)
df.describe()
df.shape
import numpy as np
#pd.set_option('display.max_rows', 400) # setting options to display all 398 rows
df.replace('?', np.nan, inplace=True) # Replace '?' with NaN so I can calculate the mean of horsepower
df['horsepower'].head(400)
#help(df.replace)
df.dropna(inplace=True) # Drop rows with NaNs
df.describe()
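# Hedged aside (not part of the original flow): after the '?' rows are dropped,
# horsepower is still stored as strings; converting it to a numeric dtype lets
# .mean()/.describe() treat it like the other columns.
df['horsepower'] = pd.to_numeric(df['horsepower'])
df['horsepower'].mean()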
import matplotlib.pyplot as plt
plt.scatter(df['weight'], df['mpg'])
plt.title('Weight by MPG Scatter Plot')
plt.xlabel('Weight')
plt.ylabel('MPG');
###Output
_____no_output_____
###Markdown
Stretch Goals - Other types and sources of dataNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.One last major source of data is APIs: https://github.com/toddmotto/public-apisAPI stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. I suggest image, text, or (public) APIs - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
###Code
!pip install quandl
# https://www.quandl.com/tools/python
import quandl
quandl.ApiConfig.api_key='MpnaaJjBfmnG16L9faLs' # Set authentication key for Quandl
data = quandl.get("EIA/PET_RWTC_D", returns="numpy") # Get WTI Crude oil data from Quandl
data
plt.plot(data); # Plot simple stock chart
# Another API -- Blockchain
# https://github.com/blockchain/api-v1-client-python
!pip install blockchain
from blockchain import exchangerates
ticker = exchangerates.get_ticker()
btc_price = []
# Append btc_price list with 15 minute bitcoin price for every currency
for k in ticker:
price_per_curr = k, ticker[k].p15min
btc_price.append(price_per_curr)
col = ['Currency', '1 Bitcoin']
btc_df = pd.DataFrame(btc_price, columns=col) # Create dataframe with the list
btc_df
#help(pd.DataFrame)
# Extra bit I was playing with
#btc_amount = exchangerates.to_btc('USD', 4024.18)
#btc_amount
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
## Bash command is to run it from the machine?
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
## Take a look at header
import pandas as pd
flag_data = pd.read_csv(flag_data_url, header=None)
# For Extra Help: help(pd.read_csv)
# Step 3 - verify we've got *something*
## Missing headers as is
flag_data.head()
# How the column names are currently
#flag_data.columns
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
from google.colab import files
uploaded = files.upload()
df = pd.read_csv('imports-85.data', header=None, names=['symboling','norm_loss','make','fuel','aspiration','doors',
'bod_style','drv_wheels','eng_loc','wheel_base','length','width',
'height','curb_weight','engine','cylinders','engine_size',
'fuel_system','bore','stroke','compression','hp','peak_rpm',
'city_mpg','hgwy_mpg','price'])
df.head()
import numpy as np
#replacing NaN Values
df_cleaned = df.replace('?', np.NaN)
df_cleaned.head()
# Fill the numeric columns with the mean
# and Forward Fill the categorical columns
# (note: the '?' markers were turned into NaN in df_cleaned, not df, so that is the
#  frame to fill; a fuller sketch follows below)
#df_cleaned.dtypes # use to check things out
#df_cleaned.isnull().sum() # to check what is null
#df_cleaned.isna().sum() # check for NA Values
df_cleaned.fillna(method='ffill')
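# Sketch of the intent described above, applied to df_cleaned (the frame where '?'
# was converted to NaN): fill numeric columns with their means, then forward-fill
# the remaining, mostly categorical, columns. Columns still stored as strings
# (e.g. norm_loss) would first need pd.to_numeric to be picked up as numeric here.
numeric_cols = df_cleaned.select_dtypes(include='number').columns
df_filled = df_cleaned.copy()
df_filled[numeric_cols] = df_filled[numeric_cols].fillna(df_filled[numeric_cols].mean())
df_filled = df_filled.ffill()
df_filled.isna().sum()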
###Output
_____no_output_____
###Markdown
Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals. **Wine Data**
###Code
# Address the file to reference
wine_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data'
# Pull in the dataset
import pandas as pd
wine_data = pd.read_csv(wine_data_url, header=None)
# Checking out what the data looks like -- first 5 rows
wine_data.head()
# I don't like how I cannot see the names in the Header, so I will add in the names from the dataset.
# To find the names, aka attributes, I returned to the source and clicked on the tab "Names"
# This is the revised version of the data header
wine_data = pd.read_csv(wine_data_url, header=None, names=['Alcohol', 'Malic acid','Ash','Alcalinity of ash','Magnesium', 'Total phenols', 'Flavanoids','Nonflavanoid phenols','Proanthocyanins','Color intensity','Hue','OD280/OD315 of diluted wines','Proline' ])
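# Note: only 13 names are given here for the file's 14 columns, so pandas treats the
# first column (the wine class label, 1-3) as the index rather than as a data column.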
wine_data.head()
wine_data.shape
wine_data.describe()
# Checking out the Data by index
#wine_data.sort_index()
from statistics import mean, median, mode
# Calculating Mean, Median and Mode
print("\n----------- Wine Data Mean -----------\n")
print(wine_data.mean())
print("\n----------- Wine Data Median -----------\n")
print(wine_data.median())
print("\n----------- Wine Data Mode -----------\n")
print(wine_data.mode())
###Output
----------- Wine Data Mean -----------
Alcohol 13.000618
Malic acid 2.336348
Ash 2.366517
Alcalinity of ash 19.494944
Magnesium 99.741573
Total phenols 2.295112
Flavanoids 2.029270
Nonflavanoid phenols 0.361854
Proanthocyanins 1.590899
Color intensity 5.058090
Hue 0.957449
OD280/OD315 of diluted wines 2.611685
Proline 746.893258
dtype: float64
----------- Wine Data Median -----------
Alcohol 13.050
Malic acid 1.865
Ash 2.360
Alcalinity of ash 19.500
Magnesium 98.000
Total phenols 2.355
Flavanoids 2.135
Nonflavanoid phenols 0.340
Proanthocyanins 1.555
Color intensity 4.690
Hue 0.965
OD280/OD315 of diluted wines 2.780
Proline 673.500
dtype: float64
----------- Wine Data Mode -----------
Alcohol Malic acid Ash Alcalinity of ash Magnesium Total phenols \
0 12.37 1.73 2.28 20.0 88.0 2.2
1 13.05 NaN 2.30 NaN NaN NaN
2 NaN NaN NaN NaN NaN NaN
Flavanoids Nonflavanoid phenols Proanthocyanins Color intensity Hue \
0 2.65 0.26 1.35 2.6 1.04
1 NaN 0.43 NaN 3.8 NaN
2 NaN NaN NaN 4.6 NaN
OD280/OD315 of diluted wines Proline
0 2.87 520.0
1 NaN 680.0
2 NaN NaN
###Markdown
**Adult Data-- Has Missing Values**
###Code
adult_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'
adult_data = pd.read_csv(adult_data_url, header=None, names=['Age','WorkClass','Fnlwgt','Education','EducationNum','Marital-Status','Occupation','Relationship','Race','Sex','Capital-Gain','Capital-Loss','hrs-per-wk','Native-Country','class'])
# Check it out
adult_data.head()
# Looking at the types of data
adult_data.dtypes
# Change ? to NaN
import numpy as np
adult_clean = adult_data.replace('[?]', np.NaN, regex=True)
# Checking my work
#adult_clean
# Check if there are any NaN Values
adult_clean.isnull().sum()
# If I want to look at the total amount
adult_clean.isnull().sum().sum()
# Fill in the NaN (Playing with different approaches)
# Use this method to Fill in your own Value
adult_robot = adult_clean.fillna('Robot')
# Forward Fill
adult_fill = adult_clean.ffill()
# Drop
adult_drop = adult_clean.dropna()
###Output
_____no_output_____
###Markdown
**Summary Statistics For Each Approach**
###Code
adult_robot.describe()
adult_fill.describe()
adult_drop.describe()
# Check NaN Values again (whichever method works best)
adult_robot.isnull().sum().sum()
adult_fill.isnull().sum().sum()
adult_drop.isnull().sum().sum()
# Calculate Mean, Median and Mode
print("\n----------- Fill Data Mean -----------\n")
print(adult_robot.mean())
print("\n----------- Forward Fill Data Mean -----------\n")
print(adult_fill.mean())
print("\n----------- Drop Na Mean -----------\n")
print(adult_drop.mean())
print("\n----------- Fill Data Median -----------\n")
print(adult_robot.median())
print("\n----------- Forward Data Median -----------\n")
print(adult_fill.median())
print("\n----------- Drop Na Median -----------\n")
print(adult_drop.median())
print("\n----------- Fill Data Mode -----------\n")
print(adult_robot.mode())
print("\n----------- Forward Data Mode -----------\n")
print(adult_fill.mode())
print("\n----------- Drop Na Mode -----------\n")
print(adult_drop.mode())
###Output
----------- Fill Data Mean -----------
Age 38.581647
Fnlwgt 189778.366512
EducationNum 10.080679
Capital-Gain 1077.648844
Capital-Loss 87.303830
hrs-per-wk 40.437456
dtype: float64
----------- Forward Fill Data Mean -----------
Age 38.581647
Fnlwgt 189778.366512
EducationNum 10.080679
Capital-Gain 1077.648844
Capital-Loss 87.303830
hrs-per-wk 40.437456
dtype: float64
----------- Drop Na Mean -----------
Age 38.437902
Fnlwgt 189793.833930
EducationNum 10.121312
Capital-Gain 1092.007858
Capital-Loss 88.372489
hrs-per-wk 40.931238
dtype: float64
----------- Fill Data Median -----------
Age 37.0
Fnlwgt 178356.0
EducationNum 10.0
Capital-Gain 0.0
Capital-Loss 0.0
hrs-per-wk 40.0
dtype: float64
----------- Forward Data Median -----------
Age 37.0
Fnlwgt 178356.0
EducationNum 10.0
Capital-Gain 0.0
Capital-Loss 0.0
hrs-per-wk 40.0
dtype: float64
----------- Drop Na Median -----------
Age 37.0
Fnlwgt 178425.0
EducationNum 10.0
Capital-Gain 0.0
Capital-Loss 0.0
hrs-per-wk 40.0
dtype: float64
----------- Fill Data Mode -----------
Age WorkClass Fnlwgt Education EducationNum Marital-Status \
0 36.0 Private 123011 HS-grad 9.0 Married-civ-spouse
1 NaN NaN 164190 NaN NaN NaN
2 NaN NaN 203488 NaN NaN NaN
Occupation Relationship Race Sex Capital-Gain Capital-Loss \
0 Prof-specialty Husband White Male 0.0 0.0
1 NaN NaN NaN NaN NaN NaN
2 NaN NaN NaN NaN NaN NaN
hrs-per-wk Native-Country class
0 40.0 United-States <=50K
1 NaN NaN NaN
2 NaN NaN NaN
----------- Forward Data Mode -----------
Age WorkClass Fnlwgt Education EducationNum Marital-Status \
0 36.0 Private 123011 HS-grad 9.0 Married-civ-spouse
1 NaN NaN 164190 NaN NaN NaN
2 NaN NaN 203488 NaN NaN NaN
Occupation Relationship Race Sex Capital-Gain Capital-Loss \
0 Prof-specialty Husband White Male 0.0 0.0
1 NaN NaN NaN NaN NaN NaN
2 NaN NaN NaN NaN NaN NaN
hrs-per-wk Native-Country class
0 40.0 United-States <=50K
1 NaN NaN NaN
2 NaN NaN NaN
----------- Drop Na Mode -----------
Age WorkClass Fnlwgt Education EducationNum Marital-Status \
0 36 Private 203488 HS-grad 9 Married-civ-spouse
Occupation Relationship Race Sex Capital-Gain Capital-Loss \
0 Prof-specialty Husband White Male 0 0
hrs-per-wk Native-Country class
0 40 United-States <=50K
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - From a URL (github or otherwise) - CSV upload method - !wget method- "Clean" a dataset using common Python libraries - Removing NaN values "Data Imputation"- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot (if we have time) Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags).
###Code
###Output
_____no_output_____
###Markdown
Lecture example - flag data
###Code
# Confirming sync with GitHub
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
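# A sketch of one way to do the exercise above: pass the codebook's variable names
# to read_csv (remembering that the codebook numbers from 1 but pandas does not).
flag_cols = ['name', 'landmass', 'zone', 'area', 'population', 'language', 'religion',
             'bars', 'stripes', 'colours', 'red', 'green', 'blue', 'gold', 'white',
             'black', 'orange', 'mainhue', 'circles', 'crosses', 'saltires', 'quarters',
             'sunstars', 'crescent', 'triangle', 'icon', 'animate', 'text',
             'topleft', 'botright']
flag_data = pd.read_csv(flag_data_url, header=None, names=flag_cols)
flag_data.head()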
###Output
_____no_output_____
###Markdown
Steps of Loading and Exploring a Dataset:- Find a dataset that looks interesting- Learn what you can about it - What's in it? - How many rows and columns? - What types of variables?- Look at the raw contents of the file- Load it into your workspace (notebook) - Handle any challenges with headers - Handle any problems with missing values- Then you can start to explore the data - Look at the summary statistics - Look at counts of different categories - Make some plots to look at the distribution of the data 3 ways of loading a dataset From its URL
###Code
###Output
_____no_output_____
###Markdown
From a local file
###Code
###Output
_____no_output_____
###Markdown
Using the `!wget` command
###Code
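# Sketch of the !wget approach listed in the objectives: download the file to the
# local filesystem, then read it from disk.
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
pd.read_csv('flag.data', header=None).head()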
###Output
_____no_output_____
###Markdown
Part 2 - Deal with Missing Values Diagnose Missing ValuesLets use the Adult Dataset from UCI.
###Code
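# Sketch: load the UCI adult data and have its '?' entries treated as missing so we
# can diagnose them (column names taken from the dataset's adult.names description).
adult_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'
adult = pd.read_csv(adult_url, header=None, skipinitialspace=True, na_values='?',
                    names=['age', 'workclass', 'fnlwgt', 'education', 'education-num',
                           'marital-status', 'occupation', 'relationship', 'race', 'sex',
                           'capital-gain', 'capital-loss', 'hours-per-week',
                           'native-country', 'income'])
adult.isnull().sum()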
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
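# Continuing from the adult frame sketched above: the columns with missing values
# are categorical, so a simple approach is to fill each with its mode.
for col in ['workclass', 'occupation', 'native-country']:
    adult[col] = adult[col].fillna(adult[col].mode()[0])
adult.isnull().sum()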
###Output
_____no_output_____
###Markdown
Part 3 - Explore the Dataset: Look at "Summary Statistics Numeric
###Code
###Output
_____no_output_____
###Markdown
Non-Numeric
###Code
###Output
_____no_output_____
###Markdown
Look at Categorical Values Part 4 - Basic Visualizations (using the Pandas Library) Histogram
###Code
# Pandas Histogram
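# Minimal sketch, reusing the flag_data frame loaded in the lecture cells above:
# a histogram of column 3 (area, in thousands of square km).
flag_data[3].plot.hist(bins=30, title='Flag dataset - country area')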
###Output
_____no_output_____
###Markdown
Density Plot (KDE)
###Code
# Pandas Density Plot
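# Minimal sketch, reusing flag_data from above: a density (KDE) plot of
# column 9 (number of colours in each flag). Requires scipy, which Colab ships with.
flag_data[9].plot.kde(title='Flag dataset - number of colours')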
###Output
_____no_output_____
###Markdown
Scatter Plot
###Code
# Pandas Scatterplot
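# Minimal sketch, reusing flag_data from above: a scatter plot of
# column 3 (area) against column 4 (population, in round millions).
flag_data.plot.scatter(x=3, y=4, title='Flag dataset - area vs population')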
###Output
_____no_output_____
###Markdown
1. Preparing a Flag Dataset for Analysis Building upon the example covered in the lecture, I spent some time to complete the cleaning and preparation of the flag dataset for analysis. The below example demonstrates an approach to importing a dataset without a header. Further, Pandas' "pd.set_option" is used to prevent the displayed table's columns from being truncated
###Code
import pandas as pd
pd.set_option('display.max_columns', None)
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
df = pd.read_csv(flag_data_url, header=None, names=['names','landmass','zone','area',
'population','lagnauge','religion','bars',
'stripes','colours','red','green','blue',
'gold','white','black','orange','mainhue',
'circles','crosses','saltires','quarters',
'sunstars','cresent','triangle','icon',
'animate','text','topleft','botright'])
df.head()
###Output
_____no_output_____
###Markdown
This dataset contains no null values.
###Code
df.isnull().sum()
###Output
_____no_output_____
###Markdown
However, the dataset does contain several features of type string. In order to fully prepare this dataset for analysis, I will convert these features to be integer based with one hot encoding. Below, I verify that one hot encoding is feasible given the number of distinct strings contained within the feature (i.e. verified that one hot encoding will not create 100s of additional columns in the dataset).
###Code
df['mainhue'].value_counts()
###Output
_____no_output_____
###Markdown
In the below statement, I use pandas' "get_dummies" to one hot encode three string-based features ('mainhue', 'topleft', 'botright')
###Code
df = pd.get_dummies(df, columns=['mainhue', 'topleft', 'botright'], prefix=['mainhue','tl','br'])
df.head()
###Output
_____no_output_____
###Markdown
2. Building a Decision Tree to Predict Student's Chance of Admittance to University Imported the dataset via file upload (originally found on Kaggle).
###Code
from google.colab import files
uploaded = files.upload()
###Output
_____no_output_____
###Markdown
Included instructions for 'read_csv' to remove extra spacing from header names (sep='\s*,\s*'). This was necessary because the dataset's headers contained extra spaces which made accessing them extremely tedious/frustrating.
###Code
import pandas as pd
df = pd.read_csv('Admission_Predict_Ver1.1.csv', sep='\s*,\s*', header=0, engine='python')
df.head()
###Output
_____no_output_____
###Markdown
The dataset has no null values.
###Code
df.isnull().sum()
###Output
_____no_output_____
###Markdown
All of the dataset's features are numeric which makes life easy!
###Code
df.dtypes
###Output
_____no_output_____
###Markdown
A quick verification that none of the columns has a blank value.
###Code
df.describe()
###Output
_____no_output_____
###Markdown
This dataset naturally lends itself to attempting to model the probability that a given student will be accepted to the university they are applying to. In order to increase the reliability of the model's prediction, in the below statements I reduce the specificity of the "Chance of Admit" column (rounded to nearest 10%; 10%, 20%, 30%, etc.)
###Code
df['Chance of Admit'] = df['Chance of Admit'] * 10
# Note: astype(int) truncates toward zero rather than rounding; .round() would give a
# true nearest-10% bucket, but the truncated version is kept to match the results below.
df['Chance of Admit'] = df['Chance of Admit'].astype(int)
df.head()
###Output
_____no_output_____
###Markdown
Using the 'train_test_split' function, our dataset is divided into a training and test set.
###Code
from sklearn.model_selection import train_test_split
train, test = train_test_split(df, random_state=0)
train.shape, test.shape
###Output
_____no_output_____
###Markdown
A Decision Tree model is constructed based on a number of features, with the target column set to "Chance of Admit".
###Code
features = ['GRE Score', 'TOEFL Score', 'University Rating', 'SOP', 'LOR', 'CGPA', 'Research']
target = 'Chance of Admit'
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
model = DecisionTreeClassifier(max_depth=4)
model.fit(train[features], train[target])
###Output
_____no_output_____
###Markdown
The below accuracy scores demonstrate that our model is somewhat predictive and that the dropoff between the Train and Test accuracy scores is reasonable (although not ideal). Note, to fine-tune the accuracy scores, I adjusted the included features and the max_depth of the decision tree. I found that a large max_depth resulted in over-fitting to the training data--and a very low test accuracy score.
###Code
#Train Accuracy
y_true = train[target]
y_pred = model.predict(train[features])
print('Train Accuracy:', accuracy_score(y_true, y_pred))
#Test Accuracy
y_true = test[target]
y_pred = model.predict(test[features])
print('Test Accuracy:', accuracy_score(y_true, y_pred))
###Output
Train Accuracy: 0.6746666666666666
Test Accuracy: 0.6
###Markdown
Below is an interesting plot which shows the relative importance of each feature in the model. By far the most influential variable is a student's GPA (from prior institutions).
###Code
import matplotlib.pyplot as plt
pd.Series(model.feature_importances_, features).plot.barh()
plt.title('Decision Tree Feature Importances')
###Output
_____no_output_____
###Markdown
Finally, the decision tree model can be used to predict a hypothetical student's chance of admittance to a specific university based on their educational background.
###Code
import numpy as np
features = [[337, 118, 4, 4.5, 4.5, 9.65, 1]]
prediction = model.predict(np.asarray(features))
print('Predicted Rating: ' + str(prediction))
###Output
Predicted Rating: [9]
###Markdown
3. Cleaning a Real Estate Data Set To get more intensive data cleaning practice, I used a toy real estate data set (source). The first step was uploading the file to colab.
###Code
from google.colab import files
uploaded = files.upload()
###Output
_____no_output_____
###Markdown
I then imported the dataset and reviewed it with ".head()". Note, a simpler way to approach data cleaning is to have non-NaN missing-value markers (e.g., 'na', '--', 'n/a') converted to NaN at import time. This can be done by passing na_values=['na', '--', 'n/a'] to the 'read_csv' call, as sketched in the code below.
###Code
import pandas as pd
df = pd.read_csv('property data.txt')
df.head(15)
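# Sketch of the simpler import mentioned above (same uploaded file): let read_csv
# convert the non-standard missing markers to NaN at load time.
df_alt = pd.read_csv('property data.txt', na_values=['na', '--', 'n/a'])
df_alt.isnull().sum()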
###Output
_____no_output_____
###Markdown
As shown, there are a number of null values across the features.
###Code
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 9 entries, 0 to 8
Data columns (total 7 columns):
PID 8 non-null float64
ST_NUM 7 non-null float64
ST_NAME 9 non-null object
OWN_OCCUPIED 8 non-null object
NUM_BEDROOMS 7 non-null object
NUM_BATH 8 non-null object
SQ_FT 8 non-null object
dtypes: float64(2), object(5)
memory usage: 584.0+ bytes
###Markdown
The below statements, use a mix of 'fillna', 'ffill', and 'replace' to swap null/inappropriate values for sensible values.
###Code
df['NUM_BEDROOMS'] = df['NUM_BEDROOMS'].fillna(1)
df['NUM_BEDROOMS'] = df['NUM_BEDROOMS'].replace('na', 3)
df['OWN_OCCUPIED'] = df['OWN_OCCUPIED'].replace('12', 'Y')
df['OWN_OCCUPIED'] = df['OWN_OCCUPIED'].fillna('Y')
df['ST_NUM'] = df['ST_NUM'].ffill()
df['PID'] = df['PID'].ffill()
df['NUM_BATH'] = df['NUM_BATH'].replace('HURLEY', 1)
df['NUM_BATH'] = df['NUM_BATH'].fillna(1)
df['SQ_FT'] = df['SQ_FT'].replace('--', 1000)
df['SQ_FT'] = df['SQ_FT'].fillna(1000)
###Output
_____no_output_____
###Markdown
The final step in preparing this dataset is to convert the "ST_NAME" column to an int-based feature with one hot encoding.
###Code
df = pd.get_dummies(df, columns=['ST_NAME'], prefix=['STREET'])
df.head(10)
###Output
_____no_output_____
###Markdown
4. Preparing a Forest Fires Dataset For AnalysisTo illustrate dataset importing and cleaning, I will work through the publicly available Forest Fires dataset (hosted by UCI). This dataset contains information related to forest fires in the northeast region of Portugal. Our objective is to prepare the dataset for regression models which will aim to predict the burned area of forest fires in northeast Portugal.
###Code
import numpy as np
import pandas as pd
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/forest-fires/forestfires.csv')
df.head()
###Output
_____no_output_____
###Markdown
A quick check reveals that the dataset does not have any missing/null values.
###Code
#Check to see if we are missing values
df.isnull().sum()
df.describe()
###Output
_____no_output_____
###Markdown
For the most part the dataset is composed of features that are either ints or floats. In the next set of activities, I will convert the two object features (month, day) into model-interpretable features with one-hot encoding.
###Code
df.dtypes
###Output
_____no_output_____
###Markdown
One hot encoding uses Pandas' "get_dummies" function to create a new column in the dataframe for each unique value within the original feature's column. For example, "get_dummies" converts the "month" column into 12 distinct columns (1 per month). Each row within each column is designated as either 0 (False) or 1 (True). One important detail to remember is that the "get_dummies" function returns a modified dataframe.
###Code
df = pd.get_dummies(df, columns=['month'])
df.head()
pd.get_dummies(df, columns=['day']).head()
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Note: the shell can't see Python variables, so `!wget wine_url` won't work;
# pass the URL itself (or interpolate a defined variable with {wine_url}).
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# Wine data
# results of a chemical analysis of wines grown in the same region in Italy
# but derived from three different cultivars.
# https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data
import pandas as pd
wine_url = ('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data')
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data
# Inserting columns
cols = [
'Alcohol',
'Malic_acid',
'Ash',
'Alcalinity_of_ash',
'Magnesium',
'Total_phenols',
'Flavanoids',
'Nonflavanoid_phenols',
'Proanthocyanins',
'Color_intensity',
'Hue',
'Diluted_wines',
'Proline'
]
df_wine = pd.read_csv(wine_url, header=None, names=cols)
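# Note: cols lists 13 names for the file's 14 columns, so pandas treats the first
# column (the wine class label, 1-3) as the index rather than as a data column.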
df_wine.head(8)
df_wine.count()
df_wine.describe
df_wine.describe()
df_wine.corr()
df_wine.shape
df_wine.isnull().sum()
###Output
_____no_output_____
###Markdown
Stretch Goals - Other types and sources of dataNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.One last major source of data is APIs: https://github.com/toddmotto/public-apisAPI stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. I suggest image, text, or (public) APIs - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
###Code
api_url = 'https://random.dog/woof.json'
# using command line tool
!curl https://random.dog/woof.json
# in python, using library called "Requests"
import requests
import json
response = requests.get(api_url)
print(response)
response.text
#dir(response)
# this will give the output as a string
#def get_dog():
#response = requests.get('https://random.dog/woof.json')
#return response.text
# but we want dict - to see the url value of the dog (Import library called json)
def get_dog():
response = requests.get('https://random.dog/woof.json')
return json.loads(response.text)
get_dog()
type (get_dog())
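# Pulling the image URL out of the returned dict (the 'url' key name is assumed
# from inspecting a sample response from this API).
get_dog().get('url')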
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading, Cleaning and Visualizing DataObjectives for today:- Load data from multiple sources into a Python notebook - !curl method - CSV upload method- Create basic plots appropriate for different data types - Scatter Plot - Histogram - Density Plot - Pairplot- "Clean" a dataset using common Python libraries - Removing NaN values "Interpolation" Part 1 - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | head
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
?pd.read_csv
??pd.read_csv
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1...
###Code
col_headers = ['name','landmass','zone','area','population','language','religion','bars','stripes','colours','red',
'green','blue','gold','white','black','orange','mainhue','circles','crosses','saltires','quarters',
'sunstars','crescent','triangle','icon','animate','text','topleft','botright']
flag_data = pd.read_csv(flag_data_url, header=None, names=col_headers)
flag_data.head()
flag_data['language'] = flag_data['language'].map({1: 'English', 2:'Spanish', 3:'French', 4:'German', 5:'Slavic', 6:'Other Indo-European', 7:'Chinese', 8:'Arabic', 9:'Japanese/Turkish/Finnish/Magyar', 10:'Others'})
flag_data.head()
#This is also a way to do it:
# di = {1:"English", 2:"Spanish", 3:"French", 4:"German", 5:"Slavic", 6:"Other Indo-European",
# 7:"Chinese", 8:"Arabic", 9:"Japanese/Turkish/Finnish/Magyar", 10:"Others"}
# flag_data['language'] = flag_data.replace({"language": di})
# flag_data.head()
flag_data['language'].value_counts()
###Output
_____no_output_____
###Markdown
Reading other CSVs
###Code
link1 = 'https://raw.githubusercontent.com/BJanota11/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module2-loadingdata/drinks_with_regions.csv'
link2 = 'https://raw.githubusercontent.com/BJanota11/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module2-loadingdata/drinks_with_regions_index.csv'
link3 = 'https://raw.githubusercontent.com/BJanota11/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module2-loadingdata/drinks_with_regions_header.csv'
df = pd.read_csv(link1)
print(df.shape)
df.head()
df.to_csv('test.csv')
df = pd.read_csv(link2, index_col=0)
# df = pd.read_csv(link2, usecols=range(1,8)) <--- this will give the same thing
# df = df.drop(df.columns[0], axis=1) <--- this will drop the first column
print(df.shape)
df.head()
df = pd.read_csv(link3)
print(df.shape)
df.head()
# The file had 3 lines of non-data items that ended up being read in.
df= pd.read_csv(link3, header=3)
# df= pd.read_csv(link3, skiprows=3) <--- another method to do same thing
print(df.shape)
df.head()
###Output
(193, 7)
###Markdown
Loading from a local CSV to Google Colab
###Code
# one way is to directly load files into google colab files directory
from google.colab import files
uploaded = files.upload()
###Output
_____no_output_____
###Markdown
Part 2 - Basic Visualizations Basic Data Visualizations Using Matplotlib
###Code
import matplotlib.pyplot as plt
# Scatter Plot in Matplotlib
plt.scatter(df['beer_servings'], df['wine_servings'])
plt.xlabel('beer_servings')
plt.ylabel('wine_servings')
plt.show()
#Scatter Plot with Pandas
df.plot.scatter('beer_servings', 'wine_servings');
# Histogram (matplotlib)
plt.hist(df['total_litres_of_pure_alcohol'], bins = 20)
# can use ; to get rid of array, but it has info on distribution in array
#Pandas histogram
df['total_litres_of_pure_alcohol'].hist(bins=20)
# Seaborn Density Plot
# Seaborn Pairplot
import seaborn as sns
sns.pairplot(df)
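# A density plot (for the "Seaborn Density Plot" comment above) could be drawn like this -
# a minimal sketch, not part of the original lecture code:
sns.kdeplot(df['total_litres_of_pure_alcohol'])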
###Output
_____no_output_____
###Markdown
Create the same basic Visualizations using Pandas
###Code
# Pandas Scatterplot
df.plot.scatter('beer_servings', 'wine_servings')
# Pandas Scatter Matrix - Usually doesn't look too great.
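# A minimal sketch of the scatter matrix mentioned above (assumes df is still the drinks dataframe):
pd.plotting.scatter_matrix(df, figsize=(10, 10));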
###Output
_____no_output_____
###Markdown
Part 3 - Deal with Missing Values Diagnose Missing ValuesLet's use the Adult Dataset from UCI.
###Code
df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/adult.csv')
print(df.shape)
df.head()
df.isna().sum() # not all missing values are represented by NAs and NaNs
df['country'].value_counts()
# We change it by adding na_values to read_csv
df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/adult.csv', na_values=' ?')
print(df.shape)
df.head()
df.isna().sum()
df['country'].unique() # this pointed us to see that there were spaces preceding many strings, so we updated na_values above to ' ?' instead of '?'
df.dropna(inplace=True)
# this does the same as --- df = df.dropna()
df.shape
df.dropna(subset=['country'], inplace=True)
df.shape
df.isna().sum()
df['workclass'].value_counts()
###Output
_____no_output_____
###Markdown
Fill Missing Values
###Code
df.mode().iloc[0]
df = df.fillna(df.mode().iloc[0]) #this replaces each column with the mode
df.isna().sum()
df['occupation'].value_counts()
df['country'].value_counts()
###Output
_____no_output_____
###Markdown
Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar semi-clean source. You don't want the data that you're working with for this assignment to have any bigger issues than maybe not having headers or including missing values, etc.After you have chosen your dataset, do the following:- Import the dataset using the method that you are least comfortable with (!curl or CSV upload). - Make sure that your dataset has the number of rows and columns that you expect. - Make sure that your dataset has appropriate column names, rename them if necessary. - If your dataset uses markers like "?" to indicate missing values, replace them with NaNs during import.- Identify and fill missing values in your dataset (if any) - Don't worry about using methods more advanced than the `.fillna()` function for today.- Create one of each of the following plots using your dataset - Scatterplot - Histogram - Density Plot - Pairplot (note that pairplots will take a long time to load with large datasets or datasets with many columns)If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck!).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
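For reference, a minimal sketch of what this workflow could look like (the dataset URL, the `'?'` missing-value marker, and the columns plotted below are placeholders/assumptions — substitute your own dataset):

```python
# Minimal sketch - the URL and the '?' missing-value marker are assumed placeholders
import pandas as pd
import seaborn as sns

url = 'https://example.com/some_uci_dataset.csv'   # placeholder URL - use your own dataset
df = pd.read_csv(url, na_values='?')               # treat '?' as missing during import
print(df.shape)                                    # check rows/columns are what you expect
df = df.fillna(df.mode().iloc[0])                  # simple fill with each column's mode
df.hist(figsize=(10, 8))                           # histograms of the numeric columns
sns.pairplot(df)                                   # pairplot (slow on wide datasets)
```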
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
###Output
_____no_output_____
###Markdown
Refugees in the United States 2006 to 2015 Cleaning the Data
###Code
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
df = pd.read_csv('https://raw.githubusercontent.com/lechemrc/Datasets-to-ref/master/refugee_statistics.csv')
print(df.shape)
df.head()
df.isna().sum()
df['Spouses'].unique()
df = pd.read_csv('https://raw.githubusercontent.com/lechemrc/Datasets-to-ref/master/refugee_statistics.csv', na_values=['-', 'D', 'X'])
df2 = pd.read_csv('https://raw.githubusercontent.com/lechemrc/Datasets-to-ref/master/refugee_status.csv', index_col=0,na_values=['-', 'D', 'X'])
print(df.shape)
df.head()
# trying to decide which set would be more interesting... also when the first df
# takes place, as in which year
print(df2.shape)
df2.head()
df2.isna().sum()
df2['2006'].unique()
# df2['2006'].plot.bar()
#not sure yet what's going wrong here, but I'll figure it out. Keeping for learning purposes.
df2
df2_fill = df2.bfill(axis=1)
df2_fill
# I started with back fill on the row axis to make sure that it was decently consistent
# from country to country, instead of from above or below when numbers varied wildly.
df2_fill.isna().sum()
df2_fill = df2_fill.ffill(axis=1)
df2_fill
# I then went to forward fill on the row axis for the same reason as above
df2_fill.isna().sum()
df2_fill = df2_fill.fillna(value=0)
df2_fill
# I decided to fill countries (only one in this case) with no data with 0
# to be consistent with the lack of information. This way the data isn't skewed
# by a country with no data
df2_fill.isna().sum()
print(df2_fill.dtypes)
df2_fill.info()
# df2_fill = pd.concat([pd.DataFrame([pd.to_numeric(df2_fill[e],errors='coerce') \
# for e in df2_fill.columns if e not in ['Continent/Country of Nationality']]).T,\
# df2_fill[['Continent/Country of Nationality']]],axis=1)
# df2_fill.info()
# # this is code I adapted from a stack overflow question. I have an idea of how it
# # is working, but there are a couple things I have questions on. It clearly did
# # what I was hoping, since it turned the columns of years into float64 instead
# # of objects I was unable to use in plotting.
numeric = df2_fill.columns.tolist()
print(numeric)
# this was to create a list with the column titles to do the next step
df2_fill[numeric] = df2_fill[numeric].apply(lambda x: pd.to_numeric(x.astype(str)
.str.replace(',',''), errors='coerce'))
df2_fill
# this changed the objects into numbers, since it was being thrown off by the
# commas in the numbers, which pandas didn't recognize
###Output
_____no_output_____
###Markdown
Displaying the data
###Code
# df.set_index('Continent/Country of Nationality')
df2_fill['2006'][0:-1].plot.bar(figsize=(25,8), alpha=0.7)
df2_fill['2015'][0:-1].plot.bar(figsize=(25,8), color='orange', alpha=0.5)
plt.show()
# I'm struggling to list the countries/continents for the xticks instead of index
# update: got it!
df2_fill[0:7].plot.bar(figsize=(15,6));
df2_fill.iloc[0]
cc = 1
row = df2_fill.iloc[cc]
row.plot.bar(title=df2_fill.index[cc])
row.plot.line();
sns.pairplot(df2_fill)
# this really doesn't give me easy readable data
# sns.lineplot(x=df2_fill.iloc[0][:], y=row)
df2_fill.iloc[0][:]
df2_fill.iloc[0][0]
###Output
_____no_output_____
###Markdown
Affirmative Asylum Statistics in the United States Cleaning Data
###Code
asylum = pd.read_csv('https://raw.githubusercontent.com/lechemrc/Datasets-to-ref/master/affirmative_asylum.csv', index_col=0, na_values=['-', 'D'])
print(asylum.shape)
asylum.head()
asylum.isna().sum()
asylum # visualizing where the NaNs are
asylum = asylum.bfill(axis=1)
asylum
asylum = asylum.ffill(axis=1)
asylum.isna().sum()
print(asylum.info())
asylum.dtypes
nums = asylum.columns.tolist()
print(nums)
asylum[nums] = asylum[nums].apply(lambda x: pd.to_numeric(x.astype(str)
.str.replace(',',''), errors='coerce'))
asylum.dtypes
###Output
_____no_output_____
###Markdown
Visualizing the data
###Code
asylum['2006'][0:-1].plot.bar(figsize=(25,6), alpha=0.7);
asylum['2007'][0:-1].plot.bar(figsize=(25,6), color='orange', alpha=0.5);
asylum['2006'][0:7].plot.bar(figsize=(10,6), alpha=0.7);
asylum['2007'][0:7].plot.bar(figsize=(10,6), color='orange', alpha=0.5);
asylum[0:7].plot.bar(figsize=(12,6), title="Affirmative Asylum by Continent from 2006-2015");
# plt.bar(asylum[0:7])
plt.ylabel('Affirmative Asylum Numbers')
plt.tight_layout()
val = 1
single = asylum.iloc[val]
single.plot.bar(title=asylum.index[val], color='orange')
single.plot.line(color='gray');
###Output
_____no_output_____
###Markdown
Stretch Goals - Other types and sources of dataNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.If you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.Overall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.How does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.One last major source of data is APIs: https://github.com/toddmotto/public-apisAPI stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as "somebody else's database" - you have (usually limited) access.*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. Image, text, or (public) APIs are probably more tractable - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.
###Code
!pip install spotipy
from tqdm import tqdm
tqdm.pandas()
import pandas as pd
import json
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
cid ="367d1c06f500433b9f3202d79d3eb8a9"
secret = "9ddba8a64fe84f528143d99b65ea94f5"
client_credentials_manager = SpotifyClientCredentials(client_id=cid, client_secret=secret)
sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
artist_name = []
track_name = []
track_id = []
popularity = []
for i in range(0,10000,50):
track_results = sp.search(q='year:2019', type='track', limit=50,offset=i)
for i, j in enumerate(track_results['tracks']['items']):
artist_name.append(j['artists'][0]['name'])
track_name.append(j['name'])
track_id.append(j['id'])
popularity.append(j['popularity'])
df = pd.DataFrame(list(zip(artist_name, track_name, track_id, popularity)),
columns=['Artist Name', 'Track Name', 'Track ID', 'Popularity'])
df.head()
df['Artist Name'].value_counts()
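# Note: the artist_name/track_name/track_id/popularity lists still hold the results
# collected above, so df2 built below will also contain those rows again (duplicates)
# unless the lists are re-initialized first.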
for i in range(0,10000,50):
track_results = sp.search(q='year:2019', type='track', limit=50,offset=i)
for i, j in enumerate(track_results['tracks']['items']):
artist_name.append(j['artists'][0]['name'])
track_name.append(j['name'])
track_id.append(j['id'])
popularity.append(j['popularity'])
df2 = pd.DataFrame(list(zip(artist_name, track_name, track_id, popularity)),
columns=['Artist Name', 'Track Name', 'Track ID', 'Popularity'])
df2.head()
###Output
_____no_output_____
###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 4=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-left corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
import pandas as pd
import numpy as np
from google.colab import files
uploaded = files.upload()
# adding header names that was not provided in the raw data
headers = ["age", "workclass", "fnlwgt", "education", "education-num", "marital-status",
"occupation", "relationship", "race", "sex", "capital-gain", "capital-loss",
"hours-per-week", "native-country", "salary"]
df = pd.read_csv("adult.data.txt", header=None, names=headers)
# I am not interested in capital gains or capital losses for this case. they contain a lot of the missing data or zeroes. So I removed them from the dataframe
# I want to look at. Also There are '?' in the native-country column which i want to replace with 'unknown'
df_cleaned = df.drop(['capital-gain', 'capital-loss'], axis=1)
df_cleaned = df_cleaned.replace(' ?',' Unknown')
obj_df = df_cleaned.select_dtypes(include=['object']).copy()
# since salary only has two options '>=50k' or '<=50k', I will label encode it and add a column that will represent 1 as '>=50k' and 0 as '<=50k'
obj_df["salary"] = obj_df["salary"].astype('category')
obj_df.dtypes
obj_df['salary_cat'] = obj_df['salary'].cat.codes
# I will add cat codes column of the salary to the cleaned dataframe
df_cleaned['salary_cat'] = obj_df['salary_cat']
df_cleaned.head(20)
###Output
_____no_output_____ |
Amazon Augmented AI (A2I) and Comprehend DetectSentiment.ipynb | ###Markdown
Amazon Augmented AI (Amazon A2I) integration with Amazon Comprehend [Example] Visit https://github.com/aws-samples/amazon-a2i-sample-jupyter-notebooks for all A2I Sample Notebooks 1. [Introduction](Introduction)2. [Prerequisites](Prerequisites) 2. [Workteam](Workteam) 3. [Permissions](Notebook-Permission)3. [Client Setup](Client-Setup)4. [Create Control Plane Resources](Create-Control-Plane-Resources) 1. [Create Human Task UI](Create-Human-Task-UI) 2. [Create Flow Definition](Create-Flow-Definition)5. [Starting Human Loops](Scenario-1-:-When-Activation-Conditions-are-met-,-and-HumanLoop-is-created) 1. [Wait For Workers to Complete Task](Wait-For-Workers-to-Complete-Task) 2. [Check Status of Human Loop](Check-Status-of-Human-Loop) 3. [View Task Results](View-Task-Results) IntroductionAmazon Augmented AI (Amazon A2I) makes it easy to build the workflows required for human review of ML predictions. Amazon A2I brings human review to all developers, removing the undifferentiated heavy lifting associated with building human review systems or managing large numbers of human reviewers. You can create your own workflows for ML models built on Amazon SageMaker or any other tools. Using Amazon A2I, you can allow human reviewers to step in when a model is unable to make a high confidence prediction or to audit its predictions on an on-going basis. Learn more here: https://aws.amazon.com/augmented-ai/In this tutorial, we will show how you can use **Amazon A2I with AWS Comprehend's Detect Sentiment API.**For more in depth instructions, visit https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-getting-started.html To incorporate Amazon A2I into your human review workflows, you need three resources:* A **worker task template** to create a worker UI. The worker UI displays your input data, such as documents or images, and instructions to workers. It also provides interactive tools that the worker uses to complete your tasks. For more information, see https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-instructions-overview.html* A **human review workflow**, also referred to as a flow definition. You use the flow definition to configure your human workforce and provide information about how to accomplish the human review task. You can create a flow definition in the Amazon Augmented AI console or with Amazon A2I APIs. To learn more about both of these options, see https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-create-flow-definition.html* A **human loop** to start your human review workflow. When you use one of the built-in task types, the corresponding AWS service creates and starts a human loop on your behalf when the conditions specified in your flow definition are met or for each object if no conditions were specified. When a human loop is triggered, human review tasks are sent to the workers as specified in the flow definition.When using a custom task type, as this tutorial will show, you start a human loop using the Amazon Augmented AI Runtime API. When you call StartHumanLoop in your custom application, a task is sent to human reviewers. Install Latest SDK
###Code
# First, let's get the latest installations of our dependencies
!pip install --upgrade pip
!pip install boto3 --upgrade
!pip install -U botocore
###Output
_____no_output_____
###Markdown
SetupWe need to set up the following data:* `region` - Region to call A2I* `bucket` - A S3 bucket accessible by the given role * Used to store the sample images & output results * Must be within the same region A2I is called from* `role` - The IAM role used as part of StartHumanLoop. By default, this notebook will use the execution role* `workteam` - Group of people to send the work to
###Code
# Region
REGION = '<REGION>'
###Output
_____no_output_____
###Markdown
Setup Bucket and Paths
###Code
import boto3
import botocore
BUCKET = '<YOUR_BUCKET>'
OUTPUT_PATH = f's3://{BUCKET}/a2i-results'
###Output
_____no_output_____
###Markdown
Role and PermissionsThe AWS IAM Role used to execute the notebook needs to have the following permissions:* ComprehendFullAccess* SagemakerFullAccess* S3 Read/Write Access to the BUCKET listed above* AmazonSageMakerMechanicalTurkAccess (if using MechanicalTurk as your Workforce)
###Code
from sagemaker import get_execution_role
# Setting Role to the default SageMaker Execution Role
ROLE = get_execution_role()
display(ROLE)
###Output
_____no_output_____
###Markdown
Workteam or Workforce A workforce is the group of workers that you have selected to label your dataset. You can choose either the Amazon Mechanical Turk workforce, a vendor-managed workforce, or you can create your own private workforce for human reviews. Whichever workforce type you choose, Amazon Augmented AI takes care of sending tasks to workers. When you use a private workforce, you also create work teams, a group of workers from your workforce that are assigned to Amazon Augmented AI human review tasks. You can have multiple work teams and can assign one or more work teams to each job. To create your Workteam, visit the instructions here: https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-management.htmlAfter you have created your workteam, replace YOUR_WORKTEAM_ARN below
###Code
WORKTEAM_ARN= "<YOUR_WORKTEAM>"
###Output
_____no_output_____
###Markdown
Visit: https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-permissions-security.html to add the necessary permissions to your role Client Setup Here we are going to setup the rest of our clients.
###Code
import io
import json
import uuid
import time
import boto3
import botocore
# Amazon SageMaker client
sagemaker = boto3.client('sagemaker', REGION)
# Amazon Comprehend client
comprehend = boto3.client('comprehend', REGION)
# Amazon Augment AI (A2I) client
a2i = boto3.client('sagemaker-a2i-runtime')
s3 = boto3.client('s3', REGION)
###Output
_____no_output_____
###Markdown
Comprehend helper method
###Code
# Will help us parse Detect Sentiment API responses
def capsToCamel(all_caps_string):
if all_caps_string == 'POSITIVE':
return 'Positive'
elif all_caps_string == 'NEGATIVE':
return 'Negative'
elif all_caps_string == 'NEUTRAL':
return 'Neutral'
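    # Note: detect_sentiment can also return 'MIXED'; this helper does not map that case,
    # so the SentimentScore lookup further below would fail for mixed-sentiment text.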
###Output
_____no_output_____
###Markdown
Create Control Plane Resources Create Human Task UICreate a human task UI resource, giving a UI template in liquid html. This template will be rendered to the human workers whenever human loop is required.Below we've provided a simple demo template that is compatible with AWS Comprehend's Detect Sentiment API input and response.For over 70 pre built UIs, check: https://github.com/aws-samples/amazon-a2i-sample-task-uis
###Code
template = r"""
<script src="https://assets.crowd.aws/crowd-html-elements.js"></script>
<crowd-form>
<crowd-classifier
name="sentiment"
categories="['Positive', 'Negative', 'Neutral', 'Mixed']"
initial-value="{{ task.input.initialValue }}"
header="What sentiment does this text convey?"
>
<classification-target>
{{ task.input.taskObject }}
</classification-target>
<full-instructions header="Sentiment Analysis Instructions">
<p><strong>Positive</strong> sentiment include: joy, excitement, delight</p>
<p><strong>Negative</strong> sentiment include: anger, sarcasm, anxiety</p>
<p><strong>Neutral</strong>: neither positive or negative, such as stating a fact</p>
<p><strong>Mixed</strong>: when the sentiment is mixed</p>
</full-instructions>
<short-instructions>
Choose the primary sentiment that is expressed by the text.
</short-instructions>
</crowd-classifier>
</crowd-form>
"""
def create_task_ui():
'''
Creates a Human Task UI resource.
Returns:
struct: HumanTaskUiArn
'''
response = sagemaker.create_human_task_ui(
HumanTaskUiName=taskUIName,
UiTemplate={'Content': template})
return response
# Task UI name - this value is unique per account and region. You can also provide your own value here.
taskUIName = 'ui-comprehend-' + str(uuid.uuid4())
# Create task UI
humanTaskUiResponse = create_task_ui()
humanTaskUiArn = humanTaskUiResponse['HumanTaskUiArn']
print(humanTaskUiArn)
###Output
_____no_output_____
###Markdown
Creating the Flow Definition In this section, we're going to create a flow definition. Flow Definitions allow us to specify:* The workforce that your tasks will be sent to.* The instructions that your workforce will receive. This is called a worker task template.* The configuration of your worker tasks, including the number of workers that receive a task and time limits to complete tasks.* Where your output data will be stored.This demo is going to use the API, but you can optionally create this workflow definition in the console as well. For more details and instructions, see: https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-create-flow-definition.html.
###Code
# Flow definition name - this value is unique per account and region. You can also provide your own value here.
flowDefinitionName = 'fd-comprehend-demo-' + str(uuid.uuid4())
create_workflow_definition_response = sagemaker.create_flow_definition(
FlowDefinitionName= flowDefinitionName,
RoleArn= ROLE,
HumanLoopConfig= {
"WorkteamArn": WORKTEAM_ARN,
"HumanTaskUiArn": humanTaskUiArn,
"TaskCount": 1,
"TaskDescription": "Identify the sentiment of the provided text",
"TaskTitle": "Detect Sentiment of Text"
},
OutputConfig={
"S3OutputPath" : OUTPUT_PATH
}
)
flowDefinitionArn = create_workflow_definition_response['FlowDefinitionArn'] # let's save this ARN for future use
# Describe flow definition - status should be active
for x in range(60):
describeFlowDefinitionResponse = sagemaker.describe_flow_definition(FlowDefinitionName=flowDefinitionName)
print(describeFlowDefinitionResponse['FlowDefinitionStatus'])
if (describeFlowDefinitionResponse['FlowDefinitionStatus'] == 'Active'):
print("Flow Definition is active")
break
time.sleep(2)
###Output
_____no_output_____
###Markdown
Human Loops Detect Sentiment with AWS Comprehend Now that we have set up our Flow Definition, we are ready to call AWS Comprehend and start our human loops. In this tutorial, we are interested in starting a HumanLoop only if the SentimentScore returned by AWS Comprehend is less than 99%. So, with a bit of logic, we can check the response for each call to Detect Sentiment, and if the SentimentScore is less than 99%, we will kick off a HumanLoop to engage our workforce for a human review. Sample Data
###Code
sample_detect_sentiment_blurbs = ['I enjoy this product', 'I am unhappy with this product', 'It is okay', 'sometimes it works']
human_loops_started = []
SENTIMENT_SCORE_THRESHOLD = .99
for blurb in sample_detect_sentiment_blurbs:
# Call AWS Comprehend's Detect Sentiment API
response = comprehend.detect_sentiment(Text=blurb, LanguageCode='en')
sentiment = response['Sentiment']
print(f'Processing blurb: \"{blurb}\"')
# Our condition for when we want to engage a human for review
if (response['SentimentScore'][capsToCamel(sentiment)]< SENTIMENT_SCORE_THRESHOLD):
humanLoopName = str(uuid.uuid4())
inputContent = {
"initialValue": sentiment.title(),
"taskObject": blurb
}
start_loop_response = a2i.start_human_loop(
HumanLoopName=humanLoopName,
FlowDefinitionArn=flowDefinitionArn,
HumanLoopInput={
"InputContent": json.dumps(inputContent)
}
)
human_loops_started.append(humanLoopName)
print(f'SentimentScore of {response["SentimentScore"][capsToCamel(sentiment)]} is less than the threshold of {SENTIMENT_SCORE_THRESHOLD}')
print(f'Starting human loop with name: {humanLoopName} \n')
else:
print(f'SentimentScore of {response["SentimentScore"][capsToCamel(sentiment)]} is above threshold of {SENTIMENT_SCORE_THRESHOLD}')
print('No human loop created. \n')
###Output
_____no_output_____
###Markdown
Check Status of Human Loop
###Code
completed_human_loops = []
for human_loop_name in human_loops_started:
resp = a2i.describe_human_loop(HumanLoopName=human_loop_name)
print(f'HumanLoop Name: {human_loop_name}')
print(f'HumanLoop Status: {resp["HumanLoopStatus"]}')
print(f'HumanLoop Output Destination: {resp["HumanLoopOutput"]}')
print('\n')
if resp["HumanLoopStatus"] == "Completed":
completed_human_loops.append(resp)
###Output
_____no_output_____
###Markdown
Wait For Workers to Complete Task
###Code
workteamName = WORKTEAM_ARN[WORKTEAM_ARN.rfind('/') + 1:]
print("Navigate to the private worker portal and do the tasks. Make sure you've invited yourself to your workteam!")
print('https://' + sagemaker.describe_workteam(WorkteamName=workteamName)['Workteam']['SubDomain'])
###Output
_____no_output_____
###Markdown
Check Status of Human Loop Again
###Code
completed_human_loops = []
for human_loop_name in human_loops_started:
resp = a2i.describe_human_loop(HumanLoopName=human_loop_name)
print(f'HumanLoop Name: {human_loop_name}')
print(f'HumanLoop Status: {resp["HumanLoopStatus"]}')
print(f'HumanLoop Output Destination: {resp["HumanLoopOutput"]}')
print('\n')
if resp["HumanLoopStatus"] == "Completed":
completed_human_loops.append(resp)
###Output
_____no_output_____
###Markdown
View Task Results Once work is completed, Amazon A2I stores results in your S3 bucket and sends a Cloudwatch event. Your results should be available in the S3 OUTPUT_PATH when all work is completed.
###Code
import re
import pprint
pp = pprint.PrettyPrinter(indent=4)
for resp in completed_human_loops:
splitted_string = re.split('s3://' + BUCKET + '/', resp['HumanLoopOutput']['OutputS3Uri'])
output_bucket_key = splitted_string[1]
response = s3.get_object(Bucket=BUCKET, Key=output_bucket_key)
content = response["Body"].read()
json_output = json.loads(content)
pp.pprint(json_output)
print('\n')
###Output
_____no_output_____ |
py_ws/WS_03_for.ipynb | ###Markdown
Day 3 - Understanding For Loops. Form a team of five, discuss together, and try to predict the result of running the following code. The command below makes a predefined set of commands available: it provides a set of commands for drawing pictures with the turtle.
###Code
import turtle as t # 거북이 라이브러리를 t라는 이름으로 불러옴
###Output
_____no_output_____
###Markdown
The important things about `for` are **indentation** and the number of iterations. Let's get used to the syntax. A `for` statement is structured as follows: for loop_variable in number_of_iterations: statement_to_repeat
###Code
for x in range(3):
print(x)
###Output
_____no_output_____
###Markdown
In the code above, the number of iterations is set to 3. `range()`, just as the word suggests, specifies the range of the iteration. Let's understand how `range()` is used through the problems below. Problem 1: Among the commands below, ```range``` is a command (function) that makes iteration happen. ```range()``` takes a variable number of arguments, from one to three.* With one argument, it specifies the number of iterations. * With two arguments, it specifies the start value and the end value of the iteration. * With three arguments, you can specify the start value, the end value, and the step value. Predict the result of running the code below.
###Code
import turtle as t
for x in range(3):
t.forward(100)
t.left(120)
for x in range(5):
print(x)
import turtle as t
for x in range(1, 4):
t.forward(100)
t.left(90)
for x in range(3, 6):
print(x)
for x in range(3, 8, 2):
print(x)
###Output
_____no_output_____
###Markdown
Each Fibonacci number can be expressed as the sum of the two preceding numbers. Write a program that computes the 10th Fibonacci number. Use a loop so that you do not have to write the same code over and over.
###Code
# 피보나치 수열의 시작은 1, 1 이다. fib_0 = 1, fib_1 = 1, fib_2 = 2, fib_3 = 3, fib_4 = 5
# 일반 식 fib_n = fib_n-1 + fib_n-2
# your code here
print("10번째 피보나치 수열의 값은")
###Output
_____no_output_____
###Markdown
Arrays: An array is an object that can store multiple values in a single variable. An easy way to remember it is to think of an array as a box with dividers, with exactly one value in each compartment. To use one, put the values to be declared as an array inside square brackets, separated by commas, e.g. `[1, 3, 5, 7]`. The first compartment holds 1, the second 3, the third 5, and the last 7. Besides numbers you can also store text; in that case the text must be wrapped in quotation marks, for example `["hello world", "this", "is", "list"]`. You can use an array without assigning it to a variable as in the example, but that is less useful. So the usual way to declare one is to write the array after the variable name and the assignment operator, as in `num = [1, 3, 5, 7]`. To take out the value stored in each compartment one at a time, you have to specify a subscript, or index — just like going to the right locker by its number to take out your things. To use a subscript, the array must be assigned to a variable. An example that prints the second element of an array follows.
###Code
num = [1, 3, 5, 7]
print(num[2])
###Output
_____no_output_____
###Markdown
In the code above we specified 2 as the subscript, yet, surprisingly, the result was 5 — the third value. The important insight here is that subscripts start at 0, not at 1.
###Code
for cnt in [1, 3, 5, 7]:
print(cnt) # 예상 결과는?
num = [1, 3, 5, 7]
for cnt in num:
print(cnt) # 예상 결과는?
for cnt in num:
print(num) # 예상 결과는?
###Output
_____no_output_____
###Markdown
Problem 2: Something that stores a single value under a single name is called a variable. Something that has a single name but stores several different values is still a variable, but it is specifically called an array. Just use square brackets and separate the values inside with commas. Arrays are used very frequently, so it is worth getting comfortable with them. Run the examples below and check how they behave.
###Code
# 배열의 사용 예 1
Numbers = [100, "감나무", 300, 400, 401, 402] # 배열의 선언, 문자는 쌍따옴표로 묶음
print(Numbers[0], Numbers[1], Numbers[5]) # 배열의 값의 호출, 주의 할 것은 시작위치값이 0부터 시작
print(Numbers[0]+Numbers[2]) # 연산도 가능함
# 배열의 사용 예 2
primes = [2, 3, 5, 7] # primes라는 변수에 값이 4개가 저장되어 있음
for prime in primes: # primes는 배열이라는 타입을 갖고 있음
print(prime)
print(primes[3], primes[2], primes[1], primes[0])
###Output
_____no_output_____
###Markdown
2. Using a ```for``` statement, write a program that prints the following output (you may use an array or variables):```Great, delicious ham Great, delicious eggs Great, delicious nuts ```
###Code
# your code here
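# A minimal sketch of one possible solution, using a list of foods:
foods = ["ham", "eggs", "nuts"]
for food in foods:
    print("Great, delicious " + food)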
###Output
_____no_output_____
###Markdown
1. Revise the program you wrote last time — which printed three different sentences, each combining the name, age, and reason you like one of your 10 favorite celebrities — so that it uses arrays. Use NAME (favorite celebrity names) and AGE (ages) as the array names.
###Code
# 지난 시간의 코드 참고
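# A minimal sketch - the names, ages and reasons below are made-up placeholders:
NAME = ["Celebrity A", "Celebrity B", "Celebrity C"]
AGE = [28, 31, 25]
REASON = ["their singing", "their acting", "their humor"]
for i in range(3):
    print(NAME[i], "is", AGE[i], "years old and I like", REASON[i] + ".")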
###Output
_____no_output_____
###Markdown
Problem 3: Predict and write down the expected output of the following code.
###Code
n = 100
sum = 0
for counter in range(1,n+1):
sum = sum + counter
print("sum of 1 until ", n, ": ", sum)
for i in range(0, 5):
for j in range(0, i+1):
print("* ", end="") # end="" 는 줄바꿈을 하지 않는다는 뜻
print("\r") # “\r”는 줄바꿈을 한다는 뜻
###Output
_____no_output_____
###Markdown
Problem 4: Write a program that prints the following output:``` * * * * * * * * * * * * * * * ```
###Code
# your code here
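# A minimal sketch, assuming the goal is the mirrored (right-aligned) version of the
# triangle printed earlier; the flattened expected output above hides the exact alignment:
for i in range(1, 6):
    print("  " * (5 - i) + "* " * i)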
###Output
_____no_output_____ |
ICCT_si/examples/04/SS-18-Notranja_stabilnost_primer_1.ipynb | ###Markdown
Internal stability - example 1 How to use this interactive example?For the given stable system, try to obtain a divergent response solely by changing the initial conditions.$$\dot{x} = \underbrace{\begin{bmatrix}0&1\\-0.8&-0.5\end{bmatrix}}_{A}x$$Answer the following two questions:- Is it possible to obtain a divergent response for the given system?- Is it possible to obtain a divergent response for any stable system?
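As a quick sanity check (a sketch of the computation, not part of the original exercise), the eigenvalues of $A$ solve the characteristic equation $$\det(\lambda I - A) = \lambda(\lambda + 0.5) + 0.8 = \lambda^2 + 0.5\lambda + 0.8 = 0,$$ i.e. $\lambda_{1,2} = -0.25 \pm j\sqrt{0.7375} \approx -0.25 \pm 0.86j$. Both eigenvalues have negative real parts, which matches what the widget below reports numerically.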
###Code
%matplotlib inline
import control as control
import numpy
import sympy as sym
from IPython.display import display, Markdown
import ipywidgets as widgets
import matplotlib.pyplot as plt
#matrixWidget is a matrix looking widget built with a VBox of HBox(es) that returns a numPy array as value !
class matrixWidget(widgets.VBox):
def updateM(self,change):
for irow in range(0,self.n):
for icol in range(0,self.m):
self.M_[irow,icol] = self.children[irow].children[icol].value
#print(self.M_[irow,icol])
self.value = self.M_
def dummychangecallback(self,change):
pass
def __init__(self,n,m):
self.n = n
self.m = m
self.M_ = numpy.matrix(numpy.zeros((self.n,self.m)))
self.value = self.M_
widgets.VBox.__init__(self,
children = [
widgets.HBox(children =
[widgets.FloatText(value=0.0, layout=widgets.Layout(width='90px')) for i in range(m)]
)
for j in range(n)
])
#fill in widgets and tell interact to call updateM each time a children changes value
for irow in range(0,self.n):
for icol in range(0,self.m):
self.children[irow].children[icol].value = self.M_[irow,icol]
self.children[irow].children[icol].observe(self.updateM, names='value')
#value = Unicode('[email protected]', help="The email value.").tag(sync=True)
self.observe(self.updateM, names='value', type= 'All')
def setM(self, newM):
#disable callbacks, change values, and reenable
self.unobserve(self.updateM, names='value', type= 'All')
for irow in range(0,self.n):
for icol in range(0,self.m):
self.children[irow].children[icol].unobserve(self.updateM, names='value')
self.M_ = newM
self.value = self.M_
for irow in range(0,self.n):
for icol in range(0,self.m):
self.children[irow].children[icol].value = self.M_[irow,icol]
for irow in range(0,self.n):
for icol in range(0,self.m):
self.children[irow].children[icol].observe(self.updateM, names='value')
self.observe(self.updateM, names='value', type= 'All')
#self.children[irow].children[icol].observe(self.updateM, names='value')
#overlaod class for state space systems that DO NOT remove "useless" states (what "professor" of automatic control would do this?)
class sss(control.StateSpace):
def __init__(self,*args):
#call base class init constructor
control.StateSpace.__init__(self,*args)
#disable function below in base class
def _remove_useless_states(self):
pass
# Preparatory cell
A = numpy.matrix([[0.,1.],[-4.0/5.0,-5.0/10.0]])
X0 = numpy.matrix([[0.0],[0.0]])
Aw = matrixWidget(2,2)
Aw.setM(A)
X0w = matrixWidget(2,1)
X0w.setM(X0)
# Misc
#create dummy widget
DW = widgets.FloatText(layout=widgets.Layout(width='0px', height='0px'))
#create button widget
START = widgets.Button(
description='Test',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Test',
icon='check'
)
def on_start_button_clicked(b):
#This is a workaround to have intreactive_output call the callback:
# force the value of the dummy widget to change
if DW.value> 0 :
DW.value = -1
else:
DW.value = 1
pass
START.on_click(on_start_button_clicked)
# Main cell
def main_callback(A, X0, DW):
sols = numpy.linalg.eig(A)
sys = sss(A,[[1],[0]],[0,1],0)
pole = control.pole(sys)
if numpy.real(pole[0]) != 0:
p1r = abs(numpy.real(pole[0]))
else:
p1r = 1
if numpy.real(pole[1]) != 0:
p2r = abs(numpy.real(pole[1]))
else:
p2r = 1
if numpy.imag(pole[0]) != 0:
p1i = abs(numpy.imag(pole[0]))
else:
p1i = 1
if numpy.imag(pole[1]) != 0:
p2i = abs(numpy.imag(pole[1]))
else:
p2i = 1
print('Lastni vrednosti matrike A sta',round(sols[0][0],4),'in',round(sols[0][1],4))
#T = numpy.linspace(0, 60, 1000)
T, yout, xout = control.initial_response(sys,X0=X0,return_x=True)
fig = plt.figure("Prosti odziv", figsize=(16,5))
ax = fig.add_subplot(121)
plt.plot(T,xout[0])
plt.grid()
ax.set_xlabel('čas [s]')
ax.set_ylabel(r'$x_1$')
ax1 = fig.add_subplot(122)
plt.plot(T,xout[1])
plt.grid()
ax1.set_xlabel('čas [s]')
ax1.set_ylabel(r'$x_2$')
alltogether = widgets.HBox([widgets.VBox([widgets.Label('$A$:',border=3),
Aw]),
widgets.Label(' ',border=3),
widgets.VBox([widgets.Label('$X_0$:',border=3),
X0w]),
START])
out = widgets.interactive_output(main_callback, {'A':Aw, 'X0':X0w, 'DW':DW})
out.layout.height = '350px'
display(out, alltogether)
#create dummy widget 2
DW2 = widgets.FloatText(layout=widgets.Layout(width='0px', height='0px'))
DW2.value = -1
#create button widget
START2 = widgets.Button(
description='Prikaži pravilna odgovora',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Pritisni za prikaz pravilnih odgovorov',
icon='check',
layout=widgets.Layout(width='200px', height='auto')
)
def on_start_button_clicked2(b):
#This is a workaround to have intreactive_output call the callback:
# force the value of the dummy widget to change
if DW2.value> 0 :
DW2.value = -1
else:
DW2.value = 1
pass
START2.on_click(on_start_button_clicked2)
def main_callback2(DW2):
if DW2 > 0:
display(Markdown(r'''>Odgovor: Prosti odziv sistema zavisi zgolj od lastnih vrednosti matrike $A$ in je linearna kombinacija njihovih modalnih oblik. Ker je sistem stabilen, ima zgolj konvergentne modalne oblike - odziv sistema tako ne more biti divergenten, ne glede na izbrane vrednosti začetnih pogojev.'''))
else:
display(Markdown(''))
#create a graphic structure to hold all widgets
alltogether2 = widgets.VBox([START2])
out2 = widgets.interactive_output(main_callback2,{'DW2':DW2})
#out.layout.height = '300px'
display(out2,alltogether2)
###Output
_____no_output_____ |
notebooks/wavelet_tests.ipynb | ###Markdown
Does a positive image lead to positive coefficients?
###Code
num_pix = 100
image = np.ones((num_pix, num_pix)); image[::10, :] = 10; image[:, ::10] = 10
#image = np.random.rand(num_pix, num_pix)
plt.imshow(image)
plt.colorbar()
plt.show()
starlet = StarletTransform()
coeffs = starlet.transform(image)
print(type(coeffs), len(coeffs), coeffs[0].shape)
fig, axes = plt.subplots(1, starlet.nb_scale, figsize=(20, 3))
for l in range(starlet.nb_scale):
ax = axes[l]
if l < starlet.nb_scale-1:
ax.set_title("wavelet scale {}".format(l+1))
else:
ax.set_title("wavelet coarsest scale")
im = ax.imshow(coeffs[l])
nice_colorbar(im)
plt.show()
###Output
_____no_output_____
###Markdown
Conclusion: the 1st-gen starlet transform is **not positive**, hence one should _not_ apply the non-negativity constraint to wavelet coefficients!
###Code
image_back = starlet.inverse(coeffs)
print(type(image_back))
plt.imshow(image-image_back, cmap='bwr_r')
plt.colorbar()
plt.show()
###Output
<class 'numpy.ndarray'>
|
phase01/5.1.All-Pipeline.ipynb | ###Markdown
[Module 5.1] Developing a model building pipeline with HPO (all steps of the SageMaker Model Building Pipeline). This notebook proceeds according to the table of contents below. Running everything end to end takes **about 30 minutes**.- 0. Overview of the SageMaker Model Building Pipeline- 1. Pipeline variables and environment setup- 2. Defining the pipeline steps - (1) Preprocessing step - (2) Training step for model training - (3) Model evaluation step - (4) Model registration step - (5) SageMaker model creation step - (6) HPO step - (7) Condition step- 3. Defining and running the model building pipeline- 4. Running with pipeline caching and parameters- 5. Cleanup --- 0. Overview of the SageMaker Model Building Pipeline- If needed, refer to the earlier notebook: scratch/8.5.All-Pipeline.ipynb 1. Pipeline variables and environment setup
###Code
import boto3
import sagemaker
import pandas as pd
region = boto3.Session().region_name
sagemaker_session = sagemaker.session.Session()
role = sagemaker.get_execution_role()
sm_client = boto3.client('sagemaker', region_name=region)
%store -r
###Output
_____no_output_____
###Markdown
Setting the pipeline variables
###Code
from sagemaker.workflow.parameters import (
ParameterInteger,
ParameterString,
ParameterFloat,
)
processing_instance_count = ParameterInteger(
name="ProcessingInstanceCount",
default_value=1
)
processing_instance_type = ParameterString(
name="ProcessingInstanceType",
default_value="ml.m5.xlarge"
)
training_instance_type = ParameterString(
name="TrainingInstanceType",
default_value="ml.m5.xlarge"
)
training_instance_count = ParameterInteger(
name="TrainingInstanceCount",
default_value=1
)
model_eval_threshold = ParameterFloat(
name="model2eval2threshold",
default_value=0.85
)
input_data = ParameterString(
name="InputData",
default_value=input_data_uri,
)
model_approval_status = ParameterString(
name="ModelApprovalStatus", default_value="PendingManualApproval"
)
###Output
_____no_output_____
###Markdown
Defining caching- Reference: caching pipeline steps: [Caching Pipeline Steps](https://docs.aws.amazon.com/ko_kr/sagemaker/latest/dg/pipelines-caching.html)
###Code
from sagemaker.workflow.steps import CacheConfig
cache_config = CacheConfig(enable_caching=True,
expire_after="7d")
###Output
_____no_output_____
###Markdown
2. Defining the pipeline steps (1) Defining the preprocessing step- Preprocessing is performed on the input_data_uri input data.
###Code
from sagemaker.sklearn.processing import SKLearnProcessor
split_rate = 0.2
framework_version = "0.23-1"
sklearn_processor = SKLearnProcessor(
framework_version=framework_version,
instance_type=processing_instance_type,
instance_count=processing_instance_count,
base_job_name="sklearn-fraud-process",
role=role,
)
print("input_data: \n", input_data)
from sagemaker.processing import ProcessingInput, ProcessingOutput
from sagemaker.workflow.steps import ProcessingStep
step_process = ProcessingStep(
name="FraudScratchProcess",
processor=sklearn_processor,
inputs=[
# ProcessingInput(source=input_data_uri,destination='/opt/ml/processing/input'),
ProcessingInput(source=input_data, destination='/opt/ml/processing/input'),
],
outputs=[ProcessingOutput(output_name="train",
source='/opt/ml/processing/output/train'),
ProcessingOutput(output_name="test",
source='/opt/ml/processing/output/test')],
job_arguments=["--split_rate", f"{split_rate}"],
code= 'src/preprocessing.py',
cache_config = cache_config, # 캐시 정의
)
###Output
_____no_output_____
###Markdown
(2) Defining the training step for model training Setting the basic training variables and hyperparameters
###Code
from sagemaker.xgboost.estimator import XGBoost
bucket = sagemaker_session.default_bucket()
prefix = 'fraud2train'
estimator_output_path = f's3://{bucket}/{prefix}/training_jobs'
base_hyperparameters = {
"scale_pos_weight" : "29",
"max_depth": "6",
"alpha" : "0",
"eta": "0.3",
"min_child_weight": "1",
"objective": "binary:logistic",
"num_round": "100",
}
xgb_train = XGBoost(
entry_point = "xgboost_script.py",
source_dir = "src",
output_path = estimator_output_path,
code_location = estimator_output_path,
hyperparameters = base_hyperparameters,
role = role,
instance_count = training_instance_count,
instance_type = training_instance_type,
framework_version = "1.0-1")
###Output
_____no_output_____
###Markdown
The training input is provided by the output of the preceding preprocessing step.- `step_process.properties.ProcessingOutputConfig.Outputs["train"].S3Output.S3Uri`
###Code
from sagemaker.inputs import TrainingInput
from sagemaker.workflow.steps import TrainingStep
step_train = TrainingStep(
name="FraudScratchTrain",
estimator=xgb_train,
inputs={
"train": TrainingInput(
s3_data=step_process.properties.ProcessingOutputConfig.Outputs[
"train"
].S3Output.S3Uri,
# s3_data= train_preproc_dir_artifact,
content_type="text/csv"
),
},
cache_config = cache_config, # 캐시 정의
)
###Output
_____no_output_____
###Markdown
(3) Model evaluation step Specifying the default Docker container for the ScriptProcessor: Scikit-learn is used as the base image for the ScriptProcessor's Docker container. - A user-defined Docker container can also be used.
###Code
from sagemaker.processing import ScriptProcessor
script_eval = SKLearnProcessor(
framework_version= "0.23-1",
role=role,
instance_type=processing_instance_type,
instance_count=1,
base_job_name="script-fraud-scratch-eval",
)
from sagemaker.workflow.properties import PropertyFile
from sagemaker.workflow.steps import ProcessingStep
from sagemaker.workflow.properties import PropertyFile
evaluation_report = PropertyFile(
name="EvaluationReport",
output_name="evaluation",
path="evaluation.json"
)
step_eval = ProcessingStep(
name="FraudEval",
processor=script_eval,
inputs=[
ProcessingInput(
source= step_train.properties.ModelArtifacts.S3ModelArtifacts,
destination="/opt/ml/processing/model"
),
ProcessingInput(
source=step_process.properties.ProcessingOutputConfig.Outputs[
"test"
].S3Output.S3Uri,
destination="/opt/ml/processing/test"
)
],
outputs=[
ProcessingOutput(output_name="evaluation", source="/opt/ml/processing/evaluation"),
],
code="src/evaluation.py",
cache_config = cache_config, # 캐시 정의
property_files=[evaluation_report], # 현재 이 라인을 넣으면 에러 발생
)
###Output
_____no_output_____
###Markdown
(4) Model registration step Creating the model group- References - Model group listing API: [ListModelPackageGroups](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_ListModelPackageGroups.html) - Registering model metrics: [Model Quality Metrics](https://docs.aws.amazon.com/ko_kr/sagemaker/latest/dg/model-monitor-model-quality-metrics.html)
###Code
model_package_group_name = f"{project_prefix}"
model_package_group_input_dict = {
"ModelPackageGroupName" : model_package_group_name,
"ModelPackageGroupDescription" : "Sample model package group"
}
response = sm_client.list_model_package_groups(NameContains=model_package_group_name)
if len(response['ModelPackageGroupSummaryList']) == 0:
print("No model group exists")
print("Create model group")
create_model_pacakge_group_response = sm_client.create_model_package_group(**model_package_group_input_dict)
print('ModelPackageGroup Arn : {}'.format(create_model_pacakge_group_response['ModelPackageGroupArn']))
else:
print(f"{model_package_group_name} exitss")
from sagemaker.workflow.step_collections import RegisterModel
from sagemaker.model_metrics import MetricsSource, ModelMetrics
model_metrics = ModelMetrics(
model_statistics=MetricsSource(
s3_uri="{}/evaluation.json".format(
step_eval.arguments["ProcessingOutputConfig"]["Outputs"][0]["S3Output"]["S3Uri"]
),
content_type="application/json"
)
)
step_register = RegisterModel(
name= "FraudScratcRegisterhModel",
estimator=xgb_train,
image_uri= step_train.properties.AlgorithmSpecification.TrainingImage,
model_data= step_train.properties.ModelArtifacts.S3ModelArtifacts,
content_types=["text/csv"],
response_types=["text/csv"],
inference_instances=["ml.t2.medium", "ml.m5.xlarge"],
transform_instances=["ml.m5.xlarge"],
model_package_group_name=model_package_group_name,
approval_status=model_approval_status,
model_metrics=model_metrics,
)
###Output
_____no_output_____
###Markdown
(5) Creating the SageMaker model step- The inputs for the two parameters below are provided by the results of the previous step. - image_uri= step_train.properties.AlgorithmSpecification.TrainingImage, - model_data= step_train.properties.ModelArtifacts.S3ModelArtifacts,
###Code
from sagemaker.model import Model
model = Model(
image_uri= step_train.properties.AlgorithmSpecification.TrainingImage,
model_data= step_train.properties.ModelArtifacts.S3ModelArtifacts,
sagemaker_session=sagemaker_session,
role=role,
)
from sagemaker.inputs import CreateModelInput
from sagemaker.workflow.steps import CreateModelStep
inputs = CreateModelInput(
instance_type="ml.m5.large",
# accelerator_type="ml.eia1.medium",
)
step_create_model = CreateModelStep(
name="FraudScratchModel",
model=model,
inputs=inputs,
)
###Output
_____no_output_____
###Markdown
(6) HPO step
###Code
from sagemaker.tuner import (
IntegerParameter,
CategoricalParameter,
ContinuousParameter,
HyperparameterTuner,
)
hyperparameter_ranges = {
"eta": ContinuousParameter(0, 1),
"min_child_weight": ContinuousParameter(1, 10),
"alpha": ContinuousParameter(0, 2),
"max_depth": IntegerParameter(1, 10),
}
objective_metric_name = "validation:auc"
tuner = HyperparameterTuner(
xgb_train, objective_metric_name, hyperparameter_ranges,
max_jobs=5,
max_parallel_jobs=5,
)
from sagemaker.workflow.steps import TuningStep
step_tuning = TuningStep(
name = "HPTuning",
tuner = tuner,
inputs={
"train": TrainingInput(
s3_data=step_process.properties.ProcessingOutputConfig.Outputs[
"train"
].S3Output.S3Uri,
# s3_data= train_preproc_dir_artifact,
content_type="text/csv"
),
},
cache_config = cache_config, # 캐시 정의
)
###Output
_____no_output_____
###Markdown
(7) Condition step
###Code
from sagemaker.workflow.conditions import ConditionLessThanOrEqualTo
from sagemaker.workflow.condition_step import (
ConditionStep,
JsonGet,
)
cond_lte = ConditionLessThanOrEqualTo(
left=JsonGet(
step=step_eval,
property_file=evaluation_report,
json_path="binary_classification_metrics.auc.value",
),
# right=8.0
right = model_eval_threshold
)
step_cond = ConditionStep(
name="FruadScratchCond",
conditions=[cond_lte],
if_steps=[step_tuning],
else_steps=[step_register, step_create_model],
)
###Output
The class JsonGet has been renamed in sagemaker>=2.
See: https://sagemaker.readthedocs.io/en/stable/v2.html for details.
###Markdown
3. Defining and running the model building pipeline The pipeline is defined with the four steps defined above.- steps=[step_process, step_train, step_create_model, step_deploy],- The run below takes about 20 minutes.
###Code
from sagemaker.workflow.pipeline import Pipeline
project_prefix = 'sagemaker-pipeline-phase2-step-by-step'
pipeline_name = project_prefix
pipeline = Pipeline(
name=pipeline_name,
parameters=[
processing_instance_type,
processing_instance_count,
training_instance_type,
training_instance_count,
input_data,
model_eval_threshold,
model_approval_status,
],
# steps=[step_process, step_train, step_register, step_eval, step_cond],
steps=[step_process, step_train, step_eval, step_cond],
)
import json
definition = json.loads(pipeline.definition())
# definition
###Output
No finished training job found associated with this estimator. Please make sure this estimator is only used for building workflow config
###Markdown
Submit the pipeline to SageMaker and run it
###Code
pipeline.upsert(role_arn=role)
###Output
No finished training job found associated with this estimator. Please make sure this estimator is only used for building workflow config
No finished training job found associated with this estimator. Please make sure this estimator is only used for building workflow config
###Markdown
Run the pipeline using the default parameter values.
###Code
execution = pipeline.start()
###Output
_____no_output_____
###Markdown
Pipeline operations: waiting for the pipeline and checking the execution statusWe examine the execution progress of the workflow.
###Code
execution.describe()
execution.wait()
###Output
_____no_output_____
###Markdown
Wait until the execution completes. Then list the executed steps; this shows the steps started or completed by the pipeline's step-execution service.
###Code
execution.list_steps()
###Output
_____no_output_____
###Markdown
4. Running the pipeline with caching and parameters- As of July 2021, caching is supported for the Training, Processing, and Transform steps.- For details, see [Caching Pipeline Steps](https://docs.aws.amazon.com/ko_kr/sagemaker/latest/dg/pipelines-caching.html)
###Code
is_cache = True
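# For reference, a minimal sketch (an assumption, not shown in this notebook) of how the
# cache_config object used by the steps above is typically created:
# from sagemaker.workflow.steps import CacheConfig
# cache_config = CacheConfig(enable_caching=True, expire_after="PT1H")  # ISO 8601 duration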
%%time
from IPython.display import display as dp
import time
if is_cache:
execution = pipeline.start(
parameters=dict(
model2eval2threshold=0.8,
)
)
# execution = pipeline.start()
time.sleep(10)
dp(execution.list_steps())
execution.wait()
if is_cache:
dp(execution.list_steps())
###Output
_____no_output_____ |
astr-119.15.ipynb | ###Markdown
Create a simple solar system model
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from collections import namedtuple
###Output
_____no_output_____
###Markdown
Define a planet class
###Code
class planet():
"A planet in our solar system"
    def __init__(self, semimajor, eccentricity):
self.x = np.zeros(2)
self.v = np.zeros(2)
self.a_g = np.zeros(2)
self.t = 0.0
self.dt = 0.0
self.a = semimajor
self.e = eccentricity
self.istep = 0
self.name = ""
###Output
_____no_output_____
###Markdown
Define a dictionary with some constants
###Code
solar_system = { "M_sun":1.0, "G":39.4784176043574320}
###Output
_____no_output_____
###Markdown
Define some functions for setting circular velocity, and acceleration
###Code
def SolarCircularVelocity(p):
G = solar_system["G"]
M = solar_system["M_sun"]
r = ( p.x[0]**2 + p.x[1]**2 )**0.5
    return (G*M/r)**0.5
def SolarGravitationalAcceleration(p):
G = solar_system["G"]
M = solar_system["M_sun"]
r = ( p.x[0]**2 + p.x[1]**2 )**0.5
a_grav = -1.0*G*M/r**2
if(p.x[0]==0.0):
if(p.x[1]>0.0):
theta = 0.5*np.pi
else:
theta = 1.5*np.pi
else:
theta = np.arctan2(p.x[1],p.x[0])
return a_grav*np.cos(theta), a_grav*np.sin(theta)
###Output
_____no_output_____
###Markdown
Compute the timestep
###Code
def calc_dt(p):
ETA_TIME_STEP = 0.0004
    eta = ETA_TIME_STEP
v = (p.v[0]**2 + p.v[1]**2)**0.5
a = (p.a_g[0]**2 + p.a_g[1]**2)**0.5
dt = eta * np.fmin(1./np.fabs(v),1./np.fabs(a)**0.5)
return dt
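# Illustrative usage sketch (not part of the original notebook): place a planet at 1 AU
# on the x-axis with the circular-orbit speed and compute its first adaptive timestep.
p = planet(1.0, 0.0167)
p.x = np.array([p.a, 0.0])
p.v = np.array([0.0, SolarCircularVelocity(p)])
p.a_g = np.array(SolarGravitationalAcceleration(p))
dt_first = calc_dt(p)  # eta * min(1/|v|, 1/sqrt(|a|)) in AU / yr / M_sun units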
###Output
_____no_output_____ |
Chapter 21 - Saving and Loading Trained Models.ipynb | ###Markdown
Chapter 21 Saving and Loading Trained Models 21.0 IntroductionIn the last 20 chapters and around 200 recipes, we have covered how to take raw data and use machine learning to create well-performing predictive models. However, for all our work to be worthwhile we eventually need to do something with our model, such as integrating it with an existing software application. To accomplish this goal, we need to be able to both save our models after training and load them when they are needed by an application. This is the focus of the final chapter. 21.1 Saving and Loading a scikit-learn Model ProblemYou have trained a scikit-learn model and want to save it and load it elsewhere. SolutionSave the model as a pickle file:
###Code
# load libraries
from sklearn.ensemble import RandomForestClassifier
from sklearn import datasets
from sklearn.externals import joblib
# load data
iris = datasets.load_iris()
features = iris.data
target = iris.target
# create decision tree classifier object
classifier = RandomForestClassifier()
# train model
model = classifier.fit(features, target)
# save model as pickle file
joblib.dump(model, "model.pkl")
###Output
/Users/f00/anaconda/envs/machine_learning_cookbook/lib/python3.6/site-packages/sklearn/ensemble/weight_boosting.py:29: DeprecationWarning: numpy.core.umath_tests is an internal NumPy module and should not be imported. It will be removed in a future NumPy release.
from numpy.core.umath_tests import inner1d
###Markdown
Once the model is saved we can use scikit-learn in our destination application (e.g., web application) to load the model:
###Code
# load model from file
classifier = joblib.load("model.pkl")
###Output
_____no_output_____
###Markdown
And use it to make predictions
###Code
# create new observation
new_observation = [[ 5.2, 3.2, 1.1, 0.1]]
# predict observation's class
classifier.predict(new_observation)
###Output
_____no_output_____
###Markdown
DiscussionThe first step in using a model in production is to save that model as a file that can be loaded by another application or workflow. We can accomplish this by saving the model as a pickle file, a Python-specific data format. Specifically, to save the model we use `joblib`, which is a library extending pickle for cases when we have large NumPy arrays--a common occurrence for trained models in scikit-learn. When saving scikit-learn models, be aware that saved models might not be compatible between versions of scikit-learn; therefore, it can be helpful to include the version of scikit-learn used in the model in the filename:
###Code
# import library
import sklearn
# get scikit-learn version
scikit_version = sklearn.__version__
# save model as pickle file
joblib.dump(model, "model_{version}.pkl".format(version=scikit_version))
###Output
_____no_output_____
###Markdown
21.2 Saving and Loading a Keras Model ProblemYou have a trained Keras model and want to save it and load it elsewhere. SolutionSave the model as HDF5:
###Code
# load libraries
import numpy as np
from keras.datasets import imdb
from keras.preprocessing.text import Tokenizer
from keras import models
from keras import layers
from keras.models import load_model
# set random seed
np.random.seed(0)
# set the number of features we want
number_of_features = 1000
# load data and target vector from movie review data
(train_data, train_target), (test_data, test_target) = imdb.load_data(num_words=number_of_features)
# convert movie review data to a one-hot encoded feature matrix
tokenizer = Tokenizer(num_words=number_of_features)
train_features = tokenizer.sequences_to_matrix(train_data, mode="binary")
test_features = tokenizer.sequences_to_matrix(test_data, mode="binary")
# start neural network
network = models.Sequential()
# add fully connected layer with ReLU activation function
network.add(layers.Dense(units=16, activation="relu", input_shape=(number_of_features,)))
# add fully connected layer with a sigmoid activation function
network.add(layers.Dense(units=1, activation="sigmoid"))
# compile neural network
network.compile(loss="binary_crossentropy", optimizer="rmsprop", metrics=["accuracy"])
# train neural network
history = network.fit(train_features, train_target, epochs=3, verbose=0, batch_size=100, validation_data=(test_features, test_target))
# save neural network
network.save("model.h5")
###Output
Using Theano backend.
###Markdown
We can then load the model either in another application or for additional training
###Code
# load neural network
network = load_model("model.h5")
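# Illustrative sketch (an assumption, not from the book): the reloaded network behaves like
# the original object, so it can be trained further or used directly for prediction.
network.fit(train_features, train_target, epochs=1, verbose=0, batch_size=100)
predicted_probabilities = network.predict(test_features[:3], verbose=0)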
###Output
_____no_output_____ |
tutorials/turbo_1.ipynb | ###Markdown
BO with TuRBO-1 and TS/qEIIn this tutorial, we show how to implement Trust Region Bayesian Optimization (TuRBO) [1] in a closed loop in BoTorch.This implementation uses one trust region (TuRBO-1) and supports either parallel expected improvement (qEI) or Thompson sampling (TS). We optimize the $10D$ Ackley function on the domain $[-10, 15]^{10}$ and show that TuRBO-1 outperforms qEI as well as Sobol.Since botorch assumes a maximization problem, we will attempt to maximize $-f(x)$ to achieve $\max_x -f(x)=0$.[1]: [Eriksson, David, et al. Scalable global optimization via local Bayesian optimization. Advances in Neural Information Processing Systems. 2019](https://proceedings.neurips.cc/paper/2019/file/6c990b7aca7bc7058f5e98ea909e924b-Paper.pdf)
###Code
import math
from dataclasses import dataclass
import torch
from botorch.acquisition import qExpectedImprovement
from botorch.fit import fit_gpytorch_model
from botorch.generation import MaxPosteriorSampling
from botorch.models import FixedNoiseGP, SingleTaskGP
from botorch.optim import optimize_acqf
from botorch.test_functions import Ackley
from botorch.utils.transforms import unnormalize
from torch.quasirandom import SobolEngine
import gpytorch
from gpytorch.constraints import Interval
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.mlls import ExactMarginalLogLikelihood
from gpytorch.priors import HorseshoePrior
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.double
###Output
_____no_output_____
###Markdown
Optimize the 10-dimensional Ackley functionThe goal is to minimize the popular Ackley function:$f(x_1,\ldots,x_d) = -20\exp\left(-0.2 \sqrt{\frac{1}{d} \sum_{j=1}^d x_j^2} \right) -\exp \left( \frac{1}{d} \sum_{j=1}^d \cos(2 \pi x_j) \right) + 20 + e$over the domain $[-10, 15]^{10}$. The global optimal value of $0$ is attained at $x_1 = \ldots = x_d = 0$.As mentioned above, since botorch assumes a maximization problem, we instead maximize $-f(x)$.
###Code
fun = Ackley(dim=10, negate=True).to(dtype=dtype, device=device)
fun.bounds[0, :].fill_(-10)
fun.bounds[1, :].fill_(15)
dim = fun.dim
lb, ub = fun.bounds
def eval_objective(x):
"""This is a helper function we use to unnormalize and evalaute a point"""
return fun(unnormalize(x, fun.bounds))
###Output
_____no_output_____
###Markdown
Maintain the TuRBO stateTuRBO needs to maintain a state, which includes the length of the trust region, success and failure counters, success and failure tolerance, etc. In this tutorial we store the state in a dataclass and update the state of TuRBO after each batch evaluation. **Note**: These settings assume that the domain has been scaled to $[0, 1]^d$ and that the same batch size is used for each iteration.
###Code
@dataclass
class TurboState:
dim: int
batch_size: int
length: float = 0.8
length_min: float = 0.5 ** 7
length_max: float = 1.6
failure_counter: int = 0
failure_tolerance: int = float("nan") # Note: Post-initialized
success_counter: int = 0
success_tolerance: int = 10 # Note: The original paper uses 3
best_value: float = -float("inf")
restart_triggered: bool = False
def __post_init__(self):
self.failure_tolerance = math.ceil(
max([4.0 / self.batch_size, float(self.dim) / self.batch_size])
)
def update_state(state, Y_next):
if max(Y_next) > state.best_value + 1e-3 * math.fabs(state.best_value):
state.success_counter += 1
state.failure_counter = 0
else:
state.success_counter = 0
state.failure_counter += 1
if state.success_counter == state.success_tolerance: # Expand trust region
state.length = min(2.0 * state.length, state.length_max)
state.success_counter = 0
elif state.failure_counter == state.failure_tolerance: # Shrink trust region
state.length /= 2.0
state.failure_counter = 0
state.best_value = max(state.best_value, max(Y_next).item())
if state.length < state.length_min:
state.restart_triggered = True
return state
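# Quick illustrative check (not part of the original tutorial): a batch that improves on
# best_value increments the success counter, resets the failure counter, and records the
# new best value.
_demo_state = TurboState(dim=10, batch_size=4)
_demo_state.best_value = -10.0
_demo_state = update_state(_demo_state, torch.tensor([[-5.0], [-8.0], [-12.0], [-9.0]], dtype=dtype))
# _demo_state.success_counter == 1, _demo_state.failure_counter == 0, _demo_state.best_value == -5.0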
###Output
_____no_output_____
###Markdown
Take a look at the state
###Code
state = TurboState(dim=dim, batch_size=4)
print(state)
###Output
TurboState(dim=10, batch_size=4, length=0.8, length_min=0.0078125, length_max=1.6, failure_counter=0, failure_tolerance=3, success_counter=0, success_tolerance=10, best_value=-inf, restart_triggered=False)
###Markdown
Generate initial pointsThis generates an initial set of Sobol points that we use to start off the BO loop.
###Code
def get_initial_points(dim, n_pts):
sobol = SobolEngine(dimension=dim, scramble=True)
X_init = sobol.draw(n=n_pts).to(dtype=dtype, device=device)
return X_init
###Output
_____no_output_____
###Markdown
Generate new batchGiven the current `state` and a probabilistic (GP) `model` built from observations `X` and `Y`, we generate a new batch of points. This method works on the domain $[0, 1]^d$, so make sure to not pass in observations from the true domain. `unnormalize` is called before the true function is evaluated which will first map the points back to the original domain.We support either TS or qEI, which can be specified via the `acqf` argument.
###Code
def generate_batch(
state,
model, # GP model
X, # Evaluated points on the domain [0, 1]^d
Y, # Function values
batch_size,
n_candidates=None, # Number of candidates for Thompson sampling
num_restarts=10,
raw_samples=512,
acqf="ts", # "ei" or "ts"
):
assert acqf in ("ts", "ei")
assert X.min() >= 0.0 and X.max() <= 1.0 and torch.all(torch.isfinite(Y))
if n_candidates is None:
n_candidates = min(5000, max(2000, 200 * X.shape[-1]))
# Scale the TR to be proportional to the lengthscales
x_center = X[Y.argmax(), :].clone()
weights = model.covar_module.base_kernel.lengthscale.squeeze().detach()
weights = weights / weights.mean()
weights = weights / torch.prod(weights.pow(1.0 / len(weights)))
tr_lb = torch.clamp(x_center - weights * state.length / 2.0, 0.0, 1.0)
tr_ub = torch.clamp(x_center + weights * state.length / 2.0, 0.0, 1.0)
if acqf == "ts":
dim = X.shape[-1]
sobol = SobolEngine(dim, scramble=True)
pert = sobol.draw(n_candidates).to(dtype=dtype, device=device)
pert = tr_lb + (tr_ub - tr_lb) * pert
# Create a perturbation mask
prob_perturb = min(20.0 / dim, 1.0)
mask = (
torch.rand(n_candidates, dim, dtype=dtype, device=device)
<= prob_perturb
)
ind = torch.where(mask.sum(dim=1) == 0)[0]
mask[ind, torch.randint(0, dim - 1, size=(len(ind),), device=device)] = 1
# Create candidate points from the perturbations and the mask
X_cand = x_center.expand(n_candidates, dim).clone()
X_cand[mask] = pert[mask]
# Sample on the candidate points
thompson_sampling = MaxPosteriorSampling(model=model, replacement=False)
X_next = thompson_sampling(X_cand, num_samples=batch_size)
elif acqf == "ei":
        ei = qExpectedImprovement(model, Y.max(), maximize=True)
X_next, acq_value = optimize_acqf(
ei,
bounds=torch.stack([tr_lb, tr_ub]),
q=batch_size,
num_restarts=num_restarts,
raw_samples=raw_samples,
)
return X_next
###Output
_____no_output_____
###Markdown
Optimization loopThis simple loop runs one instance of TuRBO-1 with Thompson sampling until convergence.TuRBO-1 is a local optimizer that can be used for a fixed evaluation budget in a multi-start fashion. Once TuRBO converges, `state.restart_triggered` will be set to true and the run should be aborted. If you want to run more evaluations with TuRBO, you simply generate a new set of initial points and then keep generating batches until convergence or when the evaluation budget has been exceeded. It's important to note that evaluations from previous instances are discarded when TuRBO restarts.NOTE: We use a `SingleTaskGP` with a noise constraint to keep the noise from getting too large as the problem is noise-free.
###Code
batch_size = 4
n_init = 20 # 2*dim, which corresponds to 5 batches of 4
X_turbo = get_initial_points(dim, n_init)
Y_turbo = torch.tensor(
[eval_objective(x) for x in X_turbo], dtype=dtype, device=device
).unsqueeze(-1)
state = TurboState(dim, batch_size=batch_size)
while not state.restart_triggered: # Run until TuRBO converges
# Fit a GP model
train_Y = (Y_turbo - Y_turbo.mean()) / Y_turbo.std()
likelihood = GaussianLikelihood(noise_constraint=Interval(1e-8, 1e-3))
model = SingleTaskGP(X_turbo, train_Y, likelihood=likelihood)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_model(mll)
# Create a batch
X_next = generate_batch(
state=state,
model=model,
X=X_turbo,
Y=train_Y,
batch_size=batch_size,
n_candidates=min(5000, max(2000, 200 * dim)),
num_restarts=10,
raw_samples=512,
acqf="ts",
)
Y_next = torch.tensor(
[eval_objective(x) for x in X_next], dtype=dtype, device=device
).unsqueeze(-1)
# Update state
state = update_state(state=state, Y_next=Y_next)
# Append data
X_turbo = torch.cat((X_turbo, X_next), dim=0)
Y_turbo = torch.cat((Y_turbo, Y_next), dim=0)
# Print current status
print(
f"{len(X_turbo)}) Best value: {state.best_value:.2e}, TR length: {state.length:.2e}"
)
###Output
24) Best value: -1.62e+01, TR length: 8.00e-01
28) Best value: -1.36e+01, TR length: 8.00e-01
32) Best value: -1.34e+01, TR length: 8.00e-01
36) Best value: -1.34e+01, TR length: 8.00e-01
40) Best value: -1.30e+01, TR length: 8.00e-01
44) Best value: -1.28e+01, TR length: 8.00e-01
48) Best value: -1.18e+01, TR length: 8.00e-01
52) Best value: -9.22e+00, TR length: 8.00e-01
56) Best value: -9.22e+00, TR length: 8.00e-01
60) Best value: -9.21e+00, TR length: 8.00e-01
64) Best value: -9.21e+00, TR length: 8.00e-01
68) Best value: -9.21e+00, TR length: 8.00e-01
72) Best value: -9.21e+00, TR length: 4.00e-01
76) Best value: -7.59e+00, TR length: 4.00e-01
80) Best value: -6.80e+00, TR length: 4.00e-01
84) Best value: -5.27e+00, TR length: 4.00e-01
88) Best value: -5.27e+00, TR length: 4.00e-01
92) Best value: -5.27e+00, TR length: 4.00e-01
96) Best value: -5.27e+00, TR length: 2.00e-01
100) Best value: -4.00e+00, TR length: 2.00e-01
104) Best value: -4.00e+00, TR length: 2.00e-01
108) Best value: -3.90e+00, TR length: 2.00e-01
112) Best value: -3.90e+00, TR length: 2.00e-01
116) Best value: -3.90e+00, TR length: 2.00e-01
120) Best value: -3.90e+00, TR length: 1.00e-01
124) Best value: -2.88e+00, TR length: 1.00e-01
128) Best value: -2.24e+00, TR length: 1.00e-01
132) Best value: -2.24e+00, TR length: 1.00e-01
136) Best value: -2.24e+00, TR length: 1.00e-01
140) Best value: -2.24e+00, TR length: 5.00e-02
144) Best value: -1.91e+00, TR length: 5.00e-02
148) Best value: -1.91e+00, TR length: 5.00e-02
152) Best value: -1.91e+00, TR length: 5.00e-02
156) Best value: -1.54e+00, TR length: 5.00e-02
160) Best value: -1.54e+00, TR length: 5.00e-02
164) Best value: -1.38e+00, TR length: 5.00e-02
168) Best value: -1.38e+00, TR length: 5.00e-02
172) Best value: -1.38e+00, TR length: 5.00e-02
176) Best value: -1.38e+00, TR length: 2.50e-02
180) Best value: -9.05e-01, TR length: 2.50e-02
184) Best value: -9.05e-01, TR length: 2.50e-02
188) Best value: -9.05e-01, TR length: 2.50e-02
192) Best value: -8.34e-01, TR length: 2.50e-02
196) Best value: -8.34e-01, TR length: 2.50e-02
200) Best value: -8.34e-01, TR length: 2.50e-02
204) Best value: -8.34e-01, TR length: 1.25e-02
208) Best value: -7.10e-01, TR length: 1.25e-02
212) Best value: -7.10e-01, TR length: 1.25e-02
216) Best value: -6.75e-01, TR length: 1.25e-02
220) Best value: -6.75e-01, TR length: 1.25e-02
224) Best value: -6.75e-01, TR length: 1.25e-02
228) Best value: -3.55e-01, TR length: 1.25e-02
232) Best value: -3.55e-01, TR length: 1.25e-02
236) Best value: -2.47e-01, TR length: 1.25e-02
240) Best value: -2.47e-01, TR length: 1.25e-02
244) Best value: -1.96e-01, TR length: 1.25e-02
248) Best value: -1.96e-01, TR length: 1.25e-02
252) Best value: -1.96e-01, TR length: 1.25e-02
256) Best value: -1.96e-01, TR length: 6.25e-03
###Markdown
EIAs a baseline, we compare TuRBO to qEI
###Code
X_ei = get_initial_points(dim, n_init)
Y_ei = torch.tensor(
[eval_objective(x) for x in X_ei], dtype=dtype, device=device
).unsqueeze(-1)
while len(Y_ei) < len(Y_turbo):
train_Y = (Y_ei - Y_ei.mean()) / Y_ei.std()
likelihood = GaussianLikelihood(noise_constraint=Interval(1e-8, 1e-3))
model = SingleTaskGP(X_ei, train_Y, likelihood=likelihood)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_model(mll)
# Create a batch
ei = qExpectedImprovement(model, train_Y.max(), maximize=True)
candidate, acq_value = optimize_acqf(
ei,
bounds=torch.stack(
[
torch.zeros(dim, dtype=dtype, device=device),
torch.ones(dim, dtype=dtype, device=device),
]
),
q=batch_size,
num_restarts=10,
raw_samples=512,
)
Y_next = torch.tensor(
[eval_objective(x) for x in candidate], dtype=dtype, device=device
).unsqueeze(-1)
# Append data
X_ei = torch.cat((X_ei, candidate), axis=0)
Y_ei = torch.cat((Y_ei, Y_next), axis=0)
# Print current status
print(f"{len(X_ei)}) Best value: {Y_ei.max().item():.2e}")
###Output
24) Best value: -1.07e+01
28) Best value: -1.04e+01
32) Best value: -9.46e+00
36) Best value: -8.97e+00
40) Best value: -8.97e+00
44) Best value: -8.97e+00
48) Best value: -8.97e+00
52) Best value: -8.97e+00
56) Best value: -8.22e+00
60) Best value: -8.22e+00
64) Best value: -8.22e+00
68) Best value: -7.37e+00
72) Best value: -7.37e+00
76) Best value: -7.37e+00
80) Best value: -7.26e+00
84) Best value: -7.26e+00
88) Best value: -7.26e+00
92) Best value: -7.26e+00
96) Best value: -7.26e+00
100) Best value: -7.26e+00
104) Best value: -7.26e+00
108) Best value: -7.26e+00
112) Best value: -7.26e+00
116) Best value: -7.26e+00
120) Best value: -7.26e+00
124) Best value: -7.26e+00
128) Best value: -7.26e+00
132) Best value: -7.26e+00
136) Best value: -7.26e+00
140) Best value: -7.26e+00
144) Best value: -7.26e+00
148) Best value: -7.26e+00
152) Best value: -7.26e+00
156) Best value: -7.26e+00
160) Best value: -7.26e+00
164) Best value: -7.26e+00
168) Best value: -7.26e+00
172) Best value: -7.26e+00
176) Best value: -7.26e+00
180) Best value: -7.26e+00
184) Best value: -7.26e+00
188) Best value: -7.26e+00
192) Best value: -7.26e+00
196) Best value: -7.26e+00
200) Best value: -7.26e+00
204) Best value: -7.26e+00
208) Best value: -7.26e+00
212) Best value: -7.26e+00
216) Best value: -7.26e+00
220) Best value: -7.26e+00
224) Best value: -7.26e+00
228) Best value: -7.26e+00
232) Best value: -7.26e+00
236) Best value: -7.26e+00
240) Best value: -7.26e+00
244) Best value: -7.26e+00
248) Best value: -7.26e+00
252) Best value: -7.26e+00
256) Best value: -7.26e+00
###Markdown
Sobol
###Code
X_Sobol = (SobolEngine(dim, scramble=True).draw(len(X_turbo)).to(dtype=dtype, device=device))
Y_Sobol = torch.tensor([eval_objective(x) for x in X_Sobol], dtype=dtype, device=device).unsqueeze(-1)
###Output
_____no_output_____
###Markdown
Compare the methods
###Code
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rc
%matplotlib inline
names = ["TuRBO-1", "EI", "Sobol"]
runs = [Y_turbo, Y_ei, Y_Sobol]
fig, ax = plt.subplots(figsize=(8, 6))
for name, run in zip(names, runs):
fx = np.maximum.accumulate(run.cpu())
plt.plot(fx, marker="", lw=3)
plt.plot([0, len(Y_turbo)], [fun.optimal_value, fun.optimal_value], "k--", lw=3)
plt.xlabel("Function value", fontsize=18)
plt.xlabel("Number of evaluations", fontsize=18)
plt.title("10D Ackley", fontsize=24)
plt.xlim([0, len(Y_turbo)])
plt.ylim([-20, 1])
plt.grid(True)
plt.tight_layout()
plt.legend(
names + ["Global optimal value"],
loc="lower center",
bbox_to_anchor=(0, -0.08, 1, 1),
bbox_transform=plt.gcf().transFigure,
ncol=4,
fontsize=16,
)
plt.show()
###Output
_____no_output_____
###Markdown
BO with TuRBO-1 and TS/qEIIn this tutorial, we show how to implement Trust Region Bayesian Optimization (TuRBO) [1] in a closed loop in BoTorch.This implementation uses one trust region (TuRBO-1) and supports either parallel expected improvement (qEI) or Thompson sampling (TS). We optimize the $20D$ Ackley function on the domain $[-5, 10]^{20}$ and show that TuRBO-1 outperforms qEI as well as Sobol.Since botorch assumes a maximization problem, we will attempt to maximize $-f(x)$ to achieve $\max_x -f(x)=0$.[1]: [Eriksson, David, et al. Scalable global optimization via local Bayesian optimization. Advances in Neural Information Processing Systems. 2019](https://proceedings.neurips.cc/paper/2019/file/6c990b7aca7bc7058f5e98ea909e924b-Paper.pdf)
###Code
import os
import math
from dataclasses import dataclass
import torch
from botorch.acquisition import qExpectedImprovement
from botorch.fit import fit_gpytorch_model
from botorch.generation import MaxPosteriorSampling
from botorch.models import SingleTaskGP
from botorch.optim import optimize_acqf
from botorch.test_functions import Ackley
from botorch.utils.transforms import unnormalize
from torch.quasirandom import SobolEngine
import gpytorch
from gpytorch.constraints import Interval
from gpytorch.kernels import MaternKernel, ScaleKernel
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.mlls import ExactMarginalLogLikelihood
from gpytorch.priors import HorseshoePrior
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.double
SMOKE_TEST = os.environ.get("SMOKE_TEST")
###Output
_____no_output_____
###Markdown
Optimize the 20-dimensional Ackley functionThe goal is to minimize the popular Ackley function:$f(x_1,\ldots,x_d) = -20\exp\left(-0.2 \sqrt{\frac{1}{d} \sum_{j=1}^d x_j^2} \right) -\exp \left( \frac{1}{d} \sum_{j=1}^d \cos(2 \pi x_j) \right) + 20 + e$over the domain $[-5, 10]^{20}$. The global optimal value of $0$ is attained at $x_1 = \ldots = x_d = 0$.As mentioned above, since botorch assumes a maximization problem, we instead maximize $-f(x)$.
###Code
fun = Ackley(dim=20, negate=True).to(dtype=dtype, device=device)
fun.bounds[0, :].fill_(-5)
fun.bounds[1, :].fill_(10)
dim = fun.dim
lb, ub = fun.bounds
batch_size = 4
n_init = 2 * dim
max_cholesky_size = float("inf") # Always use Cholesky
def eval_objective(x):
"""This is a helper function we use to unnormalize and evalaute a point"""
return fun(unnormalize(x, fun.bounds))
###Output
_____no_output_____
###Markdown
Maintain the TuRBO stateTuRBO needs to maintain a state, which includes the length of the trust region, success and failure counters, success and failure tolerance, etc. In this tutorial we store the state in a dataclass and update the state of TuRBO after each batch evaluation. **Note**: These settings assume that the domain has been scaled to $[0, 1]^d$ and that the same batch size is used for each iteration.
###Code
@dataclass
class TurboState:
dim: int
batch_size: int
length: float = 0.8
length_min: float = 0.5 ** 7
length_max: float = 1.6
failure_counter: int = 0
failure_tolerance: int = float("nan") # Note: Post-initialized
success_counter: int = 0
success_tolerance: int = 10 # Note: The original paper uses 3
best_value: float = -float("inf")
restart_triggered: bool = False
def __post_init__(self):
self.failure_tolerance = math.ceil(
max([4.0 / self.batch_size, float(self.dim) / self.batch_size])
)
def update_state(state, Y_next):
if max(Y_next) > state.best_value + 1e-3 * math.fabs(state.best_value):
state.success_counter += 1
state.failure_counter = 0
else:
state.success_counter = 0
state.failure_counter += 1
if state.success_counter == state.success_tolerance: # Expand trust region
state.length = min(2.0 * state.length, state.length_max)
state.success_counter = 0
elif state.failure_counter == state.failure_tolerance: # Shrink trust region
state.length /= 2.0
state.failure_counter = 0
state.best_value = max(state.best_value, max(Y_next).item())
if state.length < state.length_min:
state.restart_triggered = True
return state
###Output
_____no_output_____
###Markdown
Take a look at the state
###Code
state = TurboState(dim=dim, batch_size=batch_size)
print(state)
###Output
TurboState(dim=20, batch_size=4, length=0.8, length_min=0.0078125, length_max=1.6, failure_counter=0, failure_tolerance=5, success_counter=0, success_tolerance=10, best_value=-inf, restart_triggered=False)
###Markdown
Generate initial pointsThis generates an initial set of Sobol points that we use to start off the BO loop.
###Code
def get_initial_points(dim, n_pts, seed=0):
sobol = SobolEngine(dimension=dim, scramble=True, seed=seed)
X_init = sobol.draw(n=n_pts).to(dtype=dtype, device=device)
return X_init
###Output
_____no_output_____
###Markdown
Generate new batchGiven the current `state` and a probabilistic (GP) `model` built from observations `X` and `Y`, we generate a new batch of points. This method works on the domain $[0, 1]^d$, so make sure to not pass in observations from the true domain. `unnormalize` is called before the true function is evaluated which will first map the points back to the original domain.We support either TS or qEI, which can be specified via the `acqf` argument.
###Code
def generate_batch(
state,
model, # GP model
X, # Evaluated points on the domain [0, 1]^d
Y, # Function values
batch_size,
n_candidates=None, # Number of candidates for Thompson sampling
num_restarts=10,
raw_samples=512,
acqf="ts", # "ei" or "ts"
):
assert acqf in ("ts", "ei")
assert X.min() >= 0.0 and X.max() <= 1.0 and torch.all(torch.isfinite(Y))
if n_candidates is None:
n_candidates = min(5000, max(2000, 200 * X.shape[-1]))
# Scale the TR to be proportional to the lengthscales
x_center = X[Y.argmax(), :].clone()
weights = model.covar_module.base_kernel.lengthscale.squeeze().detach()
weights = weights / weights.mean()
weights = weights / torch.prod(weights.pow(1.0 / len(weights)))
tr_lb = torch.clamp(x_center - weights * state.length / 2.0, 0.0, 1.0)
tr_ub = torch.clamp(x_center + weights * state.length / 2.0, 0.0, 1.0)
if acqf == "ts":
dim = X.shape[-1]
sobol = SobolEngine(dim, scramble=True)
pert = sobol.draw(n_candidates).to(dtype=dtype, device=device)
pert = tr_lb + (tr_ub - tr_lb) * pert
# Create a perturbation mask
prob_perturb = min(20.0 / dim, 1.0)
mask = (
torch.rand(n_candidates, dim, dtype=dtype, device=device)
<= prob_perturb
)
ind = torch.where(mask.sum(dim=1) == 0)[0]
mask[ind, torch.randint(0, dim - 1, size=(len(ind),), device=device)] = 1
# Create candidate points from the perturbations and the mask
X_cand = x_center.expand(n_candidates, dim).clone()
X_cand[mask] = pert[mask]
# Sample on the candidate points
thompson_sampling = MaxPosteriorSampling(model=model, replacement=False)
with torch.no_grad(): # We don't need gradients when using TS
X_next = thompson_sampling(X_cand, num_samples=batch_size)
elif acqf == "ei":
        ei = qExpectedImprovement(model, Y.max(), maximize=True)
X_next, acq_value = optimize_acqf(
ei,
bounds=torch.stack([tr_lb, tr_ub]),
q=batch_size,
num_restarts=num_restarts,
raw_samples=raw_samples,
)
return X_next
###Output
_____no_output_____
###Markdown
Optimization loopThis simple loop runs one instance of TuRBO-1 with Thompson sampling until convergence.TuRBO-1 is a local optimizer that can be used for a fixed evaluation budget in a multi-start fashion. Once TuRBO converges, `state.restart_triggered` will be set to true and the run should be aborted. If you want to run more evaluations with TuRBO, you simply generate a new set of initial points and then keep generating batches until convergence or when the evaluation budget has been exceeded. It's important to note that evaluations from previous instances are discarded when TuRBO restarts.NOTE: We use a `SingleTaskGP` with a noise constraint to keep the noise from getting too large as the problem is noise-free.
###Code
X_turbo = get_initial_points(dim, n_init)
Y_turbo = torch.tensor(
[eval_objective(x) for x in X_turbo], dtype=dtype, device=device
).unsqueeze(-1)
state = TurboState(dim, batch_size=batch_size)
NUM_RESTARTS = 10 if not SMOKE_TEST else 2
RAW_SAMPLES = 512 if not SMOKE_TEST else 4
N_CANDIDATES = min(5000, max(2000, 200 * dim)) if not SMOKE_TEST else 4
while not state.restart_triggered: # Run until TuRBO converges
# Fit a GP model
train_Y = (Y_turbo - Y_turbo.mean()) / Y_turbo.std()
likelihood = GaussianLikelihood(noise_constraint=Interval(1e-8, 1e-3))
covar_module = ScaleKernel( # Use the same lengthscale prior as in the TuRBO paper
MaternKernel(nu=2.5, ard_num_dims=dim, lengthscale_constraint=Interval(0.005, 4.0))
)
model = SingleTaskGP(X_turbo, train_Y, covar_module=covar_module, likelihood=likelihood)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
# Do the fitting and acquisition function optimization inside the Cholesky context
with gpytorch.settings.max_cholesky_size(max_cholesky_size):
# Fit the model
fit_gpytorch_model(mll)
# Create a batch
X_next = generate_batch(
state=state,
model=model,
X=X_turbo,
Y=train_Y,
batch_size=batch_size,
n_candidates=N_CANDIDATES,
num_restarts=NUM_RESTARTS,
raw_samples=RAW_SAMPLES,
acqf="ts",
)
Y_next = torch.tensor(
[eval_objective(x) for x in X_next], dtype=dtype, device=device
).unsqueeze(-1)
# Update state
state = update_state(state=state, Y_next=Y_next)
# Append data
X_turbo = torch.cat((X_turbo, X_next), dim=0)
Y_turbo = torch.cat((Y_turbo, Y_next), dim=0)
# Print current status
print(
f"{len(X_turbo)}) Best value: {state.best_value:.2e}, TR length: {state.length:.2e}"
)
###Output
44) Best value: -1.11e+01, TR length: 8.00e-01
48) Best value: -1.08e+01, TR length: 8.00e-01
52) Best value: -1.01e+01, TR length: 8.00e-01
56) Best value: -1.01e+01, TR length: 8.00e-01
60) Best value: -1.01e+01, TR length: 8.00e-01
64) Best value: -9.05e+00, TR length: 8.00e-01
68) Best value: -9.05e+00, TR length: 8.00e-01
72) Best value: -9.05e+00, TR length: 8.00e-01
76) Best value: -9.05e+00, TR length: 8.00e-01
80) Best value: -9.05e+00, TR length: 8.00e-01
84) Best value: -8.11e+00, TR length: 8.00e-01
88) Best value: -7.41e+00, TR length: 8.00e-01
92) Best value: -7.41e+00, TR length: 8.00e-01
96) Best value: -7.41e+00, TR length: 8.00e-01
100) Best value: -7.41e+00, TR length: 8.00e-01
104) Best value: -7.41e+00, TR length: 8.00e-01
108) Best value: -7.41e+00, TR length: 4.00e-01
112) Best value: -6.57e+00, TR length: 4.00e-01
116) Best value: -6.27e+00, TR length: 4.00e-01
120) Best value: -6.24e+00, TR length: 4.00e-01
124) Best value: -5.58e+00, TR length: 4.00e-01
128) Best value: -5.58e+00, TR length: 4.00e-01
132) Best value: -5.57e+00, TR length: 4.00e-01
136) Best value: -5.57e+00, TR length: 4.00e-01
140) Best value: -5.57e+00, TR length: 4.00e-01
144) Best value: -5.57e+00, TR length: 4.00e-01
148) Best value: -5.57e+00, TR length: 4.00e-01
152) Best value: -5.33e+00, TR length: 4.00e-01
156) Best value: -5.32e+00, TR length: 4.00e-01
160) Best value: -5.32e+00, TR length: 4.00e-01
164) Best value: -5.32e+00, TR length: 4.00e-01
168) Best value: -5.32e+00, TR length: 4.00e-01
172) Best value: -5.32e+00, TR length: 2.00e-01
176) Best value: -4.98e+00, TR length: 2.00e-01
180) Best value: -4.27e+00, TR length: 2.00e-01
184) Best value: -4.04e+00, TR length: 2.00e-01
188) Best value: -4.04e+00, TR length: 2.00e-01
192) Best value: -4.04e+00, TR length: 2.00e-01
196) Best value: -4.04e+00, TR length: 2.00e-01
200) Best value: -4.04e+00, TR length: 2.00e-01
204) Best value: -4.02e+00, TR length: 2.00e-01
208) Best value: -4.02e+00, TR length: 2.00e-01
212) Best value: -3.90e+00, TR length: 2.00e-01
216) Best value: -3.90e+00, TR length: 2.00e-01
220) Best value: -3.84e+00, TR length: 2.00e-01
224) Best value: -3.84e+00, TR length: 2.00e-01
228) Best value: -3.84e+00, TR length: 2.00e-01
232) Best value: -3.84e+00, TR length: 2.00e-01
236) Best value: -3.84e+00, TR length: 2.00e-01
240) Best value: -3.84e+00, TR length: 1.00e-01
244) Best value: -3.65e+00, TR length: 1.00e-01
248) Best value: -3.35e+00, TR length: 1.00e-01
252) Best value: -3.35e+00, TR length: 1.00e-01
256) Best value: -3.03e+00, TR length: 1.00e-01
260) Best value: -3.03e+00, TR length: 1.00e-01
264) Best value: -3.03e+00, TR length: 1.00e-01
268) Best value: -2.74e+00, TR length: 1.00e-01
272) Best value: -2.74e+00, TR length: 1.00e-01
276) Best value: -2.74e+00, TR length: 1.00e-01
280) Best value: -2.74e+00, TR length: 1.00e-01
284) Best value: -2.52e+00, TR length: 1.00e-01
288) Best value: -2.52e+00, TR length: 1.00e-01
292) Best value: -2.49e+00, TR length: 1.00e-01
296) Best value: -2.49e+00, TR length: 1.00e-01
300) Best value: -2.49e+00, TR length: 1.00e-01
304) Best value: -2.49e+00, TR length: 1.00e-01
308) Best value: -2.49e+00, TR length: 1.00e-01
312) Best value: -2.49e+00, TR length: 5.00e-02
316) Best value: -2.09e+00, TR length: 5.00e-02
320) Best value: -2.09e+00, TR length: 5.00e-02
324) Best value: -2.09e+00, TR length: 5.00e-02
328) Best value: -1.83e+00, TR length: 5.00e-02
332) Best value: -1.83e+00, TR length: 5.00e-02
336) Best value: -1.83e+00, TR length: 5.00e-02
340) Best value: -1.81e+00, TR length: 5.00e-02
344) Best value: -1.81e+00, TR length: 5.00e-02
348) Best value: -1.81e+00, TR length: 5.00e-02
352) Best value: -1.81e+00, TR length: 5.00e-02
356) Best value: -1.81e+00, TR length: 5.00e-02
360) Best value: -1.81e+00, TR length: 2.50e-02
364) Best value: -1.38e+00, TR length: 2.50e-02
368) Best value: -1.38e+00, TR length: 2.50e-02
372) Best value: -1.38e+00, TR length: 2.50e-02
376) Best value: -1.38e+00, TR length: 2.50e-02
380) Best value: -1.11e+00, TR length: 2.50e-02
384) Best value: -1.11e+00, TR length: 2.50e-02
388) Best value: -1.11e+00, TR length: 2.50e-02
392) Best value: -1.11e+00, TR length: 2.50e-02
396) Best value: -1.11e+00, TR length: 2.50e-02
400) Best value: -1.11e+00, TR length: 1.25e-02
404) Best value: -9.81e-01, TR length: 1.25e-02
408) Best value: -8.63e-01, TR length: 1.25e-02
412) Best value: -8.63e-01, TR length: 1.25e-02
416) Best value: -8.63e-01, TR length: 1.25e-02
420) Best value: -8.63e-01, TR length: 1.25e-02
424) Best value: -8.63e-01, TR length: 1.25e-02
428) Best value: -8.63e-01, TR length: 6.25e-03
###Markdown
GP-EIAs a baseline, we compare TuRBO to qEI
###Code
X_ei = get_initial_points(dim, n_init)
Y_ei = torch.tensor(
[eval_objective(x) for x in X_ei], dtype=dtype, device=device
).unsqueeze(-1)
while len(Y_ei) < len(Y_turbo):
train_Y = (Y_ei - Y_ei.mean()) / Y_ei.std()
likelihood = GaussianLikelihood(noise_constraint=Interval(1e-8, 1e-3))
model = SingleTaskGP(X_ei, train_Y, likelihood=likelihood)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_model(mll)
# Create a batch
ei = qExpectedImprovement(model, train_Y.max(), maximize=True)
candidate, acq_value = optimize_acqf(
ei,
bounds=torch.stack(
[
torch.zeros(dim, dtype=dtype, device=device),
torch.ones(dim, dtype=dtype, device=device),
]
),
q=batch_size,
num_restarts=NUM_RESTARTS,
raw_samples=RAW_SAMPLES,
)
Y_next = torch.tensor(
[eval_objective(x) for x in candidate], dtype=dtype, device=device
).unsqueeze(-1)
# Append data
X_ei = torch.cat((X_ei, candidate), axis=0)
Y_ei = torch.cat((Y_ei, Y_next), axis=0)
# Print current status
print(f"{len(X_ei)}) Best value: {Y_ei.max().item():.2e}")
###Output
44) Best value: -1.12e+01
48) Best value: -1.06e+01
52) Best value: -9.38e+00
56) Best value: -9.38e+00
60) Best value: -8.60e+00
64) Best value: -8.60e+00
68) Best value: -8.60e+00
72) Best value: -8.60e+00
76) Best value: -8.60e+00
80) Best value: -8.60e+00
84) Best value: -8.60e+00
88) Best value: -8.60e+00
92) Best value: -8.60e+00
96) Best value: -8.60e+00
100) Best value: -8.60e+00
104) Best value: -8.60e+00
108) Best value: -8.60e+00
112) Best value: -8.60e+00
116) Best value: -8.60e+00
120) Best value: -8.60e+00
124) Best value: -8.60e+00
128) Best value: -8.60e+00
132) Best value: -8.60e+00
136) Best value: -8.60e+00
140) Best value: -8.60e+00
144) Best value: -8.60e+00
148) Best value: -8.60e+00
152) Best value: -8.60e+00
156) Best value: -8.60e+00
160) Best value: -8.60e+00
164) Best value: -8.60e+00
168) Best value: -8.60e+00
172) Best value: -8.60e+00
176) Best value: -8.60e+00
180) Best value: -8.60e+00
184) Best value: -8.60e+00
188) Best value: -8.60e+00
192) Best value: -8.60e+00
196) Best value: -8.60e+00
200) Best value: -8.60e+00
204) Best value: -8.60e+00
208) Best value: -8.60e+00
212) Best value: -8.60e+00
216) Best value: -8.60e+00
220) Best value: -8.60e+00
224) Best value: -8.60e+00
228) Best value: -8.60e+00
232) Best value: -8.60e+00
236) Best value: -8.60e+00
240) Best value: -8.60e+00
244) Best value: -8.60e+00
248) Best value: -8.60e+00
252) Best value: -8.60e+00
256) Best value: -8.60e+00
260) Best value: -8.60e+00
264) Best value: -8.60e+00
268) Best value: -8.60e+00
272) Best value: -8.60e+00
276) Best value: -8.60e+00
280) Best value: -8.60e+00
284) Best value: -8.60e+00
288) Best value: -8.60e+00
292) Best value: -8.60e+00
296) Best value: -8.60e+00
300) Best value: -8.60e+00
304) Best value: -8.60e+00
308) Best value: -8.60e+00
312) Best value: -8.60e+00
316) Best value: -8.60e+00
320) Best value: -8.60e+00
324) Best value: -8.60e+00
328) Best value: -8.60e+00
332) Best value: -8.60e+00
336) Best value: -8.60e+00
340) Best value: -8.60e+00
344) Best value: -8.60e+00
348) Best value: -8.60e+00
352) Best value: -8.60e+00
356) Best value: -8.60e+00
360) Best value: -8.60e+00
364) Best value: -8.60e+00
368) Best value: -8.60e+00
372) Best value: -8.60e+00
376) Best value: -8.60e+00
380) Best value: -8.60e+00
384) Best value: -8.60e+00
388) Best value: -8.60e+00
392) Best value: -8.60e+00
396) Best value: -8.60e+00
400) Best value: -8.60e+00
404) Best value: -8.60e+00
408) Best value: -8.60e+00
412) Best value: -8.60e+00
416) Best value: -8.60e+00
420) Best value: -8.60e+00
424) Best value: -8.60e+00
428) Best value: -8.60e+00
###Markdown
Sobol
###Code
X_Sobol = SobolEngine(dim, scramble=True, seed=0).draw(len(X_turbo)).to(dtype=dtype, device=device)
Y_Sobol = torch.tensor([eval_objective(x) for x in X_Sobol], dtype=dtype, device=device).unsqueeze(-1)
###Output
_____no_output_____
###Markdown
Compare the methods
###Code
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rc
%matplotlib inline
names = ["TuRBO-1", "EI", "Sobol"]
runs = [Y_turbo, Y_ei, Y_Sobol]
fig, ax = plt.subplots(figsize=(8, 6))
for name, run in zip(names, runs):
fx = np.maximum.accumulate(run.cpu())
plt.plot(fx, marker="", lw=3)
plt.plot([0, len(Y_turbo)], [fun.optimal_value, fun.optimal_value], "k--", lw=3)
plt.xlabel("Function value", fontsize=18)
plt.xlabel("Number of evaluations", fontsize=18)
plt.title("20D Ackley", fontsize=24)
plt.xlim([0, len(Y_turbo)])
plt.ylim([-15, 1])
plt.grid(True)
plt.tight_layout()
plt.legend(
names + ["Global optimal value"],
loc="lower center",
bbox_to_anchor=(0, -0.08, 1, 1),
bbox_transform=plt.gcf().transFigure,
ncol=4,
fontsize=16,
)
plt.show()
###Output
_____no_output_____
###Markdown
BO with TuRBO-1 and TS/qEIIn this tutorial, we show how to implement Trust Region Bayesian Optimization (TuRBO) [1] in a closed loop in BoTorch.This implementation uses one trust region (TuRBO-1) and supports either parallel expected improvement (qEI) or Thompson sampling (TS). We optimize the $10D$ Ackley function on the domain $[-10, 15]^{10}$ and show that TuRBO-1 outperforms qEI as well as Sobol.Since botorch assumes a maximization problem, we will attempt to maximize $-f(x)$ to achieve $\max_x -f(x)=0$.[1]: [Eriksson, David, et al. Scalable global optimization via local Bayesian optimization. Advances in Neural Information Processing Systems. 2019](https://proceedings.neurips.cc/paper/2019/file/6c990b7aca7bc7058f5e98ea909e924b-Paper.pdf)
###Code
import os
import math
from dataclasses import dataclass
import torch
from botorch.acquisition import qExpectedImprovement
from botorch.fit import fit_gpytorch_model
from botorch.generation import MaxPosteriorSampling
from botorch.models import FixedNoiseGP, SingleTaskGP
from botorch.optim import optimize_acqf
from botorch.test_functions import Ackley
from botorch.utils.transforms import unnormalize
from torch.quasirandom import SobolEngine
import gpytorch
from gpytorch.constraints import Interval
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.mlls import ExactMarginalLogLikelihood
from gpytorch.priors import HorseshoePrior
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.double
SMOKE_TEST = os.environ.get("SMOKE_TEST")
###Output
_____no_output_____
###Markdown
Optimize the 10-dimensional Ackley functionThe goal is to minimize the popular Ackley function:$f(x_1,\ldots,x_d) = -20\exp\left(-0.2 \sqrt{\frac{1}{d} \sum_{j=1}^d x_j^2} \right) -\exp \left( \frac{1}{d} \sum_{j=1}^d \cos(2 \pi x_j) \right) + 20 + e$over the domain $[-10, 15]^{10}$. The global optimal value of $0$ is attained at $x_1 = \ldots = x_d = 0$.As mentioned above, since botorch assumes a maximization problem, we instead maximize $-f(x)$.
###Code
fun = Ackley(dim=10, negate=True).to(dtype=dtype, device=device)
fun.bounds[0, :].fill_(-10)
fun.bounds[1, :].fill_(15)
dim = fun.dim
lb, ub = fun.bounds
def eval_objective(x):
"""This is a helper function we use to unnormalize and evalaute a point"""
return fun(unnormalize(x, fun.bounds))
###Output
_____no_output_____
###Markdown
Maintain the TuRBO stateTuRBO needs to maintain a state, which includes the length of the trust region, success and failure counters, success and failure tolerance, etc. In this tutorial we store the state in a dataclass and update the state of TuRBO after each batch evaluation. **Note**: These settings assume that the domain has been scaled to $[0, 1]^d$ and that the same batch size is used for each iteration.
###Code
@dataclass
class TurboState:
dim: int
batch_size: int
length: float = 0.8
length_min: float = 0.5 ** 7
length_max: float = 1.6
failure_counter: int = 0
failure_tolerance: int = float("nan") # Note: Post-initialized
success_counter: int = 0
success_tolerance: int = 10 # Note: The original paper uses 3
best_value: float = -float("inf")
restart_triggered: bool = False
def __post_init__(self):
self.failure_tolerance = math.ceil(
max([4.0 / self.batch_size, float(self.dim) / self.batch_size])
)
def update_state(state, Y_next):
if max(Y_next) > state.best_value + 1e-3 * math.fabs(state.best_value):
state.success_counter += 1
state.failure_counter = 0
else:
state.success_counter = 0
state.failure_counter += 1
if state.success_counter == state.success_tolerance: # Expand trust region
state.length = min(2.0 * state.length, state.length_max)
state.success_counter = 0
elif state.failure_counter == state.failure_tolerance: # Shrink trust region
state.length /= 2.0
state.failure_counter = 0
state.best_value = max(state.best_value, max(Y_next).item())
if state.length < state.length_min:
state.restart_triggered = True
return state
###Output
_____no_output_____
###Markdown
Take a look at the state
###Code
state = TurboState(dim=dim, batch_size=4)
print(state)
###Output
TurboState(dim=10, batch_size=4, length=0.8, length_min=0.0078125, length_max=1.6, failure_counter=0, failure_tolerance=3, success_counter=0, success_tolerance=10, best_value=-inf, restart_triggered=False)
###Markdown
Generate initial pointsThis generates an initial set of Sobol points that we use to start off the BO loop.
###Code
def get_initial_points(dim, n_pts):
sobol = SobolEngine(dimension=dim, scramble=True)
X_init = sobol.draw(n=n_pts).to(dtype=dtype, device=device)
return X_init
###Output
_____no_output_____
###Markdown
Generate new batchGiven the current `state` and a probabilistic (GP) `model` built from observations `X` and `Y`, we generate a new batch of points. This method works on the domain $[0, 1]^d$, so make sure to not pass in observations from the true domain. `unnormalize` is called before the true function is evaluated which will first map the points back to the original domain.We support either TS or qEI, which can be specified via the `acqf` argument.
###Code
def generate_batch(
state,
model, # GP model
X, # Evaluated points on the domain [0, 1]^d
Y, # Function values
batch_size,
n_candidates=None, # Number of candidates for Thompson sampling
num_restarts=10,
raw_samples=512,
acqf="ts", # "ei" or "ts"
):
assert acqf in ("ts", "ei")
assert X.min() >= 0.0 and X.max() <= 1.0 and torch.all(torch.isfinite(Y))
if n_candidates is None:
n_candidates = min(5000, max(2000, 200 * X.shape[-1]))
# Scale the TR to be proportional to the lengthscales
x_center = X[Y.argmax(), :].clone()
weights = model.covar_module.base_kernel.lengthscale.squeeze().detach()
weights = weights / weights.mean()
weights = weights / torch.prod(weights.pow(1.0 / len(weights)))
tr_lb = torch.clamp(x_center - weights * state.length / 2.0, 0.0, 1.0)
tr_ub = torch.clamp(x_center + weights * state.length / 2.0, 0.0, 1.0)
if acqf == "ts":
dim = X.shape[-1]
sobol = SobolEngine(dim, scramble=True)
pert = sobol.draw(n_candidates).to(dtype=dtype, device=device)
pert = tr_lb + (tr_ub - tr_lb) * pert
# Create a perturbation mask
prob_perturb = min(20.0 / dim, 1.0)
mask = (
torch.rand(n_candidates, dim, dtype=dtype, device=device)
<= prob_perturb
)
ind = torch.where(mask.sum(dim=1) == 0)[0]
mask[ind, torch.randint(0, dim - 1, size=(len(ind),), device=device)] = 1
# Create candidate points from the perturbations and the mask
X_cand = x_center.expand(n_candidates, dim).clone()
X_cand[mask] = pert[mask]
# Sample on the candidate points
thompson_sampling = MaxPosteriorSampling(model=model, replacement=False)
X_next = thompson_sampling(X_cand, num_samples=batch_size)
elif acqf == "ei":
        ei = qExpectedImprovement(model, Y.max(), maximize=True)
X_next, acq_value = optimize_acqf(
ei,
bounds=torch.stack([tr_lb, tr_ub]),
q=batch_size,
num_restarts=num_restarts,
raw_samples=raw_samples,
)
return X_next
###Output
_____no_output_____
###Markdown
Optimization loopThis simple loop runs one instance of TuRBO-1 with Thompson sampling until convergence.TuRBO-1 is a local optimizer that can be used for a fixed evaluation budget in a multi-start fashion. Once TuRBO converges, `state.restart_triggered` will be set to true and the run should be aborted. If you want to run more evaluations with TuRBO, you simply generate a new set of initial points and then keep generating batches until convergence or when the evaluation budget has been exceeded. It's important to note that evaluations from previous instances are discarded when TuRBO restarts.NOTE: We use a `SingleTaskGP` with a noise constraint to keep the noise from getting too large as the problem is noise-free.
###Code
batch_size = 4
n_init = 20 # 2*dim, which corresponds to 5 batches of 4
X_turbo = get_initial_points(dim, n_init)
Y_turbo = torch.tensor(
[eval_objective(x) for x in X_turbo], dtype=dtype, device=device
).unsqueeze(-1)
state = TurboState(dim, batch_size=batch_size)
NUM_RESTARTS = 10 if not SMOKE_TEST else 2
RAW_SAMPLES = 512 if not SMOKE_TEST else 4
N_CANDIDATES = min(5000, max(2000, 200 * dim)) if not SMOKE_TEST else 4
while not state.restart_triggered: # Run until TuRBO converges
# Fit a GP model
train_Y = (Y_turbo - Y_turbo.mean()) / Y_turbo.std()
likelihood = GaussianLikelihood(noise_constraint=Interval(1e-8, 1e-3))
model = SingleTaskGP(X_turbo, train_Y, likelihood=likelihood)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_model(mll)
# Create a batch
X_next = generate_batch(
state=state,
model=model,
X=X_turbo,
Y=train_Y,
batch_size=batch_size,
n_candidates=N_CANDIDATES,
num_restarts=NUM_RESTARTS,
raw_samples=RAW_SAMPLES,
acqf="ts",
)
Y_next = torch.tensor(
[eval_objective(x) for x in X_next], dtype=dtype, device=device
).unsqueeze(-1)
# Update state
state = update_state(state=state, Y_next=Y_next)
# Append data
X_turbo = torch.cat((X_turbo, X_next), dim=0)
Y_turbo = torch.cat((Y_turbo, Y_next), dim=0)
# Print current status
print(
f"{len(X_turbo)}) Best value: {state.best_value:.2e}, TR length: {state.length:.2e}"
)
###Output
24) Best value: -1.51e+01, TR length: 8.00e-01
28) Best value: -1.51e+01, TR length: 8.00e-01
32) Best value: -1.35e+01, TR length: 8.00e-01
36) Best value: -1.25e+01, TR length: 8.00e-01
40) Best value: -1.11e+01, TR length: 8.00e-01
44) Best value: -1.11e+01, TR length: 8.00e-01
48) Best value: -1.11e+01, TR length: 8.00e-01
52) Best value: -9.97e+00, TR length: 8.00e-01
56) Best value: -9.97e+00, TR length: 8.00e-01
60) Best value: -9.97e+00, TR length: 8.00e-01
64) Best value: -9.97e+00, TR length: 4.00e-01
68) Best value: -7.77e+00, TR length: 4.00e-01
72) Best value: -7.77e+00, TR length: 4.00e-01
76) Best value: -6.39e+00, TR length: 4.00e-01
80) Best value: -6.00e+00, TR length: 4.00e-01
84) Best value: -6.00e+00, TR length: 4.00e-01
88) Best value: -6.00e+00, TR length: 4.00e-01
92) Best value: -6.00e+00, TR length: 2.00e-01
96) Best value: -4.14e+00, TR length: 2.00e-01
100) Best value: -4.14e+00, TR length: 2.00e-01
104) Best value: -4.14e+00, TR length: 2.00e-01
108) Best value: -3.80e+00, TR length: 2.00e-01
112) Best value: -3.80e+00, TR length: 2.00e-01
116) Best value: -3.80e+00, TR length: 2.00e-01
120) Best value: -3.80e+00, TR length: 1.00e-01
124) Best value: -2.43e+00, TR length: 1.00e-01
128) Best value: -2.43e+00, TR length: 1.00e-01
132) Best value: -2.43e+00, TR length: 1.00e-01
136) Best value: -2.43e+00, TR length: 5.00e-02
140) Best value: -2.29e+00, TR length: 5.00e-02
144) Best value: -2.29e+00, TR length: 5.00e-02
148) Best value: -2.29e+00, TR length: 5.00e-02
152) Best value: -2.27e+00, TR length: 5.00e-02
156) Best value: -2.15e+00, TR length: 5.00e-02
160) Best value: -2.15e+00, TR length: 5.00e-02
164) Best value: -1.88e+00, TR length: 5.00e-02
168) Best value: -1.88e+00, TR length: 5.00e-02
172) Best value: -1.88e+00, TR length: 5.00e-02
176) Best value: -1.88e+00, TR length: 2.50e-02
180) Best value: -1.63e+00, TR length: 2.50e-02
184) Best value: -8.17e-01, TR length: 2.50e-02
188) Best value: -8.17e-01, TR length: 2.50e-02
192) Best value: -8.17e-01, TR length: 2.50e-02
196) Best value: -8.17e-01, TR length: 1.25e-02
200) Best value: -8.17e-01, TR length: 1.25e-02
204) Best value: -8.17e-01, TR length: 1.25e-02
208) Best value: -6.86e-01, TR length: 1.25e-02
212) Best value: -6.86e-01, TR length: 1.25e-02
216) Best value: -6.86e-01, TR length: 1.25e-02
220) Best value: -6.86e-01, TR length: 6.25e-03
###Markdown
EIAs a baseline, we compare TuRBO to qEI
###Code
X_ei = get_initial_points(dim, n_init)
Y_ei = torch.tensor(
[eval_objective(x) for x in X_ei], dtype=dtype, device=device
).unsqueeze(-1)
while len(Y_ei) < len(Y_turbo):
train_Y = (Y_ei - Y_ei.mean()) / Y_ei.std()
likelihood = GaussianLikelihood(noise_constraint=Interval(1e-8, 1e-3))
model = SingleTaskGP(X_ei, train_Y, likelihood=likelihood)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_model(mll)
# Create a batch
ei = qExpectedImprovement(model, train_Y.max(), maximize=True)
candidate, acq_value = optimize_acqf(
ei,
bounds=torch.stack(
[
torch.zeros(dim, dtype=dtype, device=device),
torch.ones(dim, dtype=dtype, device=device),
]
),
q=batch_size,
num_restarts=NUM_RESTARTS,
raw_samples=RAW_SAMPLES,
)
Y_next = torch.tensor(
[eval_objective(x) for x in candidate], dtype=dtype, device=device
).unsqueeze(-1)
# Append data
X_ei = torch.cat((X_ei, candidate), axis=0)
Y_ei = torch.cat((Y_ei, Y_next), axis=0)
# Print current status
print(f"{len(X_ei)}) Best value: {Y_ei.max().item():.2e}")
###Output
24) Best value: -1.33e+01
28) Best value: -1.22e+01
32) Best value: -1.13e+01
36) Best value: -1.00e+01
40) Best value: -8.75e+00
44) Best value: -8.01e+00
48) Best value: -8.01e+00
52) Best value: -8.01e+00
56) Best value: -8.01e+00
60) Best value: -8.01e+00
64) Best value: -8.01e+00
68) Best value: -8.01e+00
72) Best value: -8.01e+00
76) Best value: -8.01e+00
80) Best value: -8.01e+00
84) Best value: -8.01e+00
88) Best value: -8.01e+00
92) Best value: -8.01e+00
96) Best value: -8.01e+00
100) Best value: -8.01e+00
104) Best value: -8.01e+00
108) Best value: -8.01e+00
112) Best value: -8.01e+00
116) Best value: -8.01e+00
120) Best value: -8.01e+00
124) Best value: -8.01e+00
128) Best value: -8.01e+00
132) Best value: -8.01e+00
136) Best value: -8.01e+00
140) Best value: -8.01e+00
144) Best value: -8.01e+00
148) Best value: -8.01e+00
152) Best value: -8.01e+00
156) Best value: -8.01e+00
160) Best value: -8.01e+00
164) Best value: -8.01e+00
168) Best value: -8.01e+00
172) Best value: -8.01e+00
176) Best value: -8.01e+00
180) Best value: -8.01e+00
184) Best value: -8.01e+00
188) Best value: -8.01e+00
192) Best value: -8.01e+00
196) Best value: -8.01e+00
200) Best value: -8.01e+00
204) Best value: -8.01e+00
208) Best value: -8.01e+00
212) Best value: -8.01e+00
216) Best value: -8.01e+00
220) Best value: -8.01e+00
###Markdown
Sobol
###Code
X_Sobol = SobolEngine(dim, scramble=True).draw(len(X_turbo)).to(dtype=dtype, device=device)
Y_Sobol = torch.tensor([eval_objective(x) for x in X_Sobol], dtype=dtype, device=device).unsqueeze(-1)
###Output
_____no_output_____
###Markdown
Compare the methods
###Code
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rc
%matplotlib inline
names = ["TuRBO-1", "EI", "Sobol"]
runs = [Y_turbo, Y_ei, Y_Sobol]
fig, ax = plt.subplots(figsize=(8, 6))
for name, run in zip(names, runs):
fx = np.maximum.accumulate(run.cpu())
plt.plot(fx, marker="", lw=3)
plt.plot([0, len(Y_turbo)], [fun.optimal_value, fun.optimal_value], "k--", lw=3)
plt.xlabel("Function value", fontsize=18)
plt.xlabel("Number of evaluations", fontsize=18)
plt.title("10D Ackley", fontsize=24)
plt.xlim([0, len(Y_turbo)])
plt.ylim([-20, 1])
plt.grid(True)
plt.tight_layout()
plt.legend(
names + ["Global optimal value"],
loc="lower center",
bbox_to_anchor=(0, -0.08, 1, 1),
bbox_transform=plt.gcf().transFigure,
ncol=4,
fontsize=16,
)
plt.show()
###Output
_____no_output_____ |
mimic/notebooks/text_preprocessing.ipynb | ###Markdown
Implementing Word2Vec
###Code
sentence_lengths = []
data = ''
for sentence in report_findings:
data+= sentence
sentence_lengths.append(len(sentence))
from matplotlib import pyplot as plt
plt.hist(np.array(sentence_lengths), bins = 30)
plt.show()
print('max_length: ', max(sentence_lengths))
print('mean sent_length: ', np.mean(sentence_lengths))
print(len(data))
tokenizer = get_tokenizer("basic_english")
tokens = tokenizer(data)
print(len(tokens))
tokens = list(set(tokens))
print(len(tokens))
word2idx = {w: idx for (idx, w) in enumerate(tokens)}
idx2word = {idx: w for (idx, w) in enumerate(tokens)}
vocab_size = len(tokens)
print(idx2word)
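# --- Hedged sketch (added for illustration; not from the original notebook) ---
# A minimal skip-gram Word2Vec trained on the vocabulary built above.
# Assumes `data`, `tokenizer`, `word2idx` and `vocab_size` from the cells above;
# the window size, embedding dimension, learning rate and epoch count are arbitrary choices.
import torch
import torch.nn as nn

class SkipGramSketch(nn.Module):
    def __init__(self, vocab_size, embed_dim=64):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, embed_dim)  # word vectors to be learned
        self.out = nn.Linear(embed_dim, vocab_size)       # predicts context words from the center word

    def forward(self, center_ids):
        return self.out(self.embed(center_ids))           # logits over the vocabulary

# (center, context) index pairs from a +/-2 word window over the token stream
token_ids = [word2idx[w] for w in tokenizer(data)]
pairs = [(token_ids[i], token_ids[j])
         for i in range(len(token_ids))
         for j in range(max(0, i - 2), min(len(token_ids), i + 3)) if j != i]
centers = torch.tensor([c for c, _ in pairs])
contexts = torch.tensor([o for _, o in pairs])

sg = SkipGramSketch(vocab_size)
optimizer = torch.optim.Adam(sg.parameters(), lr=1e-3)
loss_fn = nn.CrossEntropyLoss()
for epoch in range(2):  # a couple of full-batch passes, just to show the training loop
    loss = loss_fn(sg(centers), contexts)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
# sg.embed.weight now holds one embedding row per entry of word2idx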
# todo: this: https://github.com/iffsid/mmvae/blob/public/src/datasets.py
class OrderedCounter(Counter, OrderedDict):
"""Counter that remembers the order elements are first encountered."""
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, OrderedDict(self))
def __reduce__(self):
return self.__class__, (OrderedDict(self),)
class CUBSentences(Dataset):
def __init__(self, root_data_dir: str, split: str, transform=None, **kwargs):
"""split: 'trainval' or 'test' """
super().__init__()
self.data_dir = os.path.join(root_data_dir, 'cub')
self.split = split
self.max_sequence_length = kwargs.get('max_sequence_length', 32)
self.min_occ = kwargs.get('min_occ', 3)
self.transform = transform
os.makedirs(os.path.join(root_data_dir, "lang_emb"), exist_ok=True)
self.gen_dir = os.path.join(self.data_dir, "oc:{}_msl:{}".
format(self.min_occ, self.max_sequence_length))
if split == 'train':
self.raw_data_path = os.path.join(self.data_dir, 'text_trainvalclasses.txt')
elif split == 'test':
self.raw_data_path = os.path.join(self.data_dir, 'text_testclasses.txt')
else:
raise Exception("Only train or test split is available")
os.makedirs(self.gen_dir, exist_ok=True)
self.data_file = 'cub.{}.s{}'.format(split, self.max_sequence_length)
self.vocab_file = 'cub.vocab'
if not os.path.exists(os.path.join(self.gen_dir, self.data_file)):
print("Data file not found for {} split at {}. Creating new... (this may take a while)".
format(split.upper(), os.path.join(self.gen_dir, self.data_file)))
self._create_data()
else:
self._load_data()
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
sent = self.data[str(idx)]['idx']
if self.transform is not None:
sent = self.transform(sent)
return sent, self.data[str(idx)]['length']
@property
def vocab_size(self):
return len(self.w2i)
@property
def pad_idx(self):
return self.w2i['<pad>']
@property
def eos_idx(self):
return self.w2i['<eos>']
@property
def unk_idx(self):
        return self.w2i['<exc>']  # the vocabulary below registers '<exc>' (not '<unk>') as its unknown token
def get_w2i(self):
return self.w2i
def get_i2w(self):
return self.i2w
def _load_data(self, vocab=True):
with open(os.path.join(self.gen_dir, self.data_file), 'rb') as file:
self.data = json.load(file)
if vocab:
self._load_vocab()
def _load_vocab(self):
if not os.path.exists(os.path.join(self.gen_dir, self.vocab_file)):
self._create_vocab()
with open(os.path.join(self.gen_dir, self.vocab_file), 'r') as vocab_file:
vocab = json.load(vocab_file)
self.w2i, self.i2w = vocab['w2i'], vocab['i2w']
def _create_data(self):
if self.split == 'train' and not os.path.exists(os.path.join(self.gen_dir, self.vocab_file)):
self._create_vocab()
else:
self._load_vocab()
with open(self.raw_data_path, 'r') as file:
text = file.read()
sentences = sent_tokenize(text)
data = defaultdict(dict)
pad_count = 0
for i, line in enumerate(sentences):
words = word_tokenize(line)
tok = words[:self.max_sequence_length - 1]
tok = tok + ['<eos>']
length = len(tok)
if self.max_sequence_length > length:
tok.extend(['<pad>'] * (self.max_sequence_length - length))
pad_count += 1
idx = [self.w2i.get(w, self.w2i['<exc>']) for w in tok]
id = len(data)
data[id]['tok'] = tok
data[id]['idx'] = idx
data[id]['length'] = length
print("{} out of {} sentences are truncated with max sentence length {}.".
format(len(sentences) - pad_count, len(sentences), self.max_sequence_length))
with io.open(os.path.join(self.gen_dir, self.data_file), 'wb') as data_file:
data = json.dumps(data, ensure_ascii=False)
data_file.write(data.encode('utf8', 'replace'))
self._load_data(vocab=False)
def _create_vocab(self):
        assert self.split == 'train', "Vocabulary can only be created for the training file."
with open(self.raw_data_path, 'r') as file:
text = file.read()
sentences = sent_tokenize(text)
occ_register = OrderedCounter()
w2i = dict()
i2w = dict()
special_tokens = ['<exc>', '<pad>', '<eos>']
for st in special_tokens:
i2w[len(w2i)] = st
w2i[st] = len(w2i)
texts = []
unq_words = []
for i, line in enumerate(sentences):
words = word_tokenize(line)
occ_register.update(words)
texts.append(words)
for w, occ in occ_register.items():
if occ > self.min_occ and w not in special_tokens:
i2w[len(w2i)] = w
w2i[w] = len(w2i)
else:
unq_words.append(w)
assert len(w2i) == len(i2w)
print("Vocablurary of {} keys created, {} words are excluded (occurrence <= {})."
.format(len(w2i), len(unq_words), self.min_occ))
vocab = dict(w2i=w2i, i2w=i2w)
with io.open(os.path.join(self.gen_dir, self.vocab_file), 'wb') as vocab_file:
data = json.dumps(vocab, ensure_ascii=False)
vocab_file.write(data.encode('utf8', 'replace'))
with open(os.path.join(self.gen_dir, 'cub.unique'), 'wb') as unq_file:
pickle.dump(np.array(unq_words), unq_file)
with open(os.path.join(self.gen_dir, 'cub.all'), 'wb') as a_file:
pickle.dump(occ_register, a_file)
self._load_vocab()
tx = lambda data: torch.Tensor(data)
maxSentLen = 32
t_data = CUBSentences('', split='train', transform=tx, max_sequence_length=maxSentLen)
###Output
Data file not found for TRAIN split at cub/oc:3_msl:32/cub.train.s32. Creating new... (this may take a while)
|
assignments/assignment05/InteractEx04.ipynb | ###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
:0: FutureWarning: IPython widgets are experimental and may change in the future.
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
x = np.linspace(-1.0,1.0,size)
    # draw per-point Gaussian noise with standard deviation sigma (variance sigma**2)
    if sigma == 0:
        y = m*x + b
    else:
        y = m*x + b + np.random.normal(0.0, sigma, size)
return x, y
random_line(0.0,0.0,1.0,500)
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
x, y = random_line(m, b, sigma, size)
plt.scatter(x,y,color=color)
plt.xlim(min(x),max(x))
plt.ylim(min(y),max(y))
plt.tick_params(direction='out', width=1, which='both')
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
###Code
interact(plot_random_line, m=(-10.0,10.0,0.1), b=(-5.0,5.0,0.1), sigma=(0.0,5.0,0.01), size=(10,100,10), color=['red','green','blue'])
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
:0: FutureWarning: IPython widgets are experimental and may change in the future.
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
# YOUR CODE HERE
#http://docs.scipy.org/doc/numpy/reference/generated/numpy.random.randn.html#numpy.random.randn
x = np.linspace(-1.0, 1.0, num=size)
y = (m * x) + b + (sigma * np.random.randn(size))
return x, y
print(random_line(2, 3, 2, 20))
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
x, y = random_line(m, b, sigma, size)
ax = plt.subplot(111)
plt.scatter(x, y , color=color)
ticks_out(ax)
plt.xlim((-1.1, 1.1))
plt.ylim((-10.0, 10.0))
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
###Code
# YOUR CODE HERE
interact(plot_random_line, m=(-10.0, 10.0, 0.1), b=(-5.0, 5.0, 0.1), sigma = (0.0, 5.0, 0.01), size = (10, 100, 10), color = ["green", "red", "blue"])
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
_____no_output_____
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
    x = np.linspace(-1.0, 1.0, size)
    if sigma == 0.0:
        y = m*x + b
    else:
        y = m*x + b + np.random.normal(0.0, sigma, size)
    return x, y
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
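A minimal sketch of one way to meet these requirements is shown below, kept under a separate name so the graded template cell that follows stays untouched; it assumes a working `random_line` from the cell above and only the Matplotlib/NumPy imports at the top of the notebook.
###Code
def plot_random_line_sketch(m, b, sigma, size=10, color='red'):
    """Hedged example (not the graded answer): scatter a noisy line per the prompt."""
    x, y = random_line(m, b, sigma, size)
    plt.scatter(x, y, color=color)
    plt.xlim(-1.1, 1.1)
    plt.ylim(-10.0, 10.0)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('Random line with Gaussian noise')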
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
# YOUR CODE HERE
raise NotImplementedError()
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
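One hedged way to wire those widget ranges up, using the `plot_random_line_sketch` helper defined above rather than the graded function:
###Code
interact(plot_random_line_sketch,
         m=(-10.0, 10.0, 0.1), b=(-5.0, 5.0, 0.1),
         sigma=(0.0, 5.0, 0.01), size=(10, 100, 10),
         color=['red', 'green', 'blue'])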
###Code
# YOUR CODE HERE
raise NotImplementedError()
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
_____no_output_____
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
# YOUR CODE HERE
x = np.linspace(-1.0, 1.0, size)
if sigma==0.0:
y = m*x+b
else:
        y = m*x + b + np.random.normal(0, sigma, size)  # scale is the std dev, so pass sigma (not sigma**2)
return x, y
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
# YOUR CODE HERE
x, y = random_line(m, b, sigma, size)
plt.scatter(x, y, color=color)
plt.xlabel('Random X')
plt.ylabel('Random Y')
plt.title('Line scatter')
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
###Code
# YOUR CODE HERE
interact(plot_random_line, m=(-10.0, 10.0, 0.1), b=(-5.0, 5.0, 0.1), sigma=(0.0, 5.0, 0.01), size=(10, 100, 10), color=('red', 'green', 'blue'))
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
_____no_output_____
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
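Since the template below is intentionally left unimplemented, here is a minimal reference sketch under a separate name; it assumes only the NumPy import above and draws the noise with standard deviation `sigma` (so its variance is `sigma**2`), falling back to a noiseless line when `sigma` is zero.
###Code
def random_line_sketch(m, b, sigma, size=10):
    """Hedged example (not the graded answer): y = m*x + b + N(0, sigma**2)."""
    x = np.linspace(-1.0, 1.0, size)
    if sigma == 0.0:
        y = m * x + b
    else:
        y = m * x + b + np.random.normal(0.0, sigma, size)
    return x, y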
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
# YOUR CODE HERE
raise NotImplementedError()
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
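Again a hedged sketch under a separate name, building on `random_line_sketch` from above and leaving the graded template untouched:
###Code
def plot_random_line_sketch(m, b, sigma, size=10, color='red'):
    """Hedged example (not the graded answer): scatter a noisy line per the prompt."""
    x, y = random_line_sketch(m, b, sigma, size)
    plt.scatter(x, y, color=color)
    plt.xlim(-1.1, 1.1)
    plt.ylim(-10.0, 10.0)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('Random line with Gaussian noise')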
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
# YOUR CODE HERE
raise NotImplementedError()
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
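One hedged way to hook those ranges up to the sketch function defined above:
###Code
interact(plot_random_line_sketch,
         m=(-10.0, 10.0, 0.1), b=(-5.0, 5.0, 0.1),
         sigma=(0.0, 5.0, 0.01), size=(10, 100, 10),
         color=['red', 'green', 'blue'])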
###Code
# YOUR CODE HERE
raise NotImplementedError()
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
:0: FutureWarning: IPython widgets are experimental and may change in the future.
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
import scipy.stats
###Output
_____no_output_____
###Markdown
After doing some research on stackoverflow, I learned how to use scipy.stats to generate normally distributed random noise for my y-values.
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
x = np.linspace(-1.0,1.0,size)
noise = scipy.stats.norm.rvs(loc=0, scale=sigma, size=size)
y = m*x + b + noise
return x,y
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
x = np.linspace(-1.0,1.0,size)
noise = scipy.stats.norm.rvs(loc=0, scale=sigma, size=size)
y = m*x + b + noise
f = plt.figure(figsize=(7,5))
plt.scatter(x,y, color='%s' % color, marker='o', alpha = .85)
plt.tick_params(right=False, top=False, axis='both', direction='out')
plt.xlim(-1.1,1.1)
plt.ylim(-10.0,10.0)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Random Line Scatter Data')
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
###Code
interact(plot_random_line, m=(-10.0,10.0,0.1), b=(-5.0,5.0,0.1), sigma=(0.0,5.0,0.01), size=(10,100,10), color={'red':'r',
'green':'g',
'blue':'b'})
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
_____no_output_____
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
x=np.linspace(-1.0,1.0,size)
N=np.empty(size)
if sigma==0.0: #I received some help from classmates here
y=m*x+b
else:
for i in range(size):
            N[i]=np.random.normal(0,sigma)  # std dev is sigma, not sigma**2
y=m*x+b+N
return(x,y)
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
x=np.linspace(-1.0,1.0,size)
N=np.empty(size)
if sigma==0.0:
y=m*x+b
else:
for i in range(size):
            N[i]=np.random.normal(0,sigma)  # std dev is sigma, not sigma**2
y=m*x+b+N
plt.figure(figsize=(9,6))
plt.scatter(x,y,color=color)
plt.xlim(-1.1,1.1)
plt.ylim(-10.0,10.0)
plt.box(False)
plt.grid(True)
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
###Code
interact(plot_random_line,m=(-10.0,10.0,0.1), b=(-5.0,5.0,0.1), sigma=(0.0,5.0,0.01),size=(10,100,10),color=('r','b','g'))
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
:0: FutureWarning: IPython widgets are experimental and may change in the future.
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
x=np.linspace(-1.0,1.0,size)
if sigma==0:
y = m*x + b
else:
        y = m*x + b + np.random.normal(0,sigma,size)  # scale is the std dev, so pass sigma (not sigma**2)
return x,y
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
x,y=random_line(m,b,sigma,size)
plt.scatter(x,y,color=color)#makes the scatter
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
###Code
interact (plot_random_line,m=(-10.0,10.0,0.1),b=(-5.0,5.0,0.1),sigma=(0.0,5.0,.01),size=(10,100,10),color=('red','blue','green'));
#makes the whole thing interactive
assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
:0: FutureWarning: IPython widgets are experimental and may change in the future.
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
# YOUR CODE HERE
x=np.linspace(-1.0,1.0,size)
if sigma==0:
y = m*x + b
else:
        y = m*x + b + np.random.normal(0,sigma,size)  # scale is the std dev, so pass sigma (not sigma**2)
return x,y
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
x,y = random_line(m, b, sigma, size)
plt.scatter(x,y,color=color)
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
###Code
# YOUR CODE HERE
interact(plot_random_line,m=(-10.0,10.0,0.1),b=(-5.0,5.0,0.1),sigma=(0.0,5.0,0.1),size=(10,100,10),color=('red','blue','green'))
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
_____no_output_____
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
x = np.linspace(-1., 1., size)
    n = sigma * np.random.randn(size)  # noise with standard deviation sigma
y = m*x + b + n
return x, y
print (random_line(2, 3, 4, size=10))
#raise NotImplementedError()
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
x, y = random_line(m, b, sigma, size)
"""Plot a random line with slope m, intercept b and size points."""
plt.scatter(x, y, c=color)
#raise NotImplementedError()
plot_random_line(5.0, -1.0, 2.0, 50)
plt.xlim(-1.1,1.1)
plt.ylim(-10.,10.)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
###Code
interact(plot_random_line, m=(-10.,10.,.1), b=(-5.,5.,.1), sigma=(0.,5.,.01), size=(10,100,10), color={'red':'r','green':'g', 'blue':'b'});
#raise NotImplementedError()
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
_____no_output_____
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
x = np.linspace(-1, 1, size)
n = np.random.randn(size)
y = np.zeros(size)
for a in range(size):
y[a] = m*x[a] + b + (sigma * n[a])
# formula for normal sitribution found on SciPy.org
return x, y
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
x, y = random_line(m, b, sigma, size)
plt.scatter(x,y,color=color)
plt.xlim(-1.1,1.1)
plt.ylim(-10.0,10.0)
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
###Code
interact(plot_random_line, m=(-10.0,10.0,0.1),b=(-5.0,5.0,.1),sigma=(0.0,5.0,.01),size=(10,100,10),color = ['red','green','blue']);
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
:0: FutureWarning: IPython widgets are experimental and may change in the future.
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
x = np.linspace(-1.0,1.0,size)
    errors = np.random.normal(0.0, sigma, size)  # per-point Gaussian noise with std sigma
    y = np.asarray(m*x + b + errors)
    print(x)
    print(y)
    return x, y
#?np.random.normal
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
[-1. 0. 1.]
[ 1.86236529 1.86236529 1.86236529]
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
x = np.linspace(-1.0,1.0,size)
    errors = np.random.normal(loc=0, scale=sigma, size=size)  # one draw per point, std sigma
y = m*x + b + errors
plt.scatter(x, y,size, c = color)
plt.title('Awesome Random Line')
plt.xlabel('The x-axis')
plt.ylabel('The y-axis')
plt.grid(True)
plt.xlim(-1.1,1.1)
plt.ylim(-10.0,10.0)
#?plt.xlim
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
###Code
interact(plot_random_line, m=[-10.0,10.0,0.1], b = [-5.0,5.0,0.1], sigma = [0.0,5.0,0.01], size=[10,100,10], color = ['red','green','blue'])
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
_____no_output_____
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
x=np.linspace(-1.0,1.0,size)
if sigma==0.0: #worked with Jack Porter to find N(o,sigma) and to work out sigma 0.0 case also explained to him list comprehension
y=np.array([i*m+b for i in x]) #creates an array of y values
else:
# N=1/(sigma*np.pi**.5)*np.exp(-(x**2)/(2*sigma**2)) #incorrectly thought this would need to be the N(0,sigma)
        y=np.array([i*m+b+np.random.normal(0,sigma) for i in x]) #creates an array of y values for each value of x so that y has gaussian noise with std sigma
return x,y
# plt.plot(x,y,'b' )
# plt.box(False)
# plt.axvline(x=0,linewidth=.2,color='k')
# plt.axhline(y=0,linewidth=.2,color='k')
# ax=plt.gca()
# ax.get_xaxis().tick_bottom()
# ax.get_yaxis().tick_left()
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
x,y=random_line(m,b,sigma,size) #worked with Jack Porter, before neither of us reassigned x,y
plt.plot(x,y,color )
plt.box(False)
plt.axvline(x=0,linewidth=.2,color='k')
plt.axhline(y=0,linewidth=.2,color='k')
plt.xlim(-1.1,1.1)
plt.ylim(-10.0,10.0)
ax=plt.gca()
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
plt.xlabel('x')
plt.ylabel('y')
plt.title('Line w/ Gaussian Noise')
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
###Code
interact(plot_random_line, m=(-10.0,10.0,0.1),b=(-5.0,5.0,0.1),sigma=(0.0,5.0,0.1),size=(10,100,10), color={'red':'r','green':'g','blue':'b'})
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from random import randint
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
:0: FutureWarning: IPython widgets are experimental and may change in the future.
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
x = np.linspace(-1.0, 1.0, size)
if sigma > 0:
N = np.random.normal(0, sigma, size)
else:
N = 0
y = m*x + b + N
return(x, y)
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
plt.xlim(-1.1,1.1)
plt.ylim(-10.0, 10.0)
plt.xlabel("X")
plt.ylabel("Y")
plt.title("y = mx + b + N (0, $\sigma$ ** 2)", fontsize=16)
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
    x, y = random_line(m, b, sigma, size=size)  # pass size through so the slider actually changes the point count
plt.scatter(x,y,color=color)
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
###Code
interact(plot_random_line, m=(-10.0,10.0,0.1), b=(-5.0,5.0,0.1), sigma=(0.0,5.0,0.01), size=(10,100,10), color={"red":'red', "green": 'green', "blue":'blue'});
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
_____no_output_____
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
x = np.linspace(-1.0,1.0, size)
y = np.zeros(size)
if sigma==0.0:
for i in range(size):
y[i] = m*x[i]+b
return x,y
for i in range(size):
y[i] = m*x[i]+b+np.random.normal(0,sigma)
return x,y
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both', top=False)
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both', right=False)
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
x,y = random_line(m, b, sigma, size)
ax = plt.subplot(111)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ticks_out(ax)
plt.scatter(x,y,color=color)
plt.xlim(-1.1,1.1)
plt.ylim(-10.0,10.0)
plt.xlabel("Random Xs")
plt.ylabel("Random Ys")
plt.title("Some Random Points")
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
###Code
interact(plot_random_line, m=(-10.0, 10.0, 0.1), b=(-5.0, 5.0, 0.1), sigma=(0.0, 5.0, .01), size=(10, 100, 10), color=['red', 'green', 'blue'])
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
:0: FutureWarning: IPython widgets are experimental and may change in the future.
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
# YOUR CODE HERE
#raise NotImplementedError()
x = np.linspace(-1.0,1.0,size)
if sigma==0:
y=m*x+b
else:
# np.random.normal(loc, scale, size) draws Gaussian samples; scale is the standard deviation sigma
y = (m*x)+b+np.random.normal(0.0, sigma, size)
return x,y
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
# YOUR CODE HERE
#raise NotImplementedError()
x,y=random_line(m, b, sigma, size)
plt.scatter(x,y,color=color)
plt.xlim(-1.1,1.1)
plt.ylim(-10.0,10.0)
plt.box(False)
plt.xlabel('x')
plt.ylabel('y(x)')
plt.title('Random Line')
plt.tick_params(axis='y', right='off', direction='out')
plt.tick_params(axis='x', top='off', direction='out')
plt.grid(True)
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
###Code
# YOUR CODE HERE
#raise NotImplementedError()
interact(plot_random_line, m=(-10.0,10.0), b=(-5.0,5.0),sigma=(0.0,5.0,0.01),size=(10,100,10), color={'red':'r','blue':'b','green':'g'})
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
_____no_output_____
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
n=np.random.standard_normal?
n=np.random.standard_normal
n=np.random.randn
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
x=np.linspace(-1.0,1.0,size)
N=np.random.normal(0, sigma, size) # one noise draw per point; the scale argument is the standard deviation sigma, not sigma**2
if sigma==0:
y=m*x +b
else:
y=m*x +b+N
return x, y
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
x, y = random_line(m, b, sigma, size) # random_line returns both the x and y arrays
plt.scatter(x, y, color=color) # use scatter with the settable marker color
plt.xlim(-1.1,1.1)
plt.ylim(-10.0,10.0)
plt.vlines(0,-10,10)
plt.hlines(0,-1,1)
plt.box(False)
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
###Code
interact(plot_random_line,m=(-10.0,10.0),b=(-5.0,5.0),sigma=(0,5.0,.01),size=(10,100,10),color=('r','g','b'))
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
:0: FutureWarning: IPython widgets are experimental and may change in the future.
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
x = np.linspace(-1.0, 1.0, size)
y = m * x + b + np.random.normal(0.0, sigma, size) # Gaussian noise with standard deviation sigma; all zeros when sigma=0.0
return x, y
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
# YOUR CODE HERE
raise NotImplementedError()
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
###Code
# YOUR CODE HERE
raise NotImplementedError()
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
_____no_output_____
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
xarray = np.linspace(-1.0,1.0,size)
# Add random noise drawn from N(0, sigma**2); np.random.normal takes the standard deviation sigma
# and returns all zeros when sigma=0.0, so no special case is needed.
yarray = m*xarray + b + np.random.normal(0.0, sigma, size)
return xarray, yarray
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
# YOUR CODE HERE
raise NotImplementedError()
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
###Code
# YOUR CODE HERE
raise NotImplementedError()
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
_____no_output_____
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
x=np.linspace(-1.0,1.0,size)
if sigma==0:
y=m*x+b
else:
y=m*x+b+np.random.normal(0.0,sigma,size) # scale is the standard deviation sigma, not the variance
return x,y
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
ran_line1, ran_line2=random_line(m,b,sigma,size)
f=plt.figure(figsize=(10,6))
plt.scatter(ran_line1,ran_line2,color=color)
plt.xlim(-1.1,1.1)
plt.ylim(-10.0,10.0)
plt.grid(True)
plt.title('Line with Gaussian Noise')
plt.xlabel('X'), plt.ylabel('Y')
plt.tick_params(axis='x',direction='inout')
plt.tick_params(axis='y',direction='inout')
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
###Code
interact(plot_random_line, m=(-10.0,10.0,0.1),b=(-5.0,5.0,0.1),sigma=(0.0,5.0,0.01),size=(10,100,10),color={'red':'r','green':'g','blue':'b'});
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
:0: FutureWarning: IPython widgets are experimental and may change in the future.
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
f=np.linspace(-1.0,1.0, size)
N=np.empty(size)
if sigma == 0.0:
g=m*f+b
else:
for i in range(size):
N[i]=np.random.normal(0,sigma) # standard deviation sigma gives variance sigma**2
g=m*f+b+N
return(f, g)
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
X, Y = random_line(m, b, sigma, size=size) # pass the requested number of points instead of hard-coding 10
plt.scatter(X,Y, c=color)
plt.title('A line with Gaussian Noise')
plt.xlabel('x')
plt.ylabel('y')
plt.xlim(-1.1,1.1)
plt.ylim(-10,10)
plt.tick_params(axis='both', length=0)
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
###Code
interact(plot_random_line, m=(-10.0,10.0,0.1), b=(-5.0,5.0,.1), sigma=(0.0,5.0,.01), size=(10,100,10), color=('red', 'green', 'blue'))
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____
###Markdown
Interact Exercise 4 Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
###Output
:0: FutureWarning: IPython widgets are experimental and may change in the future.
###Markdown
Line with Gaussian noise Write a function named `random_line` that creates `x` and `y` data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:$$y = m x + b + N(0,\sigma^2)$$Be careful about the `sigma=0.0` case.
###Code
def random_line(m, b, sigma, size=10):
"""Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
Parameters
----------
m : float
The slope of the line.
b : float
The y-intercept of the line.
sigma : float
The standard deviation of the y direction normal distribution noise.
size : int
The number of points to create for the line.
Returns
-------
x : array of floats
The array of x values for the line with `size` points.
y : array of floats
The array of y values for the lines with `size` points.
"""
# YOUR CODE HERE
x=np.linspace(-1,1,size)
y=m*x+b + sigma*np.random.randn(size)
return x,y
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
###Output
_____no_output_____
###Markdown
Write a function named `plot_random_line` that takes the same arguments as `random_line` and creates a random line using `random_line` and then plots the `x` and `y` points using Matplotlib's `scatter` function:* Make the marker color settable through a `color` keyword argument with a default of `red`.* Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.* Customize your plot to make it effective and beautiful.
###Code
def ticks_out(ax):
"""Move the ticks to the outside of the box."""
ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
"""Plot a random line with slope m, intercept b and size points."""
# YOUR CODE HERE
f=plt.figure(figsize=(9,6))
x, y = random_line(m, b, sigma, size) # reuse random_line rather than re-implementing the noise
plt.scatter(x, y, color=color) # honor the color keyword argument
plt.ylim(-10,10)
plt.xlim(-1.1,1.1)
plt.title("Plot of Line With Set Slope and Y-Intercept, with Random Noise Added Along Slope")
plt.xlabel("X-Axis")
plt.ylabel("Y-Axis")
plt.tick_params(direction='out')
plt.tight_layout() # call the function; the bare attribute reference does nothing
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
###Output
_____no_output_____
###Markdown
Use `interact` to explore the `plot_random_line` function using:* `m`: a float valued slider from `-10.0` to `10.0` with steps of `0.1`.* `b`: a float valued slider from `-5.0` to `5.0` with steps of `0.1`.* `sigma`: a float valued slider from `0.0` to `5.0` with steps of `0.01`.* `size`: an int valued slider from `10` to `100` with steps of `10`.* `color`: a dropdown with options for `red`, `green` and `blue`.
###Code
# YOUR CODE HERE
interact(plot_random_line, m=(-10.0,10.0,0.1), b=(-5.0,5.0,0.1), sigma=(0.0,5.0,0.01), size=(10,100,10), color={'red':'red','green':'green','blue':'blue'})
#### assert True # use this cell to grade the plot_random_line interact
###Output
_____no_output_____ |
Projects/P2_Image_Captioning/1_Preliminaries.ipynb | ###Markdown
Computer Vision Nanodegree Project: Image Captioning---In this notebook, you will learn how to load and pre-process data from the [COCO dataset](http://cocodataset.org/home). You will also design a CNN-RNN model for automatically generating image captions.Note that **any amendments that you make to this notebook will not be graded**. However, you will use the instructions provided in **Step 3** and **Step 4** to implement your own CNN encoder and RNN decoder by making amendments to the **models.py** file provided as part of this project. Your **models.py** file **will be graded**. Feel free to use the links below to navigate the notebook:- [Step 1](step1): Explore the Data Loader- [Step 2](step2): Use the Data Loader to Obtain Batches- [Step 3](step3): Experiment with the CNN Encoder- [Step 4](step4): Implement the RNN Decoder Step 1: Explore the Data LoaderWe have already written a [data loader](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) that you can use to load the COCO dataset in batches. In the code cell below, you will initialize the data loader by using the `get_loader` function in **data_loader.py**. > For this project, you are not permitted to change the **data_loader.py** file, which must be used as-is.The `get_loader` function takes as input a number of arguments that can be explored in **data_loader.py**. Take the time to explore these arguments now by opening **data_loader.py** in a new window. Most of the arguments must be left at their default values, and you are only allowed to amend the values of the arguments below:1. **`transform`** - an [image transform](http://pytorch.org/docs/master/torchvision/transforms.html) specifying how to pre-process the images and convert them to PyTorch tensors before using them as input to the CNN encoder. For now, you are encouraged to keep the transform as provided in `transform_train`. You will have the opportunity later to choose your own image transform to pre-process the COCO images.2. **`mode`** - one of `'train'` (loads the training data in batches) or `'test'` (for the test data). We will say that the data loader is in training or test mode, respectively. While following the instructions in this notebook, please keep the data loader in training mode by setting `mode='train'`.3. **`batch_size`** - determines the batch size. When training the model, this is number of image-caption pairs used to amend the model weights in each training step.4. **`vocab_threshold`** - the total number of times that a word must appear in the in the training captions before it is used as part of the vocabulary. Words that have fewer than `vocab_threshold` occurrences in the training captions are considered unknown words. 5. **`vocab_from_file`** - a Boolean that decides whether to load the vocabulary from file. We will describe the `vocab_threshold` and `vocab_from_file` arguments in more detail soon. For now, run the code cell below. Be patient - it may take a couple of minutes to run!
###Code
import sys
sys.path.append('/opt/cocoapi/PythonAPI')
from pycocotools.coco import COCO
!pip install nltk
import nltk
nltk.download('punkt')
from data_loader import get_loader
from torchvision import transforms
# Define a transform to pre-process the training images.
transform_train = transforms.Compose([
transforms.Resize(256), # smaller edge of image resized to 256
transforms.RandomCrop(224), # get 224x224 crop from random location
transforms.RandomHorizontalFlip(), # horizontally flip image with probability=0.5
transforms.ToTensor(), # convert the PIL Image to a tensor
transforms.Normalize((0.485, 0.456, 0.406), # normalize image for pre-trained model
(0.229, 0.224, 0.225))])
# Set the minimum word count threshold.
vocab_threshold = 5
# Specify the batch size.
batch_size = 10
# Obtain the data loader.
data_loader = get_loader(transform=transform_train,
mode='train',
batch_size=batch_size,
vocab_threshold=vocab_threshold,
vocab_from_file=False)
###Output
Requirement already satisfied: nltk in /opt/conda/lib/python3.6/site-packages (3.2.5)
Requirement already satisfied: six in /opt/conda/lib/python3.6/site-packages (from nltk) (1.11.0)
[nltk_data] Downloading package punkt to /root/nltk_data...
[nltk_data] Unzipping tokenizers/punkt.zip.
loading annotations into memory...
Done (t=1.03s)
creating index...
index created!
[0/414113] Tokenizing captions...
[100000/414113] Tokenizing captions...
[200000/414113] Tokenizing captions...
[300000/414113] Tokenizing captions...
[400000/414113] Tokenizing captions...
loading annotations into memory...
Done (t=0.96s)
creating index...
###Markdown
When you ran the code cell above, the data loader was stored in the variable `data_loader`. You can access the corresponding dataset as `data_loader.dataset`. This dataset is an instance of the `CoCoDataset` class in **data_loader.py**. If you are unfamiliar with data loaders and datasets, you are encouraged to review [this PyTorch tutorial](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html). Exploring the `__getitem__` MethodThe `__getitem__` method in the `CoCoDataset` class determines how an image-caption pair is pre-processed before being incorporated into a batch. This is true for all `Dataset` classes in PyTorch; if this is unfamiliar to you, please review [the tutorial linked above](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html). When the data loader is in training mode, this method begins by first obtaining the filename (`path`) of a training image and its corresponding caption (`caption`). Image Pre-Processing Image pre-processing is relatively straightforward (from the `__getitem__` method in the `CoCoDataset` class):```python Convert image to tensor and pre-process using transformimage = Image.open(os.path.join(self.img_folder, path)).convert('RGB')image = self.transform(image)```After loading the image in the training folder with name `path`, the image is pre-processed using the same transform (`transform_train`) that was supplied when instantiating the data loader. Caption Pre-Processing The captions also need to be pre-processed and prepped for training. In this example, for generating captions, we are aiming to create a model that predicts the next token of a sentence from previous tokens, so we turn the caption associated with any image into a list of tokenized words, before casting it to a PyTorch tensor that we can use to train the network.To understand in more detail how COCO captions are pre-processed, we'll first need to take a look at the `vocab` instance variable of the `CoCoDataset` class. The code snippet below is pulled from the `__init__` method of the `CoCoDataset` class:```pythondef __init__(self, transform, mode, batch_size, vocab_threshold, vocab_file, start_word, end_word, unk_word, annotations_file, vocab_from_file, img_folder): ... self.vocab = Vocabulary(vocab_threshold, vocab_file, start_word, end_word, unk_word, annotations_file, vocab_from_file) ...```From the code snippet above, you can see that `data_loader.dataset.vocab` is an instance of the `Vocabulary` class from **vocabulary.py**. Take the time now to verify this for yourself by looking at the full code in **data_loader.py**. We use this instance to pre-process the COCO captions (from the `__getitem__` method in the `CoCoDataset` class):```python Convert caption to tensor of word ids.tokens = nltk.tokenize.word_tokenize(str(caption).lower()) line 1caption = [] line 2caption.append(self.vocab(self.vocab.start_word)) line 3caption.extend([self.vocab(token) for token in tokens]) line 4caption.append(self.vocab(self.vocab.end_word)) line 5caption = torch.Tensor(caption).long() line 6```As you will see soon, this code converts any string-valued caption to a list of integers, before casting it to a PyTorch tensor. To see how this code works, we'll apply it to the sample caption in the next code cell.
###Code
sample_caption = 'A person doing a trick on a rail while riding a skateboard.'
###Output
_____no_output_____
###Markdown
In **`line 1`** of the code snippet, every letter in the caption is converted to lowercase, and the [`nltk.tokenize.word_tokenize`](http://www.nltk.org/) function is used to obtain a list of string-valued tokens. Run the next code cell to visualize the effect on `sample_caption`.
###Code
import nltk
sample_tokens = nltk.tokenize.word_tokenize(str(sample_caption).lower())
print(sample_tokens)
###Output
['a', 'person', 'doing', 'a', 'trick', 'on', 'a', 'rail', 'while', 'riding', 'a', 'skateboard', '.']
###Markdown
In **`line 2`** and **`line 3`** we initialize an empty list and append an integer to mark the start of a caption. The [paper](https://arxiv.org/pdf/1411.4555.pdf) that you are encouraged to implement uses a special start word (and a special end word, which we'll examine below) to mark the beginning (and end) of a caption. This special start word (`"<start>"`) is decided when instantiating the data loader and is passed as a parameter (`start_word`). You are **required** to keep this parameter at its default value (`start_word="<start>"`). As you will see below, the integer `0` is always used to mark the start of a caption.
###Code
sample_caption = []
start_word = data_loader.dataset.vocab.start_word
print('Special start word:', start_word)
sample_caption.append(data_loader.dataset.vocab(start_word))
print(sample_caption)
###Output
Special start word: <start>
[0]
###Markdown
In **`line 4`**, we continue the list by adding integers that correspond to each of the tokens in the caption.
###Code
sample_caption.extend([data_loader.dataset.vocab(token) for token in sample_tokens])
print(sample_caption)
###Output
[0, 3, 98, 754, 3, 396, 39, 3, 1009, 207, 139, 3, 753, 18]
###Markdown
In **`line 5`**, we append a final integer to mark the end of the caption. Identical to the case of the special start word (above), the special end word (`"<end>"`) is decided when instantiating the data loader and is passed as a parameter (`end_word`). You are **required** to keep this parameter at its default value (`end_word="<end>"`). As you will see below, the integer `1` is always used to mark the end of a caption.
###Code
end_word = data_loader.dataset.vocab.end_word
print('Special end word:', end_word)
sample_caption.append(data_loader.dataset.vocab(end_word))
print(sample_caption)
###Output
Special end word: <end>
[0, 3, 98, 754, 3, 396, 39, 3, 1009, 207, 139, 3, 753, 18, 1]
###Markdown
Finally, in **`line 6`**, we convert the list of integers to a PyTorch tensor and cast it to [long type](http://pytorch.org/docs/master/tensors.html#torch.Tensor.long). You can read more about the different types of PyTorch tensors on the [website](http://pytorch.org/docs/master/tensors.html).
###Code
import torch
sample_caption = torch.Tensor(sample_caption).long()
print(sample_caption)
###Output
tensor([ 0, 3, 98, 754, 3, 396, 39, 3, 1009,
207, 139, 3, 753, 18, 1])
###Markdown
And that's it! In summary, any caption is converted to a list of tokens, with _special_ start and end tokens marking the beginning and end of the sentence:```[, 'a', 'person', 'doing', 'a', 'trick', 'while', 'riding', 'a', 'skateboard', '.', ]```This list of tokens is then turned into a list of integers, where every distinct word in the vocabulary has an associated integer value:```[0, 3, 98, 754, 3, 396, 207, 139, 3, 753, 18, 1]```Finally, this list is converted to a PyTorch tensor. All of the captions in the COCO dataset are pre-processed using this same procedure from **`lines 1-6`** described above. As you saw, in order to convert a token to its corresponding integer, we call `data_loader.dataset.vocab` as a function. The details of how this call works can be explored in the `__call__` method in the `Vocabulary` class in **vocabulary.py**. ```pythondef __call__(self, word): if not word in self.word2idx: return self.word2idx[self.unk_word] return self.word2idx[word]```The `word2idx` instance variable is a Python [dictionary](https://docs.python.org/3/tutorial/datastructures.htmldictionaries) that is indexed by string-valued keys (mostly tokens obtained from training captions). For each key, the corresponding value is the integer that the token is mapped to in the pre-processing step.Use the code cell below to view a subset of this dictionary.
###Code
# Preview the word2idx dictionary.
dict(list(data_loader.dataset.vocab.word2idx.items())[:10])
###Output
_____no_output_____
###Markdown
We also print the total number of keys.
###Code
# Print the total number of keys in the word2idx dictionary.
print('Total number of tokens in vocabulary:', len(data_loader.dataset.vocab))
###Output
Total number of tokens in vocabulary: 8855
###Markdown
As you will see if you examine the code in **vocabulary.py**, the `word2idx` dictionary is created by looping over the captions in the training dataset. If a token appears no less than `vocab_threshold` times in the training set, then it is added as a key to the dictionary and assigned a corresponding unique integer. You will have the option later to amend the `vocab_threshold` argument when instantiating your data loader. Note that in general, **smaller** values for `vocab_threshold` yield a **larger** number of tokens in the vocabulary. You are encouraged to check this for yourself in the next code cell by decreasing the value of `vocab_threshold` before creating a new data loader.
###Code
# Modify the minimum word count threshold.
vocab_threshold = 4
# Obtain the data loader.
data_loader = get_loader(transform=transform_train,
mode='train',
batch_size=batch_size,
vocab_threshold=vocab_threshold,
vocab_from_file=False)
# Print the total number of keys in the word2idx dictionary.
print('Total number of tokens in vocabulary:', len(data_loader.dataset.vocab))
###Output
Total number of tokens in vocabulary: 9955
###Markdown
There are also a few special keys in the `word2idx` dictionary. You are already familiar with the special start word (`"<start>"`) and special end word (`"<end>"`). There is one more special token, corresponding to unknown words (`"<unk>"`). All tokens that don't appear anywhere in the `word2idx` dictionary are considered unknown words. In the pre-processing step, any unknown tokens are mapped to the integer `2`.
###Code
unk_word = data_loader.dataset.vocab.unk_word
print('Special unknown word:', unk_word)
print('All unknown words are mapped to this integer:', data_loader.dataset.vocab(unk_word))
###Output
Special unknown word: <unk>
All unknown words are mapped to this integer: 2
###Markdown
Check this for yourself below, by pre-processing the provided nonsense words that never appear in the training captions.
###Code
print(data_loader.dataset.vocab('jfkafejw'))
print(data_loader.dataset.vocab('ieowoqjf'))
###Output
2
2
###Markdown
The final thing to mention is the `vocab_from_file` argument that is supplied when creating a data loader. To understand this argument, note that when you create a new data loader, the vocabulary (`data_loader.dataset.vocab`) is saved as a [pickle](https://docs.python.org/3/library/pickle.html) file in the project folder, with filename `vocab.pkl`.If you are still tweaking the value of the `vocab_threshold` argument, you **must** set `vocab_from_file=False` to have your changes take effect. But once you are happy with the value that you have chosen for the `vocab_threshold` argument, you need only run the data loader *one more time* with your chosen `vocab_threshold` to save the new vocabulary to file. Then, you can henceforth set `vocab_from_file=True` to load the vocabulary from file and speed the instantiation of the data loader. Note that building the vocabulary from scratch is the most time-consuming part of instantiating the data loader, and so you are strongly encouraged to set `vocab_from_file=True` as soon as you are able.Note that if `vocab_from_file=True`, then any supplied argument for `vocab_threshold` when instantiating the data loader is completely ignored.
###Code
# Obtain the data loader (from file). Note that it runs much faster than before!
data_loader = get_loader(transform=transform_train,
mode='train',
batch_size=batch_size,
vocab_from_file=True)
###Output
Vocabulary successfully loaded from vocab.pkl file!
loading annotations into memory...
###Markdown
In the next section, you will learn how to use the data loader to obtain batches of training data. Step 2: Use the Data Loader to Obtain BatchesThe captions in the dataset vary greatly in length. You can see this by examining `data_loader.dataset.caption_lengths`, a Python list with one entry for each training caption (where the value stores the length of the corresponding caption). In the code cell below, we use this list to print the total number of captions in the training data with each length. As you will see below, the majority of captions have length 10. Likewise, very short and very long captions are quite rare.
###Code
from collections import Counter
# Tally the total number of training captions with each length.
counter = Counter(data_loader.dataset.caption_lengths)
lengths = sorted(counter.items(), key=lambda pair: pair[1], reverse=True)
for value, count in lengths:
print('value: %2d --- count: %5d' % (value, count))
###Output
value: 10 --- count: 86334
value: 11 --- count: 79948
value: 9 --- count: 71934
value: 12 --- count: 57637
value: 13 --- count: 37645
value: 14 --- count: 22335
value: 8 --- count: 20771
value: 15 --- count: 12841
value: 16 --- count: 7729
value: 17 --- count: 4842
value: 18 --- count: 3104
value: 19 --- count: 2014
value: 7 --- count: 1597
value: 20 --- count: 1451
value: 21 --- count: 999
value: 22 --- count: 683
value: 23 --- count: 534
value: 24 --- count: 383
value: 25 --- count: 277
value: 26 --- count: 215
value: 27 --- count: 159
value: 28 --- count: 115
value: 29 --- count: 86
value: 30 --- count: 58
value: 31 --- count: 49
value: 32 --- count: 44
value: 34 --- count: 39
value: 37 --- count: 32
value: 33 --- count: 31
value: 35 --- count: 31
value: 36 --- count: 26
value: 38 --- count: 18
value: 39 --- count: 18
value: 43 --- count: 16
value: 44 --- count: 16
value: 48 --- count: 12
value: 45 --- count: 11
value: 42 --- count: 10
value: 40 --- count: 9
value: 49 --- count: 9
value: 46 --- count: 9
value: 47 --- count: 7
value: 50 --- count: 6
value: 51 --- count: 6
value: 41 --- count: 6
value: 52 --- count: 5
value: 54 --- count: 3
value: 56 --- count: 2
value: 6 --- count: 2
value: 53 --- count: 2
value: 55 --- count: 2
value: 57 --- count: 1
###Markdown
To generate batches of training data, we begin by first sampling a caption length (where the probability that any length is drawn is proportional to the number of captions with that length in the dataset). Then, we retrieve a batch of size `batch_size` of image-caption pairs, where all captions have the sampled length. This approach for assembling batches matches the procedure in [this paper](https://arxiv.org/pdf/1502.03044.pdf) and has been shown to be computationally efficient without degrading performance.Run the code cell below to generate a batch. The `get_train_indices` method in the `CoCoDataset` class first samples a caption length, and then samples `batch_size` indices corresponding to training data points with captions of that length. These indices are stored below in `indices`.These indices are supplied to the data loader, which then is used to retrieve the corresponding data points. The pre-processed images and captions in the batch are stored in `images` and `captions`.
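As a rough illustration of that procedure (this is *not* the actual `get_train_indices` implementation in **data_loader.py**, just a NumPy sketch of the idea), sampling a caption length in proportion to how often it occurs and then drawing matching indices could look like this:
```python
import numpy as np

# Sketch only: the real logic lives in data_loader.py's get_train_indices().
all_lengths = np.array(data_loader.dataset.caption_lengths)
sampled_length = np.random.choice(all_lengths)            # frequent lengths are drawn more often
candidates = np.where(all_lengths == sampled_length)[0]   # every caption with that length
indices = list(np.random.choice(candidates, size=10))     # 10 = the batch_size used in this notebook
```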
###Code
import numpy as np
import torch.utils.data as data
# Randomly sample a caption length, and sample indices with that length.
indices = data_loader.dataset.get_train_indices()
print('sampled indices:', indices)
# Create and assign a batch sampler to retrieve a batch with the sampled indices.
new_sampler = data.sampler.SubsetRandomSampler(indices=indices)
data_loader.batch_sampler.sampler = new_sampler
# Obtain the batch.
images, captions = next(iter(data_loader))
print('images.shape:', images.shape)
print('captions.shape:', captions.shape)
# (Optional) Uncomment the lines of code below to print the pre-processed images and captions.
# print('images:', images)
# print('captions:', captions)
###Output
sampled indices: [301725, 139727, 201598, 11672, 197396, 112646, 36753, 182462, 24467, 94460]
images.shape: torch.Size([10, 3, 224, 224])
captions.shape: torch.Size([10, 19])
###Markdown
Each time you run the code cell above, a different caption length is sampled, and a different batch of training data is returned. Run the code cell multiple times to check this out!You will train your model in the next notebook in this sequence (**2_Training.ipynb**). This code for generating training batches will be provided to you.> Before moving to the next notebook in the sequence (**2_Training.ipynb**), you are strongly encouraged to take the time to become very familiar with the code in **data_loader.py** and **vocabulary.py**. **Step 1** and **Step 2** of this notebook are designed to help facilitate a basic introduction and guide your understanding. However, our description is not exhaustive, and it is up to you (as part of the project) to learn how to best utilize these files to complete the project. __You should NOT amend any of the code in either *data_loader.py* or *vocabulary.py*.__In the next steps, we focus on learning how to specify a CNN-RNN architecture in PyTorch, towards the goal of image captioning. Step 3: Experiment with the CNN EncoderRun the code cell below to import `EncoderCNN` and `DecoderRNN` from **model.py**.
###Code
# Watch for any changes in model.py, and re-load it automatically.
%load_ext autoreload
%autoreload 2
# Import EncoderCNN and DecoderRNN.
from model import EncoderCNN, DecoderRNN
###Output
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
###Markdown
In the next code cell we define a `device` that you will use to move PyTorch tensors to the GPU (if CUDA is available). Run this code cell before continuing.
###Code
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
###Output
_____no_output_____
###Markdown
Run the code cell below to instantiate the CNN encoder in `encoder`. The pre-processed images from the batch in **Step 2** of this notebook are then passed through the encoder, and the output is stored in `features`.
###Code
# Specify the dimensionality of the image embedding.
embed_size = 256
#-#-#-# Do NOT modify the code below this line. #-#-#-#
# Initialize the encoder. (Optional: Add additional arguments if necessary.)
encoder = EncoderCNN(embed_size)
# Move the encoder to GPU if CUDA is available.
encoder.to(device)
# Move last batch of images (from Step 2) to GPU if CUDA is available.
images = images.to(device)
# Pass the images through the encoder.
features = encoder(images)
print('type(features):', type(features))
print('features.shape:', features.shape)
# Check that your encoder satisfies some requirements of the project! :D
assert type(features)==torch.Tensor, "Encoder output needs to be a PyTorch Tensor."
assert (features.shape[0]==batch_size) & (features.shape[1]==embed_size), "The shape of the encoder output is incorrect."
###Output
type(features): <class 'torch.Tensor'>
features.shape: torch.Size([10, 256])
###Markdown
The encoder that we provide to you uses the pre-trained ResNet-50 architecture (with the final fully-connected layer removed) to extract features from a batch of pre-processed images. The output is then flattened to a vector, before being passed through a `Linear` layer to transform the feature vector to have the same size as the word embedding.You are welcome (and encouraged) to amend the encoder in **model.py**, to experiment with other architectures. In particular, consider using a [different pre-trained model architecture](http://pytorch.org/docs/master/torchvision/models.html). You may also like to [add batch normalization](http://pytorch.org/docs/master/nn.htmlnormalization-layers). > You are **not** required to change anything about the encoder.For this project, you **must** incorporate a pre-trained CNN into your encoder. Your `EncoderCNN` class must take `embed_size` as an input argument, which will also correspond to the dimensionality of the input to the RNN decoder that you will implement in Step 4. When you train your model in the next notebook in this sequence (**2_Training.ipynb**), you are welcome to tweak the value of `embed_size`.If you decide to modify the `EncoderCNN` class, save **model.py** and re-execute the code cell above. If the code cell returns an assertion error, then please follow the instructions to modify your code before proceeding. The assert statements ensure that `features` is a PyTorch tensor with shape `[batch_size, embed_size]`. Step 4: Implement the RNN DecoderBefore executing the next code cell, you must write `__init__` and `forward` methods in the `DecoderRNN` class in **model.py**. (Do **not** write the `sample` method yet - you will work with this method when you reach **3_Inference.ipynb**.)> The `__init__` and `forward` methods in the `DecoderRNN` class are the only things that you **need** to modify as part of this notebook. You will write more implementations in the notebooks that appear later in the sequence.Your decoder will be an instance of the `DecoderRNN` class and must accept as input:- the PyTorch tensor `features` containing the embedded image features (outputted in Step 3, when the last batch of images from Step 2 was passed through `encoder`), along with- a PyTorch tensor corresponding to the last batch of captions (`captions`) from Step 2.Note that the way we have written the data loader should simplify your code a bit. In particular, every training batch will contain pre-processed captions where all have the same length (`captions.shape[1]`), so **you do not need to worry about padding**. > While you are encouraged to implement the decoder described in [this paper](https://arxiv.org/pdf/1411.4555.pdf), you are welcome to implement any architecture of your choosing, as long as it uses at least one RNN layer, with hidden dimension `hidden_size`. Although you will test the decoder using the last batch that is currently stored in the notebook, your decoder should be written to accept an arbitrary batch (of embedded image features and pre-processed captions [where all captions have the same length]) as input. In the code cell below, `outputs` should be a PyTorch tensor with size `[batch_size, captions.shape[1], vocab_size]`. Your output should be designed such that `outputs[i,j,k]` contains the model's predicted score, indicating how likely the `j`-th token in the `i`-th caption in the batch is the `k`-th token in the vocabulary. 
In the next notebook of the sequence (**2_Training.ipynb**), we provide code to supply these scores to the [`torch.nn.CrossEntropyLoss`](http://pytorch.org/docs/master/nn.html#torch.nn.CrossEntropyLoss) loss criterion in PyTorch.
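For orientation only, here is a minimal sketch of what such an encoder/decoder pair *could* look like. It assumes `torchvision`'s pre-trained ResNet-50 for the encoder (as described above) and a single-layer `nn.LSTM` plus `nn.Embedding` for the decoder; the choice of LSTM, the `num_layers` argument, and the trick of dropping the final caption token before prepending the image feature are illustrative assumptions, not the contents of the provided **model.py**.
```python
import torch
import torch.nn as nn
import torchvision.models as models


class EncoderCNN(nn.Module):
    def __init__(self, embed_size):
        super(EncoderCNN, self).__init__()
        resnet = models.resnet50(pretrained=True)
        for param in resnet.parameters():
            param.requires_grad_(False)          # freeze the pre-trained backbone
        modules = list(resnet.children())[:-1]   # drop the final fully-connected layer
        self.resnet = nn.Sequential(*modules)
        self.embed = nn.Linear(resnet.fc.in_features, embed_size)

    def forward(self, images):
        features = self.resnet(images)                   # [batch, 2048, 1, 1]
        features = features.view(features.size(0), -1)   # flatten to [batch, 2048]
        return self.embed(features)                      # [batch, embed_size]


class DecoderRNN(nn.Module):
    def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
        super(DecoderRNN, self).__init__()
        self.word_embed = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, vocab_size)

    def forward(self, features, captions):
        # Drop the last caption token before embedding so that, once the image
        # feature is prepended as the first LSTM input, the output sequence
        # length equals captions.shape[1].
        embeddings = self.word_embed(captions[:, :-1])                  # [batch, T-1, embed_size]
        inputs = torch.cat((features.unsqueeze(1), embeddings), dim=1)  # [batch, T, embed_size]
        hiddens, _ = self.lstm(inputs)                                  # [batch, T, hidden_size]
        return self.fc(hiddens)                                         # [batch, T, vocab_size]
```
With this shape convention, `outputs[i, j, k]` is the score for the `k`-th vocabulary token at position `j` of caption `i`, which is exactly what the shape assertion in the next code cell checks.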
###Code
# Specify the number of features in the hidden state of the RNN decoder.
hidden_size = 512
#-#-#-# Do NOT modify the code below this line. #-#-#-#
# Store the size of the vocabulary.
vocab_size = len(data_loader.dataset.vocab)
# Initialize the decoder.
decoder = DecoderRNN(embed_size, hidden_size, vocab_size)
# Move the decoder to GPU if CUDA is available.
decoder.to(device)
# Move last batch of captions (from Step 1) to GPU if CUDA is available
captions = captions.to(device)
# Pass the encoder output and captions through the decoder.
outputs = decoder(features, captions)
print('type(outputs):', type(outputs))
print('outputs.shape:', outputs.shape)
# Check that your decoder satisfies some requirements of the project! :D
assert type(outputs)==torch.Tensor, "Decoder output needs to be a PyTorch Tensor."
assert (outputs.shape[0]==batch_size) & (outputs.shape[1]==captions.shape[1]) & (outputs.shape[2]==vocab_size), "The shape of the decoder output is incorrect."
###Output
type(outputs): <class 'torch.Tensor'>
outputs.shape: torch.Size([10, 19, 9955])
|
appendix/teach_me_qiskit_2018/hadamard_action/Approach 3.ipynb | ###Markdown
Hadamard Action: Approach 3 Jupyter Notebook 3/3 for the *Teach Me QISKIT* Tutorial Competition - Connor Fieweger Starting with QISKit: In order to run this notebook, one must first download the Quantum Information Software Kit (QISKit) library from IBM at https://github.com/QISKit/qiskit-sdk-py (as well as the supplementary libraries numpy and SciPy and an up-to-date version of python). One ought to also sign up for an IBM Q Experience account at https://quantumexperience.ng.bluemix.net/qx/experience in order to generate an APIToken (go to My Account > Advanced) for accessing the backends provided by IBM. The account sign-up and APIToken specification are not actually necessary, since this notebook assumes use of the local qasm simulator for the sake of simplicity, but they are recommended, as seeing your code executed on an actual quantum device in some other location is really quite amazing and one of the unique capabilities of the QISKit library.
###Code
# import necessary libraries
import numpy as np
from pprint import pprint
from qiskit import QuantumProgram
from qiskit.tools.visualization import plot_histogram
#import Qconfig
# When working worth external backends (more on this below),
# be sure that the working directory has a
# Qconfig.py file for importing your APIToken from
# your IBM Q Experience account.
# An example file has been provided, so for working
# in this notebook you can simply set
# the variable values to your credentials and rename
# this file as 'Qconfig.py'
###Output
_____no_output_____
###Markdown
The final approach to showing equivalence of the presented circuit diagrams is to implement the QISKit library in order to compute and measure the final state. This is done by creating instances of classes in python that represent a circuit with a given set of registers and then using class methods on these circuits to make the class equivalent of gate operations on the qubits. The operations are then executed using a method that calls a backend, i.e. some computing machine invisible to the programmer, to perform the computation and then stores the results. The backend can either be a classical simulator that attempts to mimic the behavior of a quantum circuit as best it can or an actual quantum computer chip in the dilution refrigerators at the Watson research center. In reading this notebook, one ought to dig around in the files for QISKit to find the relevant class and method definitions -- the particularly relevant ones in this notebook will be QuantumProgram, QuantumCircuit, and the Register family (ClassicalRegister, QuantumRegister, Register), so take some time now to read through these files. Circuit i) For i), the initial state of the input is represented by the tensor product of the two input qubits in the initial register. This is given by:$$|\Psi> = |\psi_1> \otimes |\psi_2> = |\psi_2\psi_1>$$where each |$\psi$> can be either |0> or |1>. *Note the convention change in the order of qubits in the product state representation on the right -- see appendix notebook under 'Reading a circuit diagram' for why there is a discrepancy here. This notebook will follow the above for consistency with IBM's documentation, which follows the same convention: (https://quantumexperience.ng.bluemix.net/qx/tutorial?sectionId=beginners-guide&page=006-Multi-Qubit_Gates~2F001-Multi-Qubit_Gates)*
###Code
# This initial state register
# can be realized in python by creating an instance of the
# QISKit QuantumProgram Class with a quantum register of 2 qubits
# and 2 classical ancilla bits for measuring the states
i = QuantumProgram()
n = 2
i_q = i.create_quantum_register("i_q", n)
i_c = i.create_classical_register("i_c", n)
#i.set_api(Qconfig.APItoken, Qconfig.config['url']) # set the APIToken and API url
i.available_backends() #check backends - if you've set up your APIToken properly you
#should be able to see the quantum chips and simulators at IBM
###Output
_____no_output_____
###Markdown
https://github.com/QISKit/ibmqx-backend-information/tree/master/backends/ -- follow this url for background on how the quantum chips/simulators work.*Note: when working with the quantum chip backends, especially when applying CNOTs, be sure to check documentation on the allowed two-qubit gate configurations.*
###Code
for backend in i.available_backends(): #check backend status
print(backend)
pprint(i.get_backend_status(backend))
###Output
local_qiskit_simulator
{'available': True}
local_unitary_simulator
{'available': True}
local_clifford_simulator
{'available': True}
local_qasm_simulator
{'available': True}
###Markdown
Throughout the notebook, we'll need to evaluate the final state of a given circuit and display the results, so let's define a function for this:
###Code
def execute_and_plot(qp, circuits, backend = "local_qasm_simulator"):
"""Executes circuits and plots the final
    state histograms for each circuit.
Adapted from 'execute_and_plot' function
in the beginners_guide_composer_examples
notebook provided in IBM's QISKit
tutorial library on GitHub.
Args:
qp: QuantumProgram containing the circuits
circuits (list): list of circuits to execute
backend (string): allows for specifying the backend
to execute on. Defaults to local qasm simulator
downloaded with QISKit library, but can be specified
to run on an actual quantum chip by using the string
names of the available backends at IBM.
"""
# Store the results of the circuit implementation
# using the .execute() method
results = qp.execute(circuits, backend = backend)
for circuit in circuits:
plot_histogram(results.get_counts(circuit)) # .get_counts()
# method returns a dictionary that maps each possible
# final state to the number of instances of
# said state over n evaluations
# (n defaults to 1024 for local qasm simulator),
# where multiple evaluations are a necessity since
# quantum computation outputs are statistically
# informed
###Output
_____no_output_____
###Markdown
Note: when working with the quantum chip backends, especially when applying CNOTs, be sure to check documentation on the allowed two-qubit gate configurations at: https://github.com/QISKit/ibmqx-backend-information/tree/master/backends/ . This program assumes use of the local qasm simulator. Creating a QuantumCircuit instance and storing it in our QuantumProgram allows us to build up a set of operations to apply to this circuit through class methods and then execute this set of operations, so let's do this for each possible input state and read out the end result.
###Code
# Initialize circuit:
cnot_i_00 = i.create_circuit("cnot_i_00", [i_q], [i_c])
# Note: qubits are assumed by QISKit
# to be initialized in the |0> state
# Apply gates according to diagram:
cnot_i_00.cx(i_q[0], i_q[1]) # Apply CNOT on line 2 controlled by line 1
# Measure final state:
cnot_i_00.measure(i_q[0], i_c[0]) # Write qubit 1 state onto classical ancilla bit 1
cnot_i_00.measure(i_q[1], i_c[1]) # Write qubit 2 state onto classical ancilla bit 2
# Display final state probabilities:
execute_and_plot(i, ["cnot_i_00"])
###Output
_____no_output_____
###Markdown
*Note: The set of circuit operations to be executed can also be specified through a 'QASM', or a string that contains the registers and the set of operators to apply. We can get this string for the circuit we just made through the `.get_qasm()` method. This is also helpful for checking our implementation of the circuit, as we can read off the operations and make sure they match up with the diagram*
###Code
print(i.get_qasm('cnot_i_00'))
###Output
OPENQASM 2.0;
include "qelib1.inc";
qreg i_q[2];
creg i_c[2];
cx i_q[0],i_q[1];
measure i_q[0] -> i_c[0];
measure i_q[1] -> i_c[1];
###Markdown
*These QASM strings can also be used the other way around to create a circuit through the `.load_qasm_file()` and `.load_qasm_text()` methods of the QuantumProgram class.*
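The next cell is a minimal sketch of that round trip, added for illustration; it assumes this QISKit version's `load_qasm_text()` accepts a `name` keyword for registering the rebuilt circuit, so treat the exact call as an assumption.
###Code
# Hedged sketch: rebuild a circuit from the QASM string printed above.
# load_qasm_text() and its 'name' keyword are assumed for this QISKit release;
# the circuit name 'cnot_i_00_from_qasm' is hypothetical.
qasm_str = i.get_qasm('cnot_i_00')
i.load_qasm_text(qasm_str, name='cnot_i_00_from_qasm')
print(i.get_circuit_names())
###Output
_____no_output_____
###Markdown
Continuing input by input,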
###Code
# Initialize circuit:
cnot_i_01 = i.create_circuit("cnot_i_01", [i_q], [i_c])
cnot_i_01.x(i_q[0]) # Set the 1st qubit to |1> by flipping
# the initialized |0> with an X gate before implementing
# the circuit
# Apply gates according to diagram:
cnot_i_01.cx(i_q[0], i_q[1]) # Apply CNOT controlled by line 1
# Measure final state:
cnot_i_01.measure(i_q[0], i_c[0])
cnot_i_01.measure(i_q[1], i_c[1])
# Display final state probabilities:
execute_and_plot(i, ["cnot_i_01"])
# Initialize circuit:
cnot_i_10 = i.create_circuit("cnot_i_10", [i_q], [i_c])
cnot_i_10.x(i_q[1]) # Set the 2nd qubit to |1>
# Apply gates according to diagram:
cnot_i_10.cx(i_q[0], i_q[1]) # Apply CNOT controlled by line 1
# Measure final state:
cnot_i_10.measure(i_q[0], i_c[0])
cnot_i_10.measure(i_q[1], i_c[1])
# Display final state probabilities:
execute_and_plot(i, ["cnot_i_10"])
# Initialize circuit:
cnot_i_11 = i.create_circuit("cnot_i_11", [i_q], [i_c])
cnot_i_11.x(i_q[0]) # Set the 1st qubit to |1>
cnot_i_11.x(i_q[1]) # Set the 2nd qubit to |1>
# Apply gates according to diagram:
cnot_i_11.cx(i_q[0], i_q[1]) # Apply CNOT controlled by line 1
# Measure final states:
cnot_i_11.measure(i_q[0], i_c[0])
cnot_i_11.measure(i_q[1], i_c[1])
# Display final state probabilities:
execute_and_plot(i, ["cnot_i_11"])
###Output
_____no_output_____
###Markdown
Reading these off, we have $[\Psi = |00>,|10>,|01>,|11>]\rightarrow [\Psi' = |00>,|10>,|11>,|01>]$. Note that this is the same answer (up to convention in product-state notation) as obtained for approaches 1 and 2, only this time we have had a far less tedious time of writing out logic operations or matrices thanks to the QISKit library abstracting much of this away for us. While the numpy library was helpful for making linear algebra operations, the matrices had to be user defined and this method does not have nearly the scalability or ease of computation that QISKit offers. Circuit ii)
###Code
# For circuit ii, we can again create a QuantumProgram instance to
# realize a quantum register of size 2 with 2 classical ancilla bits
# for measurement
ii = QuantumProgram()
n = 2
ii_q = ii.create_quantum_register("ii_q", n)
ii_c = ii.create_classical_register("ii_c", n)
#ii.set_api(Qconfig.APItoken, Qconfig.config['url']) # set the APIToken and API url
ii.available_backends() #check backends - if you've set up your APIToken properly you
#should be able to see the quantum chips and simulators at IBM
for backend in ii.available_backends(): #check backend status
print(backend)
pprint(ii.get_backend_status(backend))
###Output
local_qiskit_simulator
{'available': True}
local_unitary_simulator
{'available': True}
local_clifford_simulator
{'available': True}
local_qasm_simulator
{'available': True}
###Markdown
Now for executing circuit ii):
###Code
# Initialize circuit:
cnot_ii_00 = ii.create_circuit("cnot_ii_00", [ii_q], [ii_c])
# Apply gates according to diagram:
cnot_ii_00.h(ii_q) # Apply hadamards in parallel, note that specifying
# a register as a gate method argument applies the operation to all
# qubits in the register
cnot_ii_00.cx(ii_q[1], ii_q[0]) #apply CNOT controlled by line 2
cnot_ii_00.h(ii_q) # Apply hadamards in parallel
# Measure final state:
cnot_ii_00.measure(ii_q[0], ii_c[0])
cnot_ii_00.measure(ii_q[1], ii_c[1])
# Display final state probabilities
execute_and_plot(ii, ["cnot_ii_00"])
# Initialize circuit:
cnot_ii_01 = ii.create_circuit("cnot_ii_01", [ii_q], [ii_c])
cnot_ii_01.x(ii_q[0]) # Set the 1st qubit to |1>
# Apply gates according to diagram:
cnot_ii_01.h(ii_q) # Apply hadamards in parallel
cnot_ii_01.cx(ii_q[1], ii_q[0]) # Apply CNOT controlled by line 2
cnot_ii_01.h(ii_q) # Apply hadamards in parallel
# Measure final state:
cnot_ii_01.measure(ii_q[0], ii_c[0])
cnot_ii_01.measure(ii_q[1], ii_c[1])
# Display final state probabilities:
execute_and_plot(ii, ["cnot_ii_01"])
# Initialize circuits
cnot_ii_10 = ii.create_circuit("cnot_ii_10", [ii_q], [ii_c])
cnot_ii_10.x(ii_q[1]) # Set the 2nd qubit to |1>
# Apply gates according to diagram:
cnot_ii_10.h(ii_q) # Apply hadamards in parallel
cnot_ii_10.cx(ii_q[1], ii_q[0]) # Apply CNOT controlled by line 2
cnot_ii_10.h(ii_q) # Apply hadamards in parallel
# Measure final state:
cnot_ii_10.measure(ii_q[0], ii_c[0])
cnot_ii_10.measure(ii_q[1], ii_c[1])
# Display final state probabilities:
execute_and_plot(ii, ["cnot_ii_10"])
# Initialize circuits:
cnot_ii_11 = ii.create_circuit("cnot_ii_11", [ii_q], [ii_c])
cnot_ii_11.x(ii_q[0]) # Set the 1st qubit to |1>
cnot_ii_11.x(ii_q[1]) # Set the 2nd qubit to |1>
# Apply gates according to diagram:
cnot_ii_11.h(ii_q) # Apply hadamards in parallel
cnot_ii_11.cx(ii_q[1], ii_q[0]) # Apply CNOT controlled by line 2
cnot_ii_11.h(ii_q) # Apply hadamards in parallel
# Measure final state
cnot_ii_11.measure(ii_q[0], ii_c[0])
cnot_ii_11.measure(ii_q[1], ii_c[1])
# Display final state probabilities
execute_and_plot(ii, ["cnot_ii_11"])
###Output
_____no_output_____
###Markdown
Reading off the computed final state, we see that it matches the computed final state of i), and so the circuits are considered equivalent $\square$. Another implementation: The input-by-input approach is helpful for first steps in understanding QISKit, but is also more long-winded than necessary. For a solution to the problem that uses QISKit more concisely/cleverly:
###Code
def circuit_i():
i = QuantumProgram()
i_q = i.create_quantum_register('i_q', 2)
i_c = i.create_classical_register('i_c', 2)
initial_states = ['00','01','10','11']
initial_circuits = {state: i.create_circuit('%s'%(state), [i_q], [i_c]) \
for state in initial_states}
final_circuits = {}
for state in initial_states:
if state[0] == '1':
initial_circuits[state].x(i_q[0])
if state[1] == '1':
initial_circuits[state].x(i_q[1])
initial_circuits[state].cx(i_q[0], i_q[1])
initial_circuits[state].measure(i_q[0], i_c[0])
initial_circuits[state].measure(i_q[1], i_c[1])
final_circuits[state] = initial_circuits[state]
return i
def circuit_ii():
ii = QuantumProgram()
ii_q = ii.create_quantum_register('ii_q', 2)
ii_c = ii.create_classical_register('ii_c', 2)
initial_states = ['00','01','10','11']
circuits = {state: ii.create_circuit('%s'%(state), [ii_q], [ii_c]) \
for state in initial_states}
for state in initial_states:
if state[0] == '1':
circuits[state].x(ii_q[0])
if state[1] == '1':
circuits[state].x(ii_q[1])
circuits[state].h(ii_q)
circuits[state].cx(ii_q[1], ii_q[0])
circuits[state].h(ii_q)
circuits[state].measure(ii_q[0], ii_c[0])
circuits[state].measure(ii_q[1], ii_c[1])
return ii
i = circuit_i()
ii = circuit_ii()
#i.set_api(Qconfig.APItoken, Qconfig.config['url'])
#ii.set_api(Qconfig.APItoken, Qconfig.config['url'])
results_i = i.execute(list(i.get_circuit_names()))
results_ii = ii.execute(list(ii.get_circuit_names()))
results_i_mapping = {circuit: results_i.get_counts(circuit) for circuit in list(i.get_circuit_names())}
results_ii_mapping = {circuit: results_ii.get_counts(circuit) for circuit in list(ii.get_circuit_names())}
print(results_i_mapping)
print(results_ii_mapping)
###Output
{'01': {'10': 1024}, '10': {'11': 1024}, '00': {'00': 1024}, '11': {'01': 1024}}
{'01': {'10': 1024}, '10': {'11': 1024}, '00': {'00': 1024}, '11': {'01': 1024}}
|
Class_scaled_Gridsearch.ipynb | ###Markdown
Scaling
###Code
# import packages
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# read dataframe in
df = pd.read_csv('data/kickstarter_preprocess.csv')
df.columns
###Output
_____no_output_____
###Markdown
features to keep: preparation, duration_days, goal, pledged_per_backer, parent_name, blurb_len_w, slug_len_w, 'launched_month'
###Code
# drop unimportant features
df.drop(['backers_count', 'country', 'usd_pledged', 'blurb_len_c', 'slug_len_c', 'cat_in_slug',
'category_parent_id', 'category_id', 'category_name', 'created_year', 'created_month', 'deadline_year',
'deadline_month', 'launched_year', 'rel_pledged_goal', 'filled_parent', 'staff_pick'],
axis=1, inplace=True)
df.columns
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 177593 entries, 0 to 177592
Data columns (total 9 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 goal 177593 non-null float64
1 state 177593 non-null object
2 blurb_len_w 177593 non-null int64
3 slug_len_w 177593 non-null int64
4 launched_month 177593 non-null int64
5 duration_days 177593 non-null int64
6 preparation 177593 non-null int64
7 pledged_per_backer 177593 non-null int64
8 parent_name 177593 non-null object
dtypes: float64(1), int64(6), object(2)
memory usage: 12.2+ MB
###Markdown
drop rows with state == canceled, rows with wrong categories
###Code
df = df.drop(df[df['state'] == "canceled" ].index)
df.shape
categories = ["Games", "Art", "Photography", "Film & Video", "Design", "Technology"]
df = df[df.parent_name.isin(categories)]
df.shape
###Output
_____no_output_____
###Markdown
encode the target (state) as binary and make dummies (parent_name, launch quarter)
###Code
#df.staff_pick = df.staff_pick.astype('int')
df['state'] = np.where(df['state'] == 'successful', 1, 0)
df.groupby('state').state.count()
# convert the categorical variable parent_name into dummy/indicator variables
df_dum2 = pd.get_dummies(df.parent_name, prefix='parent_name')
df = df.drop(['parent_name'], axis=1)
df = pd.concat([df, df_dum2], axis=1)
# making a categorical variable for launched_month q1, q2, q3, q4
df.loc[df['launched_month'] < 4, 'time_yr'] = 'q1'
df.loc[(df['launched_month'] >= 4) & (df['launched_month'] < 7), 'time_yr'] = 'q2'
df.loc[(df['launched_month'] >= 7) & (df['launched_month'] < 10), 'time_yr'] = 'q3'
df.loc[df['launched_month'] > 9, 'time_yr'] = 'q4'
df_dum3 = pd.get_dummies(df.time_yr, prefix='time_yr')
df = df.drop(['time_yr'], axis=1)
df = df.drop(['launched_month'], axis=1)
df = pd.concat([df, df_dum3], axis=1)
df.columns
df.info()
df.head()
###Output
_____no_output_____
###Markdown
Train-Test-Split
###Code
from sklearn.model_selection import train_test_split, cross_val_score
y = df.state
X = df.drop('state', axis=1)
# Train-test-split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)
###Output
_____no_output_____
###Markdown
Scaling
###Code
from sklearn.preprocessing import StandardScaler
# we have to define which columns we want to scale.
col_scale = ['goal', 'blurb_len_w', 'slug_len_w', 'duration_days', 'preparation', 'pledged_per_backer']
###Output
_____no_output_____
###Markdown
Data standardization
###Code
# Scaling with standard scaler
scaler = StandardScaler()
X_train_scaled_st = scaler.fit_transform(X_train[col_scale])
X_test_scaled_st = scaler.transform(X_test[col_scale])
# Concatenating scaled and dummy columns
X_train_preprocessed_st = np.concatenate([X_train_scaled_st, X_train.drop(col_scale, axis=1)], axis=1)
X_test_preprocessed_st = np.concatenate([X_test_scaled_st, X_test.drop(col_scale, axis=1)], axis=1)
###Output
_____no_output_____
###Markdown
Data normalization Scaling with MinMaxScaler Try to scale your data with the MinMaxScaler() from sklearn. It follows the same syntax as the StandardScaler. Don't forget: you have to import the scaler at the top of your notebook.
###Code
# Scaling with MinMaxScaler (normalization); same pattern as the StandardScaler above
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_train_scaled_nor = scaler.fit_transform(X_train[col_scale])
X_test_scaled_nor = scaler.transform(X_test[col_scale])
# Concatenating scaled and dummy columns
X_train_preprocessed_nor = np.concatenate([X_train_scaled_nor, X_train.drop(col_scale, axis=1)], axis=1)
X_test_preprocessed_nor = np.concatenate([X_test_scaled_nor, X_test.drop(col_scale, axis=1)], axis=1)
df.groupby('state').state.count()
###Output
_____no_output_____
###Markdown
Model Classification and Gridsearch (tuning hyperparameters) Logistic Regression
###Code
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
# fit model
lr = LogisticRegression()
lr.fit(X_train_preprocessed_st, y_train)
y_pred = lr.predict(X_test_preprocessed_st)
confusion_matrix(y_test, y_pred)
# normalization
#print (classification_report(y_test, y_pred))
# standardization
print(classification_report(y_test, y_pred))
# Gridsearch https://www.kaggle.com/enespolat/grid-search-with-logistic-regression
grid = {"C":np.logspace(-3,3,7), "penalty":["l1","l2"]} # l1 lasso l2 ridge
logreg = LogisticRegression()
logreg_cv = GridSearchCV(logreg,grid,cv=10)
logreg_cv.fit(X_train_preprocessed_st,y_train)
print("tuned hpyerparameters :(best parameters) ",logreg_cv.best_params_)
print("accuracy :",logreg_cv.best_score_)
# fit model
lr2 = LogisticRegression(C=1000.0,penalty="l2")
lr2.fit(X_train_preprocessed_st, y_train)
y_pred = lr2.predict(X_test_preprocessed_st)
confusion_matrix(y_test, y_pred)
print(classification_report(y_test, y_pred))
###Output
precision recall f1-score support
0 0.75 0.69 0.72 7732
1 0.74 0.80 0.77 8676
accuracy 0.74 16408
macro avg 0.75 0.74 0.74 16408
weighted avg 0.74 0.74 0.74 16408
###Markdown
Kernel SVM
###Code
import pylab as pl
import scipy.optimize as opt
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=4)
print ('Train set:', X_train_preprocessed_st.shape, y_train.shape)
print ('Test set:', X_test_preprocessed_st.shape, y_test.shape)
from sklearn import svm
clf = svm.SVC(kernel='rbf')
clf.fit(X_train_preprocessed_st, y_train)
from sklearn.metrics import classification_report, confusion_matrix
import itertools
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Predict with the fitted SVM and compute confusion matrix
y_pred = clf.predict(X_test_preprocessed_st)
cnf_matrix = confusion_matrix(y_test, y_pred, labels=[0,1])
np.set_printoptions(precision=2)
print (classification_report(y_test, y_pred))
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['failed','successful'],normalize= False, title='Confusion matrix')
param_grid = [{'kernel': ['rbf'],
'gamma': [0.0001, 0.001, 0.01, 0.1, 1],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'],
'C': [1, 10, 100, 1000]}]
grid = GridSearchCV(clf, param_grid, verbose=True, n_jobs=-1)
result = grid.fit(X_train_preprocessed_st, y_train)
# Print best parameters
print('Best Parameters:', result.best_params_)
# Print best score
print('Best Score:', result.best_score_)
clf2 = svm.SVC(kernel='rbf')
clf2.fit(X_train_preprocessed_st, y_train)
# Predict with the refitted SVM and compute confusion matrix
y_pred = clf2.predict(X_test_preprocessed_st)
cnf_matrix = confusion_matrix(y_test, y_pred, labels=[0,1])
np.set_printoptions(precision=2)
print (classification_report(y_test, y_pred))
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['failed','successful'],normalize= False, title='Confusion matrix')
###Output
_____no_output_____
###Markdown
Random Forest
###Code
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
# Create the model with 100 trees
model = RandomForestClassifier(n_estimators=100,
random_state=42,
max_features = 'sqrt',
n_jobs=-1, verbose = 1)
# Fit on training data
model.fit(X_train_preprocessed_st, y_train)
y_pred = model.predict(X_test_preprocessed_st)
# Training predictions (to demonstrate overfitting)
train_rf_predictions = model.predict(X_train_preprocessed_st)
train_rf_probs = model.predict_proba(X_train_preprocessed_st)[:, 1]
# Testing predictions (to determine performance)
rf_predictions = model.predict(X_test_preprocessed_st)
rf_probs = model.predict_proba(X_test_preprocessed_st)[:, 1]
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred, labels=[0,1])
np.set_printoptions(precision=2)
print (classification_report(y_test, y_pred))
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['failed','successful'],normalize= False, title='Confusion matrix')
print (classification_report(y_test, y_pred))
###Output
precision recall f1-score support
0 0.86 0.73 0.79 7732
1 0.79 0.89 0.84 8676
accuracy 0.81 16408
macro avg 0.82 0.81 0.81 16408
weighted avg 0.82 0.81 0.81 16408
###Markdown
Random Forest: Optimization through Random Search
###Code
# Hyperparameter grid
param_grid = {
'n_estimators': np.linspace(10, 200).astype(int),
'max_depth': [None] + list(np.linspace(3, 20).astype(int)),
'max_features': ['auto', 'sqrt', None] + list(np.arange(0.5, 1, 0.1)),
'max_leaf_nodes': [None] + list(np.linspace(10, 50, 500).astype(int)),
'min_samples_split': [2, 5, 10],
'bootstrap': [True, False]
}
# Estimator for use in random search
estimator = RandomForestClassifier(random_state = 42)
# Create the random search model
rs = RandomizedSearchCV(estimator, param_grid, n_jobs = -1,
scoring = 'roc_auc', cv = 3,
n_iter = 10, verbose = 5, random_state=42)
# Fit
rs.fit(X_train_preprocessed_st, y_train)
rs.best_params_
# Create the model with the tuned hyperparameters (196 trees)
model = RandomForestClassifier(n_estimators=196,
random_state=42,
min_samples_split=10,
max_leaf_nodes=49,
max_features=0.7,
max_depth=17,
bootstrap=True,
n_jobs=-1, verbose = 1)
# Fit on training data
model.fit(X_train_preprocessed_st, y_train)
y_pred = model.predict(X_test_preprocessed_st)
# Training predictions (to demonstrate overfitting)
train_rf_predictions = model.predict(X_train_preprocessed_st)
train_rf_probs = model.predict_proba(X_train_preprocessed_st)[:, 1]
# Testing predictions (to determine performance)
rf_predictions = model.predict(X_test_preprocessed_st)
rf_probs = model.predict_proba(X_test_preprocessed_st)[:, 1]
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred, labels=[0,1])
np.set_printoptions(precision=2)
print (classification_report(y_test, y_pred))
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['failed','successful'],normalize= False, title='Confusion matrix')
###Output
precision recall f1-score support
0 0.88 0.69 0.77 7732
1 0.77 0.92 0.84 8676
accuracy 0.81 16408
macro avg 0.83 0.80 0.81 16408
weighted avg 0.82 0.81 0.81 16408
###Markdown
Use best model
###Code
best_model = rs.best_estimator_
train_rf_predictions = best_model.predict(X_train_preprocessed_st)
train_rf_probs = best_model.predict_proba(X_train_preprocessed_st)[:, 1]
rf_predictions = best_model.predict(X_test_preprocessed_st)
rf_probs = best_model.predict_proba(X_test_preprocessed_st)[:, 1]
n_nodes = []
max_depths = []
for ind_tree in best_model.estimators_:
n_nodes.append(ind_tree.tree_.node_count)
max_depths.append(ind_tree.tree_.max_depth)
print(f'Average number of nodes {int(np.mean(n_nodes))}')
print(f'Average maximum depth {int(np.mean(max_depths))}')
from sklearn.metrics import recall_score, precision_score, roc_auc_score, roc_curve
def evaluate_model(predictions, probs, train_predictions, train_probs):
"""Compare machine learning model to baseline performance.
Computes statistics and shows ROC curve."""
baseline = {}
baseline['recall'] = recall_score(y_test, [1 for _ in range(len(y_test))])
baseline['precision'] = precision_score(y_test, [1 for _ in range(len(y_test))])
baseline['roc'] = 0.5
results = {}
results['recall'] = recall_score(y_test, predictions)
results['precision'] = precision_score(y_test, predictions)
results['roc'] = roc_auc_score(y_test, probs)
train_results = {}
train_results['recall'] = recall_score(y_train, train_predictions)
train_results['precision'] = precision_score(y_train, train_predictions)
train_results['roc'] = roc_auc_score(y_train, train_probs)
for metric in ['recall', 'precision', 'roc']:
print(f'{metric.capitalize()} Baseline: {round(baseline[metric], 2)} Test: {round(results[metric], 2)} Train: {round(train_results[metric], 2)}')
# Calculate false positive rates and true positive rates
base_fpr, base_tpr, _ = roc_curve(y_test, [1 for _ in range(len(y_test))])
model_fpr, model_tpr, _ = roc_curve(y_test, probs)
plt.figure(figsize = (8, 6))
plt.rcParams['font.size'] = 16
# Plot both curves
plt.plot(base_fpr, base_tpr, 'b', label = 'baseline')
plt.plot(model_fpr, model_tpr, 'r', label = 'model')
plt.legend();
plt.xlabel('False Positive Rate'); plt.ylabel('True Positive Rate'); plt.title('ROC Curves');
evaluate_model(rf_predictions, rf_probs, train_rf_predictions, train_rf_probs)
###Output
_____no_output_____ |
docs/tutorials/baseline development documentation/(baseline development) Silver per m2.ipynb | ###Markdown
Silver per m2 Calculations This journal documents the calculations and assumptions for the silver baseline file used in the calculator.
###Code
import numpy as np
import pandas as pd
import os,sys
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 22})
plt.rcParams['figure.figsize'] = (12, 8)
density_Ag = 10.49 #g/cm3, source Wikipedia
###Output
_____no_output_____
###Markdown
Pre-Journal Calculations From the ITRPVs, we have grams of Ag per cell from 2009 through 2019, with projections through 2030. Data for silver per cell for 4 different types of cell were extracted from ITRPV graphs with "webplotdigitizer" then rounded to ~2 significant figures. The 4 types of cell noted in ITRPV 2019 and 2020 are Monofacial p-type, Bifacial p-type, HJT n-type, and n-type. Some mathematical assumptions: 1) n-type cells account for only 5% of the world market share and have for the last decade. While the amount of silver in the two different n-type cells is notably different, because their market share is so small, these two n-type cell silver quantities were averaged together. 2) The difference in silver per cell between bifacial and monofacial cells is not significant, so the two were averaged together. Therefore the process for determining the average silver per cell across the different technologies was: average silver per cell = 0.95*(average of monofacial and bifacial p-type) + 0.05*(average of n-type). This math was completed in the google spreadsheet of raw data, then copied to a csv and is uploaded here; a small worked example of this weighting follows the next cell.
###Code
#read in the csv of 2009 through 2030 data for silver per cell.
cwd = os.getcwd() #grabs current working directory
skipcols = ['Source']
itrpv_ag_gpc = pd.read_csv(cwd+"/../../PV_ICE/baselines/SupportingMaterial/ag_g_per_cell.csv",
index_col='Year', usecols=lambda x: x not in skipcols)
itrpv_ag_gpc
#plot the raw data
plt.plot(itrpv_ag_gpc, marker="o")
plt.title("Silver mass per cell over time")
plt.ylabel("Silver, grams/cell")
###Output
_____no_output_____
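###Markdown
As a quick, hedged illustration of the market-share weighting described above (the per-cell numbers below are made up for demonstration and are not ITRPV values):
###Code
# Hypothetical worked example of: avg = 0.95*(mean of p-type) + 0.05*(mean of n-type)
mono_p, bi_p = 0.10, 0.09      # assumed g Ag per p-type cell (illustrative only)
hjt_n, other_n = 0.25, 0.15    # assumed g Ag per n-type cell (illustrative only)
avg_ag_per_cell = 0.95*np.mean([mono_p, bi_p]) + 0.05*np.mean([hjt_n, other_n])
print(round(avg_ag_per_cell, 4))  # ~0.1003 g/cell for these made-up inputs
###Output
_____no_output_____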
###Markdown
Based on looking at the plot of original data, it doesn't seem crazy to linearly interpolate for missing data
###Code
ag_gpc = itrpv_ag_gpc.interpolate()
plt.plot(ag_gpc, marker="o")
plt.title("Silver mass per cell over time")
plt.ylabel("Silver, grams/cell")
###Output
_____no_output_____
###Markdown
Convert to a per module area basis (not per cell)
###Code
#import cell per m2 from the silicon baseline
cpm2 = pd.read_csv(cwd+"/../../PV_ICE/baselines/SupportingMaterial/output_cell_per_m2.csv",
index_col='Year', usecols=lambda x: x not in skipcols)
#print(cpm2)
#convert silver per cell to silver per m^2 of module, based on output from silicon baseline
ag_gpc.columns = cpm2.columns = ['ag_g_per_m2'] #rename to a common name
ag_gpm2 = ag_gpc.mul(cpm2, 'columns') #multiply
plt.plot(ag_gpm2)
plt.title("Silver mass per module m2 over time")
plt.ylabel("Silver, grams/module m2")
###Output
_____no_output_____
###Markdown
Extend projection through 2050It appears that the silver per cell is expected to level out by 2025 or so. We will extend 2030 values through 2050 as a "lower limit" or minimal further improvement.
###Code
#create an empty df as a place holder
yrs = pd.Series(index=range(2031,2050), dtype='float64')
tempdf = pd.DataFrame(yrs, columns=['ag_g_per_m2'])
fulldf = pd.concat([ag_gpm2,tempdf]) #attach it to rest of df
#set the 2050 value to the same as 2030
fulldf.loc[2050] = fulldf.loc[2030]
#interpolate for missing values
ag_gpm2_full = fulldf.interpolate()
#print(ag_gpm2_full)
#plot
plt.plot(ag_gpm2_full)
plt.title("Silver mass per module area over time")
plt.ylabel("Silver, grams/module m2")
#print out to csv
ag_gpm2_full.to_csv(cwd+'/../../PV_ICE/baselines/SupportingMaterial/output_ag_g_per_m2.csv', index=True)
###Output
_____no_output_____ |
nbs/12_experiment.speed-lsh_synthetic-task.ipynb | ###Markdown
LSH evaluation speed We want to test the speed during evaluation in seconds per step, as reported in the right part of table 5 of the paper: https://arxiv.org/pdf/2001.04451.pdf get data Helper method to get data. Assume 1 step of training and 10 of validation.
###Code
def get_dataloaders(bs=32, sl=1024, train_steps=1, valid_steps=10, seed=123):
train_sz, valid_sz = bs*train_steps, bs*valid_steps
dls = DataLoaders.from_dsets(DeterministicTwinSequence(sl, train_sz, seed=seed),
DeterministicTwinSequence(sl, valid_sz, seed=seed),
bs=bs, shuffle=False, device='cuda')
return dls
###Output
_____no_output_____
###Markdown
get model Helper method to get `LSHLM` method. If `n_hashes=0` full attention is used.
###Code
def get_lshlm(n_hashes=1, sl=1024, use_lsh=True):
if n_hashes==0: use_lsh=False
return LSHLM(vocab_sz=128, d_model=256, n_layers=1, n_heads=4,
max_seq_len=sl,bucket_size=64, n_hashes=n_hashes,
causal=True, use_lsh=use_lsh)
###Output
_____no_output_____
###Markdown
train Get a learner that is trained for 1 epoch (just in case).
###Code
def get_learner(dls, model, n_epochs=1, lr=1e-3):
learn = Learner(dls, model, opt_func=adafactor,
loss_func=CrossEntropyLossFlat(ignore_index=-100),
metrics=MaskedAccuracy(),
cbs=[MaskTargCallback()]).to_fp16()
with learn.no_bar():
with learn.no_logging():
learn.fit(n_epochs, lr)
return learn
###Output
_____no_output_____
###Markdown
time evaluation
###Code
'function to get average time per step of validation'
def time_eval(learn,dls, n_rounds=10):
with learn.no_bar():
t = timeit(learn.validate, number=n_rounds)
steps = dls.valid.n / dls.valid.bs
return t / n_rounds / steps
###Output
_____no_output_____
###Markdown
Loop experiment setup
###Code
n_lsh=[0, 1,2,4,8]
sls =[1024, 2048, 4096, 8192, 16384, 32768]
bss =[32, 16, 8, 4, 2, 1]
train_steps, valid_steps = 1,10
cols = ['sl', 'bs', 'n-lsh', 'time']
results = []
for sl, bs in zip(sls, bss):
for n_hashes in n_lsh:
if n_hashes==0 and sl>8192:
results.append((sl, bs, n_hashes, np.nan)) # won't fit in memory
else:
dls = get_dataloaders(bs=bs, sl=sl, train_steps=train_steps, valid_steps=valid_steps)
model = get_lshlm(n_hashes=n_hashes, sl=sl)
learn = get_learner(dls, model)
t = time_eval(learn, dls)
del(learn, model, dls)
torch.cuda.empty_cache()
results.append((sl, bs, n_hashes, t))
df = pd.DataFrame(results, columns=cols)
df.head()
df.to_csv('lsh-timing.csv')
def get_label(nh):
return f'lsh-{nh}' if nh>0 else 'full attention'
def get_linestyle(nh):
return '--' if nh == 0 else '-'
fig, ax = plt.subplots(figsize=(8,5))
for nh, c in zip(n_lsh, ['k','r', 'b', 'g', 'y']):
dat = df.loc[df['n-lsh']==nh]
ax.plot(dat['sl'], dat['time'], color=c, label=get_label(nh), linestyle=get_linestyle(nh))
ax.set_yscale('log')
ax.set_xscale('log', basex=2)
ax.set_xlabel('sequence length / batch')
ax.set_yticks([0.1, 1])
ax.set_xticks(sls)
ax.set_xticklabels(f'{sl}/{bs}' for sl, bs in zip(sls, bss))
ax.legend(loc='upper left')
ax.set_ylabel('seconds / step');
###Output
_____no_output_____
###Markdown
LSH evaluation speed We want to test the speed during evaluation in seconds per step, as reported in the right part of table 5 of the paper: https://arxiv.org/pdf/2001.04451.pdf get data Helper method to get data. Assume 1 step of training and 10 of validation.
###Code
def get_dataloaders(bs=32, sl=1024, train_steps=1, valid_steps=10, seed=123):
train_sz, valid_sz = bs*train_steps, bs*valid_steps
dls = DataLoaders.from_dsets(DeterministicTwinSequence(sl, train_sz, seed=seed),
DeterministicTwinSequence(sl, valid_sz, seed=seed),
bs=bs, shuffle=False, device='cuda')
return dls
###Output
_____no_output_____
###Markdown
get model Helper method to get `LSHLM` method. If `n_hashes=0` full attention is used.
###Code
def get_lshlm(n_hashes=1, sl=1024, use_lsh=True):
if n_hashes==0: use_lsh=False
return LSHLM(vocab_sz=128, d_model=256, n_layers=1, n_heads=4,
max_seq_len=sl,bucket_size=64, n_hashes=n_hashes,
causal=True, use_lsh=use_lsh)
###Output
_____no_output_____
###Markdown
train Get a learner that is trained for 1 epoch (just in case).
###Code
def get_learner(dls, model, n_epochs=1, lr=1e-3):
learn = Learner(dls, model, opt_func=adafactor,
loss_func=CrossEntropyLossFlat(ignore_index=-100),
metrics=MaskedAccuracy(),
cbs=[MaskTargCallback()]).to_fp16()
with learn.no_bar():
with learn.no_logging():
learn.fit(n_epochs, lr)
return learn
###Output
_____no_output_____
###Markdown
time evaluation
###Code
'function to get average time per step of validation'
def time_eval(learn,dls, n_rounds=10):
with learn.no_bar():
t = timeit(learn.validate, number=n_rounds)
steps = dls.valid.n / dls.valid.bs
return t / n_rounds / steps
###Output
_____no_output_____
###Markdown
Loop experiment setup
###Code
n_lsh=[0, 1,2,4,8]
sls =[1024, 2048, 4096, 8192, 16384, 32768]
bss =[32, 16, 8, 4, 2, 1]
train_steps, valid_steps = 1,10
cols = ['sl', 'bs', 'n-lsh', 'time']
results = []
for sl, bs in zip(sls, bss):
for n_hashes in n_lsh:
if n_hashes==0 and sl>4096:
results.append((sl, bs, n_hashes, np.nan)) # won't fit in memory
else:
dls = get_dataloaders(bs=bs, sl=sl, train_steps=train_steps, valid_steps=valid_steps)
model = get_lshlm(n_hashes=n_hashes, sl=sl)
learn = get_learner(dls, model)
t = time_eval(learn, dls)
del(learn, model, dls)
torch.cuda.empty_cache()
results.append((sl, bs, n_hashes, t))
df = pd.DataFrame(results, columns=cols)
df.head()
def get_label(nh):
return f'lsh-{nh}' if nh>0 else 'full attention'
def get_linestyle(nh):
return '--' if nh == 0 else '-'
fig, ax = plt.subplots(figsize=(8,5))
for nh, c in zip(n_lsh, ['k','r', 'b', 'g', 'y']):
dat = df.loc[df['n-lsh']==nh]
ax.plot(dat['sl'], dat['time'], color=c, label=get_label(nh), linestyle=get_linestyle(nh))
ax.set_yscale('log')
ax.set_xscale('log', base=2)
ax.set_xlabel('sequence length / batch')
ax.set_yticks([0.1, 1])
ax.set_xticks(sls)
ax.set_xticklabels(f'{sl}/{bs}' for sl, bs in zip(sls, bss))
ax.legend(loc='upper left')
ax.set_ylabel('seconds / step');
###Output
_____no_output_____ |
Fase 4 - Temas avanzados/Tema 14 - Bases de datos con SQLite/Lección 01 (Apuntes) - Conexion, puntero y consultas básicas.ipynb | ###Markdown
Note: these examples are meant to be run as Python scripts, not in Jupyter Connecting to the database, creating it and disconnecting
###Code
# Import the module
import sqlite3
# Connect to the ejemplo.db database (it will be created if it does not exist)
conexion = sqlite3.connect('ejemplo.db')
# Close the connection; if we don't, it will stay in use and we won't be able to manage the file
conexion.close()
###Output
_____no_output_____
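###Markdown
As a quick aside: if you just want to experiment without leaving a file on disk, sqlite3 also accepts the special name ':memory:', which creates a temporary database in RAM that disappears as soon as the connection is closed.
###Code
import sqlite3

# In-memory database: nothing is written to disk, handy for throwaway tests
conexion = sqlite3.connect(':memory:')
conexion.close()
###Output
_____no_output_____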
###Markdown
Creating a table using SQL syntax Before executing a query in SQL code, we have to create a cursor. **Once the table has been created, if we try to create it again we will get an error telling us that it already exists.**
###Code
import sqlite3
conexion = sqlite3.connect('ejemplo.db')
# Create the cursor
cursor = conexion.cursor()
# Now we will create a usuarios table to store names, ages and emails
cursor.execute("CREATE TABLE usuarios (nombre VARCHAR(100), edad INTEGER, email VARCHAR(100))")
# Save the changes with a commit
conexion.commit()
conexion.close()
###Output
_____no_output_____
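###Markdown
If you want to be able to re-run the cell above without hitting that error, two common variations are shown below as a sketch: using SQL's IF NOT EXISTS clause, or catching the exception that sqlite3 raises in this case, sqlite3.OperationalError.
###Code
import sqlite3

conexion = sqlite3.connect('ejemplo.db')
cursor = conexion.cursor()

# Option 1: only create the table if it does not already exist
cursor.execute("CREATE TABLE IF NOT EXISTS usuarios (nombre VARCHAR(100), edad INTEGER, email VARCHAR(100))")

# Option 2: try to create it and catch the error if it already exists
try:
    cursor.execute("CREATE TABLE usuarios (nombre VARCHAR(100), edad INTEGER, email VARCHAR(100))")
except sqlite3.OperationalError as error:
    print('The table already exists:', error)

conexion.commit()
conexion.close()
###Output
_____no_output_____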
###Markdown
Inserting a record
###Code
import sqlite3
conexion = sqlite3.connect('ejemplo.db')
cursor = conexion.cursor()
# Insert a record into the usuarios table
cursor.execute("INSERT INTO usuarios VALUES ('Hector', 27, '[email protected]')")
# Save the changes with a commit
conexion.commit()
conexion.close()
###Output
_____no_output_____
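###Markdown
A note on style: instead of writing the values inside the SQL string, sqlite3 lets you pass them separately through ? placeholders, which avoids quoting problems and SQL injection (it is the same mechanism used with executemany() later on). To leave ejemplo.db untouched for the following sections, this sketch runs against a throwaway in-memory database.
###Code
import sqlite3

# Throwaway in-memory database so ejemplo.db is not modified
conexion = sqlite3.connect(':memory:')
cursor = conexion.cursor()
cursor.execute("CREATE TABLE usuarios (nombre VARCHAR(100), edad INTEGER, email VARCHAR(100))")

# Same insert as above, but with placeholders and a separate tuple of values
usuario = ('Hector', 27, '[email protected]')
cursor.execute("INSERT INTO usuarios VALUES (?,?,?)", usuario)

conexion.commit()
conexion.close()
###Output
_____no_output_____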
###Markdown
Retrieving the first record with .fetchone()
###Code
import sqlite3
conexion = sqlite3.connect('ejemplo.db')
cursor = conexion.cursor()
# Retrieve the records from the usuarios table
cursor.execute("SELECT * FROM usuarios")
# Print the cursor to see what it is
print(cursor)
# Retrieve the first record with the fetchone method; it returns a tuple
usuario = cursor.fetchone()
print(usuario)
conexion.close()
###Output
('Hector', 27, '[email protected]')
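###Markdown
Note that fetchone() returns the next row each time it is called, and None once there are no rows left, so it can also be used to consume the results one at a time. A small sketch:
###Code
import sqlite3

conexion = sqlite3.connect('ejemplo.db')
cursor = conexion.cursor()
cursor.execute("SELECT * FROM usuarios")

# Keep calling fetchone() until it returns None
usuario = cursor.fetchone()
while usuario is not None:
    print(usuario)
    usuario = cursor.fetchone()

conexion.close()
###Output
_____no_output_____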
###Markdown
Inserting several records with .executemany()
###Code
import sqlite3
conexion = sqlite3.connect('ejemplo.db')
cursor = conexion.cursor()
# Create a list with several users
usuarios = [('Mario', 51, '[email protected]'),
('Mercedes', 38, '[email protected]'),
('Juan', 19, '[email protected]'),
]
# Now we use the executemany() method to insert several records at once
cursor.executemany("INSERT INTO usuarios VALUES (?,?,?)", usuarios)
# Save the changes with a commit
conexion.commit()
conexion.close()
###Output
_____no_output_____
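###Markdown
As an aside, the connection can also be used as a context manager: inside a with block, sqlite3 commits the transaction automatically if the block finishes without errors and rolls it back if an exception is raised (it does not close the connection for you). A minimal sketch on a throwaway in-memory database:
###Code
import sqlite3

conexion = sqlite3.connect(':memory:')
conexion.execute("CREATE TABLE usuarios (nombre VARCHAR(100), edad INTEGER, email VARCHAR(100))")

# The with block replaces the explicit conexion.commit()
with conexion:
    conexion.execute("INSERT INTO usuarios VALUES (?,?,?)", ('Ana', 33, '[email protected]'))

conexion.close()
###Output
_____no_output_____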
###Markdown
Retrieving several records with .fetchall()
###Code
import sqlite3
conexion = sqlite3.connect('ejemplo.db')
cursor = conexion.cursor()
# Retrieve the records from the usuarios table
cursor.execute("SELECT * FROM usuarios")
# Retrieve all the records with fetchall and dump them into a list of users
usuarios = cursor.fetchall()
# Now we can loop over all the users
for usuario in usuarios:
print(usuario)
conexion.close()
###Output
('Hector', 27, '[email protected]')
('Mario', 51, '[email protected]')
('Mercedes', 38, '[email protected]')
('Juan', 19, '[email protected]')
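###Markdown
Since the cursor itself is iterable, the intermediate list from fetchall() is not strictly necessary: you can loop over the cursor directly and it will yield one tuple per row.
###Code
import sqlite3

conexion = sqlite3.connect('ejemplo.db')
cursor = conexion.cursor()

# Iterate over the cursor directly, without building a list first
for usuario in cursor.execute("SELECT * FROM usuarios"):
    print(usuario)

conexion.close()
###Output
_____no_output_____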
|