path | concatenated_notebook
---|---|
MidtermExam_Number1.ipynb | ###Markdown
Program 1: Modify the program below by adding two conversion methods - Fahrenheit to Celsius and Kelvin to Celsius (50 points)
###Code
def main():
class TemperatureConversion:
def __init__(self, temp=1):
self._temp = temp
class FahrenheitToCelsius(TemperatureConversion):
def conversion(self):
return ((self._temp - 32) * 5)/9
class KelvinToCelsius(TemperatureConversion):
def conversion(self):
return self._temp - 273.15
tempInCelsius = float(input("Enter the temperature : "))
convert = KelvinToCelsius(tempInCelsius)
print("In kelvin to Celsius : " + str(convert.conversion()) + " c " )
convert = FahrenheitToCelsius(tempInCelsius)
print("In Fahrenheit to Celsius : " +str(convert.conversion()) + " c ")
main()
###Output
Enter the temperature : 45
In kelvin to Celsius : -228.14999999999998 c
In Fahrenheit to Celsius : 7.222222222222222 c
|
nbs/12_cluster_analysis/000_01-kmeans-pca.ipynb | ###Markdown
Description Runs k-means on the pca version of the data. Environment variables
###Code
from IPython.display import display
import conf
N_JOBS = conf.GENERAL["N_JOBS"]
display(N_JOBS)
%env MKL_NUM_THREADS=$N_JOBS
%env OPEN_BLAS_NUM_THREADS=$N_JOBS
%env NUMEXPR_NUM_THREADS=$N_JOBS
%env OMP_NUM_THREADS=$N_JOBS
###Output
env: MKL_NUM_THREADS=2
env: OPEN_BLAS_NUM_THREADS=2
env: NUMEXPR_NUM_THREADS=2
env: OMP_NUM_THREADS=2
###Markdown
Modules loading
###Code
%load_ext autoreload
%autoreload 2
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from utils import generate_result_set_name
###Output
_____no_output_____
###Markdown
Settings
###Code
np.random.seed(0)
INITIAL_RANDOM_STATE = 10000
###Output
_____no_output_____
###Markdown
Input data
###Code
INPUT_SUBSET = "pca"
INPUT_STEM = "z_score_std-projection-smultixcan-efo_partial-mashr-zscores"
DR_OPTIONS = {
"n_components": 50,
"svd_solver": "full",
"random_state": 0,
}
input_filepath = Path(
conf.RESULTS["DATA_TRANSFORMATIONS_DIR"],
INPUT_SUBSET,
generate_result_set_name(
DR_OPTIONS, prefix=f"{INPUT_SUBSET}-{INPUT_STEM}-", suffix=".pkl"
),
).resolve()
display(input_filepath)
assert input_filepath.exists(), "Input file does not exist"
input_filepath_stem = input_filepath.stem
display(input_filepath_stem)
###Output
_____no_output_____
###Markdown
Clustering
###Code
from sklearn.cluster import KMeans
CLUSTERING_ATTRIBUTES_TO_SAVE = ["n_clusters"]
CLUSTERING_OPTIONS = {}
CLUSTERING_OPTIONS["K_MIN"] = 2
CLUSTERING_OPTIONS["K_MAX"] = 60 # sqrt(3749)
CLUSTERING_OPTIONS["N_REPS_PER_K"] = 5
CLUSTERING_OPTIONS["KMEANS_N_INIT"] = 10
display(CLUSTERING_OPTIONS)
CLUSTERERS = {}
idx = 0
random_state = INITIAL_RANDOM_STATE
for k in range(CLUSTERING_OPTIONS["K_MIN"], CLUSTERING_OPTIONS["K_MAX"] + 1):
for i in range(CLUSTERING_OPTIONS["N_REPS_PER_K"]):
clus = KMeans(
n_clusters=k,
n_init=CLUSTERING_OPTIONS["KMEANS_N_INIT"],
random_state=random_state,
)
method_name = type(clus).__name__
CLUSTERERS[f"{method_name} #{idx}"] = clus
random_state = random_state + 1
idx = idx + 1
display(len(CLUSTERERS))
_iter = iter(CLUSTERERS.items())
display(next(_iter))
display(next(_iter))
clustering_method_name = method_name
display(clustering_method_name)
###Output
_____no_output_____
###Markdown
Output directory
###Code
# output dir for this notebook
RESULTS_DIR = Path(
conf.RESULTS["CLUSTERING_RUNS_DIR"],
f"{INPUT_SUBSET}-{INPUT_STEM}",
).resolve()
RESULTS_DIR.mkdir(parents=True, exist_ok=True)
display(RESULTS_DIR)
###Output
_____no_output_____
###Markdown
Load input file
###Code
data = pd.read_pickle(input_filepath)
data.shape
data.head()
assert not data.isna().any().any()
###Output
_____no_output_____
###Markdown
Clustering Generate ensemble
###Code
from clustering.ensembles.utils import generate_ensemble
ensemble = generate_ensemble(
data,
CLUSTERERS,
attributes=CLUSTERING_ATTRIBUTES_TO_SAVE,
)
# the number of partitions should be (K_MAX - K_MIN + 1) * N_REPS_PER_K = 59 * 5 = 295 (all generated by k-means here)
ensemble.shape
ensemble.head()
ensemble["n_clusters"].value_counts().head()
ensemble_stats = ensemble["n_clusters"].describe()
display(ensemble_stats)
###Output
_____no_output_____
###Markdown
Testing
###Code
assert ensemble_stats["min"] > 1
assert not ensemble["n_clusters"].isna().any()
assert ensemble.shape[0] == len(CLUSTERERS)
# all partitions have the right size
assert np.all(
[part["partition"].shape[0] == data.shape[0] for idx, part in ensemble.iterrows()]
)
# no partition has negative clusters (noisy points)
assert not np.any([(part["partition"] < 0).any() for idx, part in ensemble.iterrows()])
###Output
_____no_output_____
###Markdown
Add clustering quality measures
###Code
from sklearn.metrics import calinski_harabasz_score
ensemble = ensemble.assign(
ch_score=ensemble["partition"].apply(lambda x: calinski_harabasz_score(data, x))
)
ensemble.shape
ensemble.head()
###Output
_____no_output_____
###Markdown
Save
###Code
output_filename = Path(
RESULTS_DIR,
generate_result_set_name(
CLUSTERING_OPTIONS,
prefix=f"{clustering_method_name}-",
suffix=".pkl",
),
).resolve()
display(output_filename)
ensemble.to_pickle(output_filename)
###Output
_____no_output_____
###Markdown
Cluster quality
###Code
with pd.option_context("display.max_rows", None, "display.max_columns", None):
_df = ensemble.groupby(["n_clusters"]).mean()
display(_df)
with sns.plotting_context("talk", font_scale=0.75), sns.axes_style(
"whitegrid", {"grid.linestyle": "--"}
):
fig = plt.figure(figsize=(14, 6))
ax = sns.pointplot(data=ensemble, x="n_clusters", y="ch_score")
ax.set_ylabel("Calinski-Harabasz index")
ax.set_xlabel("Number of clusters ($k$)")
ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
plt.grid(True)
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Stability Group ensemble by n_clusters
###Code
parts = ensemble.groupby("n_clusters").apply(
lambda x: np.concatenate(x["partition"].apply(lambda x: x.reshape(1, -1)), axis=0)
)
parts.head()
assert np.all(
[
parts.loc[k].shape == (CLUSTERING_OPTIONS["N_REPS_PER_K"], data.shape[0])
for k in parts.index
]
)
###Output
_____no_output_____
###Markdown
Compute stability
###Code
from sklearn.metrics import adjusted_rand_score as ari
from scipy.spatial.distance import pdist
parts_ari = pd.Series(
{k: pdist(parts.loc[k], metric=ari) for k in parts.index}, name="k"
)
parts_ari_stability = parts_ari.apply(lambda x: x.mean())
display(parts_ari_stability.sort_values(ascending=False).head(15))
parts_ari_df = pd.DataFrame.from_records(parts_ari.tolist()).set_index(
parts_ari.index.copy()
)
parts_ari_df.shape
assert (
int(
(CLUSTERING_OPTIONS["N_REPS_PER_K"] * (CLUSTERING_OPTIONS["N_REPS_PER_K"] - 1))
/ 2
)
== parts_ari_df.shape[1]
)
parts_ari_df.head()
###Output
_____no_output_____
###Markdown
Save
###Code
output_filename = Path(
RESULTS_DIR,
generate_result_set_name(
CLUSTERING_OPTIONS,
prefix=f"{clustering_method_name}-stability-",
suffix=".pkl",
),
).resolve()
display(output_filename)
parts_ari_df.to_pickle(output_filename)
###Output
_____no_output_____
###Markdown
Stability plot
###Code
parts_ari_df_plot = (
parts_ari_df.stack()
.reset_index()
.rename(columns={"level_0": "k", "level_1": "idx", 0: "ari"})
)
parts_ari_df_plot.dtypes
parts_ari_df_plot.head()
# with sns.axes_style('whitegrid', {'grid.linestyle': '--'}):
with sns.plotting_context("talk", font_scale=0.75), sns.axes_style(
"whitegrid", {"grid.linestyle": "--"}
):
fig = plt.figure(figsize=(14, 6))
ax = sns.pointplot(data=parts_ari_df_plot, x="k", y="ari")
ax.set_ylabel("Averange ARI")
ax.set_xlabel("Number of clusters ($k$)")
ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
# ax.set_ylim(0.0, 1.0)
# ax.set_xlim(CLUSTERING_OPTIONS['K_MIN'], CLUSTERING_OPTIONS['K_MAX'])
plt.grid(True)
plt.tight_layout()
###Output
_____no_output_____ |
forks/MIT_OCW_Linear_Algebra_18_06-master/II_08_Eigenvalues_and_eigenvectors.ipynb | ###Markdown
+ This notebook is part of lecture 21 *Eigenvalues and eigenvectors* in the OCW MIT course 18.06 by Prof Gilbert Strang [1]+ Created by me, Dr Juan H Klopper + Head of Acute Care Surgery + Groote Schuur Hospital + University Cape Town + Email me with your thoughts, comments, suggestions and corrections Linear Algebra OCW MIT18.06 IPython notebook [2] study notes by Dr Juan H Klopper is licensed under a Creative Commons Attribution-NonCommercial 4.0 International License.+ [1] OCW MIT 18.06+ [2] Fernando Pérez, Brian E. Granger, IPython: A System for Interactive Scientific Computing, Computing in Science and Engineering, vol. 9, no. 3, pp. 21-29, May/June 2007, doi:10.1109/MCSE.2007.53. URL: http://ipython.org
###Code
from IPython.core.display import HTML, Image
css_file = 'style.css'
HTML(open(css_file, 'r').read())
from sympy import init_printing, Matrix, symbols, eye
from warnings import filterwarnings
init_printing(use_latex = 'mathjax')
filterwarnings('ignore')
lamda = symbols('lamda') # Note that lambda is a reserved word in python, so we use lamda (without the b)
###Output
_____no_output_____
###Markdown
Eigenvalues and eigenvectors What are eigenvectors? * A Matrix is a mathematical object that acts on a (column) vector, resulting in a new vector, i.e. A**x**=**b*** An eigenvector is the resulting vector that is parallel to **x** (some multiple of **x**)$$ {A}\underline{x}=\lambda \underline{x} $$ * The eigenvectors with an eigenvalue of zero are the vectors in the nullspace* If A is singular (takes some non-zero vector into 0) then λ=0 What are the eigenvectors and eigenvalues for projection matrices? * A projection matrix P projects some vector (**b**) onto a subspace (in 3-space we are talking about a plane through the origin)* P**b** is not in the same direction as **b*** A vector **x** that is already in the subspace will result in P**x**=**x**, so λ=1* Another good **x** would be one perpendicular to the subspace, i.e. P**x**=0**x**, so λ=0 What are the eigenvectors and eigenvalues for permutation matrices? * A permutation matrix such as the one below changes the order of the elements in a (column) vector$$ \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix} $$* A good example of a vector that would remain in the same direction after multiplication by the permutation matrix above would be the following vector$$ \begin{bmatrix} 1 \\ 1 \end{bmatrix} $$* The eigenvalue would just be λ=1* The next (eigen)vector would also work$$ \begin{bmatrix} -1 \\ 1 \end{bmatrix} $$* It would have an eigenvalue of λ=-1 The trace and the determinant * The trace is the sum of the values down the main diagonal of a square matrix* Note how this is the same as the sum of the eigenvalues (look at the permutation matrix above and its eigenvalues)* The determinant of A is the product of the eigenvalues How to solve A**x**=λ**x** $$ A\underline { x } =\lambda \underline { x } \\ \left( A-\lambda I \right) \underline { x } =\underline { 0 } $$ * The only solution to this equation is for A-λI to be singular and therefore have a determinant of zero$$ \left|{A}-\lambda{I}\right|=0 $$ * This is called the characteristic (or eigenvalue) equation* There will be *n* λ's for an *n*×*n* matrix (some of which may be of equal value)
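As a quick check of the permutation-matrix bullets above, the same `sympy` tools used in this notebook confirm the eigenvalues 1 and -1 with eigenvectors (1, 1) and (-1, 1). This is a small illustrative snippet, not part of the original lecture code:
```python
from sympy import Matrix

P = Matrix([[0, 1], [1, 0]])  # the permutation matrix discussed above
P.eigenvects()  # eigenvalue -1 with eigenvector (-1, 1); eigenvalue 1 with eigenvector (1, 1)
```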
###Code
A = Matrix([[3, 1], [1, 3]])
I = eye(2)
A, I # Printing A and the 2-by-2 identity matrix to the screen
(A - lamda * I) # Printing A minus lambda times the identity matrix to the screen
###Output
_____no_output_____
###Markdown
* This will have the following determinant
###Code
(A - lamda * I).det()
###Output
_____no_output_____
###Markdown
* For this 2&215;2 matrix the absolute value of the -6 is the trace of A and the 8 is the determinant of A
###Code
((A - lamda * I).det()).factor()
###Output
_____no_output_____
###Markdown
* I now have two eigenvalues of 2 and 4 * In Python we could also use the *.eigenvals()* method
###Code
A.eigenvals() # There is one value of 2 and one value of 4
###Output
_____no_output_____
###Markdown
* The eigenvectors are calculated by substituting the two values of λ into the original equation$$ \left( {A}-\lambda{I} \right)\underline{x}=\underline{0} $$
###Code
A.eigenvects()
###Output
_____no_output_____
###Markdown
* The results above are interpreted as follows * The first eigenvalue has one eigenvector and the second eigenvalue also has a single eigenvector * Note the similarity between the eigenvectors of the two examples above* It is easy to see that adding a constant multiple of the identity matrix to another matrix (above we added 3I to the initial matrix) doesn't change the eigenvectors; it does add that constant to the eigenvalues though (we went from -1 and 1 to 2 and 4)$$ A\underline { x } =\lambda \underline { x } \\ \therefore \quad \left( A+cI \right) \underline { x } =\left( \lambda +c \right) \underline { x } $$ * If we add another matrix to A (not a constant multiple of I) or even multiply them, then the influence on the original eigenvalues and eigenvectors of A is NOT so predictable (as above) The eigenvalues and eigenvectors of a rotation matrix * Consider this rotation matrix that rotates a vector by 90° (it is orthogonal) * Think about it, though: what vector can come out parallel to itself after a 90° rotation?
###Code
Q = Matrix([[0, -1], [1, 0]])
Q
###Output
_____no_output_____
###Markdown
* From the trace and determinant above we know that we will have the following equation$$ {\lambda}^{2}-{0}{\lambda}+{1}={0} \\ {\lambda}^{2}=-{1} $$
###Code
Q.eigenvals()
Q.eigenvects()
###Output
_____no_output_____
###Markdown
* Note how the eigenvalues are complex conjugates* Symmetric matrices will only have real eigenvalues* An *anti*-symmetric matrix (where the transpose is the original matrix times the scalar -1, as our example above) will only have complex eigenvalues* Matrices in between can have a mix of these Eigenvalues and eigenvectors of an upper triangular matrix * Compute the eigenvalues and eigenvectors of the following matrix (note it is upper triangular)
###Code
A = Matrix([[3, 1], [0, 3]])
A
A.eigenvals()
###Output
_____no_output_____
###Markdown
* We have two eigenvalues, both equal to 3
###Code
A.eigenvects()
###Output
_____no_output_____
###Markdown
* This is a degenerate matrix; it does not have independent eigenvectors * Look at this upper triangular matrix
###Code
A = Matrix([[3, 1, 1], [0, 3, 4], [0, 0, 3]])
A
A.eigenvals()
A.eigenvects()
###Output
_____no_output_____
###Markdown
Example problems Example problem 1 * Find the eigenvalues and eigenvectors of the square of the following matrix as well as the inverse of the matrix minus the identity matrix$$ {A}=\begin{bmatrix} 1 & 2 & 3 \\ 0 & 1 & -2 \\ 0 & 1 & 4 \end{bmatrix} $$ Solution * Notice the following$$ A\underline { x } =\lambda \underline { x } \\ { A }^{ 2 }\underline { x } =A\left( A\underline { x } \right) =A\left( \lambda \underline { x } \right) =\lambda \left( A\underline { x } \right) ={ \lambda }^{ 2 }\underline { x } $$* Once we know the eigenvalues for A we then simply square them to get the eigenvalues of the matrix squared * Similarly for the inverse of the matrix we have the following (for a non-zero λ, which is fine as A must be invertible for this problem)$$ { A }^{ -1 }\underline { x } ={ A }^{ -1 }\frac { A\underline { x } }{ \lambda } ={ A }^{ -1 }A\frac { 1 }{ \lambda } \underline { x } =\frac { 1 }{ \lambda } \underline { x} $$
###Code
A = Matrix([[1, 2, 3], [0, 1, -2], [0, 1, 4]])
A
A.eigenvals()
A.eigenvects()
###Output
_____no_output_____
###Markdown
* From this it is clear that the eigenvalues of A² will be 1, 4, and 9 and those of A⁻¹ will be 1, a half and a third
###Code
(A ** 2).eigenvals()
(A.inv()).eigenvals()
###Output
_____no_output_____
###Markdown
* The eigenvectors will be as follows (exactly the same)
###Code
(A ** 2).eigenvects()
(A.inv()).eigenvects()
###Output
_____no_output_____ |
LR_as_Elo.ipynb | ###Markdown
Logistic Regression as Elo[Benjamin Morris](https://twitter.com/skepticalsports/status/1147225488273788929) set me on the right path. The previous experiments used a general purpose classifier that just looked at the rating difference between teams to predict a winner. You can use the logistic regression framework to solve for optimal Elo ratings. So let's do that here
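To make the scaling used in the code below explicit: the Elo expected score 1/(1 + 10^(-(R_home - R_away)/400)) is a logistic (sigmoid) function of the rating difference divided by 400/ln(10) ≈ 173.72, which is why the fitted logistic-regression coefficients get multiplied by 173 and re-centered at 1500. A minimal sketch of that equivalence (function names here are illustrative, not from this notebook):
```python
import numpy as np

def elo_win_prob(r_home, r_away):
    # classic Elo expected score for the home team
    return 1.0 / (1.0 + 10 ** (-(r_home - r_away) / 400.0))

def logistic_win_prob(r_home, r_away, scale=400.0 / np.log(10)):  # scale ~= 173.72
    # the same quantity written as a sigmoid of the rating difference,
    # which is what logistic regression on +1/-1 team indicators estimates
    return 1.0 / (1.0 + np.exp(-(r_home - r_away) / scale))

print(elo_win_prob(1600, 1500))       # ~0.640
print(logistic_win_prob(1600, 1500))  # identical by construction
```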
###Code
from collections import defaultdict, deque
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
plt.style.use('fivethirtyeight')
plt.style.use('seaborn-white')
# load the data and drop the away games
df = pd.read_csv('nbaallelo.csv',index_col='game_id')
df = df[df['game_location']!='A']
# teams moved around a lot
from_new_to_old_team = {
'CHO':'CHA','NOP':'NOH','BRK':'NJN','OKC':'SEA','NOK':'NOH','NOH':'CHH','MEM':'VAN','WAS':'WSB','SAC':'KCK','LAC':'SDC','UTA':'NOJ','SDC':'BUF','NJN':'NYN','GSW':'SFW','DLC':'TEX','HOU':'SDR','CHA':'CHH','SAA':'TEX','SAS':'SAA','DEN':'DNA','DNA':'DNR','WSB':'CAP','CAP':'BAL','BAL':'CHZ','CHZ':'CHP','SDS':'SDA','FLO':'MMF','MMF':'MNM','SFW':'PHW','LAL':'MNL','LAS':'ANA','UTS':'LAS','CAR':'HSM','SSL':'CAR','DET':'FTW','MLH':'TRI','STL':'MLH','ATL':'STL','PHI':'SYR','CIN':'ROC','PTC':'PTP','MNP':'PTP','PTP':'MNP','MMP':'NOB','MMT':'MMP','MMS':'MMT','VIR':'WSA','WSA':'OAK'
}
# the basic 538 Elo
def get_elo_ratings(df_local,start_year = 1947):
K=20
HFA=100
SW=0.75
MOVM=3
ELOW=400
INTERCEPT=7.5
SLOPE=0.006
POW=0.8
NEW_TEAM=1300
year_id = start_year
ELOW=400
df_local = df_local[df_local.year_id >= start_year]
team_year_elo = defaultdict(lambda : defaultdict(list))
elo = {}
for i,row in enumerate(df_local.itertuples()):
# skip duplicates
if row[3] != 0:
continue
# update the year
if row[4] != year_id:
for k in elo:
elo[k] = SW*elo[k] + (1-SW)*1505
year_id +=1
# update teams that renamed themselves
teams1 = set(list(df[df.year_id == year_id-1].team_id.unique()) + list(df[df.year_id == year_id-1].opp_id.unique()))
teams2 = set(list(df[df.year_id == year_id].team_id.unique()) + list(df[df.year_id == year_id].opp_id.unique()))
for t in [_ for _ in teams2 if not _ in teams1]:
if t in from_new_to_old_team and not t in elo and from_new_to_old_team[t] in elo:
elo[t] = elo[from_new_to_old_team[t]]
team_year_elo[t].update(team_year_elo[from_new_to_old_team[t]])
# get the stats
t1,t2=row[8],row[14]
elo_i,elo_n = row[11],row[12]
opp_elo_i,opp_elo_n = row[17],row[18]
p1,p2 = row[10],row[16]
# get the Elo
elo1 = elo.get(t1,NEW_TEAM) + HFA
if row[-4] == 'N':
elo1 -= HFA
elo2 = elo.get(t2,NEW_TEAM)
# get the win %
winp = 1.0 / (10 ** (-(elo1-elo2)/ELOW) + 1)
# compute margin of victory correction
mov = abs(p1-p2)
elo_diff_w = (elo2 - elo1)
mofv_m1 = ((mov+MOVM) ** POW)/(INTERCEPT + SLOPE*(-elo_diff_w))
mofv_m2 = ((mov+MOVM) ** POW)/(INTERCEPT + SLOPE*(elo_diff_w))
# find the elo update amount
if p1 > p2:
hm,am = 1,-1
add_t = K*(1-winp)*mofv_m1
else:
hm,am = -1,1
add_t = K*(winp)*mofv_m2
# apply it
if row[-4] == 'N':
elo[t1] = elo1 + hm*add_t
else:
elo[t1] = elo1 + hm*add_t - HFA
elo[t2] = elo2 + am*add_t
# save it
team_year_elo[t1][year_id].append(elo[t1] - hm*add_t)
team_year_elo[t2][year_id].append(elo[t2] - am*add_t)
return team_year_elo
team_year_elo = get_elo_ratings(df)
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
if False:
import autograd.numpy as np
import autograd
def generate_ratings2(dset,teams,weight_recent = 0,HFA=75/173):
N = len(teams)
X = []
Y = []
for t1,games in dset.items():
for t2,tW in games:
v = np.zeros(N)
v[teams[t1]] = 1
v[teams[t2]] = -1
#v[-1] = 70/173
Y.append(tW)
X.append(v)
try:
w = np.ones_like(Y)
if weight_recent != 0:
w = [np.exp(-(i*i/weight_recent)) for i in range(len(Y))]
w = np.array(w)[::-1]
w /= w.mean()
X = np.array(X).astype(np.float)
Y = np.array(Y).astype(np.float)
W = 1e-2*np.random.normal(size=(X.shape[1]))
def loss_f(xt):
p = 1/(1+np.exp(- (X @ xt + HFA)))
loss = np.mean(w*(-Y * np.log(p) - (1-Y) *np.log(1-p+1e-12)))
return loss
GRAD = autograd.grad(loss_f)
alpha = 3e-1
v = np.zeros_like(W)
for i in range(1000):
loss = loss_f(W)
v = 0.9*v - GRAD(W)
W += (alpha) * v
#if (i%1000== 0):
# break
if (np.linalg.norm(v) < 1e-2):
print('early')
break
if (i%50) == 0:
print(loss,)
if i > 0 and (i%250) ==0:
alpha /= 4
return (W * 173 + 1500,HFA)
except:
return ([1500 for i in range(N)],0)
import scipy.optimize as opt
prev_w = None
def log_logit(xs):
xs = np.array(xs)
return np.choose(xs > 0,[-np.log(1. + np.exp(-xs)),xs - np.log(1. + np.exp(xs))])
def generate_ratings2(dset,teams,weight_recent = 0,HFA=75/173):
global prev_w
N = len(teams)
X = []
Y = []
for t1,games in dset.items():
for t2,tW in games:
v = np.zeros(N)
v[teams[t1]] = 1
v[teams[t2]] = -1
#v[-1] = 70/173
Y.append(tW)
X.append(v)
try:
w = np.ones_like(Y)
if weight_recent != 0:
w = [np.exp(-(i*i/weight_recent)) for i in range(len(Y))]
w = np.array(w)[::-1]
w /= w.mean()
X = np.array(X).astype(np.float)
Y = np.array(Y).astype(np.float)
if prev_w is None or prev_w.shape[0] != X.shape[1]:
prev_w = 1e-2*np.random.normal(size=(X.shape[1]))
W = prev_w
def loss_f(xt):
V = X @ xt + HFA
loss = np.mean(w*(-Y * log_logit(V) - (1-Y) *log_logit(-V)))
return loss
res = opt.minimize(loss_f,W)
W = res.x
prev_w = W
return (W * 173 + 1500,HFA)
except KeyboardInterrupt:
raise
except:
return ([1500 for i in range(N)],0)
def generate_ratings(dset,teams,weight_recent = 0):
N = len(teams)
X = []
Y = []
for t1,games in dset.items():
for t2,tW in games:
v = np.zeros(N)
v[teams[t1]] = 1
v[teams[t2]] = -1
#v[-1] = 70/173
Y.append(tW)
X.append(v)
try:
clf = linear_model.LogisticRegression()
if weight_recent != 0:
w = [np.exp(-(i*i/weight_recent)) for i in range(len(Y))]
w = np.array(w)[::-1]
w /= w.mean()
clf.fit(X,Y,w)
else:
clf.fit(X,Y)
return (clf.coef_[0] * 173 + 1500,clf.intercept_[0])
except:
return ([1500 for i in range(N)],0)
hfa_vals = []
def get_lr_ratings(df_local,start_year = 2011,games_std_dev=0):
correct_r = 0
total_r = 0
MAX_HOME_GAMES_PER_TEAM = 30 # 30
year_id = start_year-1
df_local = df_local[df_local.year_id >= start_year]
team_year_elo = defaultdict(lambda : defaultdict(list))
dataset = defaultdict(lambda : deque(maxlen=MAX_HOME_GAMES_PER_TEAM))
for i,row in enumerate(df_local.itertuples()):
# skip duplicates
if row[3] != 0:
continue
# update the year
if row[4] != year_id:
year_id +=1
teams1 = set(list(df[df.year_id == year_id-1].team_id.unique()) + list(df[df.year_id == year_id-1].opp_id.unique()))
teams2 = set(list(df[df.year_id == year_id].team_id.unique()) + list(df[df.year_id == year_id].opp_id.unique()))
old_to_new = {}
for t in [_ for _ in teams2 if not _ in teams1]:
if t in from_new_to_old_team:
old_team = from_new_to_old_team[t]
team_year_elo[t].update(team_year_elo[old_team])
dataset[t] = dataset[old_team]
old_to_new[old_team] = t
# remove the old team
for t in [t for t in dataset if not t in teams2]:
del dataset[t]
# correct the entries
for team in dataset:
replaced_teams = [(old_to_new.get(g[0],g[0]),g[1]) for g in dataset[team]]
dataset[team] = deque(replaced_teams,maxlen=MAX_HOME_GAMES_PER_TEAM)
teams = {t: i for i,t in enumerate(sorted(teams2))}
t1,t2=row[8],row[14]
p1,p2 = row[10],row[16]
# generate a rating
rate,hfa = generate_ratings(dataset,teams,games_std_dev)
hfa_vals.append(hfa)
team_year_elo[t1][year_id].append(rate[teams[t1]])
team_year_elo[t2][year_id].append(rate[teams[t2]])
if year_id >= 2013:
correct_r += int(((rate[teams[t1]] - rate[teams[t2]] +hfa) > 0) == (p1 > p2))
total_r += 1
# just store the other team and the result
dataset[t1].append((t2,p1>p2))
#print('.',end='')
print(correct_r/total_r)
return team_year_elo,dataset
team_year_elo_reg,dataset = get_lr_ratings(df,2010,0)
#_ = plt.plot([np.exp(-(i*i/(200**2))) for i in range(100)])
#plt.figure()
_ = plt.plot(173*np.array(hfa_vals)[100*10:])
plt.xlabel('Game Number')
plt.ylabel('Elo Home Court Advantage')
plt.title('Regressed HCA (Last 30 Games weighted)')
_ = plt.hist(173*np.array(hfa_vals[41*15:]),50,density=True)
plt.title('{:.3f}'.format(173*np.array(hfa_vals[41*15:]).mean()))
for ds,hfa in zip([team_year_elo_reg,team_year_elo],[75,100]):
correct = 0
total = 0
for yr in range(2013,2016):
ds = {_:ds[_] for _ in ds if yr in ds[_]}
counter = {k:0 for k in ds}
for i,row in enumerate(df[df.year_id == yr].itertuples()):
# skip duplicates
if row[3] != 0:
continue
t1,t2=row[8],row[14]
p1,p2 = row[10],row[16]
elo1 = ds[t1][yr][counter[t1]]
elo2 = ds[t2][yr][counter[t2]]
correct += int(((elo1-elo2 + hfa) > 0) == (p1 > p2))
total += 1
counter[t1] += 1
counter[t2] += 1
print(correct/total)
from scipy.ndimage.filters import gaussian_filter1d
teams = ['GSW','HOU','CLE','MIL','SAS','LAL']
plt.figure(figsize=(10,5))
plt.subplot(2,1,1)
plt.vlines(82,0,2500,linestyles='dashed',color=(0.1,0.1,0.1),lw=3,alpha=0.8)
plt.vlines(82*2,0,2500,linestyles='dashed',color=(0.1,0.1,0.1),lw=3,alpha=0.8)
plt.hlines(1500,0,82*3,linestyles='dashed',color=(0.1,0.1,0.1),lw=3,alpha=0.8)
for t in teams:
plt.plot(np.concatenate([team_year_elo[t][y][:82] for y in range(2013,2016)]),label=t)
plt.xticks([42,82+42,42+82*2],['12-13','13-14','14-15'])
plt.xlim(0,82*3)
plt.ylabel('538 Elo')
plt.xlim(0,82*3)
plt.ylim(1250,1800)
plt.subplot(2,1,2)
plt.vlines(82,0,2500,linestyles='dashed',color=(0.1,0.1,0.1),lw=3,alpha=0.8)
plt.vlines(82*2,0,2500,linestyles='dashed',color=(0.1,0.1,0.1),lw=3,alpha=0.8)
plt.hlines(1500,0,82*3,linestyles='dashed',color=(0.1,0.1,0.1),lw=3,alpha=0.8)
for t in teams:
plt.plot(np.concatenate([team_year_elo_reg[t][y][:82] for y in range(2013,2016)]),label=t)
plt.xticks([42,82+42,42+82*2],['12-13','13-14','14-15'])
plt.xlim(0,82*3)
plt.ylim(1200,1800)
plt.ylabel('Regressed Elo (30 G)')
plt.tight_layout()
plt.legend(loc=8,ncol=3,borderaxespad=0,bbox_to_anchor=(0,1.0,1,0),frameon=True,fontsize=10)
#plt.savefig('elo4.png',edgecolor='w',facecolor='w')
teams = {k:i for i,k in enumerate(sorted([_ for _ in team_year_elo if 2015 in team_year_elo[_]]))}
rate,hfa = generate_ratings(dataset,teams,0)
coeffs = rate
col_names = sorted(dataset.keys())
v = np.argsort(abs(coeffs))[::-1]
coeffs2 = [(coeffs[i2],col_names[i2]) for i2 in v]
print(hfa*173.0)
print('| Variable | Coeff |')
print('|----------|-------|')
for v,n in sorted(coeffs2,reverse=True):
print('|{:25s}|{:.2f}|'.format(n, v))
df.iloc[-4:]
if False:
import autograd.numpy as np
import autograd
def generate_ratings2(dset,teams,weight_recent = 0,HFA=75/173):
N = len(teams)
X = []
Y = []
for t1,games in dset.items():
for t2,tW in games:
v = np.zeros(N)
v[teams[t1]] = 1
v[teams[t2]] = -1
#v[-1] = 70/173
Y.append(tW)
X.append(v)
try:
w = np.ones_like(Y)
if weight_recent != 0:
w = [np.exp(-(i*i/weight_recent)) for i in range(len(Y))]
w = np.array(w)[::-1]
w /= w.mean()
X = np.array(X).astype(np.float)
Y = np.array(Y).astype(np.float)
W = 1e-2*np.random.normal(size=(X.shape[1]))
def loss_f(xt):
p = 1/(1+np.exp(- (X @ xt + HFA)))
loss = np.mean(w*(-Y * np.log(p) - (1-Y) *np.log(1-p+1e-12)))
return loss
GRAD = autograd.grad(loss_f)
alpha = 3e-1
v = np.zeros_like(W)
for i in range(1000):
loss = loss_f(W)
v = 0.9*v - GRAD(W)
W += (alpha) * v
#if (i%1000== 0):
# break
if (np.linalg.norm(v) < 1e-2):
print('early')
break
if (i%50) == 0:
print(loss,)
return (W * 173 + 1500,HFA)
except:
return ([1500 for i in range(N)],0)
import scipy.optimize as opt
def generate_ratings2(dset,teams,weight_recent = 0,HFA=75/173):
N = len(teams)
X = []
Y = []
for t1,games in dset.items():
for t2,tW in games:
v = np.zeros(N)
v[teams[t1]] = 1
v[teams[t2]] = -1
#v[-1] = 70/173
Y.append(tW)
X.append(v)
try:
w = np.ones_like(Y)
if weight_recent != 0:
w = [np.exp(-(i*i/weight_recent)) for i in range(len(Y))]
w = np.array(w)[::-1]
w /= w.mean()
X = np.array(X).astype(np.float)
Y = np.array(Y).astype(np.float)
W = 1e-2*np.random.normal(size=(X.shape[1]))
def loss_f(xt):
V = X @ xt + HFA
loss = np.mean(w*(-Y * log_logit(V) - (1-Y) *log_logit(-V)))
return loss
res = opt.minimize(loss_f,W)
W = res.x
return (W * 173 + 1500,HFA)
except:
return ([1500 for i in range(N)],0)
teams = {k:i for i,k in enumerate(sorted([_ for _ in team_year_elo if 2015 in team_year_elo[_]]))}
rate,hfa = generate_ratings2(dataset,teams,0)
coeffs = rate
col_names = sorted(dataset.keys())
v = np.argsort(abs(coeffs))[::-1]
coeffs2 = [(coeffs[i2],col_names[i2]) for i2 in v]
print(hfa*173.0)
print('| Variable | Coeff |')
print('|----------|-------|')
for v,n in sorted(coeffs2,reverse=True):
print('|{:25s}|{:.2f}|'.format(n, v))
###Output
75.0
| Variable | Coeff |
|----------|-------|
|GSW |1780.67|
|CLE |1720.58|
|SAS |1675.20|
|LAC |1667.05|
|HOU |1664.00|
|ATL |1645.98|
|MEM |1644.10|
|OKC |1591.79|
|DAL |1569.49|
|POR |1559.28|
|CHI |1558.85|
|UTA |1542.78|
|NOP |1538.96|
|WAS |1521.82|
|BOS |1505.14|
|TOR |1500.75|
|IND |1491.56|
|MIL |1482.98|
|PHO |1475.21|
|BRK |1474.54|
|DET |1468.82|
|MIA |1452.55|
|CHO |1445.49|
|DEN |1377.89|
|SAC |1334.34|
|ORL |1297.18|
|PHI |1281.79|
|LAL |1275.94|
|NYK |1228.70|
|MIN |1224.77|
|
dqo/datasets/stats.ipynb | ###Markdown
Tree Data
###Code
def tree_density(n, h):
return (n - h)/(2 ** (h + 1) - h - 1)
def calc_query_stat(_df):
query_stats_df = pd.DataFrame()
for index, row in tqdm(_df.iterrows(), total=_df.shape[0]):
query = row['query'].strip()
try:
rel_tree = SQLParser.to_relational_tree(query)
copied = row.copy()
copied['tree'] = rel_tree
copied['nodes'] = len(rel_tree)
copied['relations'] = len(rel_tree.relations)
copied['projections'] = len(rel_tree.get_projections())
copied['selections'] = len(rel_tree.get_selections(include_joins=False))
copied['joins'] = len(rel_tree.get_joins())
copied['depth'] = rel_tree.depth()
copied['density'] = tree_density(copied['nodes'], copied['depth'])
query_stats_df = query_stats_df.append(copied)
except Exception as e:
print(e)
break
return query_stats_df
imdb_query_stats = calc_query_stat(df_imdb)
tpch_query_stats = calc_query_stat(df_tpch)
tpcds_query_stats = calc_query_stat(df_tpcds)
tpcd_query_stats = calc_query_stat(df_tpcd)
###Output
_____no_output_____
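###Markdown
A quick sanity check of `tree_density` as defined above: a perfect binary tree of height h has 2^(h+1) - 1 nodes, so its density is exactly 1, while a degenerate chain with the same number of nodes scores close to 0 (illustrative check only, not part of the original analysis):
```python
# perfect binary tree of height 2 has 7 nodes -> density 1.0
print(tree_density(7, 2))  # 1.0
# a chain of 7 nodes has height 6 -> density 1/121 ~= 0.008
print(tree_density(7, 6))
```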
###Markdown
Relational
###Code
imdb_schema = ds_imdb.schema()
for i,r in tqdm(imdb_query_stats.iterrows(), total=len(imdb_query_stats)):
numbers = False
strings = False
for n in r['tree'].nodes():
t = str(type(n).__name__)
if t in ['JoinNode','SelectionNode']:
for operand in n.operands:
if type(operand).__name__ == 'RelationColumn':
col = imdb_schema[operand.relation.name][operand.column]
if col.data_type.value in ['number','float']:
numbers = True
elif col.data_type.value == 'string':
strings = True
imdb_query_stats.loc[i, 'only_strings'] = strings and not numbers
imdb_query_stats.loc[i, 'only_numbers'] = numbers and not strings
imdb_query_stats.loc[i, 'mixed'] = numbers and strings
only_strings = imdb_query_stats.query('only_strings == True')
only_numbers = imdb_query_stats.query('only_numbers == True')
only_strings.runtime.apply(np.log2).apply(np.round).apply(int).hist()
ds = QueriesDataset('imdb:only_strings')
ds.df = only_strings
ds.save(schema=imdb_schema)
only_numbers.runtime.apply(np.log2).apply(np.round).apply(int).hist()
ds = QueriesDataset('imdb:only_numbers')
ds.df = only_numbers
ds.save(schema=imdb_schema)
###Output
_____no_output_____
###Markdown
TPCD
###Code
tpcd_schema = ds_tpcd.schema()
for i,r in tqdm(tpcd_query_stats.iterrows(), total=len(tpcd_query_stats)):
numbers = False
strings = False
for n in r['tree'].nodes():
t = str(type(n).__name__)
if t in ['JoinNode','SelectionNode']:
for operand in n.operands:
if type(operand).__name__ == 'RelationColumn':
col = tpcd_schema[operand.relation.name][operand.column]
if col.data_type.value in ['number','float']:
numbers = True
elif col.data_type.value == 'string':
strings = True
tpcd_query_stats.loc[i, 'only_strings'] = strings and not numbers
tpcd_query_stats.loc[i, 'only_numbers'] = numbers and not strings
tpcd_query_stats.loc[i, 'mixed'] = numbers and strings
tpcd_only_strings = tpcd_query_stats.query('only_strings == True')
tpcd_only_numbers = tpcd_query_stats.query('only_numbers == True')
tpcd_only_strings.runtime.apply(np.log2).apply(np.round).apply(int).hist()
tpcd_only_numbers.runtime.apply(np.log2).apply(np.round).apply(int).hist()
ds = QueriesDataset('tpcd:only_strings')
ds.df = tpcd_only_strings
ds.save(schema=tpcd_schema)
ds = QueriesDataset('tpcd:only_numbers')
ds.df = tpcd_only_numbers
ds.save(schema=tpcd_schema)
###Output
_____no_output_____
###Markdown
DEPTH & NODES
###Code
imdb_query_stats.depth.describe(), tpch_query_stats.depth.describe(), tpcds_query_stats.depth.describe(), tpcd_query_stats.depth.describe()
len(tpcd_query_stats.query('nodes < 125'))
# import os
# ds_tpcd.df.to_csv(os.path.join(ds_tpcd.input_path, 'part_00.csv'), header=False, index=False, columns=['query', 'runtime'])
imdb_query_stats.hist()
tpch_query_stats.hist()
tpcds_query_stats.hist()
tpcd_query_stats.hist()
###Output
_____no_output_____
###Markdown
Density
###Code
fig, axes = plt.subplots(1, 4)
bins = [-250,-200,-150,-100, -50, 0]
fig.suptitle("density")
axes[0].title.set_text('imdb')
axes[1].title.set_text('tpch')
axes[2].title.set_text('tpcds')
axes[3].title.set_text('tpcd')
imdb_query_stats.density.apply(np.log2).hist(bins=bins,ax=axes[0])
tpch_query_stats.density.apply(np.log2).hist(bins=bins, ax=axes[1])
tpcds_query_stats.density.apply(np.log2).hist(bins=bins, ax=axes[2])
tpcd_query_stats.density.apply(np.log2).hist(bins=bins, ax=axes[3])
###Output
_____no_output_____
###Markdown
Depth
###Code
fig, axes = plt.subplots(1, 4)
bins = [0, 50, 100, 150, 200]
fig.suptitle("depth")
axes[0].title.set_text('imdb')
axes[1].title.set_text('tpch')
axes[2].title.set_text('tpcds')
axes[3].title.set_text('tpcd')
imdb_query_stats.depth.hist(bins=bins,ax=axes[0])
tpch_query_stats.depth.hist(bins=bins, ax=axes[1])
tpcds_query_stats.depth.hist(bins=bins, ax=axes[2])
tpcd_query_stats.depth.hist(bins=bins, ax=axes[3])
###Output
_____no_output_____
###Markdown
Nodes
###Code
fig, axes = plt.subplots(1, 4)
bins = [0, 20, 40, 60, 80, 100, 120]
fig.suptitle("nodes")
axes[0].title.set_text('imdb')
axes[1].title.set_text('tpch')
axes[2].title.set_text('tpcds')
axes[3].title.set_text('tpcd')
imdb_query_stats.nodes.hist(bins=bins,ax=axes[0])
tpch_query_stats.nodes.hist(bins=bins, ax=axes[1])
tpcds_query_stats.nodes.hist(bins=bins, ax=axes[2])
tpcd_query_stats.nodes.hist(bins=bins, ax=axes[3])
###Output
_____no_output_____
###Markdown
--- Dataset Stats
###Code
imdb_schema = ds_imdb.schema()
tpch_schema = ds_tpch.schema()
tpcds_schema = ds_tpcds.schema()
tpcd_schema = ds_tpcd.schema()
def describe_schema(s):
stats = {}
stats['tables'] = len(s.tables)
stats['columns'] = len(s.columns)
dtypes = []
rows = []
distincts = []
nulls = []
cols_per_table = []
table_size = []
for t in s.tables:
cols_per_table.append(len(t.columns))
for i, col in enumerate(t.columns):
if i == 0:
rows.append(col.stats.total)
table_size.append(col.stats.total * len(t.columns))
distincts.append(col.stats.distinct / col.stats.total)
nulls.append(col.stats.nulls / col.stats.total)
dtypes.append(str(col.data_type))
c = Counter(dtypes)
stats['dtypes'] = [(v, f'{c[v] / len(dtypes) * 100.0:.2f}%', c[v]) for v in c]
stats['rows'] = np.histogram(np.log10(np.array(rows)), bins=5)
stats['distincts'] = scipy.stats.describe(np.array(distincts))
stats['nulls'] = scipy.stats.describe(np.array(nulls))
stats['cols_per_table'] = np.histogram((np.array(cols_per_table)), bins=5)
stats['table_size'] = np.histogram(np.log10(np.array(table_size)), bins=5)
return stats
from pprint import pprint
print('imdb: \n')
pprint(describe_schema(imdb_schema))
print('')
print('tpch: \n')
pprint(describe_schema(tpch_schema))
print('')
print('tpcds: \n')
pprint(describe_schema(tpcds_schema))
###Output
imdb:
{'cols_per_table': (array([8, 6, 3, 2, 2]),
array([ 2., 4., 6., 8., 10., 12.])),
'columns': 108,
'distincts': DescribeResult(nobs=108, minmax=(0.0, 1.0), mean=0.4143007662470856, variance=0.2074871588778503, skewness=0.3766570715988044, kurtosis=-1.7483771943052444),
'dtypes': [('DataType.NUMBER', '54.63%', 59),
('DataType.STRING', '45.37%', 49)],
'nulls': DescribeResult(nobs=108, minmax=(0.0, 1.0), mean=0.19641912711343756, variance=0.12737557397561525, skewness=1.5327831580385185, kurtosis=0.6401177469580346),
'rows': (array([5, 1, 1, 6, 8]),
array([0.60205999, 1.99349604, 3.38493209, 4.77636814, 6.16780419,
7.55924024])),
'table_size': (array([6, 0, 1, 6, 8]),
array([0.90308999, 2.40333965, 3.90358931, 5.40383897, 6.90408862,
8.40433828])),
'tables': 21}
tpch:
{'cols_per_table': (array([3, 2, 2, 0, 1]),
array([ 3. , 5.6, 8.2, 10.8, 13.4, 16. ])),
'columns': 61,
'distincts': DescribeResult(nobs=61, minmax=(4.5211441478649234e-07, 1.0), mean=0.42496627631600975, variance=0.22097731565310086, skewness=0.35011577808030864, kurtosis=-1.8034580748060471),
'dtypes': [('DataType.NUMBER', '32.79%', 20),
('DataType.STRING', '47.54%', 29),
('DataType.FLOAT', '13.11%', 8),
('DataType.TIME', '6.56%', 4)],
'nulls': DescribeResult(nobs=61, minmax=(0.0, 0.0), mean=0.0, variance=0.0, skewness=0.0, kurtosis=-3.0),
'rows': (array([2, 0, 1, 2, 3]),
array([0.69897 , 1.88833233, 3.07769466, 4.26705699, 5.45641931,
6.64578164])),
'table_size': (array([2, 0, 1, 2, 3]),
array([1.17609126, 2.51085333, 3.84561541, 5.18037748, 6.51513955,
7.84990162])),
'tables': 8}
tpcds:
{'cols_per_table': (array([7, 4, 3, 5, 5]),
array([ 3. , 9.2, 15.4, 21.6, 27.8, 34. ])),
'columns': 425,
'distincts': DescribeResult(nobs=425, minmax=(0.0, 1.0), mean=0.29583259551013413, variance=0.12007663747868011, skewness=0.9152223639198206, kurtosis=-0.6155131253247768),
'dtypes': [('DataType.NUMBER', '44.24%', 188),
('DataType.STRING', '34.35%', 146),
('DataType.FLOAT', '18.82%', 80),
('DataType.TIME', '2.59%', 11)],
'nulls': DescribeResult(nobs=425, minmax=(0.0, 1.0), mean=0.02963484734449726, variance=0.011003336341396682, skewness=6.46622982559804, kurtosis=46.51403629181248),
'rows': (array([8, 1, 3, 7, 5]),
array([0.69897 , 1.97314661, 3.24732321, 4.52149981, 5.79567642,
7.06985302])),
'table_size': (array([8, 1, 2, 7, 6]),
array([1.77815125, 2.98675725, 4.19536325, 5.40396924, 6.61257524,
7.82118124])),
'tables': 24}
###Markdown
Relational Data
###Code
def describe_relational(s):
stats = {}
stats['tables'] = len(s.tables)
stats['columns'] = len(s.columns)
dtypes = []
rows = []
distincts = []
nulls = []
cols_per_table = []
table_size = []
for t in s.tables:
cols_per_table.append(len(t.columns))
for i, col in enumerate(t.columns):
if i == 0:
rows.append(col.stats.total)
table_size.append(col.stats.total * len(t.columns))
distincts.append(col.stats.distinct / col.stats.total)
nulls.append(col.stats.nulls / col.stats.total)
dtypes.append(str(col.data_type))
c = Counter(dtypes)
stats['dtypes'] = [(v, f'{c[v] / len(dtypes) * 100.0:.2f}%', c[v]) for v in c]
stats['rows'] = np.histogram(np.log10(np.array(rows)), bins=5)
stats['distincts'] = scipy.stats.describe(np.array(distincts))
stats['nulls'] = scipy.stats.describe(np.array(nulls))
stats['cols_per_table'] = np.histogram((np.array(cols_per_table)), bins=5)
stats['table_size'] = np.histogram(np.log10(np.array(table_size)), bins=5)
return stats
from pprint import pprint
print('imdb: \n')
pprint(describe_schema(imdb_df))
print('')
print('tpch: \n')
pprint(describe_schema(tpch_df))
print('')
print('tpcds: \n')
pprint(describe_schema(tpcds_df))
###Output
_____no_output_____ |
hackerRank/.ipynb_checkpoints/dataStructures easy-checkpoint.ipynb | ###Markdown
Arrays reverse array
###Code
array=[1,2,3,4]
array
array[::-1]
###Output
_____no_output_____
###Markdown
2d array-DS
###Code
array=[[1,1,1,0,0,0],[0,1,0,0,0,0],[1,1,1,0,0,0],[0,0,2,4,4,0],[0,0,0,2,0,0],[0,0,1,2,4,0]]
array
###Output
_____no_output_____
###Markdown
first hourglass
###Code
array[0][:3]
array[1][1]
array[2][:3]
###Output
_____no_output_____
###Markdown
second hourglass
###Code
array[0][1:4]
array[1][2]
array[2][1:4]
print(max([sum(array[i-1][j-1:j+2] + [array[i][j]] + array[i+1][j-1:j+2]) for j in range(1, 5) for i in range(1, 5)]))
temp=[]
for i in range(1,5):
for j in range(1,5):
print(array[i-1][j-1:j+2])
print(array[i][j])
print(array[i+1][j-1:j+2])
temp=[]
for i in range(1,5):
for j in range(1,5):
#sum(array[i-1][j-1:j+2]) + sum(array[i][j]) + sum(array[i+1][j-1:j+2])
top=sum(array[i-1][j-1:j+2])
middle=array[i][j]
bottom=sum(array[i+1][j-1:j+2])
temp.append(top+middle+bottom)
max(temp)
###Output
_____no_output_____
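###Markdown
The same hourglass computation, collected into one commented function for clarity; this is a sketch of the approach above (it assumes the 6x6 `array` defined earlier), not HackerRank's reference solution:
```python
def hourglass_sum(grid):
    # largest sum over the 16 hourglass positions in a 6x6 grid
    best = None
    for i in range(1, 5):          # i, j index the hourglass centre
        for j in range(1, 5):
            total = (sum(grid[i - 1][j - 1:j + 2])      # top row
                     + grid[i][j]                       # middle element
                     + sum(grid[i + 1][j - 1:j + 2]))   # bottom row
            best = total if best is None else max(best, total)
    return best

print(hourglass_sum(array))  # 19 for the sample grid above
```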
###Markdown
rotation left array
###Code
x=list(range(1,6))
x
x_r=x[4:]+x[:4]
x_r
###Output
_____no_output_____
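###Markdown
Generalised to any rotation amount d (a small illustrative helper, not from the original notebook):
```python
def rotate_left(a, d):
    # rotate list a to the left by d positions using slicing
    d %= len(a)
    return a[d:] + a[:d]

print(rotate_left([1, 2, 3, 4, 5], 4))  # [5, 1, 2, 3, 4]
```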
###Markdown
linked lists print linked list
###Code
class LinkedList(object):
def __init__(self,value,next=None):
self.value=value
self.next=next
nodo1=LinkedList(13)
nodo2= LinkedList(16,nodo1)
head=nodo2
def printLinkedList(head):
current=head
while current:
print(current.value)
current=current.next
printLinkedList(head)
def append_linked_list(head,data):
current=head
if head:
while current.next:
current=current.next
current.next=LinkedList(data)
else:
head=LinkedList(data)
return head
append_linked_list(head,15)
printLinkedList(head)
###Output
16
13
15
###Markdown
insert element at the beginning of a linked list
###Code
def insert_begin_linked_list(head,data):
nodo=LinkedList(data)
if head:
nodo.next=head
head= nodo
else:
head=nodo
return head
head=insert_begin_linked_list(head,2)
printLinkedList(head)
###Output
2
16
13
15
###Markdown
insert element at any position
###Code
def insertNodeAtPosition(head, data, position):
counter=1
nodo=LinkedList(data)
current=head
if position>1:
while current and counter<position:
if counter==position-1:
nodo.next=current.next
current.next=nodo
current=current.next
counter+=1
elif position==1:
nodo.next=head
head=nodo
return head
head=insertNodeAtPosition(head,78,3)
printLinkedList(head)
head=insertNodeAtPosition(head,78,1)
printLinkedList(head)
head2=head
printLinkedList(head2)
###Output
78
2
16
78
13
15
###Markdown
delete node
###Code
def deleteNode(head,position):
current=head
prev_node=None
for i in range(position+1):
if prev_node==None:
prev_node=head
if i==position:
prev_node.next=current.next
current=current.next
else:
prev_node=current
current=current.next
return head
def deleteNode2(head,position):
if position==0:
head=head.next
else:
current=head
prev_node=head
for i in range(position+1):
if i==position:
prev_node.next=current.next
current=current.next
else:
prev_node=current
current=current.next
return head
printLinkedList(head)
deleteNode2(head,1)
printLinkedList(head)
resultado=deleteNode2(head,0)
printLinkedList(resultado)
head=head.next
printLinkedList(head)
###Output
16
78
13
15
###Markdown
reverse a linked list
###Code
n1=LinkedList(5,None)
n2=LinkedList(4,n1)
n3=LinkedList(3,n2)
n4=LinkedList(2,n3)
n5=LinkedList(1,n4)
head=n5
printLinkedList(head)
def reverseLinkedList(head):
temp=None
current=head
while current:
temp=LinkedList(current.value,temp)
current=current.next
return temp
reverse=reverseLinkedList(head)
printLinkedList(reverse)
# hacker rank solution
def reverse_(head):
current = head
stack =[]
while current:
stack.append(current.data)
current = current.next
s = stack[::-1]
nlist = SinglyLinkedList()
for el in s:
nlist.insert_node(el)
return nlist.head
printLinkedList(head)
###Output
1
2
3
4
5
###Markdown
print reverse
###Code
def print_reverse_ll(head):
current=head
stack=[]
while current:
stack.append(current.value)
current=current.next
s=stack[::-1]
for i in s:
print(i)
print_reverse_ll(head)
## hacker rank solution
def reversedPrint(head):
if head is None:
return None
else:
stack = []
while head:
stack.append(head.value)
head = head.next
while stack:
print(stack.pop())
reversedPrint(head)
###Output
5
4
3
2
1
###Markdown
get the k-th node from the last element
###Code
## hacker rank solution
def getNode(head, positionFromTail):
tracked = head
while positionFromTail > 0:
head = head.next
positionFromTail -= 1
while head.next:
head = head.next
tracked = tracked.next
return tracked.value
def getNodeEmma(head,positionFromTail):
fast=head
slow=head
for i in range(positionFromTail):
fast=fast.next
while fast.next:
fast=fast.next
slow=slow.next
return slow.value
###Output
_____no_output_____
###Markdown
compare two linked lists
###Code
def listToArray(head):
if head==None:
return None
current=head
stack=[]
while current:
stack.append(current.value)
current=current.next
return stack
def compararListas(lista1,lista2):
linkedList1=listToArray(lista1)
linkedList2=listToArray(lista2)
if len(linkedList1)!=len(linkedList2) :
return False
else:
return (linkedList1==linkedList2)
printLinkedList(head)
printLinkedList(reverse)
compararListas(head,reverse)
l1=[5, 4, 3, 2, 1]
l2=[1,2,3,4,5]
l1 ==l2
def compararListas2(lista1,lista2):
currentA = lista1
currentB = lista2
while currentA and currentB:
if currentA.value != currentB.value:
return False
currentA = currentA.next
currentB = currentB.next
return True if currentA == currentB else False
compararListas2(head,head)
compararListas2(head,reverse)
n01=LinkedList(5,None)
n02=LinkedList(4,n01)
n04=LinkedList(2,n02)
n05=LinkedList(1,n04)
lista3=n05
printLinkedList(lista3)
compararListas2(head,lista3)
compararListas(head,head)
###Output
_____no_output_____
###Markdown
merge two sorted linked lists
###Code
def MergeLists(head1,head2):
if head1 is None:
return head2
elif head2 is None:
return head1
if head1.value<=head2.value:
result=head1
result.next=MergeLists(head1.next,head2)
else:
result=head2
result.next=MergeLists(head1,head2.next)
return result
## HackerRank's solution, because the iterative version ran out of time
def MergeLists(head1, head2):
if not head1 or not head2:
return head1 or head2
head, head1, head2 = (head1, head1.next, head2) if min([head1.data, head2.data]) == head1.data else (head2, head1, head2.next)
curr = head
while head1 or head2:
if not head1 or not head2:
curr.next = head1 or head2
return head
curr.next, head1, head2 = (head1, head1.next, head2) if min([head1.data, head2.data]) == head1.data else (head2, head1, head2.next)
curr = curr.next
return head
###Output
_____no_output_____
###Markdown
delete duplicate value nodes from sorted linked list
###Code
def deleteDuplicate(head):
current=head
    while current and current.next:
if current.value==current.next.value:
current.next=current.next.next
else:
current=current.next
return head
###Output
_____no_output_____
###Markdown
find node to merge two linked lists def findMergeNode(head1, head2): current1=head1 current2=head2 while not current1==current2: if current1.next is None: current1=head2 else: current1=current1.next if current2.next is None: current2=head1 else: current2=current2.next return current1.value insert element in sorted double linked list
###Code
class DoubleNodo(object):
def __init__(self,value,prev=None,next=None):
self.value=value
self.prev=prev
self.next=next
nod3=DoubleNodo(10)
nod2=DoubleNodo(4)
nod1=DoubleNodo(3)
nod0=DoubleNodo(1)
nod0.next=nod1
nod1.next=nod2
nod1.prev=nod0
nod2.next=nod3
nod2.prev=nod1
nod3.prev=nod2
head_double=nod0
printLinkedList(head_double)
def insertNodeDouble(head, data):
new_node = DoubleNodo(data)
# in the unlikely event new node data < 0?
if new_node.value < head.value:
new_node.next = head
head.prev = new_node
return new_node
else:
current = head
current_next = head.next
while current_next and (new_node.value > current_next.value):
current = current_next
current_next = current_next.next
if current_next == None:
current.next = new_node
new_node.prev = current
else:
current.next = new_node
current_next.prev = new_node
new_node.prev = current
new_node.next = current_next
current = head
while current:
current = current.next
return head
insertNodeDouble(head_double,5)
printLinkedList(head_double)
def reverseDoubleLinkedList(head):
temp=None
current=head
while current:
temp=DoubleNodo(current.value,current,temp)
current=current.next
return temp
reverse_head=reverseDoubleLinkedList(head_double)
printLinkedList(reverse_head)
def ReverseLL(head):
if not head:
return head
head.next, head.prev = head.prev, head.next
if not head.prev:
return head
return ReverseLL(head.prev)
hacker=ReverseLL(head_double)
printLinkedList(hacker)
###Output
10
5
4
3
1
###Markdown
trees
###Code
class treeNode(object):
def __init__(self,value,right=None,left=None):
self.value=value
self.right=right
self.left=left
hoja7=treeNode(7)
hoja6=treeNode(6)
hoja5=treeNode(4)
hoja4=treeNode(1)
hoja3=treeNode(5)
hoja2=treeNode(2)
hoja1=treeNode(3)
hoja1.right=hoja2
hoja1.left=hoja3
hoja2.right=hoja4
hoja3.right=hoja5
hoja3.left=hoja6
hoja6.left=hoja7
root=hoja1
def preOrder(root,nodos=[]):
nodos.append(root.value)
if root and root.left:
preOrder(root.left,nodos)
if root and root.right:
preOrder(root.right,nodos)
return nodos
print(preOrder(root))
def inOrder(root,nodos=[]):
if root and root.left:
inOrder(root.left,nodos)
nodos.append(root.value)
if root and root.right:
inOrder(root.right,nodos)
return nodos
print(inOrder(root))
def postOrder(root,nodos=[]):
if root and root.left:
postOrder(root.left,nodos)
if root and root.right:
postOrder(root.right,nodos)
nodos.append(root.value)
return nodos
print(postOrder(root,nodos=[]))
###Output
[7, 6, 4, 5, 1, 2, 3]
###Markdown
height of tree
###Code
def heightTree(root):
if root is None:
return -1
else:
left_height=heightTree(root.left)
right_height=heightTree(root.right)
if left_height>right_height:
return left_height+1
else:
return right_height+1
heightTree(root)
###Output
_____no_output_____
###Markdown
tree level order traversal
###Code
# this is breadth-first search
def bfs(root):
queue=[]
queue.append(root)
while len(queue)>0:
node=queue.pop(0)
print(node.value,end=' ')
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
bfs(root)
###Output
3 5 2 6 4 1 7
###Markdown
Top view. I have not understood this problem; this is a solution that uses dictionaries (O(n^2))
###Code
def topView(root):
# Initialize the level
this_level = [(root, 0)]
scores = {}
while this_level:
# Basically iterate over the nodes on a single level
for _ in range(len(this_level)):
node, score = this_level.pop(0)
# Skip empty nodes
if not node:
continue
# Store the score if it's a new one!
if score not in scores:
scores[score] = node.value
# Add the node children to the next level
this_level.extend(
[(node.left, score - 1),
(node.right, score + 1)])
# Sort the scores and print their values
# (By default the sort is on the tuple first element: the score)
for _, value in sorted(list(scores.items())):
print(value, end=' ')
topView(root)
###Output
7 6 5 3 2 1
###Markdown
insert node in a binary search tree
###Code
head_bin_1=treeNode(4)
head_bin_2=treeNode(2)
head_bin_3=treeNode(7)
head_bin_4=treeNode(1)
head_bin_5=treeNode(3)
head_bin_1.left=head_bin_2
head_bin_1.right=head_bin_3
head_bin_2.left=head_bin_4
head_bin_2.right=head_bin_5
root_bin=head_bin_1
print(inOrder(root_bin,nodos=[]))
def insertBin(root,value):
if root is None:
root=treeNode(value)
if value<root.value:
if root.left is None:
root.left=treeNode(value)
else:
insertBin(root.left,value)
else:
if root.right is None:
root.right=treeNode(value)
else:
insertBin(root.right,value)
return root
root_bin2=insertBin(root_bin,10)
print(preOrder(root_bin2,nodos=[]))
#### hacker rank
class Noderank:
def __init__(self, info):
self.info = info
self.left = None
self.right = None
self.level = None
def __str__(self):
return str(self.info)
def preOrder2(root):
if root == None:
return
print (root.info, end=" ")
preOrder(root.left)
preOrder(root.right)
class BinarySearchTree:
def __init__(self):
self.root = None
#Node is defined as
#self.left (the left child of the node)
#self.right (the right child of the node)
#self.info (the value of the node)
def insert(self, val):
if self.root is None:
self.root=Noderank(val)
else:
current=self.root
if val<current.info:
if current.left is None:
current.left=Noderank(val)
else:
current.left.insert(val)
else:
if current.right is None:
current.right=Noderank(val)
else:
current.right.insert(val)
# if self.root == None:
# self.root = Noderank(val)
# else:
# current = self.root
#
# while True:
# if val < current.info:
# if current.left:
# current = current.left
# else:
# current.left = Noderank(val)
# break
# elif val > current.info:
# if current.right:
# current = current.right
# else:
# current.right = Noderank(val)
# break
# else:
# break
array=[1,2,3,4,7]
###Output
_____no_output_____
###Markdown
first common ancestor
###Code
def lca(root, v1, v2):
seek = root
while seek:
n = seek.value
if v1 > n and v2 > n: seek = seek.right
elif v1 < n and v2 < n: seek = seek.left
else: return(seek)
###Output
_____no_output_____
###Markdown
Dynamic Array
###Code
def dynamicArray(n, queries):
    # HackerRank "Dynamic Array": keep n sequences and a rolling lastAnswer value
    lastNumber = 0
    seqList = [[] for _ in range(n)]
    res = []
    for k, x, y in queries:
        # both query types address the sequence (x XOR lastAnswer) mod n
        index = (x ^ lastNumber) % n
        if k == 1:
            # type 1: append y to the selected sequence
            seqList[index].append(y)
        else:
            # type 2: read element y mod size, remember it and record it
            size = len(seqList[index])
            lastNumber = seqList[index][y % size]
            res.append(lastNumber)
    return res
## I have not understood this one
###Output
_____no_output_____
###Markdown
Stacks
###Code
10
1 97
2
1 20
2
1 26
1 20
2
3
1 91
3
## hackerrank solution
#python 3
items = [0]
for i in range(int(input())):
nums = list(map(int, input().split()))
if nums[0] == 1:
items.append(max(nums[1], items[-1]))
elif nums[0] == 2:
items.pop()
else:
print(items[-1])
items=[]
for i in range(int(input())):
nums=list(map(int,input().split()))
if nums[0]==1:
items.append(nums[1])
elif nums[0]==2:
items.pop()
elif nums[0]==3:
print(max(items))
from collections import deque
items=deque()
for i in range(int(input())):
nums=list(map(int,input().split()))
if nums[0]==1:
items.append(nums[1])
elif nums[0]==2:
items.pop()
elif nums[0]==3:
print(max(items))
###Output
_____no_output_____
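###Markdown
For comparison, the first solution above answers every type-3 query in O(1) by pushing the running maximum on each insert, whereas calling max(items) rescans the whole stack. A minimal illustration of the running-maximum idea (values taken from the sample input above; illustrative only):
```python
max_stack = [0]  # sentinel so max_stack[-1] is always defined
for value in [97, 20, 26, 20, 91]:
    max_stack.append(max(value, max_stack[-1]))  # push the running maximum
print(max_stack[-1])  # 97 -- the current maximum, read in O(1)
```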
###Markdown
Equal stacks
###Code
stack1=[3, 2, 1, 1, 1]
stack2=[4, 3, 2]
stack3=[1, 1, 4, 1]
stack4=[3, 2, 1, 1, 1]
from collections import deque
stack1_v2=deque()
stack1_v2=stack1
print(stack1_v2)
print(sum(stack1_v2))
stack1_v2.pop(0)
print(stack1_v2)
print(sum(stack1_v2))
# this solution is the slowest because it recomputes the sum every time
def equalStacks(stack1,stack2,stack3):
sum1=sum(stack1)
sum2=sum(stack2)
sum3=sum(stack3)
n1=len(stack1)
n2=len(stack2)
n3=len(stack3)
top1,top2,top3=0,0,0
while(1):
if (top1==n1 and top2==n2 and top3==n3):
return 0
if (sum1==sum2 and sum2==sum3):
return sum1
if (sum1>=sum2 and sum1>=sum3):
stack1.pop(0)
sum1=sum(stack1)
top1+=1
elif (sum2>=sum3):
stack2.pop(0)
sum2=sum(stack2)
top2+=1
elif (sum3>=sum2 and sum3>=sum1):
stack3.pop(0)
sum3=sum(stack3)
top3+=1
equalStacks(stack1,stack2,stack3)
## geeks for geeks solution
def equalStacks(h1, h2, h3):
#
# Write your code here.
#
sum1=sum(h1)
sum2=sum(h2)
sum3=sum(h3)
n1=len(h1)
n2=len(h2)
n3=len(h3)
top1, top2, top3 = 0, 0, 0
while(1):
if(top1==n1 and top2==n2 and top3==n3):
return 0
if(sum1==sum2 and sum2==sum3):
return sum1
if (sum1 >= sum2 and sum1 >= sum3):
            sum1 -= h1[top1]
top1+=1
elif (sum2 >= sum3):
sum2 -= h2[top2]
top2+=1
elif (sum3 >= sum2 and sum3 >= sum1):
sum3 -= h3[top3]
top3+=1
###Output
_____no_output_____ |
lvBERT/cleaned_lowercase/cleaned_lowercase.ipynb | ###Markdown
Get data
###Code
df = pd.read_csv('./../../../labeledTweets/allLabeledTweets.csv')
df = df[['id', 'message', 'label']]
df = df.drop_duplicates()
print(df.shape[0])
df.head()
df['label'].value_counts()
newLine ="\\n|\\r"
urls = '(https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9]+\.[^\s]{2,}|www\.[a-zA-Z0-9]+\.[^\s]{2,})'
numbers = '\d+((\.|\-)\d+)?'
mentions = '\B\@([\w\-]+)'
hashtag = '#'
whitespaces = '\s+'
leadTrailWhitespace = '^\s+|\s+?$'
df['clean_message'] = df['message']
df['clean_message'] = df['clean_message'].str.replace(newLine,' ',regex=True)
df['clean_message'] = df['clean_message'].str.replace(urls,' URL ',regex=True)
df['clean_message'] = df['clean_message'].str.replace(mentions,' MENTION ',regex=True)
df['clean_message'] = df['clean_message'].str.replace(numbers,' NMBR ',regex=True)
df['clean_message'] = df['clean_message'].str.replace(hashtag,' ',regex=True)
df['clean_message'] = df['clean_message'].str.replace(whitespaces,' ',regex=True)
df['clean_message'] = df['clean_message'].str.replace(leadTrailWhitespace,'',regex=True)
df.head()
###Output
_____no_output_____
###Markdown
Train, validate split (balanced)
###Code
df_0 = df[df['label']==0]
df_1 = df[df['label']==1]
df_2 = df[df['label']==2]
trainLabelSize = round(df_1.shape[0]*0.85)
trainLabelSize
df_0 = df_0.sample(trainLabelSize, random_state=42)
df_1 = df_1.sample(trainLabelSize, random_state=42)
df_2 = df_2.sample(trainLabelSize, random_state=42)
df_train = pd.concat([df_0, df_1, df_2])
# Shuffle rows
df_train = sklearn.utils.shuffle(df_train, random_state=42)
df_train['label'].value_counts()
df_val = df.merge(df_train, on=['id', 'message', 'label', 'clean_message'], how='left', indicator=True)
df_val = df_val[df_val['_merge']=='left_only']
df_val = df_val[['id', 'message', 'label', 'clean_message']]
df_val['label'].value_counts()
###Output
_____no_output_____
###Markdown
Tokenizer "lvBERT"
###Code
tokenizer = BertTokenizer.from_pretrained('./../lvbert_pytorch/', do_lower_case=True)
###Output
_____no_output_____
###Markdown
Find max length for tokenizer
###Code
token_lens = []
for txt in list(df.clean_message.values):
tokens = tokenizer.encode(txt, max_length=512, truncation=True)
token_lens.append(len(tokens))
max_length = max(token_lens)
max_length
###Output
_____no_output_____
###Markdown
Encode messages
###Code
encoded_data_train = tokenizer.batch_encode_plus(
df_train["clean_message"].values,
add_special_tokens=True,
return_attention_mask=True,
padding='max_length',
truncation=True,
max_length=max_length,
return_tensors='pt'
)
encoded_data_val = tokenizer.batch_encode_plus(
df_val["clean_message"].values,
add_special_tokens=True,
return_attention_mask=True,
padding='max_length',
truncation=True,
max_length=max_length,
return_tensors='pt'
)
input_ids_train = encoded_data_train['input_ids']
attention_masks_train = encoded_data_train['attention_mask']
labels_train = torch.tensor(df_train.label.values)
input_ids_val = encoded_data_val['input_ids']
attention_masks_val = encoded_data_val['attention_mask']
labels_val = torch.tensor(df_val.label.values)
dataset_train = TensorDataset(input_ids_train, attention_masks_train, labels_train)
dataset_val = TensorDataset(input_ids_val, attention_masks_val, labels_val)
len(dataset_train), len(dataset_val)
###Output
_____no_output_____
###Markdown
Model "lvBERT"
###Code
model = BertForSequenceClassification.from_pretrained('./../lvbert_pytorch/',
num_labels=3,
output_attentions=False,
output_hidden_states=False)
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
batch_size = 32
dataloader_train = DataLoader(dataset_train, sampler=RandomSampler(dataset_train), batch_size=batch_size)
dataloader_validation = DataLoader(dataset_val, sampler=SequentialSampler(dataset_val), batch_size=batch_size)
from transformers import get_linear_schedule_with_warmup
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5, eps=1e-8)
# optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
epochs = 5
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=len(dataloader_train)*epochs)
# Function to measure weighted F1
from sklearn.metrics import f1_score
def f1_score_func(preds, labels):
preds_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return f1_score(labels_flat, preds_flat, average='weighted')
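# Hypothetical quick check of the helper on dummy predictions (a perfect match gives 1.0):
_dummy_preds = np.array([[0.1, 0.8, 0.1], [0.7, 0.2, 0.1]])
_dummy_labels = np.array([1, 0])
print(f1_score_func(_dummy_preds, _dummy_labels))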
import random
seed_val = 17
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = torch.device('cpu')
model.to(device)
print(device)
# Function to evaluate model. Returns average validation loss, predictions, true values
def evaluate(dataloader_val):
model.eval()
loss_val_total = 0
predictions, true_vals = [], []
progress_bar = tqdm(dataloader_val, desc='Validating:', leave=False, disable=False)
for batch in progress_bar:
batch = tuple(b.to(device) for b in batch)
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[2]}
with torch.no_grad():
outputs = model(**inputs)
loss = outputs[0]
logits = outputs[1]
loss_val_total += loss.item()
logits = logits.detach().cpu().numpy()
label_ids = inputs['labels'].cpu().numpy()
predictions.append(logits)
true_vals.append(label_ids)
loss_val_avg = loss_val_total/len(dataloader_val)
predictions = np.concatenate(predictions, axis=0)
true_vals = np.concatenate(true_vals, axis=0)
return loss_val_avg, predictions, true_vals
###Output
_____no_output_____
###Markdown
Evaluate untrained model
###Code
_, predictions, true_vals = evaluate(dataloader_validation)
from sklearn.metrics import classification_report, confusion_matrix
preds_flat = np.argmax(predictions, axis=1).flatten()
print(classification_report(true_vals, preds_flat))
print(f1_score_func(predictions, true_vals))
pd.DataFrame(confusion_matrix(true_vals, preds_flat),
index = [['actual', 'actual', 'actual'], ['neutral', 'positive', 'negative']],
columns = [['predicted', 'predicted', 'predicted'], ['neutral', 'positive', 'negative']])
###Output
_____no_output_____
###Markdown
Train
###Code
for epoch in tqdm(range(1, epochs+1)):
model.train()
loss_train_total = 0
progress_bar = tqdm(dataloader_train, desc='Epoch {:1d}'.format(epoch), leave=False, disable=False)
for batch in progress_bar:
model.zero_grad()
batch = tuple(b.to(device) for b in batch)
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[2]}
outputs = model(**inputs)
loss = outputs[0]
loss_train_total += loss.item()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
scheduler.step()
progress_bar.set_postfix({'training_loss': '{:.3f}'.format(loss.item()/len(batch))})
torch.save(model.state_dict(), f'modelsCleaned/finetuned_lvBERT_epoch_{epoch}.model')
tqdm.write(f'\nEpoch {epoch}')
loss_train_avg = loss_train_total/len(dataloader_train)
tqdm.write(f'Training loss: {loss_train_avg}')
val_loss, predictions, true_vals = evaluate(dataloader_validation)
val_f1 = f1_score_func(predictions, true_vals)
tqdm.write(f'Validation loss: {val_loss}')
tqdm.write(f'F1 Score (Weighted): {val_f1}')
preds_flat = np.argmax(predictions, axis=1).flatten()
print('Classification report:')
print(classification_report(true_vals, preds_flat))
print('Confusion matrix:')
print(pd.DataFrame(confusion_matrix(true_vals, preds_flat),
index = [['actual', 'actual', 'actual'], ['neutral', 'positive', 'negative']],
columns = [['predicted', 'predicted', 'predicted'], ['neutral', 'positive', 'negative']]))
###Output
_____no_output_____
###Markdown
Evaluate best model
###Code
model.load_state_dict(torch.load('modelsCleaned/finetuned_lvBERT_epoch_X.model', map_location=torch.device('cpu')))
_, predictions, true_vals = evaluate(dataloader_validation)
preds_flat = np.argmax(predictions, axis=1).flatten()
print(f1_score_func(predictions, true_vals))
print(classification_report(true_vals, preds_flat))
pd.DataFrame(confusion_matrix(true_vals, preds_flat),
index = [['actual', 'actual', 'actual'], ['neutral', 'positive', 'negative']],
columns = [['predicted', 'predicted', 'predicted'], ['neutral', 'positive', 'negative']])
###Output
_____no_output_____ |
notebooks/python/L03_image_classification_with_cnn.ipynb | ###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Lab 03: Image Classification with Convolutional Neural Networks Run in Google Colab View source on GitHub In this tutorial, we'll build and train a neural network to classify images of clothing, like sneakers and shirts. Install and import dependenciesWe'll need [TensorFlow Datasets](https://www.tensorflow.org/datasets/), an API that simplifies downloading and accessing datasets, and provides several sample datasets to work with. We're also using a few helper libraries.
###Code
import tensorflow as tf
# Import TensorFlow Datasets
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
# Helper libraries
import math
import numpy as np
import matplotlib.pyplot as plt
import logging
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
###Output
_____no_output_____
###Markdown
Import the Fashion MNIST dataset This guide uses the [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset, which contains 70,000 grayscale images in 10 categories. The images show individual articles of clothing at low resolution (28 $\times$ 28 pixels), as seen here: <img src="https://tensorflow.org/images/fashion-mnist-sprite.png" alt="Fashion MNIST sprite" width="600"> Figure 1. Fashion-MNIST samples (by Zalando, MIT License). Fashion MNIST is intended as a drop-in replacement for the classic [MNIST](http://yann.lecun.com/exdb/mnist/) dataset—often used as the "Hello, World" of machine learning programs for computer vision. The MNIST dataset contains images of handwritten digits (0, 1, 2, etc) in an identical format to the articles of clothing we'll use here.This guide uses Fashion MNIST for variety, and because it's a slightly more challenging problem than regular MNIST. Both datasets are relatively small and are used to verify that an algorithm works as expected. They're good starting points to test and debug code.We will use 60,000 images to train the network and 10,000 images to evaluate how accurately the network learned to classify images. You can access the Fashion MNIST directly from TensorFlow, using the [Datasets](https://www.tensorflow.org/datasets) API:
###Code
dataset, metadata = tfds.load('fashion_mnist', as_supervised=True, with_info=True)
train_dataset, test_dataset = dataset['train'], dataset['test']
###Output
_____no_output_____
###Markdown
Loading the dataset returns metadata as well as a *training dataset* and *test dataset*.* The model is trained using `train_dataset`.* The model is tested against `test_dataset`.The images are 28 $\times$ 28 arrays, with pixel values in the range `[0, 255]`. The *labels* are an array of integers, in the range `[0, 9]`. These correspond to the *class* of clothing the image represents: Label Class 0 T-shirt/top 1 Trouser 2 Pullover 3 Dress 4 Coat 5 Sandal 6 Shirt 7 Sneaker 8 Bag 9 Ankle boot Each image is mapped to a single label. Since the *class names* are not included with the dataset, store them here to use later when plotting the images:
###Code
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
###Output
_____no_output_____
###Markdown
Explore the dataLet's explore the format of the dataset before training the model. The following shows there are 60,000 images in the training set, and 10000 images in the test set:
###Code
num_train_examples = metadata.splits['train'].num_examples
num_test_examples = metadata.splits['test'].num_examples
print("Number of training examples: {}".format(num_train_examples))
print("Number of test examples: {}".format(num_test_examples))
###Output
_____no_output_____
###Markdown
Preprocess the dataThe value of each pixel in the image data is an integer in the range `[0,255]`. For the model to work properly, these values need to be normalized to the range `[0,1]`. So here we create a normalization function, and then apply it to each image in the test and train datasets.
###Code
def normalize(images, labels):
images = tf.cast(images, tf.float32)
images /= 255
return images, labels
# The map function applies the normalize function to each element in the train
# and test datasets
train_dataset = train_dataset.map(normalize)
test_dataset = test_dataset.map(normalize)
# The first time you use the dataset, the images will be loaded from disk
# Caching will keep them in memory, making training faster
train_dataset = train_dataset.cache()
test_dataset = test_dataset.cache()
###Output
_____no_output_____
###Markdown
Explore the processed dataLet's plot an image to see what it looks like.
###Code
# Take a single image, and remove the color dimension by reshaping
for image, label in test_dataset.take(1):
break
image = image.numpy().reshape((28,28))
# Plot the image - voila a piece of fashion clothing
plt.figure()
plt.imshow(image, cmap=plt.cm.binary)
plt.colorbar()
plt.grid(False)
plt.show()
###Output
_____no_output_____
###Markdown
Display the first 25 images from the *test set* and display the class name below each image. Verify that the data is in the correct format and we're ready to build and train the network.
###Code
plt.figure(figsize=(10,10))
i = 0
for (image, label) in test_dataset.take(25):
image = image.numpy().reshape((28,28))
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(image, cmap=plt.cm.binary)
plt.xlabel(class_names[label])
i += 1
plt.show()
###Output
_____no_output_____
###Markdown
Build the modelBuilding the neural network requires configuring the layers of the model, then compiling the model. Exercise 3.1 Setup the layersThe basic building block of a neural network is the *layer*. A layer extracts a representation from the data fed into it. Hopefully, a series of connected layers results in a representation that is meaningful for the problem at hand.Much of deep learning consists of chaining together simple layers. Most layers, like `tf.keras.layers.Dense`, have internal parameters which are adjusted ("learned") during training.For this exercise, we'll be using two new layers, the Convolution layer (`tf.keras.layers.Conv2D`) and the Max Pooling layer (`tf.keras.layers.MaxPool2D`). Refer to the slides and official documentation on how to use these layers:* [tf.keras.layers.Conv2D reference](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D)* [tf.keras.layers.MaxPool2D reference](https://www.tensorflow.org/api_docs/python/tf/keras/layers/MaxPool2D)**Our network layers are:*** 2D Convolution layer - 32 filters, 3x3 kernel, ReLU activation, padding with same values* Max pooling layer - 2x2 kernel, 2 stride* 2D Convolution layer - 64 filters, 3x3 kernel, ReLU activation, padding with same values* Max pooling layer - 2x2 kernel, 2 stride* Flatten layer* Dense layer - 128 nodes output, ReLU activation* Dense layer - 10 nodes output
###Code
model = tf.keras.Sequential([
#TODO - Add model layers as described above
])
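# A minimal sketch of one possible layer stack matching the description above
# (an illustration only; see the linked solution notebook for the official answer.
# `model_sketch` is a name introduced here, separate from the exercise's `model`):
model_sketch = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation=tf.nn.relu,
                           input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D((2, 2), strides=2),
    tf.keras.layers.Conv2D(64, (3, 3), padding='same', activation=tf.nn.relu),
    tf.keras.layers.MaxPooling2D((2, 2), strides=2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation=tf.nn.relu),
    tf.keras.layers.Dense(10)
])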
###Output
_____no_output_____
###Markdown
Exercise 3.1 SolutionThe solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E3.1.ipynb) Exercise 3.2 Compile the model with `Model.compile`Before the model is ready for training, it needs a few more settings. These are added during the model's *compile* step:**Compile the model below with the following settings*** *Loss function* — SparseCategoricalCrossentropy* *Optimizer* — Adam* *Metrics* — accuracyRefer to the [official documentation](https://www.tensorflow.org/api_docs/python/tf/keras/Modelcompile) if you've forgotten the function.
###Code
# TODO - Compile the model
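# Continuing the sketch on `model_sketch` from the previous cell (not the exercise's
# `model`); from_logits=True is an assumption that pairs with the final Dense(10)
# having no softmax:
model_sketch.compile(optimizer='adam',
                     loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                     metrics=['accuracy'])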
###Output
_____no_output_____
###Markdown
Exercise 3.2 SolutionThe solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E3.2.ipynb) Exercise 3.3 Train the model with `Model.fit`First, we define the iteration behavior for the train dataset:1. Repeat forever by specifying `dataset.repeat()` (the `epochs` parameter described below limits how long we perform training).2. The `dataset.shuffle(60000)` randomizes the order so our model cannot learn anything from the order of the examples.3. And `dataset.batch(32)` tells `model.fit` to use batches of 32 images and labels when updating the model variables.Training is performed by calling the `model.fit` method:1. Feed the training data to the model using `train_dataset`.2. The model learns to associate images and labels.3. The `epochs=5` parameter limits training to 5 full iterations of the training dataset, so a total of 5 * 60000 = 300000 examples.
###Code
BATCH_SIZE = 32
train_dataset = train_dataset.cache().shuffle(num_train_examples).batch(BATCH_SIZE)
test_dataset = test_dataset.cache().batch(BATCH_SIZE)
###Output
_____no_output_____
###Markdown
Start training the model in the code box below for **10 epochs**.Refer to the [documentation](https://www.tensorflow.org/api_docs/python/tf/keras/Model) if you've forgotten the function.
###Code
# TODO - Train the model
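# A sketch of the training call, run on `model_sketch` rather than the exercise's
# `model`; it assumes the batched `train_dataset` defined in the cell above:
history_sketch = model_sketch.fit(train_dataset, epochs=10)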
###Output
_____no_output_____
###Markdown
As the model trains, the loss and accuracy metrics are displayed. This model reaches an accuracy of about 0.97 (or 97%) on the training data. Exercise 3.3 SolutionThe solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E3.3.ipynb) Exercise 3.4 Evaluate accuracy with `Model.evaluate`Next, compare how the model performs on the test dataset. Use all examples we have in the test dataset to assess accuracy.Refer to the [documentation](https://www.tensorflow.org/api_docs/python/tf/keras/Model) on how to use the function.
###Code
# TODO - Evaluate the model
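# A sketch of the evaluation step on `model_sketch` (the exercise itself expects `model`):
sketch_loss, sketch_accuracy = model_sketch.evaluate(test_dataset)
print('Sketch accuracy on the test dataset:', sketch_accuracy)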
###Output
_____no_output_____
###Markdown
As it turns out, the accuracy on the test dataset is smaller than the accuracy on the training dataset. This is completely normal, since the model was trained on the `train_dataset`. When the model sees images it has never seen during training, (that is, from the `test_dataset`), we can expect performance to go down. Exercise 3.4 SolutionThe solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E3.4.ipynb) Make predictions and exploreWith the model trained, we can use it to make predictions about some images.
###Code
for test_images, test_labels in test_dataset.take(1):
test_images = test_images.numpy()
test_labels = test_labels.numpy()
predictions = model.predict(test_images)
predictions.shape
###Output
_____no_output_____
###Markdown
Here, the model has predicted the label for each image in the testing set. Let's take a look at the first prediction:
###Code
predictions[0]
###Output
_____no_output_____
###Markdown
A prediction is an array of 10 numbers. These describe the "confidence" of the model that the image corresponds to each of the 10 different articles of clothing. We can see which label has the highest confidence value:
###Code
np.argmax(predictions[0])
###Output
_____no_output_____
###Markdown
So the model is usually most confident that this image is a Shirt, or `class_names[6]`. Let's check the label:
###Code
test_labels[0]
###Output
_____no_output_____
###Markdown
We can graph this to look at the full set of 10 class predictions
###Code
def plot_image(i, predictions_array, true_labels, images):
predictions_array, true_label, img = predictions_array[i], true_labels[i], images[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img[...,0], cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
###Output
_____no_output_____
###Markdown
Let's look at the 0th image, predictions, and prediction array.
###Code
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
###Output
_____no_output_____
###Markdown
Let's plot several images with their predictions. Correct prediction labels are blue and incorrect prediction labels are red. The number gives the percent (out of 100) for the predicted label. Note that it can be wrong even when very confident.
###Code
# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions, test_labels)
###Output
_____no_output_____
###Markdown
Finally, use the trained model to make a prediction about a single image.
###Code
# Grab an image from the test dataset
img = test_images[0]
print(img.shape)
###Output
_____no_output_____
###Markdown
`tf.keras` models are optimized to make predictions on a *batch*, or collection, of examples at once. So even though we're using a single image, we need to add it to a list:
###Code
# Add the image to a batch where it's the only member.
img = np.array([img])
print(img.shape)
###Output
_____no_output_____
###Markdown
Now predict the image:
###Code
predictions_single = model.predict(img)
print(predictions_single)
plot_value_array(0, predictions_single, test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
###Output
_____no_output_____
###Markdown
`model.predict` returns a list of lists, one for each image in the batch of data. Grab the predictions for our (only) image in the batch:
###Code
np.argmax(predictions_single[0])
###Output
_____no_output_____
###Markdown
And, as before, the model predicts a label of 6 (shirt). Exercise 3.5Experiment with different models and see how the accuracy results differ. In particular change the following parameters:* Set training epochs to 1* Number of neurons in the Dense layer following the Flatten one. For example, go really low (e.g. 10) in ranges up to 512 and see how accuracy changes* Add additional Dense layers between the Flatten and the final Dense(10), experiment with different units in these layers* Don't normalize the pixel values, and see the effect that has Exercise 3.6 - CIFAR-10 Dataset with CNNsLet's apply what we've learned to another dataset.The [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images.As our input is a colour image, we now have 3 values per pixel. When flattened, our input array is 3072 values long ($32\times32\times3$). * What happens when you use the same network as above?* What is the best accuracy that you can achieve? Like in the previous lab, download, extract and load the dataset.The extracted folder `cifar-10-batches-py` contains (in Python's pickle format):* Training dataset: `data_batch_1 - 5` * Test dataset: `test_batch`* Dataset metadata: `batches.meta`
###Code
import os
import glob
# Download the data
_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
zip_dir = tf.keras.utils.get_file('cifar-10-python.tar.gz', origin=_URL, extract=True)
# Get the data and meta file names
data_dir = os.path.join(os.path.dirname(zip_dir), 'cifar-10-batches-py')
train_files = glob.glob(os.path.join(data_dir,"data_batch_*"))
test_file = os.path.join(data_dir,"test_batch")
meta_file = os.path.join(data_dir,"batches.meta")
def unpickle(file):
import pickle
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
def build_dataset(files):
x = []
y = []
for file in files:
dict = unpickle(file)
for image in dict[b'data']:
# Image in the dataset is stored as a 3072 length 1D array
x.append(image)
for label in dict[b'labels']:
y.append(label)
return tf.data.Dataset.from_tensor_slices((x,y))
# Build the training dataset
train_dataset = build_dataset(train_files)
# Build the testing dataset
test_dataset = build_dataset([test_file])
# Get the metadata
meta = unpickle(meta_file)
###Output
_____no_output_____
###Markdown
**Now that we've got a dataset, use what you've learned in this lab to build a CNN model for classifying these images.*** Don't forget to pre-process your data * You'll want to change the shape of the input image from 1D to a 3D array inside your mapping function (hint: [use the reshape function](https://www.tensorflow.org/api_docs/python/tf/reshape)) * The image is stored as `[colour channel, width, height]`, you'll need to change this ordering to `[width, height, colour channel]` (hint: [use the transpose function](https://www.tensorflow.org/api_docs/python/tf/transpose))* Remember to check the input shape as it's different from the Fashion MNIST dataset. A rough preprocessing sketch (using illustrative names) is included in the code cell below.
###Code
# TODO - Create a CNN model and train it using the CIFAR-10 dataset
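# A rough sketch of the preprocessing described above; `preprocess_cifar`,
# `cifar_train` and `cifar_test` are illustrative names introduced here:
def preprocess_cifar(image, label):
    image = tf.cast(image, tf.float32) / 255.0   # normalize to [0, 1]
    image = tf.reshape(image, (3, 32, 32))       # 1D -> [colour channel, width, height]
    image = tf.transpose(image, perm=[1, 2, 0])  # -> [width, height, colour channel]
    return image, label

cifar_train = train_dataset.map(preprocess_cifar).cache().shuffle(10000).batch(32)
cifar_test = test_dataset.map(preprocess_cifar).cache().batch(32)
# A CNN like the one earlier in this lab can then be trained with input_shape=(32, 32, 3).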
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Lab 03: Image Classification with Convolutional Neural Networks Run in Google Colab View source on GitHub In this tutorial, we'll build and train a neural network to classify images of clothing, like sneakers and shirts. Install and import dependenciesWe'll need [TensorFlow Datasets](https://www.tensorflow.org/datasets/), an API that simplifies downloading and accessing datasets, and provides several sample datasets to work with. We're also using a few helper libraries.
###Code
import tensorflow as tf
# Import TensorFlow Datasets
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
# Helper libraries
import math
import numpy as np
import matplotlib.pyplot as plt
import logging
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
###Output
_____no_output_____
###Markdown
Import the Fashion MNIST dataset This guide uses the [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset, which contains 70,000 grayscale images in 10 categories. The images show individual articles of clothing at low resolution (28 $\times$ 28 pixels), as seen here: <img src="https://tensorflow.org/images/fashion-mnist-sprite.png" alt="Fashion MNIST sprite" width="600"> Figure 1. Fashion-MNIST samples (by Zalando, MIT License). Fashion MNIST is intended as a drop-in replacement for the classic [MNIST](http://yann.lecun.com/exdb/mnist/) dataset—often used as the "Hello, World" of machine learning programs for computer vision. The MNIST dataset contains images of handwritten digits (0, 1, 2, etc) in an identical format to the articles of clothing we'll use here.This guide uses Fashion MNIST for variety, and because it's a slightly more challenging problem than regular MNIST. Both datasets are relatively small and are used to verify that an algorithm works as expected. They're good starting points to test and debug code.We will use 60,000 images to train the network and 10,000 images to evaluate how accurately the network learned to classify images. You can access the Fashion MNIST directly from TensorFlow, using the [Datasets](https://www.tensorflow.org/datasets) API:
###Code
dataset, metadata = tfds.load('fashion_mnist', as_supervised=True, with_info=True)
train_dataset, test_dataset = dataset['train'], dataset['test']
###Output
_____no_output_____
###Markdown
Loading the dataset returns metadata as well as a *training dataset* and *test dataset*.* The model is trained using `train_dataset`.* The model is tested against `test_dataset`.The images are 28 $\times$ 28 arrays, with pixel values in the range `[0, 255]`. The *labels* are an array of integers, in the range `[0, 9]`. These correspond to the *class* of clothing the image represents: Label Class 0 T-shirt/top 1 Trouser 2 Pullover 3 Dress 4 Coat 5 Sandal 6 Shirt 7 Sneaker 8 Bag 9 Ankle boot Each image is mapped to a single label. Since the *class names* are not included with the dataset, store them here to use later when plotting the images:
###Code
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
###Output
_____no_output_____
###Markdown
Explore the dataLet's explore the format of the dataset before training the model. The following shows there are 60,000 images in the training set, and 10000 images in the test set:
###Code
num_train_examples = metadata.splits['train'].num_examples
num_test_examples = metadata.splits['test'].num_examples
print("Number of training examples: {}".format(num_train_examples))
print("Number of test examples: {}".format(num_test_examples))
###Output
_____no_output_____
###Markdown
Preprocess the dataThe value of each pixel in the image data is an integer in the range `[0,255]`. For the model to work properly, these values need to be normalized to the range `[0,1]`. So here we create a normalization function, and then apply it to each image in the test and train datasets.
###Code
def normalize(images, labels):
images = tf.cast(images, tf.float32)
images /= 255
return images, labels
# The map function applies the normalize function to each element in the train
# and test datasets
train_dataset = train_dataset.map(normalize)
test_dataset = test_dataset.map(normalize)
# The first time you use the dataset, the images will be loaded from disk
# Caching will keep them in memory, making training faster
train_dataset = train_dataset.cache()
test_dataset = test_dataset.cache()
###Output
_____no_output_____
###Markdown
Explore the processed dataLet's plot an image to see what it looks like.
###Code
# Take a single image, and remove the color dimension by reshaping
for image, label in test_dataset.take(1):
break
image = image.numpy().reshape((28,28))
# Plot the image - voila a piece of fashion clothing
plt.figure()
plt.imshow(image, cmap=plt.cm.binary)
plt.colorbar()
plt.grid(False)
plt.show()
###Output
_____no_output_____
###Markdown
Display the first 25 images from the *test set* and display the class name below each image. Verify that the data is in the correct format and we're ready to build and train the network.
###Code
plt.figure(figsize=(10,10))
i = 0
for (image, label) in test_dataset.take(25):
image = image.numpy().reshape((28,28))
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(image, cmap=plt.cm.binary)
plt.xlabel(class_names[label])
i += 1
plt.show()
###Output
_____no_output_____
###Markdown
Build the modelBuilding the neural network requires configuring the layers of the model, then compiling the model. Exercise 3.1 Setup the layersThe basic building block of a neural network is the *layer*. A layer extracts a representation from the data fed into it. Hopefully, a series of connected layers results in a representation that is meaningful for the problem at hand.Much of deep learning consists of chaining together simple layers. Most layers, like `tf.keras.layers.Dense`, have internal parameters which are adjusted ("learned") during training.For this exercise, we'll be using two new layers, the Convolution layer (`tf.keras.layers.Conv2D`) and the Max Pooling layer (`tf.keras.layers.MaxPool2D`). Refer to the slides and official documentation on how to use these layers:* [tf.keras.layers.Conv2D reference](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D)* [tf.keras.layers.MaxPool2D reference](https://www.tensorflow.org/api_docs/python/tf/keras/layers/MaxPool2D)**Our network layers are:*** 2D Convolution layer - 32 filters, 3x3 kernel, ReLU activation, padding with same values* Max pooling layer - 2x2 kernel, 2 stride* 2D Convolution layer - 64 filters, 3x3 kernel, ReLU activation, padding with same values* Max pooling layer - 2x2 kernel, 2 stride* Flatten layer* Dense layer - 128 nodes output, ReLU activation* Dense layer - 10 nodes output
###Code
model = tf.keras.Sequential([
#TODO - Add model layers as described above
])
###Output
_____no_output_____
###Markdown
Exercise 3.1 SolutionThe solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E3.1.ipynb) Exercise 3.2 Compile the model with `Model.compile`Before the model is ready for training, it needs a few more settings. These are added during the model's *compile* step:**Compile the model below with the following settings*** *Loss function* — SparseCategoricalCrossentropy* *Optimizer* — Adam* *Metrics* — accuracyRefer to the [official documentation](https://www.tensorflow.org/api_docs/python/tf/keras/Modelcompile) if you've forgotten the function.
###Code
# TODO - Compile the model
###Output
_____no_output_____
###Markdown
Exercise 3.2 SolutionThe solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E3.2.ipynb) Exercise 3.3 Train the model with `Model.fit`First, we define the iteration behavior for the train dataset:1. Repeat forever by specifying `dataset.repeat()` (the `epochs` parameter described below limits how long we perform training).2. The `dataset.shuffle(60000)` randomizes the order so our model cannot learn anything from the order of the examples.3. And `dataset.batch(32)` tells `model.fit` to use batches of 32 images and labels when updating the model variables.Training is performed by calling the `model.fit` method:1. Feed the training data to the model using `train_dataset`.2. The model learns to associate images and labels.3. The `epochs=5` parameter limits training to 5 full iterations of the training dataset, so a total of 5 * 60000 = 300000 examples.(Don't worry about `steps_per_epoch`, the requirement to have this flag will soon be removed.)
###Code
BATCH_SIZE = 32
train_dataset = train_dataset.cache().repeat().shuffle(num_train_examples).batch(BATCH_SIZE)
test_dataset = test_dataset.cache().batch(BATCH_SIZE)
###Output
_____no_output_____
###Markdown
Start training the model in the code box below for **10 epochs**. Don't forget to add the parameter `steps_per_epoch=math.ceil(num_train_examples/BATCH_SIZE)`.Refer to the [documentation](https://www.tensorflow.org/api_docs/python/tf/keras/Model) if you've forgotten the function.
###Code
# TODO - Train the model
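# A sketch of the fit call for this version of the lab, assuming `model` has been
# built and compiled in Exercises 3.1 and 3.2; because this train_dataset calls
# .repeat(), fit needs steps_per_epoch to know where an epoch ends:
history = model.fit(train_dataset, epochs=10,
                    steps_per_epoch=math.ceil(num_train_examples/BATCH_SIZE))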
###Output
_____no_output_____
###Markdown
As the model trains, the loss and accuracy metrics are displayed. This model reaches an accuracy of about 0.97 (or 97%) on the training data. Exercise 3.3 SolutionThe solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E3.3.ipynb) Exercise 3.4 Evaluate accuracy with `Model.evaluate`Next, compare how the model performs on the test dataset. Use all examples we have in the test dataset to assess accuracy.Refer to the [documentation](https://www.tensorflow.org/api_docs/python/tf/keras/Model) on how to use the function.
###Code
# TODO - Evaluate the model
###Output
_____no_output_____
###Markdown
As it turns out, the accuracy on the test dataset is smaller than the accuracy on the training dataset. This is completely normal, since the model was trained on the `train_dataset`. When the model sees images it has never seen during training, (that is, from the `test_dataset`), we can expect performance to go down. Exercise 3.4 SolutionThe solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E3.4.ipynb) Make predictions and exploreWith the model trained, we can use it to make predictions about some images.
###Code
for test_images, test_labels in test_dataset.take(1):
test_images = test_images.numpy()
test_labels = test_labels.numpy()
predictions = model.predict(test_images)
predictions.shape
###Output
_____no_output_____
###Markdown
Here, the model has predicted the label for each image in the testing set. Let's take a look at the first prediction:
###Code
predictions[0]
###Output
_____no_output_____
###Markdown
A prediction is an array of 10 numbers. These describe the "confidence" of the model that the image corresponds to each of the 10 different articles of clothing. We can see which label has the highest confidence value:
###Code
np.argmax(predictions[0])
###Output
_____no_output_____
###Markdown
So the model is usually most confident that this image is a Shirt, or `class_names[6]`. Let's check the label:
###Code
test_labels[0]
###Output
_____no_output_____
###Markdown
We can graph this to look at the full set of 10 class predictions
###Code
def plot_image(i, predictions_array, true_labels, images):
predictions_array, true_label, img = predictions_array[i], true_labels[i], images[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img[...,0], cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
###Output
_____no_output_____
###Markdown
Let's look at the 0th image, predictions, and prediction array.
###Code
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
###Output
_____no_output_____
###Markdown
Let's plot several images with their predictions. Correct prediction labels are blue and incorrect prediction labels are red. The number gives the percent (out of 100) for the predicted label. Note that it can be wrong even when very confident.
###Code
# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions, test_labels)
###Output
_____no_output_____
###Markdown
Finally, use the trained model to make a prediction about a single image.
###Code
# Grab an image from the test dataset
img = test_images[0]
print(img.shape)
###Output
_____no_output_____
###Markdown
`tf.keras` models are optimized to make predictions on a *batch*, or collection, of examples at once. So even though we're using a single image, we need to add it to a list:
###Code
# Add the image to a batch where it's the only member.
img = np.array([img])
print(img.shape)
###Output
_____no_output_____
###Markdown
Now predict the image:
###Code
predictions_single = model.predict(img)
print(predictions_single)
plot_value_array(0, predictions_single, test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
###Output
_____no_output_____
###Markdown
`model.predict` returns a list of lists, one for each image in the batch of data. Grab the predictions for our (only) image in the batch:
###Code
np.argmax(predictions_single[0])
###Output
_____no_output_____
###Markdown
And, as before, the model predicts a label of 6 (shirt). Exercise 3.5Experiment with different models and see how the accuracy results differ. In particular change the following parameters:* Set training epochs to 1* Number of neurons in the Dense layer following the Flatten one. For example, go really low (e.g. 10) in ranges up to 512 and see how accuracy changes* Add additional Dense layers between the Flatten and the final Dense(10), experiment with different units in these layers* Don't normalize the pixel values, and see the effect that has Exercise 3.6 - CIFAR-10 Dataset with CNNsLet's apply what we've learned to another dataset.The [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images.As our input is a colour image, we now have 3 values per pixel. When flattened, our input array is 3072 values long ($32\times32\times3$). * What happens when you use the same network as above?* What is the best accuracy that you can achieve? Like in the previous lab, download, extract and load the dataset.The extracted folder `cifar-10-batches-py` contains (in Python's pickle format):* Training dataset: `data_batch_1 - 5` * Test dataset: `test_batch`* Dataset metadata: `batches.meta`
###Code
import os
import glob
# Download the data
_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
zip_dir = tf.keras.utils.get_file('cifar-10-python.tar.gz', origin=_URL, extract=True)
# Get the data and meta file names
data_dir = os.path.join(os.path.dirname(zip_dir), 'cifar-10-batches-py')
train_files = glob.glob(os.path.join(data_dir,"data_batch_*"))
test_file = os.path.join(data_dir,"test_batch")
meta_file = os.path.join(data_dir,"batches.meta")
def unpickle(file):
import pickle
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
def build_dataset(files):
x = []
y = []
for file in files:
dict = unpickle(file)
for image in dict[b'data']:
# Image in the dataset is stored as a 3072 length 1D array
x.append(image)
for label in dict[b'labels']:
y.append(label)
return tf.data.Dataset.from_tensor_slices((x,y))
# Build the training dataset
train_dataset = build_dataset(train_files)
# Build the testing dataset
test_dataset = build_dataset([test_file])
# Get the metadata
meta = unpickle(meta_file)
###Output
_____no_output_____
###Markdown
**Now that we've got a dataset, use what you've learned in this lab to build a CNN model for classifying these images.*** Don't forget to pre-process your data * You'll want to change the shape of the input image from 1D to a 3D array inside your mapping function (hint: [use the reshape function](https://www.tensorflow.org/api_docs/python/tf/reshape)) * The image is stored as `[colour channel, width, height]`, you'll need to change this ordering to `[width, height, colour channel]` (hint: [use the transpose function](https://www.tensorflow.org/api_docs/python/tf/transpose))* Remember to check the input shape as it's different from the Fashion MNIST dataset.
###Code
# TODO - Create a CNN model and train it using the CIFAR-10 dataset
###Output
_____no_output_____ |
notebooks/dl-training.ipynb | ###Markdown
MTL ResNet Model
###Code
from torchvision.models.utils import load_state_dict_from_url
class ResNet(nn.Module):
def __init__(
self,
block,
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(self, block, planes: int, blocks: int,
stride: int = 1, dilate: bool = False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
torchvision.models.resnet.conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x: torch.Tensor) -> torch.Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
x1 = self.fc_task1(x)
x2 = self.fc_task2(x)
return x1, x2
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self._forward_impl(x)
def _resnet(
arch: str,
block,
layers: List[int],
pretrained:bool,
progress: bool,
**kwargs: Any
):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = torchvision.models.resnet.load_state_dict_from_url(torchvision.models.resnet.model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
return _resnet('resnet18', torchvision.models.resnet.BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
return _resnet('resnet50', torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def get_mtl_resnet50(num_classes: int):
_m = resnet50(pretrained=True) #num_classes=3)
_m.fc = nn.Sequential(nn.Flatten(),
nn.Linear(2048, 512),
nn.ReLU(),
nn.Dropout(0.2))
_m.fc_task1 = nn.Sequential(nn.Linear(512, num_classes),
nn.LogSoftmax(dim=1))
_m.fc_task2 = nn.Sequential(nn.Linear(512, num_classes),
nn.LogSoftmax(dim=1))
return _m
_m = get_mtl_resnet50(3)
x1, x2 = _m(torch.Tensor(np.random.random((1, 3, 100, 100))))
print(x1, x2)
# Scratch cell: placeholder for experimenting with a custom forward pass on a stock
# resnet50 (not used by the MTL models defined above).
def _forward_impl_custom(self, x: torch.Tensor) -> torch.Tensor:
    pass

m = resnet50(pretrained=True)
m._forward_impl = _forward_impl_custom.__get__(m)  # bind as a method so `self` is passed
###Output
_____no_output_____
###Markdown
Mobilenet v3 (large) | Model | Acc@1 | Acc@5 ||---|---|---|| MobileNet V2 | 71.878 | 90.286 | | MobileNet V3 Large | 74.042 | 91.340 | | MobileNet V3 Small | 67.668 | 87.402 |
###Code
class MTL_MobileNetV3(torchvision.models.MobileNetV3):
def __init__(self, inverted_residual_setting, last_channel):
super().__init__(inverted_residual_setting, last_channel)
arch = "mobilenet_v3_large"
self.inverted_residual_setting = inverted_residual_setting
self.last_channel = last_channel
lastconv_input_channels = inverted_residual_setting[-1].out_channels
self.lastconv_output_channels = 6 * lastconv_input_channels
def add_mtl_head(self, num_classes=3):
self.classifier = nn.Sequential(
nn.Linear(self.lastconv_output_channels, self.last_channel),
nn.Hardswish(inplace=True),
nn.Dropout(p=0.2, inplace=True))
self.classifier1 = nn.Sequential(nn.Linear(self.last_channel, num_classes),
nn.LogSoftmax(dim=1))
self.classifier2 = nn.Sequential(nn.Linear(self.last_channel, num_classes),
nn.LogSoftmax(dim=1))
def _forward_impl(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
x1 = self.classifier1(x)
x2 = self.classifier2(x)
return x1, x2
def my_mobilenet_v3_model(
arch: str,
inverted_residual_setting,
last_channel: int,
pretrained: bool,
progress: bool,
**kwargs: Any
):
model = MTL_MobileNetV3(inverted_residual_setting, last_channel)
if pretrained:
if torchvision.models.mobilenetv3.model_urls.get(arch, None) is None:
raise ValueError("No checkpoint is available for model type {}".format(arch))
state_dict = torchvision.models.mobilenetv3.load_state_dict_from_url(torchvision.models.mobilenetv3.model_urls[arch], progress=progress)
model.load_state_dict(state_dict)
return model
def mtl_mobilenet_v3_large(pretrained: bool = True, num_classes: int = 3):
arch = "mobilenet_v3_large"
inverted_residual_setting, last_channel = torchvision.models.mobilenetv3._mobilenet_v3_conf(arch)
m = my_mobilenet_v3_model(arch, inverted_residual_setting, last_channel, pretrained, progress=True)
m.add_mtl_head(num_classes=3)
return m
m = mtl_mobilenet_v3_large(pretrained=True, num_classes=3)
x1, x2 = m(torch.Tensor(np.random.random((1, 3, 100, 100))))
print(x1, x2)
###Output
tensor([[-0.0337, 0.0108, 0.0853]], grad_fn=<AddmmBackward>) tensor([[-0.0209, -0.1206, -0.0302]], grad_fn=<AddmmBackward>)
###Markdown
Training
###Code
def get_metric_dict(mode: str = "train", num_classes: int = 3) -> dict:
kwargs = {"num_classes": num_classes, "average": "weighted"}
metric_dict = {f"accuracy_{mode}_surface": torchmetrics.Accuracy(**kwargs),
f"precision_{mode}_surface": torchmetrics.Precision(**kwargs),
f"accuracy_{mode}_smoothness": torchmetrics.Accuracy(**kwargs),
f"precision_{mode}_smoothness": torchmetrics.Precision(**kwargs),
f"f1_{mode}_surface": torchmetrics.F1(**kwargs),
f"f1_{mode}_smoothness": torchmetrics.F1(**kwargs),
}
return metric_dict
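# Hypothetical sanity check that the metric objects update and compute as expected
# (random tensors only, not real predictions):
_check_metrics = get_metric_dict(mode="train", num_classes=3)
_check_preds = torch.randn(8, 3).softmax(dim=-1)
_check_target = torch.randint(0, 3, (8,))
_check_metrics["f1_train_surface"](_check_preds, _check_target)
print(_check_metrics["f1_train_surface"].compute())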
class CargoRocketModel(pl.LightningModule):
def __init__(self, num_classes: int = 10):
super().__init__()
self.num_classes = num_classes
self.model = mtl_mobilenet_v3_large(pretrained=True, num_classes=self.num_classes)
self.criterion1 = nn.NLLLoss()
self.criterion2 = nn.NLLLoss()
self.learning_rate = 1e-3
self.train_metrics = get_metric_dict(mode="train", num_classes=self.num_classes)
self.val_metrics = get_metric_dict(mode="val", num_classes=self.num_classes)
print("Using", self.num_classes, "classes")
def forward(self, x):
# in lightning, forward defines the prediction/inference actions
prediction = self.model(x)
return prediction
def training_step(self, batch, batch_idx):
# training_step defined the train loop.
# It is independent of forward
x = batch["image"]
y1 = batch["surface"]
y2 = batch["smoothness"]
y_hat1, y_hat2 = self.model(x)
loss1 = self.criterion1(y_hat1, y1)
loss2 = self.criterion2(y_hat2, y2)
loss = loss1 + loss2
self.log('train_loss', loss)
for metric_name, metric in self.train_metrics.items():
if "surface" in metric_name:
metric(y_hat1.cpu(), y1.cpu())
self.log(metric_name, metric, on_epoch=True)
elif "smoothness" in metric_name:
metric(y_hat2.cpu(), y2.cpu())
self.log(metric_name, metric, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
x = batch["image"]
y1 = batch["surface"]
y2 = batch["smoothness"]
y_hat1, y_hat2 = self.model(x)
loss1 = self.criterion1(y_hat1, y1)
loss2 = self.criterion2(y_hat2, y2)
loss = loss1 + loss2
self.log('val_loss', loss)
for metric_name, metric in self.val_metrics.items():
if "surface" in metric_name:
metric(y_hat1.cpu(), y1.cpu())
self.log(metric_name, metric, on_epoch=True, prog_bar=True)
elif "smoothness" in metric_name:
metric(preds=y_hat2.cpu(), target=y2.cpu())
self.log(metric_name, metric, on_epoch=True, prog_bar=True)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
print("Cuda avail:", torch.cuda.is_available())
torch.cuda.set_device("cuda:1")
model = CargoRocketModel(num_classes=3)
def get_checkpoint_callback(metric: str) -> ModelCheckpoint:
callback = ModelCheckpoint(monitor=metric,
mode="max",
filename='{epoch}-{val_loss:.2f}-{' + metric + ':.4f}')
return callback
checkpoint = "/home/trossber/street-image-classification/training_run_logs/lightning_logs/version_34/checkpoints/last.ckpt"
trainer = pl.Trainer(#gpus=1,
gpus=[1],
default_root_dir="/home/trossber/street-image-classification/training_run_logs/",
max_epochs=100,
callbacks=[ModelCheckpoint(save_last=True),
get_checkpoint_callback(metric="f1_val_surface"),
get_checkpoint_callback(metric="f1_val_smoothness"),
get_checkpoint_callback(metric="accuracy_val_surface"),
get_checkpoint_callback(metric="accuracy_val_smoothness"),
],
#callbacks=[EarlyStopping(monitor='val_loss', patience=20)]
#resume_from_checkpoint=checkpoint
)
trainer.fit(model, train_loader, val_dataloaders=val_loader)
###Output
GPU available: True, used: True
TPU available: False, using: 0 TPU cores
LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]
| Name | Type | Params
-----------------------------------------------
0 | model | MTL_MobileNetV3 | 4.2 M
1 | criterion1 | NLLLoss | 0
2 | criterion2 | NLLLoss | 0
-----------------------------------------------
4.2 M Trainable params
0 Non-trainable params
4.2 M Total params
16.839 Total estimated model params size (MB)
###Markdown
Checkpoint loader
###Code
checkpoint_path = "/home/trossber/street-image-classification/training_run_logs/lightning_logs/version_51/checkpoints/"
checkpoint_path += "epoch=97-val_loss=0.93-f1_val_surface=0.9231.ckpt"
model_loaded = CargoRocketModel.load_from_checkpoint(checkpoint_path, num_classes=3)
x1, x2 = model_loaded(torch.Tensor(np.random.random((1, 3, 256, 256))))
print(x1, x2)
###Output
Using 3 classes
tensor([[-0.1754, -3.9359, -1.9565]], grad_fn=<LogSoftmaxBackward>) tensor([[-1.2401, -0.7259, -1.4839]], grad_fn=<LogSoftmaxBackward>)
###Markdown
Broken img
###Code
broken_img = "/home/trossber/street-image-classification/data/raw/images/rJN7Gii8NfYROj8P4t8WbY.jpg"
with Image.open(broken_img) as img:
img_data = np.asarray(img)
plt.imshow(img_data)
###Output
_____no_output_____
###Markdown
Predict for validation
###Code
val_batch = next(iter(val_loader))
x = val_batch["image"]
y1 = val_batch["surface"]
y2 = val_batch["smoothness"]
y_hat1, y_hat2 = model(x)
y_hat1.argmax(axis=1)
y1
tp, fp, tn, fn, sup = torchmetrics.StatScores(num_classes=3)(y_hat1, y1)
tp / (tp + fp)  # precision for the surface head
torchmetrics.StatScores(num_classes=3)(y_hat1.argmax(axis=1), y1)
# true positives, false positives, true negatives, false negatives
# TP, FP, TN, FN, sup
torchmetrics.Accuracy(average="weighted", num_classes=3)(y_hat1, y1)
torchmetrics.ConfusionMatrix(num_classes=3)(y_hat1, y1)
sum(y_hat1.detach().numpy().argmax(axis=1) == y1.numpy()) / len(y1.numpy())
for pred, true in zip(list(y_hat1.detach().numpy().argmax(axis=1)), list(y1.numpy())):
print(pred, true)
next(iter(val_loader))
checkpoint_path = "/home/trossber/street-image-classification/training_run_logs/lightning_logs/version_23/checkpoints/last.ckpt"
model_trained = CargoRocketModel.load_from_checkpoint(checkpoint_path, num_classes=3).to("cuda:1")
model_trained.eval()
df_preds = pd.DataFrame()
for batch in tqdm(val_loader):
x = batch["image"].to(model_trained.device)
label_surface = batch["surface"]
label_smoothness = batch["smoothness"]
image_path = batch["image_path"]
preds_surface, preds_smoothness = model_trained(x)
label_surface_pred = np.exp(preds_surface.detach().cpu().numpy()).argmax(axis=1)
label_smoothness_pred = np.exp(preds_smoothness.detach().cpu().numpy()).argmax(axis=1)
df_batch = pd.DataFrame({
"image_id": [Path(i).stem for i in image_path],
"surface_true": label_surface.numpy(),
"smoothness_true": label_smoothness.numpy(),
"surface_pred": label_surface_pred,
"smoothness_pred": label_smoothness_pred,
})
    df_preds = pd.concat([df_preds, df_batch])
print(df_preds.shape)
df_preds.to_csv("predictions_checkpoint_last.csv")
df_preds["surface_correct"] = df_preds["surface_true"] == df_preds["surface_pred"]
df_preds["smoothness_correct"] = df_preds["smoothness_true"] == df_preds["smoothness_pred"]
df_preds["smoothness_correct"].value_counts()
df_preds["surface_correct"].value_counts()
###Output
_____no_output_____ |
research/fatal-encounters.ipynb | ###Markdown
Fatal Encounters. Justified? ProblemUsing the [fatal encounters dataset](https://docs.google.com/spreadsheets/d/1dKmaV_JiWcG8XBoRgP8b4e9Eopkpgt7FL7nyspvzAsE/editgid=0),create a classifier that takes as input text and other attributes and triesto predict the target variable `Official disposition of death (justified or other)`. Questions- What are the relevant socially sensitive (including `protected_class_attribute`) in the dataset?- Does the data contain `potentially discriminatory` patterns? With respect to which definition of fairness?- Which features are being chosen to predict the target? Get Data
###Code
import numpy as np
import pandas as pd
import seaborn as sns
from IPython.display import Markdown, display
sns.set_style("white")
%matplotlib inline
data = pd.read_csv("data/fatal_encounters_dataset.csv")
# clean data column names
data.columns = (
data.columns
.str.replace("'", "")
.str.replace("[^a-zA-Z]", "_")
.str.replace("_+", "_")
.str.strip("_")
.str.lower()
.str.strip()
)
data = data[data.columns[~data.columns.str.startswith("unnamed")]]
def examine(df, n_sample=3):
return (
df.describe(include="all").T
[["count", "unique", "mean", "std"]]
.merge(
df.apply(
lambda s: s.sample(
n_sample, random_state=90).reset_index(drop=True))
.T.rename(columns={
i: "sample_%s" % (i + 1) for i in range(n_sample)}),
how="left", left_index=True, right_index=True))
examine(data, n_sample=2)
###Output
_____no_output_____
###Markdown
Exploration
###Code
# TARGET VARIABLE
JUSTIFIED = "official_disposition_of_death_justified_or_other"
# Features of interest
SENSITIVE_ATTRIBUTES = [
"subjects_name",
"subjects_age",
"subjects_gender",
"subjects_race",
"url_of_image_of_deceased",
"symptoms_of_mental_illness"
]
FEATURES = [
"agency_responsible_for_death",
"cause_of_death",
"a_brief_description_of_the_circumstances_surrounding_the_death",
"location_of_death_city",
"location_of_death_state",
"location_of_death_zip_code",
"location_of_death_county",
]
def plot_categorical(s, top_n=15, **kwargs):
ax = s.value_counts().sort_values().tail(top_n).plot.barh(**kwargs)
ax.set_xlabel("frequency");
sns.despine()
return ax
plot_categorical(data[JUSTIFIED], figsize=(8, 7));
###Output
_____no_output_____
###Markdown
Preprocess: encode the target variable into buckets: `justified`, `other`, and `unknown`. For modeling, throw `unknown` data out for the first pass. Target Variable
###Code
JUSTIFIED_STRINGS = [
"Justified",
"Justifed",
"Jusified",
"Justified by internal review",
"Justified by outside agency",
"Justified by District Attorney",
"Other justified (Civilian board/Prosecutor/District Attorney/Coroner)"
]
UNKNOWN_STRINGS = [
"Unreported",
"Unknown",
]
RACE = "subjects_race"
GENDER = "subjects_gender"
def encode_target(s):
if pd.isnull(s):
return "UNKNOWN"
s = s.strip()
if s in JUSTIFIED_STRINGS:
return "JUSTIFIED"
elif s in UNKNOWN_STRINGS:
return "UNKNOWN"
else:
return "OTHER"
gender_encoding_map = {
"Female": "FEMALE",
"Femalr": "FEMALE",
"Transgender": "TRANSGENDER",
"Male": "MALE",
}
race_encoding_map = {
"Race unspecified": "RACE_UNSPECIFIED",
"European-American/White": "WHITE",
"African-American/Black": "BLACK",
"Hispanic/Latino": "LATINO",
"Asian/Pacific Islander": "ASIAN_PACIFIC_ISLANDER",
"Native American/Alaskan": "NATIVE_AMERICAN_ALASKAN",
"Middle Eastern": "MIDDLE_EASTERN",
}
clean_data = data.copy()
clean_data[JUSTIFIED] = data[JUSTIFIED].map(encode_target)
clean_data[JUSTIFIED].value_counts().to_frame()
clean_data[GENDER] = data[GENDER].map(gender_encoding_map)
clean_data[RACE] = data[RACE].map(race_encoding_map)
# exclude records with "UNKNOWN" disposition and "UNSPECIFIED RACE"
clean_data = clean_data[clean_data[JUSTIFIED] != "UNKNOWN"]
clean_data = clean_data[clean_data[RACE] != "RACE_UNSPECIFIED"]
clean_data[JUSTIFIED].value_counts().to_frame()
###Output
_____no_output_____
###Markdown
Sensitive Attributes: Gender and Race
###Code
clean_data.subjects_gender.value_counts().to_frame()
clean_data.subjects_race.value_counts().to_frame()
###Output
_____no_output_____
###Markdown
Features
###Code
examine(clean_data[FEATURES])
clean_data.cause_of_death.value_counts().to_frame()
###Output
_____no_output_____
###Markdown
TODO: tokenize `a_brief_description_of_the_circumstances_surrounding_the_death`so that text is represented as a word vector. Assess Potentially Discriminatory (PD) PatternsGet `mean_difference` score for the following sensitive attributes:- subjects_gender- subjects_race
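For the tokenization TODO above, one minimal sketch (an illustration using scikit-learn's `TfidfVectorizer`; not part of the original analysis):
###Code
# Hypothetical sketch only: turn the free-text descriptions into a sparse word-vector matrix
from sklearn.feature_extraction.text import TfidfVectorizer
description_col = "a_brief_description_of_the_circumstances_surrounding_the_death"
vectorizer = TfidfVectorizer(max_features=500, stop_words="english")
text_features = vectorizer.fit_transform(clean_data[description_col].fillna(""))
text_features.shape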
###Code
from themis_ml.metrics import mean_difference, mean_confidence_interval
def report_mean_difference(y, s_list):
report = []
index = []
for s_name, s in s_list:
s_notnull = s.notnull()
report.append(
map(lambda x: x * 100, mean_difference(y[s_notnull], s[s_notnull])))
index.append("{s_name} vs. NOT {s_name}".format(s_name=s_name))
return pd.DataFrame(
report, columns=["mean difference", "lower bound", "upper bound"],
index=index)
is_justified = clean_data[JUSTIFIED] == "JUSTIFIED"
gender_vectors = [
(g, (clean_data.subjects_gender == g).astype(int))
for g in clean_data.subjects_gender.dropna().unique()]
gender_report = report_mean_difference(is_justified, gender_vectors)
gender_report
def plot_report(report):
margin = (report["mean difference"] - report["lower bound"]).abs()
ax = report[["mean difference"]].plot(
kind="barh", xerr=margin, legend=False)
ax.axvline(0, color="k")
ax.set_xlabel("mean difference")
sns.despine(bottom=True, left=True)
plot_report(gender_report)
###Output
_____no_output_____
###Markdown
If `mean difference` is negative with respect to some sensitive attribute value$s \in \{d, a\}$ and some outcome $y \in \{y^{+}, y^{-}\}$ , it implies thatthe members of the putatively disadvantaged class $d$ experiences thebeneficial outcome $y^{+}$ more often compared to the advantaged class $a$.Conversely, if `mean difference` is positive with respect to some sensitiveattribute value $s \in \{d, a\}$ and some outcome $y \in \{y^{+}, y^{-}\}$ ,it implies that members the putatively disadvantaged class $d$ experiencesthe harmful outcome $y^{-}$ more often compared to the advantaged class $a$.Interestingly, `MALE`s experience `JUSTIFIED` fatal encounters more thantheir `NON MALE` counterparts
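As a toy numeric illustration (made-up numbers, not from this dataset): if 70% of the putatively disadvantaged group $d$ and 50% of the advantaged group $a$ experience the beneficial outcome $y^{+}$, then the mean difference is $0.50 - 0.70 = -0.20$, which the report above would show as $-20$ on its percentage scale.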
###Code
race_vectors = [
(r, (clean_data.subjects_race == r).astype(int))
for r in clean_data.subjects_race.dropna().unique()]
race_report = report_mean_difference(is_justified, race_vectors)
race_report
plot_report(race_report)
mental_illness_vectors = [
(r, (clean_data.symptoms_of_mental_illness == r).astype(int))
for r in clean_data.symptoms_of_mental_illness.dropna().unique()]
mental_illness_report = report_mean_difference(
is_justified, mental_illness_vectors)
mental_illness_report
plot_report(mental_illness_report)
###Output
_____no_output_____
###Markdown
Interestingly, `MALE` and `BLACK` people experience `JUSTIFIED` fatal encounters more than their `NON MALE` and `NON BLACK` counterparts, respectively. This leads me to suspect that the labels `official_disposition_of_death_justified_or_other` are somehow skewed against these two sensitive attribute values.**WHO LABELLED THESE RECORDS?** Train Models: Train a logistic regression model to predict `JUSTIFIED = 1`, `OTHER = 0`.
###Code
import itertools
import numpy as np
import pandas as pd
from sklearn.model_selection import RepeatedKFold, RepeatedStratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from themis_ml.linear_model import LinearACFClassifier
from sklearn.metrics import (
accuracy_score, roc_auc_score, f1_score)
FAIRNESS_UNAWARE_FEATURES = [
("subjects_age", "NUMERIC"),
("subjects_gender", "CATEGORICAL"),
("subjects_race", "CATEGORICAL"),
("symptoms_of_mental_illness", "CATEGORICAL"),
("agency_responsible_for_death", "CATEGORICAL"),
("cause_of_death", "CATEGORICAL"),
("location_of_death_city", "CATEGORICAL"),
("location_of_death_state", "CATEGORICAL"),
("location_of_death_zip_code", "CATEGORICAL"),
("location_of_death_county", "CATEGORICAL"),
]
training_data = []
for feature, dtype in FAIRNESS_UNAWARE_FEATURES:
if dtype == "NUMERIC":
f = clean_data[feature].str.replace("[^0-9]", "").astype(float)
training_data.append(f.where(f.notnull(), f.mean()))
elif dtype == "CATEGORICAL":
training_data.append(pd.get_dummies(clean_data[[feature]].fillna("NULL")))
training_data = pd.concat(training_data, axis=1)
features = training_data.columns
training_data = training_data.assign(
target=(clean_data[JUSTIFIED] == "JUSTIFIED").astype(int))
assert training_data.notnull().all().all()
training_data.head()
cv = RepeatedStratifiedKFold(n_splits=3, n_repeats=10)
estimators = [
("logistic_regression", LogisticRegression()),
("linear_acf", LinearACFClassifier()),
]
X = training_data[features].values
y = training_data["target"].values
s = training_data["subjects_race_BLACK"].values
strata = training_data["target"].astype(int).astype(str).str.cat(
training_data["subjects_race_BLACK"].astype(int).astype(str), sep="_")
preds = []
for i, (train, test) in enumerate(cv.split(X, strata, groups=strata)):
print("."),
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
s_train, s_test = s[train], s[test]
for est_name, estimator in estimators:
fit_args = (X_train, y_train, s_train) if est_name == "linear_acf" \
else (X_train, y_train)
predict_args = (X_test, s_test) if est_name == "linear_acf" \
else (X_test, )
estimator.fit(*fit_args)
preds.append(
pd.DataFrame({
"pred_y": estimator.predict_proba(*predict_args)[:, 1],
"pred_label": estimator.predict(*predict_args).astype(int),
"true_y": y_test.astype(int),
"sensitive_attribute": s_test,
"rep_fold": i,
"estimator": est_name,
}))
preds = pd.concat(preds)
def compute_metrics(df):
accuracy = accuracy_score(df.true_y, df.pred_label)
mean_diff, lower, upper = mean_difference(df.pred_label, df.sensitive_attribute)
return pd.Series({
"accuracy": accuracy,
"mean difference": mean_diff,
})
metrics = (
preds
.groupby(["estimator", "rep_fold"])
.apply(compute_metrics)
.reset_index(0)
.pipe(pd.melt, id_vars="estimator", var_name="metric",
value_name="value")
)
sns.factorplot(
x="value", y="estimator",
hue="metric",
row="metric",
sharex=False,
data=metrics,
size=3, aspect=1.5,
join=False);
(
metrics
.groupby(["metric", "estimator"])
.agg([np.mean, np.std]))
###Output
_____no_output_____ |
td_semaine_4/Filtres convolutifs - Partie 2.ipynb | ###Markdown
$\newcommand{\xbf}{{\bf x}}\newcommand{\ybf}{{\bf y}}\newcommand{\wbf}{{\bf w}}\newcommand{\Ibf}{\mathbf{I}}\newcommand{\Xbf}{\mathbf{X}}\newcommand{\Rbb}{\mathbb{R}}\newcommand{\vec}[1]{\left[\begin{array}{c}#1\end{array}\right]}$ Convolutional filters (part 2). Course material written by Pascal Germain, 2019************
###Code
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
import torch
from torch import nn
torch.__version__ # This notebook was designed with pytorch version '1.2.0'
###Output
_____no_output_____
###Markdown
L'ensemble CIFARNous vous fournissons un sous-ensemble du jeu de données CIFAR10. Le jeu de donnée original provient de : https://www.cs.toronto.edu/~kriz/cifar.htmlIl s’agit d’un problème de classification multi-classes; le jeu de données contient des images couleurs de taille32 × 32 pixels représentant 10 catégories d’objets. Pour simplifier le problème et réduire le temps d’ap-prentissage requis, nous vous suggérons de conserver seulement les trois premières catégories : «avion»,«automobile» et «oiseau». Le répertoire `data/cifar` contient un fichier compressé par catégorie, chacun regroupant les images en format *PNG*.La méthode `charger_cifar` du fichier `cifar_utils` permet d’extraire les images compressées du jeu de données et de les transformer en vecteur de 3 × 32 × 32 = 3072 nombres réels compris entre 0.0 et 1.0, qui sont la concaténation des valeurs des canaux rouge, vert et bleu.
###Code
from cifar_utils import charger_cifar, afficher_grille_cifar
from sklearn.metrics import accuracy_score
repertoire_cifar = '../data/cifar/'
classes_cifar = [0, 1, 2]
data_x, data_y = charger_cifar(repertoire_cifar, classes_cifar)
np.shape(data_x)
data_y
###Output
_____no_output_____
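###Markdown
To make the flat 3072-dimensional format concrete, here is a small added illustration (assuming the channel-first red/green/blue layout described above):
###Code
# Reshape one flat vector back into a 32x32 RGB image (channels first -> channels last for imshow)
image_0 = data_x[0].reshape(3, 32, 32).transpose(1, 2, 0)
plt.imshow(image_0);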
###Markdown
Let's split the images into a training set and a test set
###Code
from sklearn.model_selection import train_test_split
train_x, test_x, train_y, test_y = train_test_split(data_x, data_y, test_size=0.5, random_state=42)
print('train_x:', train_x.shape)
print('test_x:', test_x.shape)
print('train_y:', train_y.shape)
print('test_y:', test_y.shape)
###Output
_____no_output_____
###Markdown
The `afficher_grille_cifar` method from the `cifar_utils` file lets you visualize a set of images.
###Code
indices_aleatoires = np.random.randint(len(train_y), size=40)
afficher_grille_cifar(train_x[indices_aleatoires])
###Output
_____no_output_____
###Markdown
Training a fully connected network
###Code
from reseau_classif_generique import ReseauClassifGenerique
nb_entrees = 3 * 32 * 32
nb_sorties = 3
nb_neurones_cachees = 50
archi_pleinement_connectee = nn.Sequential(
nn.Linear(nb_entrees, nb_neurones_cachees),
nn.ReLU(),
nn.Linear(nb_neurones_cachees, nb_sorties),
nn.LogSoftmax(dim=1)
)
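# Side note (an assumption, not part of the original exercise): a convolutional architecture for the
# same task could be sketched as below; it expects inputs reshaped to (N, 3, 32, 32) rather than the
# flat 3072-dimensional vectors used by the fully connected network above.
archi_convolutive = nn.Sequential(
    nn.Conv2d(3, 16, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Conv2d(16, 32, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Flatten(),
    nn.Linear(32 * 8 * 8, nb_sorties),
    nn.LogSoftmax(dim=1)
)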
reseau_pc = ReseauClassifGenerique(archi_pleinement_connectee, eta=0.01, alpha=0.1, nb_epoques=500,
taille_batch=32, fraction_validation=.1, patience=20)
reseau_pc.apprentissage(train_x, train_y)
train_pred = reseau_pc.prediction(train_x)
test_pred = reseau_pc.prediction(test_x)
print('Précision train:', accuracy_score(train_y, train_pred) )
print('Précision test :', accuracy_score(test_y, test_pred))
###Output
_____no_output_____ |
wei/p06.ipynb | ###Markdown
p.6 Chinking
###Code
from IPython.display import YouTubeVideo
YouTubeVideo('EymPQgCtcAE')
###Output
_____no_output_____
###Markdown
1. Chinking(1) Remove from the groups those things you don't want but are picked up during chunking. (2) A chink, i.e. the thing we want to remove, is defined in a similar fashion to how we define a chunk. 2. Example: Chunk with chinking
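Before the full corpus example below, a minimal self-contained illustration of the chunk/chink grammar syntax (a toy tagged sentence, added for clarity):
###Code
# Chunk everything, then chink (remove) verbs, prepositions, determiners and "to"
import nltk
toy_sentence = [("The", "DT"), ("quick", "JJ"), ("fox", "NN"), ("jumped", "VBD"), ("over", "IN"), ("the", "DT"), ("dog", "NN")]
toy_grammar = r"""Chunk: {<.*>+}
}<VB.?|IN|DT|TO>+{"""
print(nltk.RegexpParser(toy_grammar).parse(toy_sentence))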
###Code
import nltk
from nltk.corpus import state_union
from nltk.tokenize import PunktSentenceTokenizer
train_text = state_union.raw("2005-GWBush.txt")
sample_text = state_union.raw("2006-GWBush.txt")
custom_sent_tokenizer = PunktSentenceTokenizer(train_text)
tokenized = custom_sent_tokenizer.tokenize(sample_text)
def process_content():
try:
for i in tokenized:
words = nltk.word_tokenize(i)
tagged = nltk.pos_tag(words)
chunkGram = r"""Chunk: {<.*>+} # This chunk rule will basically group everything into one chunk.
}<VB.?|IN|DT|TO>+{""" # This chink rule will break the chunk at either verbs, preposition or conjunction, determiner, or "to" as preposition or infinitive marker.
chunkParser = nltk.RegexpParser(chunkGram)
chunked = chunkParser.parse(tagged)
chunked.draw()
except Exception as e:
print(str(e))
process_content()
###Output
_____no_output_____ |
notebooks/Distance to land - dfsu.ipynb | ###Markdown
Get a list of land nodes
###Code
xnland = xn[c == 1]
ynland = yn[c == 1]
plt.scatter(xnland,ynland)
###Output
_____no_output_____
###Markdown
Calculate element coordinates
###Code
ne = mesh.NumberOfElements
xe = np.zeros(ne)
ye = np.zeros(ne)
# Node coordinates
xn = np.array(list(mesh.X))
yn = np.array(list(mesh.Y))
for j in range(ne):
nodes = mesh.ElementTable[j]
xcoords = np.empty(nodes.Length)
ycoords = np.empty(nodes.Length)
for i in range(nodes.Length):
nidx = nodes[i]-1
xcoords[i] = xn[nidx]
ycoords[i] = yn[nidx]
xe[j] = xcoords.mean()
ye[j] = ycoords.mean()
###Output
_____no_output_____
###Markdown
Calculate distance to closest land node
###Code
i = 0
d = np.zeros(ne)
for i in range(ne):
d[i] = np.min(np.sqrt((xe[i] - xnland)**2 + (ye[i] - ynland)**2))
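# Sketch of a vectorized alternative (assumes SciPy is available; should give the same distances as the loop above)
from scipy.spatial import cKDTree
land_tree = cKDTree(np.column_stack((xnland, ynland)))
d_kdtree, _ = land_tree.query(np.column_stack((xe, ye)))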
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
patches = []
for j in range(ne):
nodes = mesh.ElementTable[j]
pcoords = np.empty([nodes.Length, 2])
for i in range(nodes.Length):
nidx = nodes[i] - 1
pcoords[i, 0] = xn[nidx]
pcoords[i, 1] = yn[nidx]
polygon = Polygon(pcoords, True)
patches.append(polygon)
fig, ax = plt.subplots()
p = PatchCollection(patches, alpha=0.8)
p.set_array(d)
ax.add_collection(p)
fig.colorbar(p, ax=ax)
ax.set_xlim(xn.min(), xn.max())
ax.set_ylim(yn.min(), yn.max())
###Output
_____no_output_____
###Markdown
Store result in a new Dfsu file
###Code
data = list()
data.append(d.reshape(1,-1))
data[0].shape
from mikeio.dfsu import dfsu
dfs = dfsu()
dfsufilename = r"distance.dfsu"
dfs.create(meshfilename, dfsufilename, data, names=["Distance to land"])
###Output
_____no_output_____ |
Evaluation/Model_Evaluation_Phase_1.ipynb | ###Markdown
**Import the Data**
###Code
# Import necessary libraries.
import pandas as pd
import os
import csv
import io
import requests
import numpy as np
import matplotlib.pyplot as plt
import re
%matplotlib inline
# Set up URLs.
circuits_url = 'https://raw.githubusercontent.com/georgetown-analytics/Formula1/main/data/circuits.csv'
constructor_results_url = 'https://raw.githubusercontent.com/georgetown-analytics/Formula1/main/data/constructor_results.csv'
constructor_standings_url = 'https://raw.githubusercontent.com/georgetown-analytics/Formula1/main/data/constructor_standings.csv'
constructors_url = 'https://raw.githubusercontent.com/georgetown-analytics/Formula1/main/data/constructors.csv'
driver_standings_url = 'https://raw.githubusercontent.com/georgetown-analytics/Formula1/main/data/driver_standings.csv'
drivers_url = 'https://raw.githubusercontent.com/georgetown-analytics/Formula1/main/data/drivers.csv'
lap_times_url = 'https://raw.githubusercontent.com/georgetown-analytics/Formula1/main/data/lap_times.csv'
pit_stop_url = 'https://raw.githubusercontent.com/georgetown-analytics/Formula1/main/data/pit_stops.csv'
qualifying_url = 'https://raw.githubusercontent.com/georgetown-analytics/Formula1/main/data/qualifying.csv'
races_url = 'https://raw.githubusercontent.com/georgetown-analytics/Formula1/main/data/races.csv'
results_url = 'https://raw.githubusercontent.com/georgetown-analytics/Formula1/main/data/results.csv'
seasons_url = 'https://raw.githubusercontent.com/georgetown-analytics/Formula1/main/data/seasons.csv'
status_url = 'https://raw.githubusercontent.com/georgetown-analytics/Formula1/main/data/status.csv'
race_status_url = 'https://raw.githubusercontent.com/georgetown-analytics/Formula1/main/data/race_status.csv'
MasterData1_url = 'https://raw.githubusercontent.com/georgetown-analytics/Formula1/main/data/MasterData1.csv'
# Set up dataframes.
circuits_df = pd.read_csv(circuits_url, sep = ',', encoding = 'latin-1')
constructor_results_df = pd.read_csv(constructor_results_url, sep = ',', engine = 'python')
constructor_standings_df = pd.read_csv(constructor_standings_url, sep = ',', engine = 'python')
constructors_df = pd.read_csv(constructors_url, sep = ',', engine = 'python')
driver_standings_df = pd.read_csv(driver_standings_url, sep = ',', engine = 'python')
lap_times_df = pd.read_csv(lap_times_url, sep = ',', engine = 'python')
pit_stop_df = pd.read_csv(pit_stop_url, sep = ',', engine = 'python')
qualifying_df = pd.read_csv(qualifying_url, sep = ',', engine = 'python')
results_df = pd.read_csv(results_url, sep = ',', engine = 'python')
seasons_df = pd.read_csv(seasons_url, sep = ',', engine = 'python')
status_df = pd.read_csv(status_url, sep = ',', engine = 'python')
races_df = pd.read_csv(races_url, sep = ',', engine = 'c')
drivers_df = pd.read_csv(drivers_url, sep = ',', encoding = 'latin-1')
race_status_df = pd.read_csv(race_status_url, sep = ',', engine = 'python')
MasterData1_df = pd.read_csv(MasterData1_url, sep = ',', engine = 'python', encoding = 'latin-1')
###Output
_____no_output_____
###Markdown
**MasterData1 Evaluation**
###Code
MasterData1_df.head()
# Assuming that Index is by Driver
print(MasterData1_df.columns.tolist())
# Are we working with null values?
MasterData1_df.isnull()
# It does not seem like we are, but upon attempting to model, we were left with an error.
#Replace \N with null value
MasterData1_df = MasterData1_df.replace(r'\N', np.NaN)
#Replace null values with median value of that column
MasterData1_df["position"] = MasterData1_df["position"].fillna(MasterData1_df["position"].median())
MasterData1_df["circuitId"] = MasterData1_df["circuitId"].fillna(MasterData1_df["circuitId"].median())
MasterData1_df["rank"] = MasterData1_df["rank"].fillna(MasterData1_df["rank"].median())
MasterData1_df["grid"] = MasterData1_df["grid"].fillna(MasterData1_df["grid"].median())
###Output
_____no_output_____
###Markdown
**Modeling Objectives**- Start off with Logistic Regression Model for Classification- At first we want to predict Completion Status (Binary)- Finished the race = 1- Did not finish the race = 0- Use small amount of variables based on intuition i.e:- grid, position, circuitId, rank- Visualize Findings & Evaluate Model- Run data through a series of models and evaluate- Evaluate Features, determine alterations for a better performing model. **Evaluating Classifiers**
###Code
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from yellowbrick.classifier import ClassificationReport
from yellowbrick.classifier import ROCAUC
X = MasterData1_df[["grid", "position", "circuitId", "rank"]]
y = MasterData1_df["Completion Status"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Instantiate the classification model and visualizer
logreg = LogisticRegression()
visualizer = ClassificationReport(
logreg, classes=[0,1], support=True, size=(1080, 720)
)
visualizer.fit(X_train, y_train) # Fit the visualizer and the model
visualizer.score(X_test, y_test) # Evaluate the model on the test data
visualizer.show() # Draw the data
###Output
_____no_output_____
###Markdown
**Findings**- For all instances that were actually positive, 78% were classified correctly- 78% of the time the model is able to accurately predict when a driver would finish a race. - Why are the results for predicting non-finishes, "0", so poor?- Messing with the classes object in visualizer greatly alters results, did I make a mistake with this? **Evaluating Multiple Classifiers at Once**
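One factor worth checking for the poor "0" results is class imbalance; a quick sketch (reusing the same `y` as above):
###Code
# How skewed is the target? A heavily imbalanced target often explains poor recall on the minority class.
print(y.value_counts(normalize=True))
# A hedged mitigation to try would be class_weight="balanced" in LogisticRegression.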
###Code
from sklearn.metrics import f1_score
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC, NuSVC, SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split as tts
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression, SGDClassifier
from sklearn.ensemble import BaggingClassifier, ExtraTreesClassifier, RandomForestClassifier
from yellowbrick.classifier import ClassificationReport
def score_model(X, y, estimator, **kwargs):
"""
Test various estimators.
"""
X = MasterData1_df[["grid", "position", "circuitId", "rank"]]
y = MasterData1_df["Completion Status"]
# Instantiate the classification model and visualizer
    estimator.fit(X, y, **kwargs)
    expected = y
    predicted = estimator.predict(X)
# Compute and return F1 (harmonic mean of precision and recall)
print("{}: {}".format(estimator.__class__.__name__, f1_score(expected, predicted)))
models = [
SVC(gamma='auto'),
NuSVC(gamma='auto'),
LinearSVC(),
SGDClassifier(max_iter=100, tol=1e-3),
KNeighborsClassifier(),
LogisticRegression(solver='lbfgs'),
LogisticRegressionCV(cv=3),
BaggingClassifier(),
ExtraTreesClassifier(n_estimators=100),
RandomForestClassifier(n_estimators=100)
]
for model in models:
score_model(X, y, model)
#Based on this series of tests, the model that returned the highest F1 Score is the RandomForestClassifier with a score of 0.9682886216466235
#It seems so far that the most viable models are: RandomForestClassifier, ExtraTreesClassifier, and BaggingClassifier
###Output
_____no_output_____
###Markdown
**Visual Model Evaluation**
###Code
def visualize_model(X, y, estimator):
"""
Test various estimators.
"""
X = MasterData1_df[["grid", "position", "circuitId", "rank"]]
y = MasterData1_df["Completion Status"]
# Instantiate the classification model and visualizer
visualizer = ClassificationReport(
        estimator,
classes=[0,1],
cmap="Reds",
support=True,
size=(600, 360)
)
X_train, X_test, y_train, y_test = tts(X, y, test_size=0.20)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.show()
for model in models:
visualize_model(X, y, model)
###Output
_____no_output_____
###Markdown
**Evaluating Features** - We have intuitively selected features and have gotten certain models to run well based on these features. - This is good, but it would be better to arrive at these results via a systematic approach. - It is hard to come to conclusions about our data and what our features explain about our hypothesis.- To get a better sense of our data we will use Recursive Feature Elimination, LASSO, or Tree Based evaluations.
###Code
#Recursive Feature Analysis
MasterData1_df.head()
print(MasterData1_df.columns.tolist())
#Replace \N with null value
MasterData1_df = MasterData1_df.replace(r'\N', np.NaN)
#Replace null values with median value of that column
MasterData1_df["position"] = MasterData1_df["position"].fillna(MasterData1_df["position"].median())
MasterData1_df["circuitId"] = MasterData1_df["circuitId"].fillna(MasterData1_df["circuitId"].median())
MasterData1_df["rank"] = MasterData1_df["rank"].fillna(MasterData1_df["rank"].median())
MasterData1_df["grid"] = MasterData1_df["grid"].fillna(MasterData1_df["grid"].median())
MasterData1_df["milliseconds"] = MasterData1_df["milliseconds"].fillna(MasterData1_df["milliseconds"].median())
MasterData1_df["fastestLap"] = MasterData1_df["fastestLap"].fillna(MasterData1_df["fastestLap"].median())
MasterData1_df["fastestLapSpeed"] = MasterData1_df["fastestLapSpeed"].fillna(MasterData1_df["fastestLapSpeed"].median())
MasterData1_df["year"] = MasterData1_df["year"].fillna(MasterData1_df["year"].median())
MasterData1_df["alt"] = MasterData1_df["alt"].fillna(MasterData1_df["alt"].median())
from sklearn.metrics import f1_score
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC, NuSVC, SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split as tts
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression, SGDClassifier
from sklearn.ensemble import BaggingClassifier, ExtraTreesClassifier, RandomForestClassifier
from sklearn.linear_model import Ridge, Lasso, ElasticNet
from sklearn.feature_selection import SelectFromModel
from yellowbrick.classifier import ClassificationReport
from sklearn.feature_selection import RFE
#set variables
X = MasterData1_df[["alt", "year", "fastestLapSpeed", "fastestLap", "milliseconds", "grid", "rank", "circuitId", "position"]]
y = MasterData1_df["Completion Status"]
#LASSO- Regularization
model = Lasso()
model.fit(X, y)
print(list(zip(X, model.coef_.tolist())))
#What does this mean?
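# (Interpretation note: Lasso's L1 penalty shrinks weak coefficients to exactly zero, so the
#  features left with non-zero coefficients are the ones the regularized model keeps.)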
#Transformer
model = Lasso()
sfm = SelectFromModel(model)
sfm.fit(X, y)
print(list(X.iloc[:, sfm.get_support(indices=True)]))
###Output
['year']
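###Markdown
The Recursive Feature Elimination mentioned above is imported but never run; a minimal sketch of what it could look like (an assumption, reusing the same `X` and `y`):
###Code
# Hypothetical RFE sketch: rank features by recursively dropping the weakest ones
rfe = RFE(estimator=LogisticRegression(solver="lbfgs", max_iter=1000), n_features_to_select=4)
rfe.fit(X, y)
print(list(zip(X.columns, rfe.support_.tolist(), rfe.ranking_.tolist())))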
|
examples/homomorphic-encryption/Tutorial_0_TenSEAL_Syft_Data_Owner.ipynb | ###Markdown
Homomorphic Encryption using Duet: Data Owner Tutorial 0: Basic operationsWelcome!This tutorial will show you how to use Duet with homomorphic encryption and some use cases. This notebook illustrates the Data Owner view on the operations.We will focus on Duet's integration with [TenSEAL](https://github.com/OpenMined/TenSEAL). TenSEAL is a Python library for doing homomorphic encryption operations on tensors. It's built on top of [Microsoft SEAL](https://github.com/Microsoft/SEAL), a C++ library implementing the BFV and CKKS homomorphic encryption schemes.If you want to learn more about TenSEAL, we recommend the following tutorials:- ['Tutorial 0 - Getting Started'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%200%20-%20Getting%20Started.ipynb).- ['Tutorial 1: Training and Evaluation of Logistic Regression on Encrypted Data'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%201%20-%20Training%20and%20Evaluation%20of%20Logistic%20Regression%20on%20Encrypted%20Data.ipynb).- ['Tutorial 2: Working with Approximate Numbers'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%202%20-%20Working%20with%20Approximate%20Numbers.ipynb).Let's now start the tutorial with a brief review of what homomorphic encryption is, but keep in mind that you don't need to be a crypto expert to use these features. Homomorphic Encryption__Definition__ : Homomorphic encryption (HE) is a technique that allows computations to be made on ciphertexts and generates results that when decrypted, corresponds to the result of the same computations made on plaintexts.This means that an HE scheme lets you encrypt two numbers *X* and *Y*, add their encrypted versions so that it gets decrypted to *X + Y*, the addition could have been a multiplication as well. SetupAll modules are imported here, make sure everything is installed by running the cell below.
###Code
import syft as sy
import tenseal as ts
import pytest
sy.load("tenseal")
###Output
_____no_output_____
###Markdown
Start Duet Data Owner instance
###Code
# Start Duet local instance
duet = sy.launch_duet(loopback=True)
###Output
_____no_output_____
###Markdown
Theory: Homomorphic encryption schemes__TenSEAL__ supports two encryption schemes: - __BFV__, a scheme for operations on integers. - __CKKS__, a scheme for operations on approximate numbers. This scheme is much better suited for ML applications and we will focus more on it. There are a few major steps for each scheme: 1. __Keys Generation__: in this step, we generate public and private keys that will be used for encryption/decryption. 2. __Encryption__: this is the process of converting a plaintext into an encrypted ciphertext. This step requires an encryption key(or a public key). 3. __Decryption__: this is the process of converting a ciphertext back into a plaintext. This step requires a decryption key(or a secret key). This step cannot be done on the Data Scientist endpoint. Theory: Homomorphic encryption parameters__TenSEAL__ requires a few parameters to set the keys up: - __The polynomial modulus degree(poly_modulus_degree).__ This parameter directly affects the size of the ciphertext, the security of the scheme(bigger is better), but also the computational performance of the scheme(bigger is worse) - __The coefficient modulus sizes(coeff_mod_bit_sizes).__ This parameter is an array of bit sizes and directly affects the size of the ciphertext, the security of the scheme(bigger is worse), and the depth of computation allowed in the encrypted space(longer is better). - __The scaling factor(global_scale).__ This parameter is only used for the approximate schemes(CKKS) and directly affects the precision of computation and decryption. Theory: Homomorphic encryption keys__TenSEAL__ generates a few keys internally, each with another use case: - __The Private Key(or the secret/decryption key)__. This key is used for decrypting ciphertexts, and it is used to derive the other keys. __DO NOT SHARE IT OUTSIDE THE DATA OWNER PROCESS__. - __The Public key(or the encryption key)__. This key is used for encrypting the plain data to a ciphertext. You can safely share it with the Data Scientist. - __The Relinearization Keys(optional)__. This key is used for controlling the quality of the ciphertexts after encrypted multiplications. Generate it only if you are doing encrypted multiplications. You can safely share it with the Data Scientist. - __The Galois Keys(optional)__. This key is needed to perform encrypted vector rotation operations on ciphertexts. Generate it only if you are evaluating convolutions on encrypted data. You can safely share it with the Data Scientist. TenSEAL ContextNow that we had a short introduction, let's get to work.The first step to do for a Data Owner is to generate a security context containing security parameters and encryption keys.
###Code
context = ts.Context(
ts.SCHEME_TYPE.CKKS,
poly_modulus_degree=8192,
coeff_mod_bit_sizes=[60, 40, 40, 60]
)
context.global_scale = 2**40
context
###Output
_____no_output_____
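###Markdown
As a side note (not used in the rest of this tutorial), a BFV context for exact integer arithmetic would be created along these lines; the parameter values shown here are illustrative assumptions:
###Code
# BFV operates on integers and takes a plain_modulus instead of a global_scale
bfv_context = ts.context(ts.SCHEME_TYPE.BFV, poly_modulus_degree=4096, plain_modulus=1032193)
bfv_vector = ts.bfv_vector(bfv_context, [1, 2, 3])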
###Markdown
Encrypt the data
###Code
v1 = [0, 1, 2, 3, 4]
v2 = [4, 3, 2, 1, 0]
enc_v1 = ts.ckks_vector(context, v1)
enc_v2 = ts.ckks_vector(context, v2)
(enc_v1, enc_v2)
###Output
_____no_output_____
###Markdown
Make Context and Encrypted Vectors Referenceable over Duet
###Code
# tag them so our partner can easily reference it
ctx_ptr = context.send(duet, pointable=True, tags=["context"])
enc_v1_ptr = enc_v1.send(duet, pointable=True, tags=["enc_v1"])
enc_v2_ptr = enc_v2.send(duet, pointable=True, tags=["enc_v2"])
# we can see that our three objects are now inside the store we control
duet.store.pandas
###Output
_____no_output_____
###Markdown
Checkpoint 1 : Now STOP and run the Data Scientist notebook until the same checkpoint.
###Code
# We can see our duet partner has requested the two encrypted vectors and the public context
duet.requests.pandas
###Output
_____no_output_____
###Markdown
Approve the requests
###Code
duet.requests[0].accept()
duet.requests[0].accept()
duet.requests[0].accept()
# The requests should have been handled
duet.requests.pandas
###Output
_____no_output_____
###Markdown
Checkpoint 2 : Now STOP and run the Data Scientist notebook until the same checkpoint. Get the computation results from store and decrypt them locally
###Code
# Validate the encrypted add
result_add = duet.store["result_add"].get(delete_obj=False)
result_add.link_context(context)
result_add
decrypted_result = result_add.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 + v2 for v1, v2 in zip(v1, v2)]
decrypted_result
# Validate the encrypted - plain add
result_iadd = duet.store["result_iadd"].get(delete_obj=False)
result_iadd.link_context(context)
decrypted_result = result_iadd.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 + v2 for v1, v2 in zip(v1, [10, 10, 10, 10, 10])]
decrypted_result
# Validate the encrypted subtraction
result_sub = duet.store["result_sub"].get(delete_obj=False)
result_sub.link_context(context)
decrypted_result = result_sub.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 - v2 for v1, v2 in zip(v1, v2)]
decrypted_result
# Validate the encrypted multiplication
result_mul = duet.store["result_mul"].get(delete_obj=False)
result_mul.link_context(context)
decrypted_result = result_mul.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 * v2 for v1, v2 in zip(v1, v2)]
decrypted_result
# Validate the encrypted power
result_pow = duet.store["result_pow"].get(delete_obj=False)
result_pow.link_context(context)
decrypted_result = result_pow.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v ** 3 for v in v1]
decrypted_result
# Validate the encrypted negation
result_neg = duet.store["result_neg"].get(delete_obj=False)
result_neg.link_context(context)
decrypted_result = result_neg.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [-v for v in v1]
decrypted_result
# Validate the encrypted polynomial evaluation for 1 + X^2 + X^3
result_poly = duet.store["result_poly"].get(delete_obj=False)
result_poly.link_context(context)
decrypted_result = result_poly.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [1 + v**2 + v**3 for v in v1]
decrypted_result
###Output
_____no_output_____
###Markdown
Homomorphic Encryption using Duet: Data Owner Tutorial 0: Basic operationsWelcome!This tutorial will show you how to use Duet with homomorphic encryption and some use cases. This notebook illustrates the Data Owner view on the operations.We will focus on Duet's integration with [TenSEAL](https://github.com/OpenMined/TenSEAL). TenSEAL is a Python library for doing homomorphic encryption operations on tensors. It's built on top of [Microsoft SEAL](https://github.com/Microsoft/SEAL), a C++ library implementing the BFV and CKKS homomorphic encryption schemes.If you want to learn more about TenSEAL, we recommend the following tutorials:- ['Tutorial 0 - Getting Started'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%200%20-%20Getting%20Started.ipynb).- ['Tutorial 1: Training and Evaluation of Logistic Regression on Encrypted Data'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%201%20-%20Training%20and%20Evaluation%20of%20Logistic%20Regression%20on%20Encrypted%20Data.ipynb).- ['Tutorial 2: Working with Approximate Numbers'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%202%20-%20Working%20with%20Approximate%20Numbers.ipynb).Let's now start the tutorial with a brief review of what homomorphic encryption is, but keep in mind that you don't need to be a crypto expert to use these features. Homomorphic Encryption__Definition__ : Homomorphic encryption (HE) is a technique that allows computations to be made on ciphertexts and generates results that when decrypted, corresponds to the result of the same computations made on plaintexts.This means that an HE scheme lets you encrypt two numbers *X* and *Y*, add their encrypted versions so that it gets decrypted to *X + Y*, the addition could have been a multiplication as well. SetupAll modules are imported here, make sure everything is installed by running the cell below.
###Code
import syft as sy
import tenseal as ts
import pytest
sy.load_lib("tenseal")
###Output
_____no_output_____
###Markdown
Start Duet Data Owner instance
###Code
# Start Duet local instance
duet = sy.launch_duet(loopback=True)
###Output
_____no_output_____
###Markdown
Theory: Homomorphic encryption schemes__TenSEAL__ supports two encryption schemes: - __BFV__, a scheme for operations on integers. - __CKKS__, a scheme for operations on approximate numbers. This scheme is much better suited for ML applications and we will focus more on it. There are a few major steps for each scheme: 1. __Keys Generation__: in this step, we generate public and private keys that will be used for encryption/decryption. 2. __Encryption__: this is the process of converting a plaintext into an encrypted ciphertext. This step requires an encryption key(or a public key). 3. __Decryption__: this is the process of converting a ciphertext back into a plaintext. This step requires a decryption key(or a secret key). This step cannot be done on the Data Scientist endpoint. Theory: Homomorphic encryption parameters__TenSEAL__ requires a few parameters to set the keys up: - __The polynomial modulus degree(poly_modulus_degree).__ This parameter directly affects the size of the ciphertext, the security of the scheme(bigger is better), but also the computational performance of the scheme(bigger is worse) - __The coefficient modulus sizes(coeff_mod_bit_sizes).__ This parameter is an array of bit sizes and directly affects the size of the ciphertext, the security of the scheme(bigger is worse), and the depth of computation allowed in the encrypted space(longer is better). - __The scaling factor(global_scale).__ This parameter is only used for the approximate schemes(CKKS) and directly affects the precision of computation and decryption. Theory: Homomorphic encryption keys__TenSEAL__ generates a few keys internally, each with another use case: - __The Private Key(or the secret/decryption key)__. This key is used for decrypting ciphertexts, and it is used to derive the other keys. __DO NOT SHARE IT OUTSIDE THE DATA OWNER PROCESS__. - __The Public key(or the encryption key)__. This key is used for encrypting the plain data to a ciphertext. You can safely share it with the Data Scientist. - __The Relinearization Keys(optional)__. This key is used for controlling the quality of the ciphertexts after encrypted multiplications. Generate it only if you are doing encrypted multiplications. You can safely share it with the Data Scientist. - __The Galois Keys(optional)__. This key is needed to perform encrypted vector rotation operations on ciphertexts. Generate it only if you are evaluating convolutions on encrypted data. You can safely share it with the Data Scientist. TenSEAL ContextNow that we had a short introduction, let's get to work.The first step to do for a Data Owner is to generate a security context containing security parameters and encryption keys.
###Code
context = ts.Context(
ts.SCHEME_TYPE.CKKS,
poly_modulus_degree=8192,
coeff_mod_bit_sizes=[60, 40, 40, 60]
)
context.global_scale = 2**40
context
###Output
_____no_output_____
###Markdown
Encrypt the data
###Code
v1 = [0, 1, 2, 3, 4]
v2 = [4, 3, 2, 1, 0]
enc_v1 = ts.ckks_vector(context, v1)
enc_v2 = ts.ckks_vector(context, v2)
(enc_v1, enc_v2)
###Output
_____no_output_____
###Markdown
Make Context and Encrypted Vectors Referenceable over Duet
###Code
# tag them so our partner can easily reference it
ctx_ptr = context.send(duet, searchable=True, tags=["context"])
enc_v1_ptr = enc_v1.send(duet, searchable=True, tags=["enc_v1"])
enc_v2_ptr = enc_v2.send(duet, searchable=True, tags=["enc_v2"])
# we can see that our three objects are now inside the store we control
duet.store.pandas
###Output
_____no_output_____
###Markdown
Checkpoint 1 : Now STOP and run the Data Scientist notebook until the same checkpoint.
###Code
# We can see our duet partner has requested the two encrypted vectors and the public context
duet.requests.pandas
###Output
_____no_output_____
###Markdown
Approve the requests
###Code
duet.requests[0].accept()
duet.requests[0].accept()
duet.requests[0].accept()
# The requests should have been handled
duet.requests.pandas
###Output
_____no_output_____
###Markdown
Checkpoint 2 : Now STOP and run the Data Scientist notebook until the same checkpoint. Get the computation results from store and decrypt them locally
###Code
# Validate the encrypted add
result_add = duet.store["result_add"].get(delete_obj=False)
result_add.link_context(context)
result_add
decrypted_result = result_add.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 + v2 for v1, v2 in zip(v1, v2)]
decrypted_result
# Validate the encrypted - plain add
result_iadd = duet.store["result_iadd"].get(delete_obj=False)
result_iadd.link_context(context)
decrypted_result = result_iadd.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 + v2 for v1, v2 in zip(v1, [10, 10, 10, 10, 10])]
decrypted_result
# Validate the encrypted subtraction
result_sub = duet.store["result_sub"].get(delete_obj=False)
result_sub.link_context(context)
decrypted_result = result_sub.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 - v2 for v1, v2 in zip(v1, v2)]
decrypted_result
# Validate the encrypted multiplication
result_mul = duet.store["result_mul"].get(delete_obj=False)
result_mul.link_context(context)
decrypted_result = result_mul.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 * v2 for v1, v2 in zip(v1, v2)]
decrypted_result
# Validate the encrypted power
result_pow = duet.store["result_pow"].get(delete_obj=False)
result_pow.link_context(context)
decrypted_result = result_pow.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v ** 3 for v in v1]
decrypted_result
# Validate the encrypted negation
result_neg = duet.store["result_neg"].get(delete_obj=False)
result_neg.link_context(context)
decrypted_result = result_neg.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [-v for v in v1]
decrypted_result
# Validate the encrypted polynomial evaluation for 1 + X^2 + X^3
result_poly = duet.store["result_poly"].get(delete_obj=False)
result_poly.link_context(context)
decrypted_result = result_poly.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [1 + v**2 + v**3 for v in v1]
decrypted_result
###Output
_____no_output_____
###Markdown
Homomorphic Encryption using Duet: Data Owner Tutorial 0: Basic operationsWelcome!This tutorial will show you how to use Duet with homomorphic encryption and some use cases. This notebook illustrates the Data Owner view on the operations.We will focus on Duet's integration with [TenSEAL](https://github.com/OpenMined/TenSEAL). TenSEAL is a Python library for doing homomorphic encryption operations on tensors. It's built on top of [Microsoft SEAL](https://github.com/Microsoft/SEAL), a C++ library implementing the BFV and CKKS homomorphic encryption schemes.If you want to learn more about TenSEAL, we recommend the following tutorials:- ['Tutorial 0 - Getting Started'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%200%20-%20Getting%20Started.ipynb).- ['Tutorial 1: Training and Evaluation of Logistic Regression on Encrypted Data'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%201%20-%20Training%20and%20Evaluation%20of%20Logistic%20Regression%20on%20Encrypted%20Data.ipynb).- ['Tutorial 2: Working with Approximate Numbers'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%202%20-%20Working%20with%20Approximate%20Numbers.ipynb).Let's now start the tutorial with a brief review of what homomorphic encryption is, but keep in mind that you don't need to be a crypto expert to use these features. Homomorphic Encryption__Definition__ : Homomorphic encryption (HE) is a technique that allows computations to be made on ciphertexts and generates results that when decrypted, corresponds to the result of the same computations made on plaintexts.This means that an HE scheme lets you encrypt two numbers *X* and *Y*, add their encrypted versions so that it gets decrypted to *X + Y*, the addition could have been a multiplication as well. COLAB
###Code
%%capture
# This only runs in colab and clones the code sets it up and fixes a few issues,
# you can skip this if you are running Jupyter Notebooks
import sys
if "google.colab" in sys.modules:
branch = "grace_dev" # change to the branch you want
! git clone --single-branch --branch $branch https://github.com/godormad/PySyft.git
! cd PySyft && ./scripts/colab.sh # fixes some colab python issues
sys.path.append("/content/PySyft/src") # prevents needing restart
###Output
_____no_output_____
###Markdown
SetupAll modules are imported here, make sure everything is installed by running the cell below.
###Code
!pip install tenseal
!pip show tenseal
import syft as sy
import tenseal as ts
import pytest
sy.load_lib("tenseal")
###Output
Collecting tenseal
[?25l Downloading https://files.pythonhosted.org/packages/35/20/a4106c3eff920eccbe040276ed869193fadd8fbbc52307dd6922a453f085/tenseal-0.3.0-cp36-cp36m-manylinux2014_x86_64.whl (4.4MB)
[K |████████████████████████████████| 4.4MB 4.2MB/s
[?25hInstalling collected packages: tenseal
Successfully installed tenseal-0.3.0
Name: tenseal
Version: 0.3.0
Summary: A Library for Homomorphic Encryption Operations on Tensors
Home-page: https://github.com/OpenMined/TenSEAL
Author: Ayoub Benaissa
Author-email: [email protected]
License: Apache-2.0
Location: /usr/local/lib/python3.6/dist-packages
Requires:
Required-by:
###Markdown
Start Duet Data Owner instance
###Code
# Start Duet local instance
duet = sy.launch_duet(loopback=False)
###Output
_____no_output_____
###Markdown
Theory: Homomorphic encryption schemes__TenSEAL__ supports two encryption schemes: - __BFV__, a scheme for operations on integers. - __CKKS__, a scheme for operations on approximate numbers. This scheme is much better suited for ML applications and we will focus more on it. There are a few major steps for each scheme: 1. __Keys Generation__: in this step, we generate public and private keys that will be used for encryption/decryption. 2. __Encryption__: this is the process of converting a plaintext into an encrypted ciphertext. This step requires an encryption key(or a public key). 3. __Decryption__: this is the process of converting a ciphertext back into a plaintext. This step requires a decryption key(or a secret key). This step cannot be done on the Data Scientist endpoint. Theory: Homomorphic encryption parameters__TenSEAL__ requires a few parameters to set the keys up: - __The polynomial modulus degree(poly_modulus_degree).__ This parameter directly affects the size of the ciphertext, the security of the scheme(bigger is better), but also the computational performance of the scheme(bigger is worse) - __The coefficient modulus sizes(coeff_mod_bit_sizes).__ This parameter is an array of bit sizes and directly affects the size of the ciphertext, the security of the scheme(bigger is worse), and the depth of computation allowed in the encrypted space(longer is better). - __The scaling factor(global_scale).__ This parameter is only used for the approximate schemes(CKKS) and directly affects the precision of computation and decryption. Theory: Homomorphic encryption keys__TenSEAL__ generates a few keys internally, each with another use case: - __The Private Key(or the secret/decryption key)__. This key is used for decrypting ciphertexts, and it is used to derive the other keys. __DO NOT SHARE IT OUTSIDE THE DATA OWNER PROCESS__. - __The Public key(or the encryption key)__. This key is used for encrypting the plain data to a ciphertext. You can safely share it with the Data Scientist. - __The Relinearization Keys(optional)__. This key is used for controlling the quality of the ciphertexts after encrypted multiplications. Generate it only if you are doing encrypted multiplications. You can safely share it with the Data Scientist. - __The Galois Keys(optional)__. This key is needed to perform encrypted vector rotation operations on ciphertexts. Generate it only if you are evaluating convolutions on encrypted data. You can safely share it with the Data Scientist. TenSEAL ContextNow that we had a short introduction, let's get to work.The first step to do for a Data Owner is to generate a security context containing security parameters and encryption keys.
###Code
context = ts.Context(
ts.SCHEME_TYPE.CKKS,
poly_modulus_degree=8192,
coeff_mod_bit_sizes=[60, 40, 40, 60]
)
context.global_scale = 2**40
context
###Output
_____no_output_____
###Markdown
Encrypt the data
###Code
v1 = [0, 1, 2, 3, 4]
v2 = [4, 3, 2, 1, 0]
enc_v1 = ts.ckks_vector(context, v1)
enc_v2 = ts.ckks_vector(context, v2)
(enc_v1, enc_v2)
###Output
_____no_output_____
###Markdown
Make Context and Encrypted Vectors Referenceable over Duet
###Code
# tag them so our partner can easily reference it
ctx_ptr = context.send(duet, searchable=True, tags=["context"])
enc_v1_ptr = enc_v1.send(duet, searchable=True, tags=["enc_v1"])
enc_v2_ptr = enc_v2.send(duet, searchable=True, tags=["enc_v2"])
# we can see that our three objects are now inside the store we control
duet.store.pandas
###Output
_____no_output_____
###Markdown
Checkpoint 1 : Now STOP and run the Data Scientist notebook until the same checkpoint.
###Code
# We can see our duet partner has requested the two encrypted vectors and the public context
duet.requests.pandas
###Output
_____no_output_____
###Markdown
Approve the requests
###Code
duet.requests[0].accept()
duet.requests[0].accept()
duet.requests[0].accept()
# The requests should have been handled
duet.requests.pandas
###Output
_____no_output_____
###Markdown
Checkpoint 2 : Now STOP and run the Data Scientist notebook until the same checkpoint. Get the computation results from store and decrypt them locally
###Code
# Validate the encrypted add
result_add = duet.store["result_add"].get(delete_obj=False)
result_add.link_context(context)
result_add
decrypted_result = result_add.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 + v2 for v1, v2 in zip(v1, v2)]
decrypted_result
# Validate the encrypted - plain add
result_iadd = duet.store["result_iadd"].get(delete_obj=False)
result_iadd.link_context(context)
decrypted_result = result_iadd.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 + v2 for v1, v2 in zip(v1, [10, 10, 10, 10, 10])]
decrypted_result
# Validate the encrypted subtraction
result_sub = duet.store["result_sub"].get(delete_obj=False)
result_sub.link_context(context)
decrypted_result = result_sub.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 - v2 for v1, v2 in zip(v1, v2)]
decrypted_result
# Validate the encrypted multiplication
result_mul = duet.store["result_mul"].get(delete_obj=False)
result_mul.link_context(context)
decrypted_result = result_mul.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 * v2 for v1, v2 in zip(v1, v2)]
decrypted_result
# Validate the encrypted power
result_pow = duet.store["result_pow"].get(delete_obj=False)
result_pow.link_context(context)
decrypted_result = result_pow.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v ** 3 for v in v1]
decrypted_result
# Validate the encrypted negation
result_neg = duet.store["result_neg"].get(delete_obj=False)
result_neg.link_context(context)
decrypted_result = result_neg.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [-v for v in v1]
decrypted_result
# Validate the encrypted polynomial evaluation for 1 + X^2 + X^3
result_poly = duet.store["result_poly"].get(delete_obj=False)
result_poly.link_context(context)
decrypted_result = result_poly.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [1 + v**2 + v**3 for v in v1]
decrypted_result
###Output
_____no_output_____
###Markdown
Homomorphic Encryption using Duet: Data Owner Tutorial 0: Basic operationsWelcome!This tutorial will show you how to use Duet with homomorphic encryption and some use cases. This notebook illustrates the Data Owner view on the operations.We will focus on Duet's integration with [TenSEAL](https://github.com/OpenMined/TenSEAL). TenSEAL is a Python library for doing homomorphic encryption operations on tensors. It's built on top of [Microsoft SEAL](https://github.com/Microsoft/SEAL), a C++ library implementing the BFV and CKKS homomorphic encryption schemes.If you want to learn more about TenSEAL, we recommend the following tutorials:- ['Tutorial 0 - Getting Started'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%200%20-%20Getting%20Started.ipynb).- ['Tutorial 1: Training and Evaluation of Logistic Regression on Encrypted Data'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%201%20-%20Training%20and%20Evaluation%20of%20Logistic%20Regression%20on%20Encrypted%20Data.ipynb).- ['Tutorial 2: Working with Approximate Numbers'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%202%20-%20Working%20with%20Approximate%20Numbers.ipynb).Let's now start the tutorial with a brief review of what homomorphic encryption is, but keep in mind that you don't need to be a crypto expert to use these features. Homomorphic Encryption__Definition__ : Homomorphic encryption (HE) is a technique that allows computations to be made on ciphertexts and generates results that when decrypted, corresponds to the result of the same computations made on plaintexts.This means that an HE scheme lets you encrypt two numbers *X* and *Y*, add their encrypted versions so that it gets decrypted to *X + Y*, the addition could have been a multiplication as well. SetupAll modules are imported here, make sure everything is installed by running the cell below.
###Code
import syft as sy
import tenseal as ts
import pytest
sy.load_lib("tenseal")
###Output
_____no_output_____
###Markdown
Start Duet Data Owner instance
###Code
# Start Duet local instance
duet = sy.launch_duet(loopback=True)
###Output
_____no_output_____
###Markdown
Theory: Homomorphic encryption schemes__TenSEAL__ supports two encryption schemes: - __BFV__, a scheme for operations on integers. - __CKKS__, a scheme for operations on approximate numbers. This scheme is much better suited for ML applications and we will focus more on it. There are a few major steps for each scheme: 1. __Keys Generation__: in this step, we generate public and private keys that will be used for encryption/decryption. 2. __Encryption__: this is the process of converting a plaintext into an encrypted ciphertext. This step requires an encryption key(or a public key). 3. __Decryption__: this is the process of converting a ciphertext back into a plaintext. This step requires a decryption key(or a secret key). This step cannot be done on the Data Scientist endpoint. Theory: Homomorphic encryption parameters__TenSEAL__ requires a few parameters to set the keys up: - __The polynomial modulus degree(poly_modulus_degree).__ This parameter directly affects the size of the ciphertext, the security of the scheme(bigger is better), but also the computational performance of the scheme(bigger is worse) - __The coefficient modulus sizes(coeff_mod_bit_sizes).__ This parameter is an array of bit sizes and directly affects the size of the ciphertext, the security of the scheme(bigger is worse), and the depth of computation allowed in the encrypted space(longer is better). - __The scaling factor(global_scale).__ This parameter is only used for the approximate schemes(CKKS) and directly affects the precision of computation and decryption. Theory: Homomorphic encryption keys__TenSEAL__ generates a few keys internally, each with another use case: - __The Private Key(or the secret/decryption key)__. This key is used for decrypting ciphertexts, and it is used to derive the other keys. __DO NOT SHARE IT OUTSIDE THE DATA OWNER PROCESS__. - __The Public key(or the encryption key)__. This key is used for encrypting the plain data to a ciphertext. You can safely share it with the Data Scientist. - __The Relinearization Keys(optional)__. This key is used for controlling the quality of the ciphertexts after encrypted multiplications. Generate it only if you are doing encrypted multiplications. You can safely share it with the Data Scientist. - __The Galois Keys(optional)__. This key is needed to perform encrypted vector rotation operations on ciphertexts. Generate it only if you are evaluating convolutions on encrypted data. You can safely share it with the Data Scientist. TenSEAL ContextNow that we had a short introduction, let's get to work.The first step to do for a Data Owner is to generate a security context containing security parameters and encryption keys.
###Code
context = ts.Context(
ts.SCHEME_TYPE.CKKS,
poly_modulus_degree=8192,
coeff_mod_bit_sizes=[60, 40, 40, 60]
)
context.global_scale = 2**40
context
###Output
_____no_output_____
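###Markdown
The context created above holds the secret key together with the public material. As noted in the theory section, the optional Galois keys are only needed if the Data Scientist will run encrypted rotations or convolutions; the next cell is a small illustrative sketch, not part of the original tutorial flow, assuming the `generate_galois_keys` and `is_private` helpers of the installed TenSEAL version.
###Code
# Illustrative sketch: generate the optional Galois keys ahead of time and confirm
# that this context is still private (it holds the secret key), so it must stay
# on the Data Owner side.
context.generate_galois_keys()
assert context.is_private()
###Output
_____no_output_____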
###Markdown
Encrypt the data
###Code
v1 = [0, 1, 2, 3, 4]
v2 = [4, 3, 2, 1, 0]
enc_v1 = ts.ckks_vector(context, v1)
enc_v2 = ts.ckks_vector(context, v2)
(enc_v1, enc_v2)
###Output
_____no_output_____
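###Markdown
As a quick local sanity check, an extra illustrative cell not in the original flow, the Data Owner can decrypt these vectors immediately; CKKS is an approximate scheme, so the decrypted values are close to, but not exactly equal to, the inputs.
###Code
# Extra sanity check on the Data Owner side: decryption approximately recovers the plaintexts.
assert pytest.approx(enc_v1.decrypt(), abs=10**-3) == v1
assert pytest.approx(enc_v2.decrypt(), abs=10**-3) == v2
###Output
_____no_output_____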
###Markdown
Make Context and Encrypted Vectors Referenceable over Duet
###Code
# tag them so our partner can easily reference it
ctx_ptr = context.send(duet, searchable=True, tags=["context"])
enc_v1_ptr = enc_v1.send(duet, searchable=True, tags=["enc_v1"])
enc_v2_ptr = enc_v2.send(duet, searchable=True, tags=["enc_v2"])
# we can see that our three objects are now inside the store we control
duet.store.pandas
###Output
_____no_output_____
###Markdown
Checkpoint 1 : Now STOP and run the Data Scientist notebook until the same checkpoint.
###Code
# We can see our duet partner has requested the two encrypted vectors and the public context
duet.requests.pandas
###Output
_____no_output_____
###Markdown
Approve the requests
###Code
duet.requests[0].accept()
duet.requests[0].accept()
duet.requests[0].accept()
# The requests should have been handled
duet.requests.pandas
###Output
_____no_output_____
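###Markdown
Each `accept()` call above approves the oldest pending request, which is why the call is repeated three times, once per request. Depending on the installed syft version, Duet also exposes a request-handler API that approves matching requests automatically; the cell below is an assumed example of that pattern rather than part of the original tutorial.
###Code
# Assumed example (the handler API may differ between syft releases):
# automatically accept incoming requests instead of approving each one by hand.
duet.requests.add_handler(action="accept")
###Output
_____no_output_____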
###Markdown
Checkpoint 2 : Now STOP and run the Data Scientist notebook until the same checkpoint. Get the computation results from store and decrypt them locally
###Code
# Validate the encrypted add
result_add = duet.store["result_add"].get(delete_obj=False)
result_add.link_context(context)
result_add
decrypted_result = result_add.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 + v2 for v1, v2 in zip(v1, v2)]
decrypted_result
# Validate the encrypted - plain add
result_iadd = duet.store["result_iadd"].get(delete_obj=False)
result_iadd.link_context(context)
decrypted_result = result_iadd.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 + v2 for v1, v2 in zip(v1, [10, 10, 10, 10, 10])]
decrypted_result
# Validate the encrypted subtraction
result_sub = duet.store["result_sub"].get(delete_obj=False)
result_sub.link_context(context)
decrypted_result = result_sub.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 - v2 for v1, v2 in zip(v1, v2)]
decrypted_result
# Validate the encrypted multiplication
result_mul = duet.store["result_mul"].get(delete_obj=False)
result_mul.link_context(context)
decrypted_result = result_mul.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 * v2 for v1, v2 in zip(v1, v2)]
decrypted_result
# Validate the encrypted power
result_pow = duet.store["result_pow"].get(delete_obj=False)
result_pow.link_context(context)
decrypted_result = result_pow.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v ** 3 for v in v1]
decrypted_result
# Validate the encrypted negation
result_neg = duet.store["result_neg"].get(delete_obj=False)
result_neg.link_context(context)
decrypted_result = result_neg.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [-v for v in v1]
decrypted_result
# Validate the encrypted polynomial evaluation for 1 + X^2 + X^3
result_poly = duet.store["result_poly"].get(delete_obj=False)
result_poly.link_context(context)
decrypted_result = result_poly.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [1 + v**2 + v**3 for v in v1]
decrypted_result
###Output
_____no_output_____
###Markdown
Homomorphic Encryption using Duet: Data Owner Tutorial 0: Basic operationsWelcome!This tutorial will show you how to use Duet with homomorphic encryption and some use cases. This notebook illustrates the Data Owner view on the operations.We will focus on Duet's integration with [TenSEAL](https://github.com/OpenMined/TenSEAL). TenSEAL is a Python library for doing homomorphic encryption operations on tensors. It's built on top of [Microsoft SEAL](https://github.com/Microsoft/SEAL), a C++ library implementing the BFV and CKKS homomorphic encryption schemes.If you want to learn more about TenSEAL, we recommend the following tutorials:- ['Tutorial 0 - Getting Started'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%200%20-%20Getting%20Started.ipynb).- ['Tutorial 1: Training and Evaluation of Logistic Regression on Encrypted Data'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%201%20-%20Training%20and%20Evaluation%20of%20Logistic%20Regression%20on%20Encrypted%20Data.ipynb).- ['Tutorial 2: Working with Approximate Numbers'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%202%20-%20Working%20with%20Approximate%20Numbers.ipynb).Let's now start the tutorial with a brief review of what homomorphic encryption is, but keep in mind that you don't need to be a crypto expert to use these features. Homomorphic Encryption__Definition__ : Homomorphic encryption (HE) is a technique that allows computations to be made on ciphertexts and generates results that, when decrypted, correspond to the result of the same computations made on plaintexts.This means that an HE scheme lets you encrypt two numbers *X* and *Y*, add their encrypted versions so that the result decrypts to *X + Y*; the addition could just as well have been a multiplication. SetupAll modules are imported here; make sure everything is installed by running the cell below.
###Code
%%capture
!pip install tenseal==0.3.0a5
import syft as sy
import tenseal as ts
import pytest
sy.load_lib("tenseal")
###Output
_____no_output_____
###Markdown
Start Duet Data Owner instance
###Code
# Start Duet local instance
duet = sy.launch_duet(loopback=True)
###Output
_____no_output_____
###Markdown
Theory: Homomorphic encryption schemes__TenSEAL__ supports two encryption schemes: - __BFV__, a scheme for operations on integers. - __CKKS__, a scheme for operations on approximate numbers. This scheme is much better suited for ML applications and we will focus more on it. There are a few major steps for each scheme: 1. __Keys Generation__: in this step, we generate public and private keys that will be used for encryption/decryption. 2. __Encryption__: this is the process of converting a plaintext into an encrypted ciphertext. This step requires an encryption key(or a public key). 3. __Decryption__: this is the process of converting a ciphertext back into a plaintext. This step requires a decryption key(or a secret key). This step cannot be done on the Data Scientist endpoint. Theory: Homomorphic encryption parameters__TenSEAL__ requires a few parameters to set the keys up: - __The polynomial modulus degree(poly_modulus_degree).__ This parameter directly affects the size of the ciphertext, the security of the scheme(bigger is better), but also the computational performance of the scheme(bigger is worse) - __The coefficient modulus sizes(coeff_mod_bit_sizes).__ This parameter is an array of bit sizes and directly affects the size of the ciphertext, the security of the scheme(bigger is worse), and the depth of computation allowed in the encrypted space(longer is better). - __The scaling factor(global_scale).__ This parameter is only used for the approximate schemes(CKKS) and directly affects the precision of computation and decryption. Theory: Homomorphic encryption keys__TenSEAL__ generates a few keys internally, each with another use case: - __The Private Key(or the secret/decryption key)__. This key is used for decrypting ciphertexts, and it is used to derive the other keys. __DO NOT SHARE IT OUTSIDE THE DATA OWNER PROCESS__. - __The Public key(or the encryption key)__. This key is used for encrypting the plain data to a ciphertext. You can safely share it with the Data Scientist. - __The Relinearization Keys(optional)__. This key is used for controlling the quality of the ciphertexts after encrypted multiplications. Generate it only if you are doing encrypted multiplications. You can safely share it with the Data Scientist. - __The Galois Keys(optional)__. This key is needed to perform encrypted vector rotation operations on ciphertexts. Generate it only if you are evaluating convolutions on encrypted data. You can safely share it with the Data Scientist. TenSEAL ContextNow that we had a short introduction, let's get to work.The first step to do for a Data Owner is to generate a security context containing security parameters and encryption keys.
###Code
context = ts.Context(
ts.SCHEME_TYPE.CKKS,
poly_modulus_degree=8192,
coeff_mod_bit_sizes=[60, 40, 40, 60]
)
context.global_scale = 2**40
context
###Output
_____no_output_____
###Markdown
Encrypt the data
###Code
v1 = [0, 1, 2, 3, 4]
v2 = [4, 3, 2, 1, 0]
enc_v1 = ts.ckks_vector(context, v1)
enc_v2 = ts.ckks_vector(context, v2)
(enc_v1, enc_v2)
###Output
_____no_output_____
###Markdown
Make Context and Encrypted Vectors Referenceable over Duet
###Code
# tag them so our partner can easily reference it
ctx_ptr = context.send(duet, searchable=True, tags=["context"])
enc_v1_ptr = enc_v1.send(duet, searchable=True, tags=["enc_v1"])
enc_v2_ptr = enc_v2.send(duet, searchable=True, tags=["enc_v2"])
# we can see that our three objects are now inside the store we control
duet.store.pandas
###Output
_____no_output_____
###Markdown
Checkpoint 0.1 : Now STOP and run the Data Scientist notebook until the same checkpoint.
###Code
# We can see our duet partner has requested the two encrypted vectors and the public context
duet.requests.pandas
###Output
_____no_output_____
###Markdown
Approve the requests
###Code
duet.requests[0].accept()
duet.requests[0].accept()
duet.requests[0].accept()
# The requests should have been handled
duet.requests.pandas
###Output
_____no_output_____
###Markdown
Checkpoint 0.2 : Now STOP and run the Data Scientist notebook until the same checkpoint. Get the computation results from store and decrypt them locally
###Code
# Validate the encrypted add
result_add = duet.store["result_add"].get(delete_obj=False)
result_add.link_context(context)
result_add
decrypted_result = result_add.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 + v2 for v1, v2 in zip(v1, v2)]
decrypted_result
# Validate the encrypted - plain add
result_iadd = duet.store["result_iadd"].get(delete_obj=False)
result_iadd.link_context(context)
decrypted_result = result_iadd.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 + v2 for v1, v2 in zip(v1, [10, 10, 10, 10, 10])]
decrypted_result
# Validate the encrypted subtraction
result_sub = duet.store["result_sub"].get(delete_obj=False)
result_sub.link_context(context)
decrypted_result = result_sub.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 - v2 for v1, v2 in zip(v1, v2)]
decrypted_result
# Validate the encrypted multiplication
result_mul = duet.store["result_mul"].get(delete_obj=False)
result_mul.link_context(context)
decrypted_result = result_mul.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 * v2 for v1, v2 in zip(v1, v2)]
decrypted_result
# Validate the encrypted power
result_pow = duet.store["result_pow"].get(delete_obj=False)
result_pow.link_context(context)
decrypted_result = result_pow.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v ** 3 for v in v1]
decrypted_result
# Validate the encrypted negation
result_neg = duet.store["result_neg"].get(delete_obj=False)
result_neg.link_context(context)
decrypted_result = result_neg.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [-v for v in v1]
decrypted_result
# Validate the encrypted polynomial evaluation for 1 + X^2 + X^3
result_poly = duet.store["result_poly"].get(delete_obj=False)
result_poly.link_context(context)
decrypted_result = result_poly.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [1 + v**2 + v**3 for v in v1]
decrypted_result
###Output
_____no_output_____
###Markdown
Homomorphic Encryption using Duet: Data Owner Tutorial 0: Basic operationsWelcome!This tutorial will show you how to use Duet with homomorphic encryption and some use cases. This notebook illustrates the Data Owner view on the operations.We will focus on Duet's integration with [TenSEAL](https://github.com/OpenMined/TenSEAL). TenSEAL is a Python library for doing homomorphic encryption operations on tensors. It's built on top of [Microsoft SEAL](https://github.com/Microsoft/SEAL), a C++ library implementing the BFV and CKKS homomorphic encryption schemes.If you want to learn more about TenSEAL, we recommend the following tutorials:- ['Tutorial 0 - Getting Started'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%200%20-%20Getting%20Started.ipynb).- ['Tutorial 1: Training and Evaluation of Logistic Regression on Encrypted Data'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%201%20-%20Training%20and%20Evaluation%20of%20Logistic%20Regression%20on%20Encrypted%20Data.ipynb).- ['Tutorial 2: Working with Approximate Numbers'](https://github.com/OpenMined/TenSEAL/blob/master/tutorials/Tutorial%202%20-%20Working%20with%20Approximate%20Numbers.ipynb).Let's now start the tutorial with a brief review of what homomorphic encryption is, but keep in mind that you don't need to be a crypto expert to use these features. Homomorphic Encryption__Definition__ : Homomorphic encryption (HE) is a technique that allows computations to be made on ciphertexts and generates results that, when decrypted, correspond to the result of the same computations made on plaintexts.This means that an HE scheme lets you encrypt two numbers *X* and *Y*, add their encrypted versions so that the result decrypts to *X + Y*; the addition could just as well have been a multiplication. SetupAll modules are imported here; make sure everything is installed by running the cell below.
###Code
import syft as sy
import tenseal as ts
import pytest
sy.load_lib("tenseal")
###Output
_____no_output_____
###Markdown
Start Duet Data Owner instance
###Code
# Start Duet local instance
duet = sy.launch_duet(loopback=True)
###Output
_____no_output_____
###Markdown
Theory: Homomorphic encryption schemes__TenSEAL__ supports two encryption schemes: - __BFV__, a scheme for operations on integers. - __CKKS__, a scheme for operations on approximate numbers. This scheme is much better suited for ML applications and we will focus more on it. There are a few major steps for each scheme: 1. __Keys Generation__: in this step, we generate public and private keys that will be used for encryption/decryption. 2. __Encryption__: this is the process of converting a plaintext into an encrypted ciphertext. This step requires an encryption key(or a public key). 3. __Decryption__: this is the process of converting a ciphertext back into a plaintext. This step requires a decryption key(or a secret key). This step cannot be done on the Data Scientist endpoint. Theory: Homomorphic encryption parameters__TenSEAL__ requires a few parameters to set the keys up: - __The polynomial modulus degree(poly_modulus_degree).__ This parameter directly affects the size of the ciphertext, the security of the scheme(bigger is better), but also the computational performance of the scheme(bigger is worse) - __The coefficient modulus sizes(coeff_mod_bit_sizes).__ This parameter is an array of bit sizes and directly affects the size of the ciphertext, the security of the scheme(bigger is worse), and the depth of computation allowed in the encrypted space(longer is better). - __The scaling factor(global_scale).__ This parameter is only used for the approximate schemes(CKKS) and directly affects the precision of computation and decryption. Theory: Homomorphic encryption keys__TenSEAL__ generates a few keys internally, each with another use case: - __The Private Key(or the secret/decryption key)__. This key is used for decrypting ciphertexts, and it is used to derive the other keys. __DO NOT SHARE IT OUTSIDE THE DATA OWNER PROCESS__. - __The Public key(or the encryption key)__. This key is used for encrypting the plain data to a ciphertext. You can safely share it with the Data Scientist. - __The Relinearization Keys(optional)__. This key is used for controlling the quality of the ciphertexts after encrypted multiplications. Generate it only if you are doing encrypted multiplications. You can safely share it with the Data Scientist. - __The Galois Keys(optional)__. This key is needed to perform encrypted vector rotation operations on ciphertexts. Generate it only if you are evaluating convolutions on encrypted data. You can safely share it with the Data Scientist. TenSEAL ContextNow that we had a short introduction, let's get to work.The first step to do for a Data Owner is to generate a security context containing security parameters and encryption keys.
###Code
context = ts.Context(
ts.SCHEME_TYPE.CKKS,
poly_modulus_degree=8192,
coeff_mod_bit_sizes=[60, 40, 40, 60]
)
context.global_scale = 2**40
context
###Output
_____no_output_____
###Markdown
Encrypt the data
###Code
v1 = [0, 1, 2, 3, 4]
v2 = [4, 3, 2, 1, 0]
enc_v1 = ts.ckks_vector(context, v1)
enc_v2 = ts.ckks_vector(context, v2)
(enc_v1, enc_v2)
###Output
_____no_output_____
###Markdown
Make Context and Encrypted Vectors Referenceable over Duet
###Code
# tag them so our partner can easily reference it
ctx_ptr = context.send(duet, pointable=True, tags=["context"])
enc_v1_ptr = enc_v1.send(duet, pointable=True, tags=["enc_v1"])
enc_v2_ptr = enc_v2.send(duet, pointable=True, tags=["enc_v2"])
# we can see that our three objects are now inside the store we control
duet.store.pandas
###Output
_____no_output_____
###Markdown
Checkpoint 1 : Now STOP and run the Data Scientist notebook until the same checkpoint.
###Code
# We can see our duet partner has requested the two encrypted vectors and the public context
duet.requests.pandas
###Output
_____no_output_____
###Markdown
Approve the requests
###Code
duet.requests[0].accept()
duet.requests[0].accept()
duet.requests[0].accept()
# The requests should have been handled
duet.requests.pandas
###Output
_____no_output_____
###Markdown
Checkpoint 2 : Now STOP and run the Data Scientist notebook until the same checkpoint. Get the computation results from store and decrypt them locally
###Code
# Validate the encrypted add
result_add = duet.store["result_add"].get(delete_obj=False)
result_add.link_context(context)
result_add
decrypted_result = result_add.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 + v2 for v1, v2 in zip(v1, v2)]
decrypted_result
# Validate the encrypted - plain add
result_iadd = duet.store["result_iadd"].get(delete_obj=False)
result_iadd.link_context(context)
decrypted_result = result_iadd.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 + v2 for v1, v2 in zip(v1, [10, 10, 10, 10, 10])]
decrypted_result
# Validate the encrypted subtraction
result_sub = duet.store["result_sub"].get(delete_obj=False)
result_sub.link_context(context)
decrypted_result = result_sub.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 - v2 for v1, v2 in zip(v1, v2)]
decrypted_result
# Validate the encrypted multiplication
result_mul = duet.store["result_mul"].get(delete_obj=False)
result_mul.link_context(context)
decrypted_result = result_mul.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v1 * v2 for v1, v2 in zip(v1, v2)]
decrypted_result
# Validate the encrypted power
result_pow = duet.store["result_pow"].get(delete_obj=False)
result_pow.link_context(context)
decrypted_result = result_pow.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [v ** 3 for v in v1]
decrypted_result
# Validate the encrypted negation
result_neg = duet.store["result_neg"].get(delete_obj=False)
result_neg.link_context(context)
decrypted_result = result_neg.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [-v for v in v1]
decrypted_result
# Validate the encrypted polynomial evaluation for 1 + X^2 + X^3
result_poly = duet.store["result_poly"].get(delete_obj=False)
result_poly.link_context(context)
decrypted_result = result_poly.decrypt()
assert pytest.approx(decrypted_result, abs=10**-3) == [1 + v**2 + v**3 for v in v1]
decrypted_result
###Output
_____no_output_____ |
nbs/demo_kde_pyhf.ipynb | ###Markdown
KDE demo, with histosys!> It works :) 
###Code
import time
import jax
import jax.experimental.optimizers as optimizers
import jax.experimental.stax as stax
import jax.random
from jax.random import PRNGKey
import numpy as np
from functools import partial
import pyhf
pyhf.set_backend('jax')
pyhf.default_backend = pyhf.tensor.jax_backend(precision='64b')
from neos import data, makers
from relaxed import infer
rng = PRNGKey(22)
# regression net
init_random_params, predict = stax.serial(
stax.Dense(1024),
stax.Relu,
stax.Dense(1024),
stax.Relu,
stax.Dense(1),
stax.Sigmoid,
)
###Output
_____no_output_____
###Markdown
Compose differentiable workflow
###Code
dgen = data.generate_blobs(rng,blobs=4)
# Specify our hyperparameters ahead of time for the kde histograms
bins = np.linspace(0,1,4)
bandwidth=0.27
reflect_infinite_bins = True
hmaker = makers.hists_from_nn(dgen, predict, hpar_dict = dict(bins=bins,bandwidth=bandwidth),method='kde', reflect_infinities=reflect_infinite_bins)
nnm = makers.histosys_model_from_hists(hmaker)
get_cls = infer.make_hypotest(nnm, solver_kwargs=dict(pdf_transform=True))
# loss returns a list of metrics -- let's just index into one (CLs)
def loss(params, test_mu):
return get_cls(params, test_mu)["CLs"]
###Output
_____no_output_____
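###Markdown
The `kde` method above builds "soft" histograms: every event spreads the mass of a Gaussian kernel of width `bandwidth` across the bins, so the bin counts are smooth, differentiable functions of the network output. The next cell is a minimal self-contained sketch of that idea (an illustration only, not the actual neos implementation, which also handles details such as the infinite-bin reflection enabled above).
###Code
# Minimal sketch of a KDE-based "soft" histogram: each event contributes the mass of
# its Gaussian kernel that falls between consecutive bin edges, so the counts are
# differentiable with respect to the event positions (here, the nn output).
import jax.numpy as jnp
from jax.scipy.stats import norm
def kde_hist(events, bin_edges, bw):
    # kernel cdf of every event evaluated at every bin edge -> shape (n_edges, n_events)
    edge_cdfs = norm.cdf(jnp.asarray(bin_edges).reshape(-1, 1), loc=events, scale=bw)
    # soft count per bin = kernel mass between consecutive edges, summed over events
    return (edge_cdfs[1:, :] - edge_cdfs[:-1, :]).sum(axis=1)
# four toy "nn outputs" -> three differentiable bin counts
kde_hist(jnp.array([0.1, 0.4, 0.45, 0.9]), bins, bandwidth)
###Output
_____no_output_____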
###Markdown
Randomly initialise nn weights and check that we can get the gradient of the loss wrt nn params
###Code
_, network = init_random_params(jax.random.PRNGKey(2), (-1, 2))
# gradient wrt nn weights
jax.value_and_grad(loss)(network, test_mu=1.0)
###Output
_____no_output_____
###Markdown
Define training loop!
###Code
opt_init, opt_update, opt_params = optimizers.adam(1e-3)
def train_network(N):
cls_vals = []
_, network = init_random_params(jax.random.PRNGKey(1), (-1, 2))
state = opt_init(network)
losses = []
# parameter update function
# @jax.jit
def update_and_value(i, opt_state, mu):
net = opt_params(opt_state)
value, grad = jax.value_and_grad(loss)(net, mu)
return opt_update(i, grad, state), value, net
for i in range(N):
start_time = time.time()
state, value, network = update_and_value(i, state, 1.0)
epoch_time = time.time() - start_time
losses.append(value)
metrics = {"loss": losses}
yield network, metrics, epoch_time
###Output
_____no_output_____
###Markdown
Plotting helper function for awesome animations :)
###Code
def plot(axarr, network, metrics, maxN):
ax = axarr[0]
g = np.mgrid[-5:5:101j, -5:5:101j]
levels = bins
ax.contourf(
g[0],
g[1],
predict(network, np.moveaxis(g, 0, -1)).reshape(101, 101, 1)[:, :, 0],
levels=levels,
cmap="binary",
)
ax.contour(
g[0],
g[1],
predict(network, np.moveaxis(g, 0, -1)).reshape(101, 101, 1)[:, :, 0],
colors="w",
levels=levels,
)
sig, bkg_nom, bkg_up, bkg_down = dgen()
ax.scatter(sig[:, 0], sig[:, 1], alpha=0.3, c="C9")
ax.scatter(bkg_up[:, 0], bkg_up[:, 1], alpha=0.1, c="C1", marker=6)
ax.scatter(bkg_down[:, 0], bkg_down[:, 1], alpha=0.1, c="C1", marker=7)
ax.scatter(bkg_nom[:, 0], bkg_nom[:, 1], alpha=0.3, c="C1")
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax = axarr[1]
# ax.axhline(0.05, c="slategray", linestyle="--")
ax.plot(metrics["loss"], c="steelblue", linewidth=2.0)
ax.set_yscale("log")
ax.set_ylim(1e-4, 0.06)
ax.set_xlim(0, maxN)
ax.set_xlabel("epoch")
ax.set_ylabel(r"$CL_s$")
ax = axarr[2]
s, b, bup, bdown = hmaker(network)
bin_width = 1 / (len(bins) - 1)
centers = bins[:-1] + np.diff(bins) / 2.0
ax.bar(centers, b, color="C1", width=bin_width)
ax.bar(centers, s, bottom=b, color="C9", width=bin_width)
bunc = np.asarray([[x, y] if x > y else [y, x] for x, y in zip(bup, bdown)])
plot_unc = []
for unc, be in zip(bunc, b):
if all(unc > be):
plot_unc.append([max(unc), be])
elif all(unc < be):
plot_unc.append([be, min(unc)])
else:
plot_unc.append(unc)
plot_unc = np.asarray(plot_unc)
b_up, b_down = plot_unc[:, 0], plot_unc[:, 1]
ax.bar(centers, bup - b, bottom=b, alpha=0.4, color="red", width=bin_width, hatch="+")
ax.bar(
centers, b - bdown, bottom=bdown, alpha=0.4, color="green", width=bin_width, hatch="-"
)
ax.set_ylim(0, 120)
ax.set_ylabel("frequency")
ax.set_xlabel("nn output")
###Output
_____no_output_____
###Markdown
Let's run it!!
###Code
# slow
import numpy as np
from IPython.display import HTML
from matplotlib import pyplot as plt
plt.rcParams.update(
{
"axes.labelsize": 13,
"axes.linewidth": 1.2,
"xtick.labelsize": 13,
"ytick.labelsize": 13,
"figure.figsize": [13.0, 4.0],
"font.size": 13,
"xtick.major.size": 3,
"ytick.major.size": 3,
"legend.fontsize": 11,
}
)
fig, axarr = plt.subplots(1, 3, dpi=120)
maxN = 500 # make me bigger for better results!
animate = True # animations fail tests...
if animate:
from celluloid import Camera
camera = Camera(fig)
# Training
for i, (network, metrics, epoch_time) in enumerate(train_network(maxN)):
# print(f"epoch {i}:", f'CLs = {metrics["loss"][-1]}, took {epoch_time}s')
if animate:
plot(axarr, network, metrics, maxN=maxN)
plt.tight_layout()
camera.snap()
# if i % 10 == 0:
# camera.animate().save("animation.gif", writer="imagemagick", fps=8)
# HTML(camera.animate().to_html5_video())
# break
if animate:
camera.animate().save("animationinfesoft.gif", writer="imagemagick", fps=15)
###Output
_____no_output_____
###Markdown
KDE demo, with histosys!> It works :) 
###Code
import time
import jax
import jax.experimental.optimizers as optimizers
import jax.experimental.stax as stax
import jax.random
from jax.random import PRNGKey
import numpy as np
from functools import partial
import pyhf
pyhf.set_backend('jax')
pyhf.default_backend = pyhf.tensor.jax_backend(precision='64b')
from neos import data, makers
from relaxed import infer
rng = PRNGKey(22)
# regression net
init_random_params, predict = stax.serial(
stax.Dense(1024),
stax.Relu,
stax.Dense(1024),
stax.Relu,
stax.Dense(1),
stax.Sigmoid,
)
###Output
_____no_output_____
###Markdown
Compose differentiable workflow
###Code
dgen = data.generate_blobs(rng,blobs=4)
# Specify our hyperparameters ahead of time for the kde histograms
bins = np.linspace(0,1,4)
bandwidth=0.27
reflect_infinite_bins = True
hmaker = makers.hists_from_nn(dgen, predict, hpar_dict = dict(bins=bins,bandwidth=bandwidth),method='kde', reflect_infinities=reflect_infinite_bins)
nnm = makers.histosys_model_from_hists(hmaker)
get_cls = infer.make_hypotest(nnm, solver_kwargs=dict(pdf_transform=True))
# loss returns a list of metrics -- let's just index into one (CLs)
def loss(params, test_mu):
return get_cls(params, test_mu)["CLs"]
###Output
_____no_output_____
###Markdown
Randomly initialise nn weights and check that we can get the gradient of the loss wrt nn params
###Code
_, network = init_random_params(jax.random.PRNGKey(2), (-1, 2))
# gradient wrt nn weights
jax.value_and_grad(loss)(network, test_mu=1.0)
###Output
_____no_output_____
###Markdown
Define training loop!
###Code
opt_init, opt_update, opt_params = optimizers.adam(1e-3)
def train_network(N):
cls_vals = []
_, network = init_random_params(jax.random.PRNGKey(1), (-1, 2))
state = opt_init(network)
losses = []
# parameter update function
# @jax.jit
def update_and_value(i, opt_state, mu):
net = opt_params(opt_state)
value, grad = jax.value_and_grad(loss)(net, mu)
return opt_update(i, grad, state), value, net
for i in range(N):
start_time = time.time()
state, value, network = update_and_value(i, state, 1.0)
epoch_time = time.time() - start_time
losses.append(value)
metrics = {"loss": losses}
yield network, metrics, epoch_time
###Output
_____no_output_____
###Markdown
Plotting helper function for awesome animations :)
###Code
def plot(axarr, network, metrics, maxN):
ax = axarr[0]
g = np.mgrid[-5:5:101j, -5:5:101j]
levels = bins
ax.contourf(
g[0],
g[1],
predict(network, np.moveaxis(g, 0, -1)).reshape(101, 101, 1)[:, :, 0],
levels=levels,
cmap="binary",
)
ax.contour(
g[0],
g[1],
predict(network, np.moveaxis(g, 0, -1)).reshape(101, 101, 1)[:, :, 0],
colors="w",
levels=levels,
)
sig, bkg_nom, bkg_up, bkg_down = dgen()
ax.scatter(sig[:, 0], sig[:, 1], alpha=0.3, c="C9")
ax.scatter(bkg_up[:, 0], bkg_up[:, 1], alpha=0.1, c="C1", marker=6)
ax.scatter(bkg_down[:, 0], bkg_down[:, 1], alpha=0.1, c="C1", marker=7)
ax.scatter(bkg_nom[:, 0], bkg_nom[:, 1], alpha=0.3, c="C1")
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax = axarr[1]
# ax.axhline(0.05, c="slategray", linestyle="--")
ax.plot(metrics["loss"], c="steelblue", linewidth=2.0)
ax.set_yscale("log")
ax.set_ylim(1e-4, 0.06)
ax.set_xlim(0, maxN)
ax.set_xlabel("epoch")
ax.set_ylabel(r"$CL_s$")
ax = axarr[2]
s, b, bup, bdown = hmaker(network)
bin_width = 1 / (len(bins) - 1)
centers = bins[:-1] + np.diff(bins) / 2.0
ax.bar(centers, b, color="C1", width=bin_width)
ax.bar(centers, s, bottom=b, color="C9", width=bin_width)
bunc = np.asarray([[x, y] if x > y else [y, x] for x, y in zip(bup, bdown)])
plot_unc = []
for unc, be in zip(bunc, b):
if all(unc > be):
plot_unc.append([max(unc), be])
elif all(unc < be):
plot_unc.append([be, min(unc)])
else:
plot_unc.append(unc)
plot_unc = np.asarray(plot_unc)
b_up, b_down = plot_unc[:, 0], plot_unc[:, 1]
ax.bar(centers, bup - b, bottom=b, alpha=0.4, color="red", width=bin_width, hatch="+")
ax.bar(
centers, b - bdown, bottom=bdown, alpha=0.4, color="green", width=bin_width, hatch="-"
)
ax.set_ylim(0, 120)
ax.set_ylabel("frequency")
ax.set_xlabel("nn output")
###Output
_____no_output_____
###Markdown
Let's run it!!
###Code
# slow
import numpy as np
from IPython.display import HTML
from matplotlib import pyplot as plt
plt.rcParams.update(
{
"axes.labelsize": 13,
"axes.linewidth": 1.2,
"xtick.labelsize": 13,
"ytick.labelsize": 13,
"figure.figsize": [13.0, 4.0],
"font.size": 13,
"xtick.major.size": 3,
"ytick.major.size": 3,
"legend.fontsize": 11,
}
)
fig, axarr = plt.subplots(1, 3, dpi=120)
maxN = 500 # make me bigger for better results!
animate = True # animations fail tests...
if animate:
from celluloid import Camera
camera = Camera(fig)
# Training
for i, (network, metrics, epoch_time) in enumerate(train_network(maxN)):
# print(f"epoch {i}:", f'CLs = {metrics["loss"][-1]}, took {epoch_time}s')
if animate:
plot(axarr, network, metrics, maxN=maxN)
plt.tight_layout()
camera.snap()
# if i % 10 == 0:
# camera.animate().save("animation.gif", writer="imagemagick", fps=8)
# HTML(camera.animate().to_html5_video())
# break
if animate:
camera.animate().save("animationinfesoft.gif", writer="imagemagick", fps=15)
###Output
_____no_output_____
###Markdown
KDE demo, with histosys!> It works :) 
###Code
import time
import jax
import jax.experimental.optimizers as optimizers
import jax.experimental.stax as stax
import jax.random
from jax.random import PRNGKey
import numpy as np
from functools import partial
import pyhf
pyhf.set_backend("jax")
pyhf.default_backend = pyhf.tensor.jax_backend(precision="64b")
from neos import data, makers
from relaxed import infer
rng = PRNGKey(22)
# regression net
init_random_params, predict = stax.serial(
stax.Dense(1024),
stax.Relu,
stax.Dense(1024),
stax.Relu,
stax.Dense(1),
stax.Sigmoid,
)
###Output
_____no_output_____
###Markdown
Compose differentiable workflow
###Code
dgen = data.generate_blobs(rng, blobs=4)
# Specify our hyperparameters ahead of time for the kde histograms
bins = np.linspace(0, 1, 4)
bandwidth = 0.27
reflect_infinite_bins = True
hmaker = makers.hists_from_nn(
dgen,
predict,
hpar_dict=dict(bins=bins, bandwidth=bandwidth),
method="kde",
reflect_infinities=reflect_infinite_bins,
)
nnm = makers.histosys_model_from_hists(hmaker)
get_cls = infer.make_hypotest(nnm, solver_kwargs=dict(pdf_transform=True))
# loss returns a list of metrics -- let's just index into one (CLs)
def loss(params, test_mu):
return get_cls(params, test_mu)["CLs"]
###Output
_____no_output_____
###Markdown
Randomly initialise nn weights and check that we can get the gradient of the loss wrt nn params
###Code
_, network = init_random_params(jax.random.PRNGKey(2), (-1, 2))
# gradient wrt nn weights
jax.value_and_grad(loss)(network, test_mu=1.0)
###Output
_____no_output_____
###Markdown
Define training loop!
###Code
opt_init, opt_update, opt_params = optimizers.adam(1e-3)
def train_network(N):
cls_vals = []
_, network = init_random_params(jax.random.PRNGKey(1), (-1, 2))
state = opt_init(network)
losses = []
# parameter update function
# @jax.jit
def update_and_value(i, opt_state, mu):
net = opt_params(opt_state)
value, grad = jax.value_and_grad(loss)(net, mu)
return opt_update(i, grad, state), value, net
for i in range(N):
start_time = time.time()
state, value, network = update_and_value(i, state, 1.0)
epoch_time = time.time() - start_time
losses.append(value)
metrics = {"loss": losses}
yield network, metrics, epoch_time
###Output
_____no_output_____
###Markdown
Plotting helper function for awesome animations :)
###Code
def plot(axarr, network, metrics, maxN):
ax = axarr[0]
g = np.mgrid[-5:5:101j, -5:5:101j]
levels = bins
ax.contourf(
g[0],
g[1],
predict(network, np.moveaxis(g, 0, -1)).reshape(101, 101, 1)[:, :, 0],
levels=levels,
cmap="binary",
)
ax.contour(
g[0],
g[1],
predict(network, np.moveaxis(g, 0, -1)).reshape(101, 101, 1)[:, :, 0],
colors="w",
levels=levels,
)
sig, bkg_nom, bkg_up, bkg_down = dgen()
ax.scatter(sig[:, 0], sig[:, 1], alpha=0.3, c="C9")
ax.scatter(bkg_up[:, 0], bkg_up[:, 1], alpha=0.1, c="C1", marker=6)
ax.scatter(bkg_down[:, 0], bkg_down[:, 1], alpha=0.1, c="C1", marker=7)
ax.scatter(bkg_nom[:, 0], bkg_nom[:, 1], alpha=0.3, c="C1")
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax = axarr[1]
# ax.axhline(0.05, c="slategray", linestyle="--")
ax.plot(metrics["loss"], c="steelblue", linewidth=2.0)
ax.set_yscale("log")
ax.set_ylim(1e-4, 0.06)
ax.set_xlim(0, maxN)
ax.set_xlabel("epoch")
ax.set_ylabel(r"$CL_s$")
ax = axarr[2]
s, b, bup, bdown = hmaker(network)
bin_width = 1 / (len(bins) - 1)
centers = bins[:-1] + np.diff(bins) / 2.0
ax.bar(centers, b, color="C1", width=bin_width)
ax.bar(centers, s, bottom=b, color="C9", width=bin_width)
bunc = np.asarray([[x, y] if x > y else [y, x] for x, y in zip(bup, bdown)])
plot_unc = []
for unc, be in zip(bunc, b):
if all(unc > be):
plot_unc.append([max(unc), be])
elif all(unc < be):
plot_unc.append([be, min(unc)])
else:
plot_unc.append(unc)
plot_unc = np.asarray(plot_unc)
b_up, b_down = plot_unc[:, 0], plot_unc[:, 1]
ax.bar(
centers, bup - b, bottom=b, alpha=0.4, color="red", width=bin_width, hatch="+"
)
ax.bar(
centers,
b - bdown,
bottom=bdown,
alpha=0.4,
color="green",
width=bin_width,
hatch="-",
)
ax.set_ylim(0, 120)
ax.set_ylabel("frequency")
ax.set_xlabel("nn output")
###Output
_____no_output_____
###Markdown
Let's run it!!
###Code
# slow
import numpy as np
from IPython.display import HTML
from matplotlib import pyplot as plt
plt.rcParams.update(
{
"axes.labelsize": 13,
"axes.linewidth": 1.2,
"xtick.labelsize": 13,
"ytick.labelsize": 13,
"figure.figsize": [13.0, 4.0],
"font.size": 13,
"xtick.major.size": 3,
"ytick.major.size": 3,
"legend.fontsize": 11,
}
)
fig, axarr = plt.subplots(1, 3, dpi=120)
maxN = 500 # make me bigger for better results!
animate = True # animations fail tests...
if animate:
from celluloid import Camera
camera = Camera(fig)
# Training
for i, (network, metrics, epoch_time) in enumerate(train_network(maxN)):
# print(f"epoch {i}:", f'CLs = {metrics["loss"][-1]}, took {epoch_time}s')
if animate:
plot(axarr, network, metrics, maxN=maxN)
plt.tight_layout()
camera.snap()
# if i % 10 == 0:
# camera.animate().save("animation.gif", writer="imagemagick", fps=8)
# HTML(camera.animate().to_html5_video())
# break
if animate:
camera.animate().save("animationinfesoft.gif", writer="imagemagick", fps=15)
###Output
_____no_output_____
###Markdown
KDE demo, with histosys!> It works :)  This depends on a *very* experimental fork of pyhf; install it by running the cell below:
###Code
!python -m pip install git+https://github.com/phinate/pyhf.git@diffable_json
# import jax
import neos.makers as makers
import neos.cls as cls
import numpy as np
import jax.experimental.stax as stax
import jax.experimental.optimizers as optimizers
import jax.random
import time
import pyhf
pyhf.set_backend(pyhf.tensor.jax_backend())
# regression net
init_random_params, predict = stax.serial(
stax.Dense(1024),
stax.Relu,
stax.Dense(1024),
stax.Relu,
stax.Dense(1),
stax.Sigmoid
)
###Output
_____no_output_____
###Markdown
Compose differentiable workflow
###Code
# choose hyperparams
bins = np.linspace(0,1,4)
centers = bins[:-1] + np.diff(bins)/2.
bandwidth = 0.8 * 1/(len(bins)-1)
# compose functions from neos to define workflow
hmaker = makers.kde_bins_from_nn_histosys(predict,bins=bins,bandwidth=bandwidth)
nnm = makers.nn_histosys(hmaker)
loss = cls.cls_maker(nnm, solver_kwargs=dict(pdf_transform=True))
bandwidth # print bw
###Output
_____no_output_____
###Markdown
Randomly initialise nn weights and check that we can get the gradient of the loss wrt nn params
###Code
_, network = init_random_params(jax.random.PRNGKey(13), (-1, 2))
jax.grad(loss)(network, 1.0)
###Output
_____no_output_____
###Markdown
Define training loop!
###Code
#jit_loss = jax.jit(loss)
opt_init, opt_update, opt_params = optimizers.adam(1e-3)
@jax.jit
def update_and_value(i, opt_state, mu):
net = opt_params(opt_state)
value, grad = jax.value_and_grad(loss)(net, mu)
return opt_update(i, grad, state), value, net
def train_network(N):
cls_vals = []
_, network = init_random_params(jax.random.PRNGKey(1), (-1, 2))
state = opt_init(network)
losses = []
# parameter update function
#@jax.jit
def update_and_value(i, opt_state, mu):
net = opt_params(opt_state)
value, grad = jax.value_and_grad(loss)(net, mu)
return opt_update(i, grad, state), value, net
for i in range(N):
start_time = time.time()
state, value, network = update_and_value(i,state,1.0)
epoch_time = time.time() - start_time
losses.append(value)
metrics = {"loss": losses}
yield network, metrics, epoch_time
###Output
_____no_output_____
###Markdown
Plotting helper function for awesome animations :)
###Code
def plot(axarr, network, metrics, hm, maxN):
ax = axarr[0]
g = np.mgrid[-5:5:101j, -5:5:101j]
levels = bins
ax.contourf(
g[0],
g[1],
predict(network, np.moveaxis(g, 0, -1)).reshape(101, 101, 1)[:, :, 0],
levels=levels,
cmap="binary",
)
ax.contour(
g[0],
g[1],
predict(network, np.moveaxis(g, 0, -1)).reshape(101, 101, 1)[:, :, 0],
colors="w",
levels=levels,
)
ax.scatter(hm.sig[:, 0], hm.sig[:, 1], alpha=0.3, c="C9")
ax.scatter(hm.bkg_up[:, 0], hm.bkg_up[:, 1], alpha=0.1, c="C1", marker = 6)
ax.scatter(hm.bkg_down[:, 0], hm.bkg_down[:, 1], alpha=0.1, c="C1", marker = 7)
ax.scatter(hm.bkg_nom[:, 0], hm.bkg_nom[:, 1], alpha=0.3, c="C1")
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax = axarr[1]
ax.axhline(0.05, c="slategray", linestyle="--")
ax.plot(metrics["loss"], c="steelblue", linewidth=2.0)
ax.set_ylim(0, 0.15)
ax.set_xlim(0, maxN)
ax.set_xlabel("epoch")
ax.set_ylabel(r"$CL_s$")
ax = axarr[2]
s, b, bup, bdown = hm(network)
bin_width = 1/(len(bins)-1)
ax.bar(centers, b, color="C1", width=bin_width)
ax.bar(centers, s, bottom=b, color="C9", width=bin_width)
bunc = np.asarray([[x,y] if x>y else [y,x] for x,y in zip(bup,bdown)])
plot_unc = []
for unc, be in zip(bunc,b):
if all(unc > be):
plot_unc.append([max(unc),be])
elif all(unc < be):
plot_unc.append([be, min(unc)])
else:
plot_unc.append(unc)
plot_unc = np.asarray(plot_unc)
b_up, b_down = plot_unc[:,0], plot_unc[:,1]
ax.bar(centers, b_up-b, bottom=b, alpha=0.4, color="black",width=bin_width)
ax.bar(centers, b-b_down, bottom=b_down, alpha=0.4, color="black", width=bin_width)
ax.set_ylim(0, 60)
ax.set_ylabel("frequency")
ax.set_xlabel("nn output")
###Output
_____no_output_____
###Markdown
Install celluloid to create animations if you haven't already by running this next cell:
###Code
!python -m pip install celluloid
###Output
Requirement already satisfied: celluloid in /Users/phinate/envs/neos/lib/python3.7/site-packages/celluloid-0.2.0-py3.7.egg (0.2.0)
Requirement already satisfied: matplotlib in /Users/phinate/envs/neos/lib/python3.7/site-packages/matplotlib-3.2.0-py3.7-macosx-10.15-x86_64.egg (from celluloid) (3.2.0)
Requirement already satisfied: cycler>=0.10 in /Users/phinate/envs/neos/lib/python3.7/site-packages (from matplotlib->celluloid) (0.10.0)
Requirement already satisfied: kiwisolver>=1.0.1 in /Users/phinate/envs/neos/lib/python3.7/site-packages (from matplotlib->celluloid) (1.1.0)
Requirement already satisfied: numpy>=1.11 in /Users/phinate/envs/neos/lib/python3.7/site-packages/numpy-1.18.1-py3.7-macosx-10.15-x86_64.egg (from matplotlib->celluloid) (1.18.1)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /Users/phinate/envs/neos/lib/python3.7/site-packages (from matplotlib->celluloid) (2.4.6)
Requirement already satisfied: python-dateutil>=2.1 in /Users/phinate/envs/neos/lib/python3.7/site-packages (from matplotlib->celluloid) (2.8.1)
Requirement already satisfied: six in /Users/phinate/envs/neos/lib/python3.7/site-packages (from cycler>=0.10->matplotlib->celluloid) (1.14.0)
Requirement already satisfied: setuptools in /Users/phinate/envs/neos/lib/python3.7/site-packages (from kiwisolver>=1.0.1->matplotlib->celluloid) (41.2.0)
###Markdown
Let's run it!!
###Code
#slow
import numpy as np
from matplotlib import pyplot as plt
from IPython.display import HTML
plt.rcParams.update(
{
"axes.labelsize": 13,
"axes.linewidth": 1.2,
"xtick.labelsize": 13,
"ytick.labelsize": 13,
"figure.figsize": [13., 4.0],
"font.size": 13,
"xtick.major.size": 3,
"ytick.major.size": 3,
"legend.fontsize": 11,
}
)
fig, axarr = plt.subplots(1, 3, dpi=120)
maxN = 50 # make me bigger for better results!
animate = False # animations fail tests...
if animate:
from celluloid import Camera
camera = Camera(fig)
# Training
for i, (network, metrics, epoch_time) in enumerate(train_network(maxN)):
print(f"epoch {i}:", f'CLs = {metrics["loss"][-1]}, took {epoch_time}s')
if animate:
plot(axarr, network, metrics, nnm.hm, maxN=maxN)
plt.tight_layout()
camera.snap()
if i % 10 == 0:
camera.animate().save("animation.gif", writer="imagemagick", fps=8)
#HTML(camera.animate().to_html5_video())
if animate:
camera.animate().save("animation.gif", writer="imagemagick", fps=8)
###Output
epoch 0: CLs = 0.06838154482072967, took 1.60487699508667s
epoch 1: CLs = 0.03329781502172735, took 2.0166208744049072s
epoch 2: CLs = 0.015110551170142816, took 2.0455799102783203s
epoch 3: CLs = 0.008083692178301183, took 2.0034470558166504s
epoch 4: CLs = 0.0049504634741244224, took 2.0266478061676025s
epoch 5: CLs = 0.003409642162911286, took 2.073227882385254s
epoch 6: CLs = 0.0025668480224632084, took 2.056434154510498s
epoch 7: CLs = 0.0020664966892849357, took 1.9783759117126465s
epoch 8: CLs = 0.001795572196163553, took 1.257340908050537s
epoch 9: CLs = 0.001556316101668731, took 1.2828562259674072s
epoch 10: CLs = 0.0013830857487930892, took 1.9847660064697266s
epoch 11: CLs = 0.0012744173832954786, took 2.057671070098877s
epoch 12: CLs = 0.0011956050063779422, took 1.9829719066619873s
epoch 13: CLs = 0.0011379882817992293, took 1.9814558029174805s
epoch 14: CLs = 0.0010955022703085238, took 2.0655109882354736s
epoch 15: CLs = 0.001063696054614427, took 1.9597313404083252s
epoch 16: CLs = 0.0010385283826592762, took 1.9722530841827393s
epoch 17: CLs = 0.0010167467255659535, took 2.054440975189209s
epoch 18: CLs = 0.000995951081921742, took 1.9948761463165283s
epoch 19: CLs = 0.0009745685857311948, took 2.0465872287750244s
epoch 20: CLs = 0.000952238973602304, took 1.9752001762390137s
epoch 21: CLs = 0.0009294113561868489, took 2.0399250984191895s
epoch 22: CLs = 0.0009069997853758949, took 2.068655014038086s
epoch 23: CLs = 0.0008858131774971412, took 1.9675908088684082s
epoch 24: CLs = 0.0008665640563871868, took 1.986894130706787s
epoch 25: CLs = 0.000849618963856047, took 2.0341649055480957s
epoch 26: CLs = 0.0008350320433831993, took 1.9940309524536133s
epoch 27: CLs = 0.0008225663879890543, took 2.0009779930114746s
epoch 28: CLs = 0.0008118405770869419, took 2.0324268341064453s
epoch 29: CLs = 0.000802443073251391, took 2.0389211177825928s
epoch 30: CLs = 0.0007939733262334325, took 1.9717748165130615s
epoch 31: CLs = 0.0007860802796573196, took 2.0695040225982666s
epoch 32: CLs = 0.0007784894056508396, took 2.0261731147766113s
epoch 33: CLs = 0.0007710721958871236, took 2.0273730754852295s
epoch 34: CLs = 0.0007637933706159394, took 2.129802942276001s
epoch 35: CLs = 0.0007567171962739039, took 1.9799671173095703s
epoch 36: CLs = 0.0007499962734278665, took 1.978816032409668s
epoch 37: CLs = 0.0007437523385873668, took 1.995072841644287s
epoch 38: CLs = 0.0007380663432652312, took 2.0848560333251953s
epoch 39: CLs = 0.0007329783541036861, took 1.9978752136230469s
epoch 40: CLs = 0.0007285053769783278, took 1.975020170211792s
epoch 41: CLs = 0.0007245641147743953, took 2.090196132659912s
epoch 42: CLs = 0.0007209943152786114, took 1.9991321563720703s
epoch 43: CLs = 0.0007176400295669794, took 1.9755010604858398s
epoch 44: CLs = 0.0007143650257970258, took 2.0589888095855713s
epoch 45: CLs = 0.0007110598235580134, took 2.0370700359344482s
epoch 46: CLs = 0.000707657986960486, took 1.9755001068115234s
epoch 47: CLs = 0.0007041661610163175, took 2.080190896987915s
epoch 48: CLs = 0.0007006291471818304, took 1.986905813217163s
epoch 49: CLs = 0.0006971251819183344, took 1.9537162780761719s
|
notebooks/asian_barrier_option/deep_learning_option_2.ipynb | ###Markdown
Deep Learning Model for Asian Barrier OptionsAs shown in the previous notebook, there are a few problems with generating data on the fly: 1. There is no model serialization, so the trained model is not saved 2. There is no validation dataset to check the training progress 3. Most of the time is spent on Monte Carlo simulation, hence the training is slow 4. We use only a few paths (1024) for each option parameter set, which is noisy, so the model cannot converge to a low cost value.The solution is to save the Monte Carlo simulation data to disk. This allows us to 1. Reuse the same dataset for different models and save the Monte Carlo simulation time 2. Generate more accurate pricing data by increasing the number of paths We will use CuPy to run the Monte Carlo simulation as it is the most efficient way. Taking the same OptionDataSet defined in the previous notebook:-
###Code
from cupy_dataset import OptionDataSet
###Output
_____no_output_____
###Markdown
Making the directories for the saved data files and the model checkpoints:-
###Code
!mkdir -p datafiles
!mkdir -p check_points
###Output
_____no_output_____
###Markdown
Defining a function to generate the dataset file:-
###Code
import torch
def gen_data(n_files = 630, options_per_file = 10000, seed=3):
counter = 0
ds = OptionDataSet(max_len=n_files * options_per_file, number_path=8192000, batch=1,
seed=seed)
x = []
y = []
for i in ds:
if counter!=0 and counter % options_per_file == 0:
filename = 'datafiles/'+str(seed) + '_' + str(counter//options_per_file) + '.pth'
state = (torch.cat(x, 0), torch.cat(y, 0))
torch.save(state, filename)
x = []
y = []
x.append(i[0].cpu())
y.append(i[1].cpu())
counter += 1
    # write out the final chunk as well, so the last batch of options is not dropped
    if x:
        filename = 'datafiles/' + str(seed) + '_' + str(counter // options_per_file) + '.pth'
        torch.save((torch.cat(x, 0), torch.cat(y, 0)), filename)
    return seed
###Output
_____no_output_____
###Markdown
It will generate files that contain `X` and `Y` matrices of size `options_per_file`, with filenames in the format `seed_group.pth`. We can do a test run with `n_files` = 5 and `options_per_file` = 16
###Code
gen_data(n_files=5, options_per_file = 16, seed=3)
X, Y = torch.load('datafiles/3_1.pth')
print(X)
print(Y)
###Output
tensor([[1.7910e+02, 6.8079e+01, 1.0688e+02, 2.5889e-01, 1.7393e-01, 1.4359e-01],
[1.3597e+02, 5.8014e+01, 1.0772e+02, 1.1119e-01, 1.1278e-01, 3.3107e-03],
[4.7951e+01, 3.6957e+01, 8.0480e+01, 2.6536e-01, 5.3653e-02, 7.2782e-02],
[1.0026e+02, 8.1533e+00, 6.6216e+01, 3.8491e-02, 5.5396e-02, 1.4566e-01],
[1.0416e+02, 7.9586e+01, 1.0620e+02, 1.2557e-01, 1.9639e-02, 3.0966e-02],
[1.6851e+02, 9.7813e+01, 1.2468e+02, 1.1845e-01, 7.9473e-02, 1.0369e-01],
[1.6673e+02, 7.4595e+01, 6.4872e+01, 3.8445e-01, 4.0116e-02, 1.5097e-01],
[3.2400e+01, 1.4736e+01, 9.4934e+01, 2.5872e-01, 6.7174e-02, 1.0737e-01],
[1.2953e+02, 8.5337e+01, 1.2570e+02, 1.6452e-01, 7.1083e-02, 1.9993e-01],
[1.5920e+02, 1.3722e+02, 6.4502e+01, 3.5891e-01, 1.5036e-01, 1.8909e-01],
[4.7439e+00, 6.8898e-01, 1.7892e+01, 1.6206e-02, 1.1772e-01, 1.1536e-01],
[1.4590e+02, 5.5645e+00, 9.4114e+00, 9.8751e-02, 7.2455e-03, 1.2266e-01],
[1.0537e+02, 4.6149e+01, 7.2182e+01, 2.0814e-01, 1.5636e-02, 4.7667e-02],
[1.9498e+02, 1.4687e+02, 5.9092e+01, 5.9770e-02, 4.7395e-02, 8.9560e-02],
[5.4070e+00, 4.4146e+00, 1.3971e+02, 3.4593e-01, 1.8324e-01, 1.3890e-01],
[6.1022e+01, 3.5528e+01, 3.8339e+01, 1.4686e-01, 1.2386e-01, 1.2188e-01]])
tensor([2.1621e-02, 1.0037e-02, 3.2299e+01, 0.0000e+00, 4.7080e+00, 2.7595e-04,
0.0000e+00, 5.9109e+01, 4.3838e+00, 0.0000e+00, 1.2694e+01, 0.0000e+00,
4.3242e-03, 0.0000e+00, 1.2877e+02, 2.6165e-06])
###Markdown
We will use Dask to generate the dataset on multiple GPUs in this notebook
###Code
import dask
import dask_cudf
from dask.delayed import delayed
from dask_cuda import LocalCUDACluster
cluster = LocalCUDACluster()
from dask.distributed import Client
client = Client(cluster)
client
###Output
_____no_output_____
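###Markdown
Before submitting work it can be useful to confirm how many GPU workers the local CUDA cluster exposes; the cell below is an illustrative check using the standard Dask client API.
###Code
# Each GPU on the machine should appear as one worker in the local CUDA cluster.
len(client.scheduler_info()["workers"])
###Output
_____no_output_____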
###Markdown
The following code is an example that generates `100x5x16` data points on 4 GPUs. For serious deep learning model training, we need millions of data points. You can try changing `n_files` and `options_per_file` to larger numbers.
###Code
futures = []
for i in range(0, 100):
future = client.submit(gen_data, 5, 16, i)
futures.append(future)
results = client.gather(futures)
results
###Output
_____no_output_____
###Markdown
Once millions of data points are generated, we can combine the data points together and split them into training and validation datasets.
###Code
import pathlib
files = list(pathlib.Path('datafiles/').glob('*.pth'))
trn_size = int(len(files)*0.7)
trn_files = files[:trn_size]
val_files = files[trn_size:]
trn_x = []
trn_y = []
count = 0
for i in trn_files:
tensor = torch.load(i)
if count % 10 == 0:
print(count,'/',len(trn_files))
trn_x.append(tensor[0])
trn_y.append(tensor[1])
count += 1
X = torch.cat(trn_x)
Y = torch.cat(trn_y)
torch.save((X,Y), 'trn.pth')
val_x = []
val_y = []
count = 0
for i in val_files:
tensor = torch.load(i)
if count % 10 == 0:
print(count,'/',len(val_files))
val_x.append(tensor[0])
val_y.append(tensor[1])
count += 1
X = torch.cat(val_x)
Y = torch.cat(val_y)
torch.save((X,Y), 'val.pth')
###Output
0 / 350
10 / 350
20 / 350
30 / 350
40 / 350
50 / 350
60 / 350
70 / 350
80 / 350
90 / 350
100 / 350
110 / 350
120 / 350
130 / 350
140 / 350
150 / 350
160 / 350
170 / 350
180 / 350
190 / 350
200 / 350
210 / 350
220 / 350
230 / 350
240 / 350
250 / 350
260 / 350
270 / 350
280 / 350
290 / 350
300 / 350
310 / 350
320 / 350
330 / 350
340 / 350
0 / 150
10 / 150
20 / 150
30 / 150
40 / 150
50 / 150
60 / 150
70 / 150
80 / 150
90 / 150
100 / 150
110 / 150
120 / 150
130 / 150
140 / 150
###Markdown
We created two data files, `trn.pth` and `val.pth`, for training and validation. We can define a new PyTorch Dataset that loads the data from such a file; the class is written to `filedataset.py` so that the training script can import it. This dataset takes `rank` and `world_size` arguments for distributed training. It loads the whole dataset into GPU memory and samples the data points according to the rank id, so that datasets with different rank ids yield different data.
###Code
%%writefile filedataset.py
import torch
class OptionDataSet(torch.utils.data.Dataset):
def __init__(self, filename, rank=0, world_size=5):
tensor = torch.load(filename)
self.tensor = (tensor[0].cuda(), tensor[1].cuda())
self.length = len(self.tensor[0]) // world_size
self.world_size = world_size
self.rank = rank
def __getitem__(self, index):
index = index * self.world_size + self.rank
return self.tensor[0][index], self.tensor[1][index]
def __len__(self):
return self.length
###Output
Writing filedataset.py
###Markdown
When training deep learning models, one effective way to prevent over-fitting is to have a separate validation dataset to monitor the out-of-sample performance. When the validation performance declines, over-fitting is setting in and we can stop the training. We put everything together into one script that can train the model efficiently on multiple GPUs:
###Code
%%writefile distributed_training.py
import torch
from ignite.engine import Engine, Events
from torch.nn import MSELoss
from ignite.contrib.handlers.param_scheduler import CosineAnnealingScheduler
from apex import amp
import argparse
import os
from apex.parallel import DistributedDataParallel
import apex
from apex.optimizers import FusedLAMB
from model import Net
from filedataset import OptionDataSet
from ignite.metrics import MeanAbsoluteError
import ignite
import shutil
import torch.distributed as dist
parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument("--path", default=None)
parser.add_argument("--mae_improv_tol", default=0.002, type=float)
args = parser.parse_args()
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
if args.distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
torch.backends.cudnn.benchmark = True
trn_dataset = OptionDataSet(filename='./trn.pth',
rank=dist.get_rank(),
world_size=int(os.environ['WORLD_SIZE']))
trn_dataset = torch.utils.data.DataLoader(trn_dataset,
batch_size=1024,
shuffle=True,
num_workers=0)
val_dataset = OptionDataSet(filename='./val.pth',
rank=dist.get_rank(),
world_size=int(os.environ['WORLD_SIZE']))
val_dataset = torch.utils.data.DataLoader(val_dataset,
batch_size=1024,
shuffle=False,
num_workers=0)
model = Net().cuda()
optimizer = FusedLAMB(model.parameters(), lr=1e-3)
loss_fn = MSELoss()
model = apex.parallel.convert_syncbn_model(model, channel_last=True)
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
best_mae = 100000
if args.path is not None:
def resume():
global best_mae
checkpoint = torch.load(args.path)
best_mae = checkpoint['best_mae']
model.load_state_dict(checkpoint['state_dict'])
amp.load_state_dict(checkpoint['amp'])
optimizer.load_state_dict(checkpoint['optimizer'])
resume()
if args.distributed:
model = DistributedDataParallel(model)
def train_update(engine, batch):
model.train()
optimizer.zero_grad()
x = batch[0]
y = batch[1]
y_pred = model(x)
loss = loss_fn(y, y_pred[:, 0])
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
return loss.item()
trainer = Engine(train_update)
log_interval = 500
scheduler = CosineAnnealingScheduler(optimizer, 'lr', 1e-5, 5e-6,
len(trn_dataset),
start_value_mult=0.999, end_value_mult=0.999,
save_history=False
)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'check_points/model_best.pth.tar')
@trainer.on(Events.ITERATION_COMPLETED)
def log_training_loss(engine):
iter = (engine.state.iteration - 1) % len(trn_dataset) + 1
if iter % log_interval == 0:
print('loss', engine.state.output, 'iter', engine.state.iteration,
'lr', scheduler.get_param())
metric = MeanAbsoluteError()
loss_m = ignite.metrics.Loss(loss_fn)
# run eval at one process only
def eval_update(engine, batch):
model.eval()
x = batch[0]
y = batch[1]
y_pred = model(x)
return y, y_pred[:, 0]
evaluator = Engine(eval_update)
metric.attach(evaluator, "MAE")
loss_m.attach(evaluator, "loss")
@trainer.on(Events.EPOCH_COMPLETED)
def log_evalnumber(engine):
global best_mae
mae_improv_tol = args.mae_improv_tol # default 0.002 or 0.2% improvement
evaluator.run(val_dataset, max_epochs=1)
metrics = evaluator.state.metrics
average_tensor = torch.tensor([metrics['MAE'], metrics['loss']]).cuda()
torch.distributed.reduce(average_tensor, 0, op=torch.distributed.ReduceOp.SUM)
torch.distributed.broadcast(average_tensor, 0)
average_tensor = average_tensor/int(os.environ['WORLD_SIZE'])
mae = average_tensor[0].item()
is_best = False
if (1 - mae / best_mae) >= mae_improv_tol or \
(engine.state.epoch == engine.state.max_epochs and
mae < best_mae):
best_mae = mae
is_best = True
# print("RANK {} Val Results - Epoch: {} Avg MAE: {:.5f} loss: {:.5f} BEST MAE: {:.5f}"
# .format(dist.get_rank(), trainer.state.epoch, metrics['MAE'], metrics['loss'], best_mae))
if dist.get_rank() == 0:
print('Epoch {}/{}'.format(engine.state.epoch, engine.state.max_epochs))
print('Best MAE Improvement Tolerance for checkpointing: {}%'.format(100 * mae_improv_tol))
print("RANK {} AVG {} NGPUs, best-mae: {:.5f} mae: {:.5f} loss: {:.5f}".format(
dist.get_rank(),
int(os.environ['WORLD_SIZE']),
best_mae,
average_tensor[0].item(),
average_tensor[1].item()))
fname = 'check_points/current_pth.tar'
if is_best:
save_checkpoint({'epoch': trainer.state.epoch,
'state_dict': model.module.state_dict(),
'best_mae': best_mae,
'optimizer': optimizer.state_dict(),
'amp': amp.state_dict()
}, is_best,
filename=fname)
inputs = torch.tensor([[110.0, 100.0, 120.0, 0.35, 0.1, 0.05]]).cuda()
res = model(inputs)
print('test one example:', res.item())
trainer.run(trn_dataset, max_epochs=2000)
###Output
Overwriting distributed_training.py
###Markdown
Compared to the last notebook, this script is a little more involved because:
* it evaluates on the validation dataset after every epoch
* it serializes the model to a file and keeps track of the best-performing model based on the mean absolute error (MAE)
* it can resume training from a saved checkpoint (via the `--path` argument)

We can launch the distributed training with the following command:
###Code
ngpus=!echo $(nvidia-smi -L | wc -l)
!python -m torch.distributed.launch --nproc_per_node={ngpus[0]} distributed_training.py
###Output
_____no_output_____
###Markdown
We need some patience to train the pricing model until it converges.

Inference and Greeks

Once training has converged, the best-performing model is saved in the `check_points/` directory. To get a good model you need millions of data points, and training typically takes 10-20 hours on a single 8-GPU DGX-1 machine. We trained the model with 10 million training data points and 5 million validation data points. We did not explore the minimum number of training samples required; we simply used a large sample, and you may get away with fewer data points. To save time, you can run the following command to download the trained weights and use them for inference:
###Code
! ((test ! -f './check_points/model_best.pth.tar' || test ! -f './check_points/512/model_best.pth.tar') && \
bash ./download_data.sh) || echo "Dataset is already present. No need to re-download it."
###Output
Dataset is already present. No need to re-download it.
###Markdown
We can load the model parameters and use them for inference:
###Code
from model import Net
import torch
checkpoint = torch.load('check_points/model_best.pth.tar')
model = Net().cuda()
model.load_state_dict(checkpoint['state_dict'])
inputs = torch.tensor([[110.0, 100.0, 120.0, 0.35, 0.1, 0.05]]).cuda()
model(inputs)
###Output
_____no_output_____
###Markdown
One of the benefits of building a deep learning pricing model is that the [Greeks](https://en.wikipedia.org/wiki/Greeks_%28finance%29) can be computed easily: we just take advantage of the autograd feature in PyTorch. The following example computes the first-order derivatives of a multivariable polynomial function.
###Code
import torch
from torch.autograd import grad
'''
z = (xy)^2
x = 3, y =2
first order deriv [24 36]
'''
inputs = torch.tensor([3.0,2.0], requires_grad=True)
z = (inputs[0]*inputs[1])**2
first_order_grad = grad(z, inputs, create_graph=True)
print(first_order_grad)
###Output
(tensor([24., 36.], grad_fn=<AddBackward0>),)
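For reference, the analytic derivatives agree with the autograd result: with $z = (xy)^2 = x^2y^2$, we have $\frac{\partial z}{\partial x} = 2xy^2 = 2\cdot 3\cdot 2^2 = 24$ and $\frac{\partial z}{\partial y} = 2x^2y = 2\cdot 3^2\cdot 2 = 36$ at $(x, y) = (3, 2)$.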
###Markdown
We can use the `grad` function to compute the first-order derivatives of the price with respect to the parameters `K, B, S0, sigma, mu, r`:
###Code
inputs = torch.tensor([[110.0, 100.0, 120.0, 0.35, 0.1, 0.05]]).cuda()
inputs.requires_grad = True
x = model(inputs)
x.backward()
first_order_gradient = inputs.grad
first_order_gradient
###Output
_____no_output_____
###Markdown
Here we plot Delta as a function of the underlying price:
###Code
%matplotlib inline
import pylab
import numpy as np
def compute_delta(S):
inputs = torch.tensor([[110.0, 100.0, S, 0.35, 0.1, 0.05]]).cuda()
inputs.requires_grad = True
x = model(inputs)
x.backward()
first_order_gradient = inputs.grad
return first_order_gradient[0][2]
prices = np.arange(10, 200, 0.1)
deltas = []
for p in prices:
deltas.append(compute_delta(p).item())
fig = pylab.plot(prices, deltas)
pylab.xlabel('prices')
pylab.ylabel('Delta')
fig
###Output
_____no_output_____
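As a sanity check (not part of the original notebook), we can compare the autograd Delta at `S = 120` against a simple central finite-difference approximation; `price_at` below is an illustrative helper:
```python
# a minimal sketch, reusing the trained `model` and `compute_delta` defined above
def price_at(S):
    inputs = torch.tensor([[110.0, 100.0, S, 0.35, 0.1, 0.05]]).cuda()
    return model(inputs).item()

eps = 0.5
fd_delta = (price_at(120.0 + eps) - price_at(120.0 - eps)) / (2 * eps)
print('autograd Delta   :', compute_delta(120.0).item())
print('finite-diff Delta:', fd_delta)
```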
###Markdown
Calculating second-order derivatives is easy in PyTorch too; we just apply the `grad` function twice. The following example calculates the second-order derivatives of the same polynomial function as above:
###Code
import torch
from torch.autograd import grad
'''
z = (xy)^2
x = 3, y =2
first order deriv [24 36]
d2z/dx2 = 8
d2z/dxdy = 24
d2z/dy2 = 18
'''
inputs = torch.tensor([3.0,2.0], requires_grad=True)
z = (inputs[0]*inputs[1])**2
first_order_grad = grad(z, inputs, create_graph=True)
second_order_grad_x, = grad(first_order_grad[0][0], inputs, retain_graph=True) #
second_order_grad_y, = grad(first_order_grad[0][1], inputs)
print(second_order_grad_x)
print(second_order_grad_y)
###Output
tensor([ 8., 24.])
tensor([24., 18.])
###Markdown
Using this mechanism, we can calculate the second-order derivatives $\frac{\partial^2 P}{\partial K \partial S_0}$, $\frac{\partial^2 P}{\partial B \partial S_0}$, $\frac{\partial^2 P}{\partial S_0^2}$, $\frac{\partial^2 P}{\partial \sigma \partial S_0}$, $\frac{\partial^2 P}{\partial \mu \partial S_0}$, $\frac{\partial^2 P}{\partial r \partial S_0}$ in the following example.
###Code
import torch
from torch import Tensor
from torch.autograd import Variable
from torch.autograd import grad
from torch import nn
inputs = torch.tensor([[110.0, 100.0, 120.0, 0.35, 0.1, 0.05]]).cuda()
inputs.requires_grad = True
x = model(inputs)
# instead of using loss.backward(), use torch.autograd.grad() to compute gradients
# https://pytorch.org/docs/stable/autograd.html#torch.autograd.grad
loss_grads = grad(x, inputs, create_graph=True)
drv = grad(loss_grads[0][0][2], inputs)
drv
###Output
_____no_output_____
###Markdown
Gamma is the second-order derivative of the price with respect to `S`. We can plot the Gamma curve as a function of the stock price:
###Code
import pylab
import numpy as np
def compute_gamma(S):
inputs = torch.tensor([[110.0, 100.0, S, 0.35, 0.1, 0.05]]).cuda()
inputs.requires_grad = True
x = model(inputs)
loss_grads = grad(x, inputs, create_graph=True)
drv = grad(loss_grads[0][0][2], inputs)
return drv[0][0][2]
prices = np.arange(10, 200, 0.1)
deltas = []
for p in prices:
deltas.append(compute_gamma(p).item())
fig2 = pylab.plot(prices, deltas)
pylab.xlabel('prices')
pylab.ylabel('Gamma')
fig2
###Output
_____no_output_____
###Markdown
[Implied volatility](https://en.wikipedia.org/wiki/Implied_volatility) is the forecasted volatility of the underlying asset implied by the quoted price of the option. It is the reverse mapping from price back to the model parameter, which is hard to do with the Monte Carlo simulation approach but easy once we have the deep learning pricing model. We first plot the relationship between volatility and the option price:
###Code
import pylab
import numpy as np
def compute_price(sigma):
inputs = torch.tensor([[110.0, 100.0, 120.0, sigma, 0.1, 0.05]]).cuda()
x = model(inputs)
return x.item()
sigmas = np.arange(0, 0.5, 0.1)
prices = []
for s in sigmas:
prices.append(compute_price(s))
fig3 = pylab.plot(sigmas, prices)
pylab.xlabel('Sigma')
pylab.ylabel('Price')
fig3
###Output
_____no_output_____
###Markdown
Given a quoted price `P`, the implied volatility is the root of `compute_price(sigma) - P`. We can use bisection to find this root.
###Code
def bisection_root(small, large, fun, target, EPS=1e-6):
if fun(large) - target < 0:
print('upper bound is too small')
return None
if fun(small) - target > 0:
print('lower bound is too large')
return None
while large - small > EPS:
mid = (large + small) / 2.0
if fun(mid) - target >= 0:
large = mid
else:
small = mid
mid = (large + small) / 2.0
return mid, abs(fun(mid) - target)
quoted_price = 16.0
sigma, err = bisection_root(0, 0.5, compute_price, quoted_price)
print('implied volatility', sigma, 'error', err)
###Output
implied volatility 0.18517351150512695 error 4.76837158203125e-06
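Since the network is differentiable, an alternative to bisection is Newton's method, using vega ($\partial P / \partial \sigma$) from autograd. This is a sketch reusing the `model` above; `newton_implied_vol` is an illustrative helper, not part of the original code:
```python
def newton_implied_vol(target_price, sigma0=0.3, n_iter=50, tol=1e-6):
    sigma = sigma0
    for _ in range(n_iter):
        inputs = torch.tensor([[110.0, 100.0, 120.0, sigma, 0.1, 0.05]]).cuda()
        inputs.requires_grad = True
        price = model(inputs)
        price.backward()
        vega = inputs.grad[0][3].item()   # dP/dsigma
        diff = price.item() - target_price
        if abs(diff) < tol or vega == 0:
            break
        sigma -= diff / vega              # Newton update
    return sigma

print('Newton implied volatility:', newton_implied_vol(16.0))
```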
|
notebooks/project_eda.ipynb | ###Markdown
This dataset is from an October 2017 Kaggle competition; it contains text from the works of three well-known horror authors whose writing is in the public domain.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import spacy
from spacy.symbols import ORTH, LEMMA
nlp = spacy.load("en_core_web_sm")
import nltk
from nltk.sentiment import SentimentIntensityAnalyzer
from wordcloud import WordCloud, STOPWORDS
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from collections import Counter
from sklearn.feature_extraction.text import CountVectorizer
df = pd.read_csv('../data/train.csv')
df.head()
eap_df = df[df['author'] == 'EAP']
hpl_df = df[df['author'] == 'HPL']
mws_df = df[df['author'] == 'MWS']
corpus = df['text'].dropna()
print(corpus[3])
stopwords_lst = set(stopwords.words('english'))
# quick sanity check: raw token counts for one example sentence
print(Counter(corpus[3].split()).most_common(10))
vectorizer = CountVectorizer(stop_words = stopwords_lst, analyzer='word', token_pattern= '[a-zA-Z]+', max_features = 5000)
X = vectorizer.fit_transform(corpus)
print(vectorizer.get_feature_names())
categories = ['EAP', 'HPL', 'MWS']
# term frequencies across the corpus (X is the sparse count matrix produced above)
freqs = zip(vectorizer.get_feature_names(), np.asarray(X.sum(axis=0)).ravel())
# sort from largest to smallest
print(sorted(freqs, key=lambda x: -x[1]))
corpus_string = df.text.str.cat(sep=' ')
split_it = corpus_string.split()
filtered = [word for word in split_it if word not in stopwords.words('english')]
word_counts = Counter(filtered)   # avoid shadowing the Counter class itself
# most_common() produces the k most frequently encountered
# input values and their respective counts.
most_occur = word_counts.most_common(20)
print(most_occur)
###Output
_____no_output_____ |
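A natural next step (a sketch, not part of the original notebook) is to check the class balance and compare the most frequent terms per author, reusing the subsets defined above:
```python
# a minimal sketch, assuming eap_df / hpl_df / mws_df and stopwords_lst from above
print(df['author'].value_counts())

def top_words(frame, n=10):
    words = frame['text'].str.cat(sep=' ').lower().split()
    words = [w for w in words if w not in stopwords_lst]
    return Counter(words).most_common(n)

for name, frame in [('EAP', eap_df), ('HPL', hpl_df), ('MWS', mws_df)]:
    print(name, top_words(frame))
```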
Training-Code/Morpheus_Maker.ipynb | ###Markdown
Morpheus Maker (ver. 2.0)

***

Powered by tegridy-tools TMIDIX Optimus Processors: https://github.com/asigalov61/tegridy-tools

***

Credit for GPT2-RGA code used in this colab goes out to @Sashmark97 https://github.com/Sashmark97/midigen and @Damon Gwinn https://github.com/gwinndr/MusicTransformer-Pytorch

***

WARNING: This complete implementation is a functioning model of the Artificial Intelligence. Please exercise great humility, care, and respect. https://www.nscai.gov/

***

Project Los Angeles / Tegridy Code 2022

***

(Setup Environment)
###Code
#@title nvidia-smi gpu check
!nvidia-smi
#@title Install all dependencies (run only once per session)
!git clone https://github.com/asigalov61/tegridy-tools
!pip install torch
!pip install tqdm
!pip install matplotlib
#@title Import all needed modules
print('Loading needed modules. Please wait...')
import os
from datetime import datetime
import secrets
import copy
import tqdm as tqdm
from tqdm import tqdm
if not os.path.exists('/notebooks/Dataset'):
os.makedirs('/notebooks/Dataset')
print('Loading TMIDIX module...')
os.chdir('/notebooks/tegridy-tools/tegridy-tools')
import TMIDIX
os.chdir('/notebooks/tegridy-tools/tegridy-tools')
from GPT2RGAX import *
import matplotlib.pyplot as plt
os.chdir('/notebooks/')
###Output
_____no_output_____
###Markdown
(FROM SCRATCH) Download and process MIDI dataset
###Code
#@title Download original LAKH/clean_midi MIDI subset (Recommended)
#@markdown Works best stand-alone/as-is for the optimal results
%cd /notebooks/
!wget 'http://hog.ee.columbia.edu/craffel/lmd/clean_midi.tar.gz'
!tar -xvf 'clean_midi.tar.gz'
!rm 'clean_midi.tar.gz'
%cd /notebooks/
#@title Process MIDIs to special MIDI dataset with TMIDIX MIDI Processor
#@title Process MIDIs
sorted_or_random_file_loading_order = False # Sorted order is NOT recommended
dataset_ratio = 0.02 # Change this if you need more data
print('TMIDIX MIDI Processor')
print('Starting up...')
###########
files_count = 0
gfiles = []
melody_chords_f = []
###########
print('Loading MIDI files...')
print('This may take a while on a large dataset in particular.')
dataset_addr = "./clean_midi/"
# os.chdir(dataset_addr)
filez = list()
for (dirpath, dirnames, filenames) in os.walk(dataset_addr):
filez += [os.path.join(dirpath, file) for file in filenames]
print('=' * 70)
if filez == []:
print('Could not find any MIDI files. Please check Dataset dir...')
print('=' * 70)
if sorted_or_random_file_loading_order:
print('Sorting files...')
filez.sort()
print('Done!')
print('=' * 70)
else:
print('Randomizing file list...')
random.shuffle(filez)
stats = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
print('Processing MIDI files. Please wait...')
for f in tqdm(filez[:int(len(filez) * dataset_ratio)]):
try:
fn = os.path.basename(f)
fn1 = fn.split('.')[0]
files_count += 1
#print('Loading MIDI file...')
score = TMIDIX.midi2ms_score(open(f, 'rb').read())
events_matrix = []
itrack = 1
patches = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
patch_map = [[0, 1, 2, 3, 4, 5, 6, 7], # Piano
[24, 25, 26, 27, 28, 29, 30], # Guitar
[32, 33, 34, 35, 36, 37, 38, 39], # Bass
[40, 41], # Violin
[42, 43], # Cello
[46], # Harp
[56, 57, 58, 59, 60], # Trumpet
[71, 72], # Clarinet
[73, 74, 75], # Flute
[-1], # Fake Drums
[52, 53] # Choir
]
while itrack < len(score):
for event in score[itrack]:
if event[0] == 'note' or event[0] == 'patch_change':
events_matrix.append(event)
itrack += 1
events_matrix1 = []
for event in events_matrix:
if event[0] == 'patch_change':
patches[event[2]] = event[3]
if event[0] == 'note':
event.extend([patches[event[3]]])
once = False
for p in patch_map:
if event[6] in p and event[3] != 9: # Except the drums
event[3] = patch_map.index(p)
once = True
if not once and event[3] != 9: # Except the drums
event[3] = 0 # All other instruments/patches channel
event[5] = max(80, event[5])
if event[3] < 11: # We won't write chans 11-16 for now...
events_matrix1.append(event)
stats[event[3]] += 1
# recalculating timings
for e in events_matrix1:
e[1] = int(e[1] / 10)
e[2] = int(e[2] / 10)
# final processing...
#=======================
if len(events_matrix1) > 0:
events_matrix1.sort(key=lambda x: (x[1], x[4]))
cho = []
pe = events_matrix1[0]
melody_chords = []
for e in events_matrix1:
time = min(255, e[1]-pe[1])
dur = max(1, min(255, e[2]))
cha = e[3]
ptc = min(127, e[4])
vel = min(127, e[5])
melody_chords.append([time, dur, ptc, cha, vel])
pe = e
melody_chords_f.append(melody_chords)
gfiles.append(f)
except KeyboardInterrupt:
print('Saving current progress and quitting...')
break
except:
print('Bad MIDI:', f)
continue
print('=' * 70)
print('Done!')
print('=' * 70)
print('Resulting Stats:')
print('=' * 70)
print('Piano:', stats[0])
print('Guitar:', stats[1])
print('Bass:', stats[2])
print('Violin:', stats[3])
print('Cello:', stats[4])
print('Harp:', stats[5])
print('Trumpet:', stats[6])
print('Clarinet:', stats[7])
print('Flute:', stats[8])
print('Drums:', stats[9])
print('Choir:', stats[10])
print('=' * 70)
# Process and mark INTs...
INTS_f1 = []
for chords_list in tqdm(melody_chords_f):
INTS_f1.append([-1, -1, -1, -1, -1]) # Intro
pe = chords_list[0]
count = 0
for i in chords_list:
INTS_f1.append(i)
if count == len(chords_list)-50:
INTS_f1.append([-2, -2, -2, -2, -2]) # Outro
count += 1
pe = i
INTS_f1.append([-3, -3, -3, -3, -3]) # End
INTS_f1[:15]
TMIDIX.Tegridy_Any_Pickle_File_Writer(INTS_f1, '/notebooks/Morpheus_INTS')
INTS_f1 = TMIDIX.Tegridy_Any_Pickle_File_Reader('/notebooks/Morpheus_INTS')
#@title Load processed INTs datasets
number_of_batches = 64 # Change this to your specs
n_workers = 30 # Change this to your specs
dataset_ratio = 1 # Change this if you want to limit input data
val_dataset_ratio = 0.03 # Change this if you want to limit input data
print('=' * 50)
print('Prepping INTs datasets...')
train_data1 = []
avg_vel = int(sum([y[4] for y in INTS_f1]) / len(INTS_f1))
pe = INTS_f1[0]
for i in tqdm(INTS_f1):
if min(i) >= 0:
if i[0] != 0:
train_data1.extend([i[0] + (int(i[1] / 25) * 256)])
if i[4] > avg_vel:
train_data1.extend([(256 * 11) + 128 + (256 * i[3])+i[2]])
else:
train_data1.extend([(256 * 11) + (256 * i[3])+i[2]])
pe = i
if i == [-1, -1, -1, -1, -1]: # Intro
train_data1.extend([(256 * 11)+(256 * 11)-3])
if i == [-2, -2, -2, -2, -2]: # Outro
train_data1.extend([(256 * 11)+(256 * 11)-2])
if i == [-3, -3, -3, -3, -3]: # End
train_data1.extend([(256 * 11)+(256 * 11)-1])
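# Token layout implied by the encoding above (descriptive note, not original code):
#   timing tokens : delta_time + 256 * (duration // 25)
#   note tokens   : 256*11 + 256*channel + pitch, plus 128 when the note velocity
#                   is above the dataset average
#   markers       : 256*22 - 3 / - 2 / - 1 encode the Intro / Outro / End events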
train_data = train_data1[:int(len(train_data1) * dataset_ratio)]
val_dataset = train_data[:int(len(train_data) * val_dataset_ratio)]
test_dataset = train_data[:int(len(train_data) * val_dataset_ratio)]
train_list = train_data
val_list = val_dataset
test_list = []
print('=' * 50)
print('Processing INTs datasets...')
train_dataset = EPianoDataset(train_list, max_seq, random_seq)
val_dataset = EPianoDataset(val_list, max_seq)
test_dataset = EPianoDataset(test_list, max_seq)
print('=' * 50)
print('Loading INTs datasets...')
batch_size = number_of_batches
train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=n_workers, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size, num_workers=n_workers)
test_loader = DataLoader(test_dataset, batch_size=batch_size, num_workers=n_workers)
print('=' * 50)
print('Total INTs in the dataset', len(train_data))
print('Total unique INTs in the dataset', len(set(train_data)))
print('Max INT in the dataset', max(train_data))
print('Min INT in the dataset', min(train_data))
print('=' * 50)
print('Checking datasets shapes...')
print('=' * 50)
print('Train loader')
for x, tgt in train_loader:
print(f'X shape: {x.shape}')
print(f'Target shape: {tgt.shape}')
break
print('=' * 50)
print('Validation loader')
for x, tgt in val_loader:
print(f'X shape: {x.shape}')
print(f'Target shape: {tgt.shape}')
break
print('=' * 50)
print('Test loader')
for x, tgt in test_loader:
print(f'X shape: {x.shape}')
print(f'Target shape: {tgt.shape}')
break
print('=' * 50)
print('Done! Enjoy! :)')
print('=' * 50)
###Output
_____no_output_____
###Markdown
Test the resulting INTs dataset...
###Code
train_data[:15]
out = train_data[:16000]
if len(out) != 0:
song = []
song = out
song_f = []
time = 0
dur = 0
vel = 0
pitch = 0
duration = 0
for s in song:
if s >= 0 and s < 256 * 11:
time += s % 256
dur = ((s // 256) + 1) * 250
if s >= 256 * 11 and s < (256 * 21):
if (s // 128) % 2 != 0:
vel = 90
channel = ((s-128-(256*11)) // 256)
else:
vel = 60
channel = ((s-(256*11)) // 256)
pitch = s % 256
song_f.append(['note', (abs(time))*10, dur, channel, pitch, vel ])
detailed_stats = TMIDIX.Tegridy_SONG_to_MIDI_Converter(song_f,
output_signature = 'Morpheus',
output_file_name = '/notebooks/Morpheus-Music-Composition',
track_name='Project Los Angeles',
number_of_ticks_per_quarter=500)
print('Done!')
###Output
_____no_output_____
###Markdown
(TRAIN) Train the model
###Code
#@title Train
config = GPTConfig(5640,
max_seq,
dim_feedforward=1024,
n_layer=8,
n_head=8,
n_embd=1024,
enable_rpr=True,
er_len=max_seq)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = GPT(config)
model = nn.DataParallel(model)
model.to(device)
#=====
init_step = 0
lr = LR_DEFAULT_START
lr_stepper = LrStepTracker(d_model, SCHEDULER_WARMUP_STEPS, init_step)
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD)
train_loss_func = eval_loss_func
opt = Adam(model.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
lr_scheduler = LambdaLR(opt, lr_stepper.step)
#===
best_eval_acc = 0.0
best_eval_acc_epoch = -1
best_eval_loss = float("inf")
best_eval_loss_epoch = -1
best_acc_file = '/notebooks/gpt2_rpr_acc.pth'
best_loss_file = '/notebooks/gpt2_rpr_loss.pth'
loss_train, loss_val, acc_val = [], [], []
for epoch in range(0, epochs):
new_best = False
loss = train(epoch+1,
model, train_loader,
train_loss_func,
opt,
lr_scheduler,
num_iters=-1,
save_checkpoint_steps=4000)
loss_train.append(loss)
eval_loss, eval_acc = eval_model(model, val_loader, eval_loss_func, num_iters=-1)
loss_val.append(eval_loss)
acc_val.append(eval_acc)
if(eval_acc > best_eval_acc):
best_eval_acc = eval_acc
best_eval_acc_epoch = epoch+1
torch.save(model.state_dict(), best_acc_file)
new_best = True
if(eval_loss < best_eval_loss):
best_eval_loss = eval_loss
best_eval_loss_epoch = epoch+1
torch.save(model.state_dict(), best_loss_file)
new_best = True
if(new_best):
print("Best eval acc epoch:", best_eval_acc_epoch)
print("Best eval acc:", best_eval_acc)
print("")
print("Best eval loss epoch:", best_eval_loss_epoch)
print("Best eval loss:", best_eval_loss)
# Eval funct to eval separately if needed
#=====
init_step = 0
lr = LR_DEFAULT_START
lr_stepper = LrStepTracker(d_model, SCHEDULER_WARMUP_STEPS, init_step)
eval_loss_func = nn.CrossEntropyLoss(ignore_index=5640)
train_loss_func = eval_loss_func
opt = Adam(model.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
lr_scheduler = LambdaLR(opt, lr_stepper.step)
eval_loss, eval_acc = eval_model(model, val_loader, eval_loss_func, num_iters=-1)
#@title Plot resulting training loss graph
tr_loss_list = [item for sublist in loss_train for item in sublist]
plt.plot([i for i in range(len(tr_loss_list))] ,tr_loss_list, 'b')
plt.savefig('/notebooks/Morpheus-Training-Loss-Graph.png')
###Output
_____no_output_____
###Markdown
(SAVE)
###Code
#@title Save the model
print('Saving the model...')
full_path_to_model_checkpoint = "/notebooks/Morpheus-Trained-Model.pth" #@param {type:"string"}
torch.save(model.state_dict(), full_path_to_model_checkpoint)
print('Done!')
###Output
_____no_output_____ |
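To use the saved weights later (a sketch, assuming the same `config` and `device` defined in the training cell above):
```python
# reload the checkpoint saved above; keys carry a "module." prefix because the
# model was wrapped in nn.DataParallel before saving
model = GPT(config)
model = nn.DataParallel(model)
model.load_state_dict(torch.load("/notebooks/Morpheus-Trained-Model.pth"))
model.to(device)
model.eval()
```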
cvnd/P3_Implement_SLAM/3. Landmark Detection and Tracking.ipynb | ###Markdown
Project 3: Implement SLAM --- Project OverviewIn this project, you'll implement SLAM for robot that moves and senses in a 2 dimensional, grid world!SLAM gives us a way to both localize a robot and build up a map of its environment as a robot moves and senses in real-time. This is an active area of research in the fields of robotics and autonomous systems. Since this localization and map-building relies on the visual sensing of landmarks, this is a computer vision problem. Using what you've learned about robot motion, representations of uncertainty in motion and sensing, and localization techniques, you will be tasked with defining a function, `slam`, which takes in six parameters as input and returns the vector `mu`. > `mu` contains the (x,y) coordinate locations of the robot as it moves, and the positions of landmarks that it senses in the worldYou can implement helper functions as you see fit, but your function must return `mu`. The vector, `mu`, should have (x, y) coordinates interlaced, for example, if there were 2 poses and 2 landmarks, `mu` will look like the following, where `P` is the robot position and `L` the landmark position:```mu = matrix([[Px0], [Py0], [Px1], [Py1], [Lx0], [Ly0], [Lx1], [Ly1]])```You can see that `mu` holds the poses first `(x0, y0), (x1, y1), ...,` then the landmark locations at the end of the matrix; we consider a `nx1` matrix to be a vector. Generating an environmentIn a real SLAM problem, you may be given a map that contains information about landmark locations, and in this example, we will make our own data using the `make_data` function, which generates a world grid with landmarks in it and then generates data by placing a robot in that world and moving and sensing over some numer of time steps. The `make_data` function relies on a correct implementation of robot move/sense functions, which, at this point, should be complete and in the `robot_class.py` file. The data is collected as an instantiated robot moves and senses in a world. Your SLAM function will take in this data as input. So, let's first create this data and explore how it represents the movement and sensor measurements that our robot takes.--- Create the worldUse the code below to generate a world of a specified size with randomly generated landmark locations. You can change these parameters and see how your implementation of SLAM responds! `data` holds the sensors measurements and motion of your robot over time. It stores the measurements as `data[i][0]` and the motion as `data[i][1]`. Helper functionsYou will be working with the `robot` class that may look familiar from the first notebook, In fact, in the `helpers.py` file, you can read the details of how data is made with the `make_data` function. It should look very similar to the robot move/sense cycle you've seen in the first notebook.
###Code
import numpy as np
from helpers import make_data
# your implementation of slam should work with the following inputs
# feel free to change these input values and see how it responds!
# world parameters
num_landmarks = 5 # number of landmarks
N = 20 # time steps
world_size = 100.0 # size of world (square)
# robot parameters
measurement_range = 50.0 # range at which we can sense landmarks
motion_noise = 2.0 # noise in robot motion
measurement_noise = 2.0 # noise in the measurements
distance = 20.0 # distance by which robot (intends to) move each iteratation
# make_data instantiates a robot, AND generates random landmarks for a given world size and number of landmarks
data = make_data(N, num_landmarks, world_size, measurement_range, motion_noise, measurement_noise, distance)
###Output
Landmarks: [[72, 100], [24, 45], [25, 66], [7, 39], [59, 41]]
Robot: [x=50.13139 y=14.69737]
###Markdown
A note on `make_data`

The function above, `make_data`, takes in so many world and robot motion/sensor parameters because it is responsible for:
1. Instantiating a robot (using the robot class)
2. Creating a grid world with landmarks in it

**This function also prints out the true location of landmarks and the *final* robot location, which you should refer back to when you test your implementation of SLAM.**

The `data` this returns is an array that holds information about **robot sensor measurements** and **robot motion** `(dx, dy)` that is collected over a number of time steps, `N`. You will have to use *only* these readings about motion and measurements to track a robot over time and determine the location of the landmarks using SLAM. We only print out the true landmark locations for comparison, later.

In `data` the measurement and motion data can be accessed from the first and second index in the columns of the data array. See the following code for an example, where `i` is the time step:
```
measurement = data[i][0]
motion = data[i][1]
```
###Code
# print out some stats about the data
# total time steps : N-1
# for time_step in range(N-1):
for time_step in range(3):
print('Example measurements: \n', data[time_step][0])
print('Example motion: \n', data[time_step][1])
print('\n')
###Output
Example measurements:
[[1, -25.637804129691425, -6.20347355373945], [2, -24.991509048178585, 15.02131162678203], [3, -44.90012278440724, -11.121288210391182], [4, 8.1275673525483, -7.318720905431141]]
Example motion:
[19.712394326773566, -3.37957244422802]
Example measurements:
[[1, -47.803689326817064, 0.4388879827833345], [4, -9.323226832117305, -5.625437074240885]]
Example motion:
[19.712394326773566, -3.37957244422802]
Example measurements:
[[4, -29.529373080316233, -2.124716575448025]]
Example motion:
[0.6457875860247203, 19.98957124086798]
###Markdown
Try changing the value of `time_step`, you should see that the list of measurements varies based on what in the world the robot sees after it moves. As you know from the first notebook, the robot can only sense so far and with a certain amount of accuracy in the measure of distance between its location and the location of landmarks. The motion of the robot always is a vector with two values: one for x and one for y displacement. This structure will be useful to keep in mind as you traverse this data in your implementation of slam. Initialize ConstraintsOne of the most challenging tasks here will be to create and modify the constraint matrix and vector: omega and xi. In the second notebook, you saw an example of how omega and xi could hold all the values the define the relationships between robot poses `xi` and landmark positions `Li` in a 1D world, as seen below, where omega is the blue matrix and xi is the pink vector.In *this* project, you are tasked with implementing constraints for a 2D world. We are referring to robot poses as `Px, Py` and landmark positions as `Lx, Ly`, and one way to approach this challenge is to add *both* x and y locations in the constraint matrices.You may also choose to create two of each omega and xi (one for x and one for y positions). TODO: Write a function that initializes omega and xiComplete the function `initialize_constraints` so that it returns `omega` and `xi` constraints for the starting position of the robot. Any values that we do not yet know should be initialized with the value `0`. You may assume that our robot starts out in exactly the middle of the world with 100% confidence (no motion or measurement noise at this point). The inputs `N` time steps, `num_landmarks`, and `world_size` should give you all the information you need to construct intial constraints of the correct size and starting values.*Depending on your approach you may choose to return one omega and one xi that hold all (x,y) positions *or* two of each (one for x values and one for y); choose whichever makes most sense to you!*
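As a tiny 1-D illustration (hypothetical, not part of the project code) of how a single motion constraint updates `omega` and `xi`, consider one pose `x0 = 50` and one motion `x1 - x0 = 5` with a noise weight of 1:
```python
import numpy as np

omega = np.array([[1., 0.],    # initial-position constraint on x0
                  [0., 0.]])
xi = np.array([[50.],
               [0.]])

# motion constraint x1 - x0 = 5, added with weight 1/noise (noise = 1 here)
omega += np.array([[ 1., -1.],
                   [-1.,  1.]])
xi += np.array([[-5.],
                [ 5.]])

mu = np.linalg.inv(omega) @ xi
print(mu)   # -> [[50.], [55.]]  i.e. x0 = 50, x1 = 55
```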
###Code
def initialize_constraints(N, num_landmarks, world_size):
''' This function takes in a number of time steps N, number of landmarks, and a world_size,
and returns initialized constraint matrices, omega and xi.'''
## Recommended: Define and store the size (rows/cols) of the constraint matrix in a variable
## TODO: Define the constraint matrix, Omega, with two initial "strength" values
## for the initial x, y location of our robot
# omega = [0]
center = world_size // 2
dim = ( N + num_landmarks ) * 2
omega = np.zeros((dim, dim))
omega[0][0] = 1
omega[1][1] = 1
## TODO: Define the constraint *vector*, xi
## you can assume that the robot starts out in the middle of the world with 100% confidence
# xi = [0]
xi = np.zeros((dim, 1))
xi[0][0] = center
xi[1][0] = center
return omega, xi
omega_test, xi_test = initialize_constraints(3, 2, 100)
print(omega_test)
print(xi_test)
###Output
[[1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
[[50.]
[50.]
[ 0.]
[ 0.]
[ 0.]
[ 0.]
[ 0.]
[ 0.]
[ 0.]
[ 0.]]
###Markdown
Test as you goIt's good practice to test out your code, as you go. Since `slam` relies on creating and updating constraint matrices, `omega` and `xi` to account for robot sensor measurements and motion, let's check that they initialize as expected for any given parameters.Below, you'll find some test code that allows you to visualize the results of your function `initialize_constraints`. We are using the [seaborn](https://seaborn.pydata.org/) library for visualization.**Please change the test values of N, landmarks, and world_size and see the results**. Be careful not to use these values as input into your final smal function.This code assumes that you have created one of each constraint: `omega` and `xi`, but you can change and add to this code, accordingly. The constraints should vary in size with the number of time steps and landmarks as these values affect the number of poses a robot will take `(Px0,Py0,...Pxn,Pyn)` and landmark locations `(Lx0,Ly0,...Lxn,Lyn)` whose relationships should be tracked in the constraint matrices. Recall that `omega` holds the weights of each variable and `xi` holds the value of the sum of these variables, as seen in Notebook 2. You'll need the `world_size` to determine the starting pose of the robot in the world and fill in the initial values for `xi`.
###Code
# import data viz resources
import matplotlib.pyplot as plt
from pandas import DataFrame
import seaborn as sns
%matplotlib inline
# define a small N and world_size (small for ease of visualization)
N_test = 5
num_landmarks_test = 2
small_world = 10
# initialize the constraints
initial_omega, initial_xi = initialize_constraints(N_test, num_landmarks_test, small_world)
# define figure size
plt.rcParams["figure.figsize"] = (10,7)
# display omega
sns.heatmap(DataFrame(initial_omega), cmap='Blues', annot=True, linewidths=.5)
# define figure size
plt.rcParams["figure.figsize"] = (1,7)
# display xi
sns.heatmap(DataFrame(initial_xi), cmap='Oranges', annot=True, linewidths=.5)
###Output
_____no_output_____
###Markdown
--- SLAM inputs In addition to `data`, your slam function will also take in:* N - The number of time steps that a robot will be moving and sensing* num_landmarks - The number of landmarks in the world* world_size - The size (w/h) of your world* motion_noise - The noise associated with motion; the update confidence for motion should be `1.0/motion_noise`* measurement_noise - The noise associated with measurement/sensing; the update weight for measurement should be `1.0/measurement_noise` A note on noiseRecall that `omega` holds the relative "strengths" or weights for each position variable, and you can update these weights by accessing the correct index in omega `omega[row][col]` and *adding/subtracting* `1.0/noise` where `noise` is measurement or motion noise. `Xi` holds actual position values, and so to update `xi` you'll do a similar addition process only using the actual value of a motion or measurement. So for a vector index `xi[row][0]` you will end up adding/subtracting one measurement or motion divided by their respective `noise`. TODO: Implement Graph SLAMFollow the TODO's below to help you complete this slam implementation (these TODO's are in the recommended order), then test out your implementation! Updating with motion and measurementsWith a 2D omega and xi structure as shown above (in earlier cells), you'll have to be mindful about how you update the values in these constraint matrices to account for motion and measurement constraints in the x and y directions. Recall that the solution to these matrices (which holds all values for robot poses `P` and landmark locations `L`) is the vector, `mu`, which can be computed at the end of the construction of omega and xi as the inverse of omega times xi: $\mu = \Omega^{-1}\xi$**You may also choose to return the values of `omega` and `xi` if you want to visualize their final state!**
###Code
## TODO: Complete the code to implement SLAM
## slam takes in 6 arguments and returns mu,
## mu is the entire path traversed by a robot (all x,y poses) *and* all landmarks locations
def slam(data, N, num_landmarks, world_size, motion_noise, measurement_noise):
## TODO: Use your initilization to create constraint matrices, omega and xi
omega, xi = initialize_constraints(N, num_landmarks, world_size)
## TODO: Iterate through each time step in the data
## get all the motion and measurement data as you iterate
for i in range(N-1):
robo_x = 2*i
robo_y = robo_x + 1
measurements = data[i][0]
motion = data[i][1]
msm_increment = 1./measurement_noise
mon_increment = 1./motion_noise
## TODO: update the constraint matrix/vector to account for all *measurements*
## this should be a series of additions that take into account the measurement noise
for measurement in measurements:
land_x = (N + measurement[0]) * 2
land_y = land_x + 1
omega[robo_x][robo_x] += msm_increment
omega[robo_y][robo_y] += msm_increment
omega[land_x][land_x] += msm_increment
omega[land_y][land_y] += msm_increment
omega[robo_x][land_x] -= msm_increment
omega[land_x][robo_x] -= msm_increment
omega[robo_y][land_y] -= msm_increment
omega[land_y][robo_y] -= msm_increment
xi[robo_x][0] -= measurement[1] * msm_increment
xi[robo_y][0] -= measurement[2] * msm_increment
xi[land_x][0] += measurement[1] * msm_increment
xi[land_y][0] += measurement[2] * msm_increment
## TODO: update the constraint matrix/vector to account for all *motion* and motion noise
omega[robo_x][robo_x] += mon_increment
omega[robo_y][robo_y] += mon_increment
omega[robo_x+2][robo_x+2] += mon_increment
omega[robo_y+2][robo_y+2] += mon_increment
omega[robo_x][robo_x+2] -= mon_increment
omega[robo_x+2][robo_x] -= mon_increment
omega[robo_y][robo_y+2] -= mon_increment
omega[robo_y+2][robo_y] -= mon_increment
xi[robo_x][0] -= motion[0] * mon_increment
xi[robo_y][0] -= motion[1] * mon_increment
xi[robo_x+2][0] += motion[0] * mon_increment
xi[robo_y+2][0] += motion[1] * mon_increment
## TODO: After iterating through all the data
## Compute the best estimate of poses and landmark positions
## using the formula, omega_inverse * Xi
omega_inv = np.linalg.inv(np.matrix(omega))
# calculate the solution, mu
mu = np.dot(omega_inv, xi)
return mu # return `mu`
###Output
_____no_output_____
###Markdown
Helper functions

To check that your implementation of SLAM works for various inputs, we have provided two helper functions that display the estimated pose and landmark locations your function has produced. First, given a result `mu` and a number of time steps, `N`, we define a function that extracts the poses and landmark locations and returns them as separate lists. Then, we define a function that nicely prints out these lists; we will call both in the next step.
###Code
# a helper function that creates a list of poses and of landmarks for ease of printing
# this only works for the suggested constraint architecture of interlaced x,y poses
def get_poses_landmarks(mu, N):
# create a list of poses
poses = []
for i in range(N):
poses.append((mu[2*i].item(), mu[2*i+1].item()))
# create a list of landmarks
landmarks = []
for i in range(num_landmarks):
landmarks.append((mu[2*(N+i)].item(), mu[2*(N+i)+1].item()))
# return completed lists
return poses, landmarks
def print_all(poses, landmarks):
print('\n')
print('Estimated Poses:')
for i in range(len(poses)):
print('['+', '.join('%.3f'%p for p in poses[i])+']')
print('\n')
print('Estimated Landmarks:')
for i in range(len(landmarks)):
print('['+', '.join('%.3f'%l for l in landmarks[i])+']')
###Output
_____no_output_____
###Markdown
Run SLAMOnce you've completed your implementation of `slam`, see what `mu` it returns for different world sizes and different landmarks! What to ExpectThe `data` that is generated is random, but you did specify the number, `N`, or time steps that the robot was expected to move and the `num_landmarks` in the world (which your implementation of `slam` should see and estimate a position for. Your robot should also start with an estimated pose in the very center of your square world, whose size is defined by `world_size`.With these values in mind, you should expect to see a result that displays two lists:1. **Estimated poses**, a list of (x, y) pairs that is exactly `N` in length since this is how many motions your robot has taken. The very first pose should be the center of your world, i.e. `[50.000, 50.000]` for a world that is 100.0 in square size.2. **Estimated landmarks**, a list of landmark positions (x, y) that is exactly `num_landmarks` in length. Landmark LocationsIf you refer back to the printout of *exact* landmark locations when this data was created, you should see values that are very similar to those coordinates, but not quite (since `slam` must account for noise in motion and measurement).
###Code
# call your implementation of slam, passing in the necessary parameters
mu = slam(data, N, num_landmarks, world_size, motion_noise, measurement_noise)
# print out the resulting landmarks and poses
if(mu is not None):
# get the lists of poses and landmarks
# and print them out
poses, landmarks = get_poses_landmarks(mu, N)
print_all(poses, landmarks)
# Landmarks: [[72, 100], [24, 45], [25, 66], [7, 39], [59, 41]]
# Robot: [x=50.13139 y=14.69737]
###Output
Estimated Poses:
[50.000, 50.000]
[69.405, 45.952]
[88.525, 42.219]
[88.921, 60.547]
[89.225, 79.563]
[97.163, 98.018]
[89.153, 79.789]
[80.502, 62.930]
[70.378, 45.599]
[60.212, 28.875]
[51.813, 9.432]
[46.265, 28.334]
[41.662, 47.569]
[36.164, 64.678]
[30.757, 83.931]
[10.857, 83.983]
[20.759, 67.476]
[30.387, 48.998]
[38.864, 31.589]
[47.853, 13.723]
Estimated Landmarks:
[71.090, 100.067]
[23.311, 45.001]
[24.607, 65.656]
[6.334, 38.987]
[58.655, 41.402]
###Markdown
Visualize the constructed world

Finally, using the `display_world` code from the `helpers.py` file (which was also used in the first notebook), we can actually visualize what you have coded with `slam`: the final position of the robot and the position of the landmarks, created from only motion and measurement data!

**Note that these should be very similar to the printed *true* landmark locations and final pose from our call to `make_data` early in this notebook.**
###Code
# import the helper function
from helpers import display_world
# Display the final world!
# define figure size
plt.rcParams["figure.figsize"] = (20,20)
# check if poses has been created
if 'poses' in locals():
# print out the last pose
print('Last pose: ', poses[-1])
# display the last position of the robot *and* the landmark positions
display_world(int(world_size), poses[-1], landmarks)
###Output
Last pose: (47.85295592820569, 13.723173393297415)
###Markdown
Trial with different parameters
###Code
# world parameters
num_landmarks = 5 # number of landmarks
N = 60 # time steps
world_size = 100.0 # size of world (square)
# robot parameters
measurement_range = 50.0 # range at which we can sense landmarks
motion_noise = 2.0 # noise in robot motion
measurement_noise = 2.0 # noise in the measurements
distance = 20.0 # distance by which robot (intends to) move each iteratation
# make_data instantiates a robot, AND generates random landmarks for a given world size and number of landmarks
data = make_data(N, num_landmarks, world_size, measurement_range, motion_noise, measurement_noise, distance)
# initialize the constraints
initial_omega, initial_xi = initialize_constraints(N_test, num_landmarks_test, small_world)
# call your implementation of slam, passing in the necessary parameters
mu = slam(data, N, num_landmarks, world_size, motion_noise, measurement_noise)
# print out the resulting landmarks and poses
if(mu is not None):
# get the lists of poses and landmarks
# and print them out
poses, landmarks = get_poses_landmarks(mu, N)
print_all(poses, landmarks)
# Display the final world!
# define figure size
plt.rcParams["figure.figsize"] = (20,20)
# check if poses has been created
if 'poses' in locals():
# print out the last pose
print('Last pose: ', poses[-1])
# display the last position of the robot *and* the landmark positions
display_world(int(world_size), poses[-1], landmarks)
###Output
Last pose: (41.31542135934828, 32.769081764352705)
###Markdown
Question: How far away is your final pose (as estimated by `slam`) compared to the *true* final pose? Why do you think these poses are different?

You can find the true value of the final pose in one of the first cells where `make_data` was called. You may also want to look at the true landmark locations and compare them to those that were estimated by `slam`. Ask yourself: what do you think would happen if we moved and sensed more (increased N)? Or if we had lower/higher noise parameters?

**Answer**: When N is increased (compare the two runs below), the MSE of the estimated landmark positions decreases. I think this is because the measurement and motion noise is random and tends to average out over many constraints, while information about the true landmark locations accumulates at the same positions over time.
###Code
N30_landmark = [[72, 100], [24, 45], [25, 66], [7, 39], [59, 41]]
N30_estimated = [[71.090, 100.067], [23.311, 45.001], [24.607, 65.656] , [6.334, 38.987] , [58.655, 41.402]]
mse = 0
for idx in range(5):
mse += (N30_landmark[idx][0] - N30_estimated[idx][0])**2
mse += (N30_landmark[idx][1] - N30_estimated[idx][1])**2
mse /= 5
print("N=30 :", mse)
N60_landmark = [[7, 55], [93, 92], [10, 52], [43, 78], [7, 97]]
N60_estimated = [[6.929, 55.189], [92.860, 91.724], [9.437, 52.287], [42.954, 78.376], [6.141, 97.720]]
mse = 0
for idx in range(5):
mse += (N60_landmark[idx][0] - N60_estimated[idx][0])**2
mse += (N60_landmark[idx][1] - N60_estimated[idx][1])**2
mse /= 5
print("N=60 :", mse)
###Output
N=30 : 0.4608899999999981
N=60 : 0.3871298
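The same comparison can be written more compactly with numpy (a sketch, equivalent to the loop above):
```python
import numpy as np

for label, (true_lm, est_lm) in [('N=30', (N30_landmark, N30_estimated)),
                                 ('N=60', (N60_landmark, N60_estimated))]:
    mse = np.mean(np.sum((np.array(true_lm) - np.array(est_lm)) ** 2, axis=1))
    print(label, ':', mse)
```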
###Markdown
Testing

To confirm that your slam code works before submitting your project, it is suggested that you run it on some test data and cases. A few such cases have been provided for you in the cells below. When you are ready, uncomment the test cases in the next cells (there are two test cases in total); your output should be **close to or exactly** identical to the given results. If there are minor discrepancies, it could be a matter of floating-point accuracy or of the calculation of the inverse matrix.

Submit your project

If you pass these tests, it is a good indication that your project will pass all the specifications in the project rubric. Follow the submission instructions to officially submit!
###Code
# Here is the data and estimated outputs for test case 1
test_data1 = [[[[1, 19.457599255548065, 23.8387362100849], [2, -13.195807561967236, 11.708840328458608], [3, -30.0954905279171, 15.387879242505843]], [-12.2607279422326, -15.801093326936487]], [[[2, -0.4659930049620491, 28.088559771215664], [4, -17.866382374890936, -16.384904503932]], [-12.2607279422326, -15.801093326936487]], [[[4, -6.202512900833806, -1.823403210274639]], [-12.2607279422326, -15.801093326936487]], [[[4, 7.412136480918645, 15.388585962142429]], [14.008259661173426, 14.274756084260822]], [[[4, -7.526138813444998, -0.4563942429717849]], [14.008259661173426, 14.274756084260822]], [[[2, -6.299793150150058, 29.047830407717623], [4, -21.93551130411791, -13.21956810989039]], [14.008259661173426, 14.274756084260822]], [[[1, 15.796300959032276, 30.65769689694247], [2, -18.64370821983482, 17.380022987031367]], [14.008259661173426, 14.274756084260822]], [[[1, 0.40311325410337906, 14.169429532679855], [2, -35.069349468466235, 2.4945558982439957]], [14.008259661173426, 14.274756084260822]], [[[1, -16.71340983241936, -2.777000269543834]], [-11.006096015782283, 16.699276945166858]], [[[1, -3.611096830835776, -17.954019226763958]], [-19.693482634035977, 3.488085684573048]], [[[1, 18.398273354362416, -22.705102332550947]], [-19.693482634035977, 3.488085684573048]], [[[2, 2.789312482883833, -39.73720193121324]], [12.849049222879723, -15.326510824972983]], [[[1, 21.26897046581808, -10.121029799040915], [2, -11.917698965880655, -23.17711662602097], [3, -31.81167947898398, -16.7985673023331]], [12.849049222879723, -15.326510824972983]], [[[1, 10.48157743234859, 5.692957082575485], [2, -22.31488473554935, -5.389184118551409], [3, -40.81803984305378, -2.4703329790238118]], [12.849049222879723, -15.326510824972983]], [[[0, 10.591050242096598, -39.2051798967113], [1, -3.5675572049297553, 22.849456408289125], [2, -38.39251065320351, 7.288990306029511]], [12.849049222879723, -15.326510824972983]], [[[0, -3.6225556479370766, -25.58006865235512]], [-7.8874682868419965, -18.379005523261092]], [[[0, 1.9784503557879374, -6.5025974151499]], [-7.8874682868419965, -18.379005523261092]], [[[0, 10.050665232782423, 11.026385307998742]], [-17.82919359778298, 9.062000642947142]], [[[0, 26.526838150174818, -0.22563393232425621], [4, -33.70303936886652, 2.880339841013677]], [-17.82919359778298, 9.062000642947142]]]
## Test Case 1
##
# Estimated Pose(s):
# [50.000, 50.000]
# [37.858, 33.921]
# [25.905, 18.268]
# [13.524, 2.224]
# [27.912, 16.886]
# [42.250, 30.994]
# [55.992, 44.886]
# [70.749, 59.867]
# [85.371, 75.230]
# [73.831, 92.354]
# [53.406, 96.465]
# [34.370, 100.134]
# [48.346, 83.952]
# [60.494, 68.338]
# [73.648, 53.082]
# [86.733, 38.197]
# [79.983, 20.324]
# [72.515, 2.837]
# [54.993, 13.221]
# [37.164, 22.283]
# Estimated Landmarks:
# [82.679, 13.435]
# [70.417, 74.203]
# [36.688, 61.431]
# [18.705, 66.136]
# [20.437, 16.983]
### Uncomment the following three lines for test case 1 and compare the output to the values above ###
mu_1 = slam(test_data1, 20, 5, 100.0, 2.0, 2.0)
poses, landmarks = get_poses_landmarks(mu_1, 20)
print_all(poses, landmarks)
# Here is the data and estimated outputs for test case 2
test_data2 = [[[[0, 26.543274387283322, -6.262538160312672], [3, 9.937396825799755, -9.128540360867689]], [18.92765331253674, -6.460955043986683]], [[[0, 7.706544739722961, -3.758467215445748], [1, 17.03954411948937, 31.705489938553438], [3, -11.61731288777497, -6.64964096716416]], [18.92765331253674, -6.460955043986683]], [[[0, -12.35130507136378, 2.585119104239249], [1, -2.563534536165313, 38.22159657838369], [3, -26.961236804740935, -0.4802312626141525]], [-11.167066095509824, 16.592065417497455]], [[[0, 1.4138633151721272, -13.912454837810632], [1, 8.087721200818589, 20.51845934354381], [3, -17.091723454402302, -16.521500551709707], [4, -7.414211721400232, 38.09191602674439]], [-11.167066095509824, 16.592065417497455]], [[[0, 12.886743222179561, -28.703968411636318], [1, 21.660953298391387, 3.4912891084614914], [3, -6.401401414569506, -32.321583037341625], [4, 5.034079343639034, 23.102207946092893]], [-11.167066095509824, 16.592065417497455]], [[[1, 31.126317672358578, -10.036784369535214], [2, -38.70878528420893, 7.4987265861424595], [4, 17.977218575473767, 6.150889254289742]], [-6.595520680493778, -18.88118393939265]], [[[1, 41.82460922922086, 7.847527392202475], [3, 15.711709540417502, -30.34633659912818]], [-6.595520680493778, -18.88118393939265]], [[[0, 40.18454208294434, -6.710999804403755], [3, 23.019508919299156, -10.12110867290604]], [-6.595520680493778, -18.88118393939265]], [[[3, 27.18579315312821, 8.067219022708391]], [-6.595520680493778, -18.88118393939265]], [[], [11.492663265706092, 16.36822198838621]], [[[3, 24.57154567653098, 13.461499960708197]], [11.492663265706092, 16.36822198838621]], [[[0, 31.61945290413707, 0.4272295085799329], [3, 16.97392299158991, -5.274596836133088]], [11.492663265706092, 16.36822198838621]], [[[0, 22.407381798735177, -18.03500068379259], [1, 29.642444125196995, 17.3794951934614], [3, 4.7969752441371645, -21.07505361639969], [4, 14.726069092569372, 32.75999422300078]], [11.492663265706092, 16.36822198838621]], [[[0, 10.705527984670137, -34.589764174299596], [1, 18.58772336795603, -0.20109708164787765], [3, -4.839806195049413, -39.92208742305105], [4, 4.18824810165454, 14.146847823548889]], [11.492663265706092, 16.36822198838621]], [[[1, 5.878492140223764, -19.955352450942357], [4, -7.059505455306587, -0.9740849280550585]], [19.628527845173146, 3.83678180657467]], [[[1, -11.150789592446378, -22.736641053247872], [4, -28.832815721158255, -3.9462962046291388]], [-19.841703647091965, 2.5113335861604362]], [[[1, 8.64427397916182, -20.286336970889053], [4, -5.036917727942285, -6.311739993868336]], [-5.946642674882207, -19.09548221169787]], [[[0, 7.151866679283043, -39.56103232616369], [1, 16.01535401373368, -3.780995345194027], [4, -3.04801331832137, 13.697362774960865]], [-5.946642674882207, -19.09548221169787]], [[[0, 12.872879480504395, -19.707592098123207], [1, 22.236710716903136, 16.331770792606406], [3, -4.841206109583004, -21.24604435851242], [4, 4.27111163223552, 32.25309748614184]], [-5.946642674882207, -19.09548221169787]]]
## Test Case 2
##
# Estimated Pose(s):
# [50.000, 50.000]
# [69.035, 45.061]
# [87.655, 38.971]
# [76.084, 55.541]
# [64.283, 71.684]
# [52.396, 87.887]
# [44.674, 68.948]
# [37.532, 49.680]
# [31.392, 30.893]
# [24.796, 12.012]
# [33.641, 26.440]
# [43.858, 43.560]
# [54.735, 60.659]
# [65.884, 77.791]
# [77.413, 94.554]
# [96.740, 98.020]
# [76.149, 99.586]
# [70.211, 80.580]
# [64.130, 61.270]
# [58.183, 42.175]
# Estimated Landmarks:
# [76.777, 42.415]
# [85.109, 76.850]
# [13.687, 95.386]
# [59.488, 39.149]
# [69.283, 93.654]
### Uncomment the following three lines for test case 2 and compare to the values above ###
mu_2 = slam(test_data2, 20, 5, 100.0, 2.0, 2.0)
poses, landmarks = get_poses_landmarks(mu_2, 20)
print_all(poses, landmarks)
###Output
Estimated Poses:
[50.000, 50.000]
[69.181, 45.665]
[87.743, 39.703]
[76.270, 56.311]
[64.317, 72.176]
[52.257, 88.154]
[44.059, 69.401]
[37.002, 49.918]
[30.924, 30.955]
[23.508, 11.419]
[34.180, 27.133]
[44.155, 43.846]
[54.806, 60.920]
[65.698, 78.546]
[77.468, 95.626]
[96.802, 98.821]
[75.957, 99.971]
[70.200, 81.181]
[64.054, 61.723]
[58.107, 42.628]
Estimated Landmarks:
[76.779, 42.887]
[85.065, 77.438]
[13.548, 95.652]
[59.449, 39.595]
[69.263, 94.240]
|
notebooks/predict-for-clinvar.ipynb | ###Markdown
clinvar

* only use consistent positions
###Code
import pandas, numpy
import pydot, pydotplus, graphviz
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import linear_model, metrics, tree, svm
from sklearn.neural_network import MLPClassifier
from sklearn.externals.six import StringIO
from IPython.display import HTML
%matplotlib inline
def calc_path_frac(rows):
pfam = list(rows['pfam'].values)[0]
pathogenic = len(rows[ (rows.clin_class=='PATHOGENIC') | (rows.clin_class=='LIKLEY_PATHOGENIC')])
benign = len(rows[ (rows.clin_class=='LIKELY_BENIGN') | (rows.clin_class=='BENIGN')])
frac = -1
if pathogenic+benign:
frac = pathogenic/(pathogenic+benign)
return pandas.Series([frac, len(rows)], index=['path_frac', 'size'])
dat_file = '../data/interim/EPIv6.eff.dbnsfp.anno.hHack.dat.xls'
df_pre = pandas.read_csv(dat_file, sep='\t')
df = (df_pre['pfam'].str.split(',', expand=True)
.stack()
.reset_index(level=0)
.set_index('level_0')
.rename(columns={0:'pfam'})
.join(df_pre.drop('pfam',1), how='left')
)
dd = df.groupby('pfam').apply(calc_path_frac)
ff = dd.reset_index()
# mk domain features
def match(row, domain_info):
ls = []
for pfam in row['pfam'].split(','):
if pfam in domain_info:
if domain_info[pfam][2] == 0:
ls.append(domain_info[pfam])
if len(ls) == 0:
for pfam in row['pfam'].split(','):
if pfam in domain_info:
return domain_info[pfam]
if len(ls):
return ls[0]
else:
return (0, 0, 1)
ff.loc[:, 'path_na'] = ff.apply(lambda row: 1 if row['path_frac']==-1 else 0, axis=1)
domain_info = {pfam:[path_frac, size, path_na]
for pfam, path_frac, size, path_na
in ff.values}
df_pre.loc[:, 'path_frac_t'] = df_pre.apply(lambda row: match(row, domain_info)[0], axis=1)
df_pre.loc[:, 'size_t'] = df_pre.apply(lambda row: match(row, domain_info)[1], axis=1)
df_pre.loc[:, 'path_na_t'] = df_pre.apply(lambda row: match(row, domain_info)[2], axis=1)
df_x_pre = df_pre[ (df_pre.clin_class != 'VUS') & (df_pre.mpc>0) & (df_pre.pfam != 'none')]
df_s = df_x_pre.groupby('pfam').size().reset_index()
multi_pfam = set( df_s[df_s[0]>1]['pfam'].values )
df_x_pre.loc[:, 'multi_pfam'] = df_x_pre.apply(lambda row: row['pfam'] in multi_pfam, axis=1)
df_x = df_x_pre[df_x_pre.multi_pfam]
df_x.loc[:, 'y'] = df_x.apply(lambda row: 1 if row['clin_class'] in ('PATHOGENIC', 'LIKLEY_PATHOGENIC')
else 0, axis=1)
df_x.head()
clin_file = '../data/interim/clinvar/clinvar.dat'
clinvar_df_pre = pandas.read_csv(clin_file, sep='\t')
def calc_final_sig(row):
sig_set = set(str(row['clinSig'].split('|')))
has_benign = '2' in sig_set or '3' in sig_set
has_path = '4' in sig_set or '5' in sig_set
if has_path and not has_benign:
return 1
if not has_path and has_benign:
return 0
return -1
clinvar_df_pre.loc[:, "y"] = clinvar_df_pre.apply(calc_final_sig, axis=1)
clinvar_df = clinvar_df_pre[(clinvar_df_pre.y!=-1) & (clinvar_df_pre.pfam!='none') & (clinvar_df_pre.mpc>0)].drop_duplicates()
clinvar_df.loc[:, 'path_frac_t'] = clinvar_df.apply(lambda row: match(row, domain_info)[0], axis=1)
clinvar_df.loc[:, 'size_t'] = clinvar_df.apply(lambda row: match(row, domain_info)[1], axis=1)
clinvar_df.loc[:, 'path_na_t'] = clinvar_df.apply(lambda row: match(row, domain_info)[2], axis=1)
# need a smarter match to domain here
#m = pandas.merge(clinvar_df, ff, on='pfam', how='left')
#m.head()
print(len(clinvar_df))
print(len(clinvar_df[clinvar_df.y==1]))
print(len(clinvar_df[clinvar_df.y==0]))
scores = clinvar_df['mpc'].values
truth = clinvar_df['y'].values
fpr_mpc, tpr_mpc, _ = metrics.roc_curve(truth, scores, pos_label=1)
#plt.plot(fpr_mpc, tpr_mpc, label='mpc', color='black')
mpc_auc = metrics.auc(fpr_mpc, tpr_mpc)
# train new tree and apply to clinvar
tree_clf = tree.DecisionTreeClassifier(max_depth=4)
all_preds = []
all_truth = []
cols = ['mpc', 'size_t', 'path_na_t', 'path_frac_t']
X, y = df_x[cols], df_x['y']
tree_clf.fit(X, y)
dot_data = StringIO()
tree.export_graphviz(tree_clf, feature_names=cols, out_file=dot_data)
graph = pydotplus.graph_from_dot_data( dot_data.getvalue() )
graph.write_pdf('mtr_tree.full.pdf')
X_clin, y_clin = clinvar_df[cols], clinvar_df['y']
preds = tree_clf.predict_proba(X_clin)
fpr_tree, tpr_tree, _ = metrics.roc_curve(y_clin, [x[1] for x in preds], pos_label=1)
tree_auc = metrics.auc(fpr_tree, tpr_tree)
HTML('<iframe src=./mtr_tree.full.pdf width=1000 height=500></iframe>')
# train new tree and apply to clinvar: just pathogenic frac
tree_clf = tree.DecisionTreeClassifier(max_depth=3)
all_preds = []
all_truth = []
cols = ['size_t', 'path_na_t', 'path_frac_t']
X, y = df_x[cols], df_x['y']
tree_clf.fit(X, y)
dot_data = StringIO()
tree.export_graphviz(tree_clf, feature_names=cols, out_file=dot_data)
graph = pydotplus.graph_from_dot_data( dot_data.getvalue() )
graph.write_pdf('mtr_tree.full.nompc.pdf')
X_clin, y_clin = clinvar_df[cols], clinvar_df['y']
preds = tree_clf.predict_proba(X_clin)
fpr_tree_nm, tpr_tree_nm, _ = metrics.roc_curve(y_clin, [x[1] for x in preds], pos_label=1)
tree_auc_nm = metrics.auc(fpr_tree_nm, tpr_tree_nm)
HTML('<iframe src=./mtr_tree.full.nompc.pdf width=1000 height=500></iframe>')
#X_clin, y_clin = clinvar_df[cols], clinvar_df['y']
#preds = tree_clf.predict_proba(X_clin)
#fpr_tree, tpr_tree, _ = metrics.roc_curve(y_clin, [x[1] for x in preds], pos_label=1)
#tree_auc = metrics.auc(fpr_tree, tpr_tree)
#mpc_auc = metrics.auc(fpr_mpc, tpr_mpc)
print('mpc auc', mpc_auc)
print('tree auc', tree_auc)
print('tree-no-mpc auc', tree_auc_nm)
plt.plot(fpr_tree, tpr_tree, label='tree', color='green')
plt.plot(fpr_tree_nm, tpr_tree_nm, label='tree-no-mpc', color='orange')
plt.plot(fpr_mpc, tpr_mpc, label='mpc', color='black')
plt.legend(loc=4)
plt.savefig('../docs/plots/clinvar_roc.png')
###Output
mpc auc 0.854802406183
tree auc 0.876005895945
tree-no-mpc auc 0.841307465541
|
2. Numpy, Pandas, Matplotlib/Lesson 5. Matplotlib and Seaborn - Part 2/1. Scatterplot_Practice.ipynb | ###Markdown
In this workspace, you'll make use of this data set describing various car attributes, such as fuel efficiency. The cars in this dataset represent about 3900 sedans tested by the EPA from 2013 to 2018. This dataset is a trimmed-down version of the data found [here](https://catalog.data.gov/dataset/fuel-economy-data).
###Code
import pandas as pd
import matplotlib.pyplot as plt
fuel_econ = pd.read_csv('./data/fuel_econ.csv')
fuel_econ.head()
###Output
_____no_output_____
###Markdown
**Task 1**: Let's look at the relationship between fuel mileage ratings for city vs. highway driving, as stored in the 'city' and 'highway' variables (in miles per gallon, or mpg). Use a _scatter plot_ to depict the data. What is the general relationship between these variables? Are there any points that appear unusual against these trends?
###Code
# YOUR CODE HERE
plt.scatter(data = fuel_econ, x = "city", y="highway", alpha = 0.1)
plt.xlabel("City fuel eff. (mpg)")
plt.ylabel("Highway fuel eff. (mpg)")
# run this cell to check your work against ours
scatterplot_solution_1()
###Output
Most of the data falls in a large blob between 10 and 30 mpg city and 20 to 40 mpg highway. Some transparency is added via 'alpha' to show the concentration of data. Interestingly, for most cars highway mileage is clearly higher than city mileage, but for those cars with city mileage above about 30 mpg, the distinction is less pronounced. In fact, most cars above 45 mpg city have better city mileage than highway mileage, contrary to the main trend. It might be good to call out this trend by adding a diagonal line to the figure using the `plot` function. (See the solution file for that code!)
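###Markdown
A minimal sketch of that diagonal guide line (this is not the solution file's code; the mpg limits below are an illustrative assumption):
###Code
# overlay a y = x reference line: points above it have better highway than city mileage
plt.scatter(data = fuel_econ, x = "city", y = "highway", alpha = 0.1)
lims = [10, 60]  # illustrative range wide enough to cover the data
plt.plot(lims, lims, linestyle = '--', color = 'gray')
plt.xlabel("City fuel eff. (mpg)")
plt.ylabel("Highway fuel eff. (mpg)")
###Output
_____no_output_____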
###Markdown
**Task 2**: Let's look at the relationship between two other numeric variables. How does the engine size relate to a car's CO2 footprint? The 'displ' variable has the former (in liters), while the 'co2' variable has the latter (in grams per mile). Use a heat map to depict the data. How strong is this trend?
###Code
# YOUR CODE HERE
import numpy as np
x_bins = np.arange(0.6, 7+0.4, 0.4)
y_bins = np.arange(0, 692 + 50 ,50)
plt.hist2d(data = fuel_econ, x="displ", y="co2", cmin=0.5, bins=[x_bins, y_bins], cmap = "viridis_r" )
plt.colorbar();
fuel_econ[["displ", "co2"]].describe()
# run this cell to check your work against ours
scatterplot_solution_2()
###Output
In the heat map, I've set up a color map that goes from light to dark, and made it so that any cells without count don't get colored in. The visualization shows that most cars fall in a line where larger engine sizes correlate with higher emissions. The trend is somewhat broken by those cars with the lowest emissions, which still have engine sizes shared by most cars (between 1 and 3 liters).
|
notebook/08t-resnext50-512.ipynb.ipynb | ###Markdown
GPU
###Code
gpu_info = !nvidia-smi
gpu_info = '\n'.join(gpu_info)
print(gpu_info)
###Output
Fri Jan 8 09:04:20 2021
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 460.27.04 Driver Version: 418.67 CUDA Version: 10.1 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 Tesla P100-PCIE... Off | 00000000:00:04.0 Off | 0 |
| N/A 32C P0 25W / 250W | 0MiB / 16280MiB | 0% Default |
| | | ERR! |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
| No running processes found |
+-----------------------------------------------------------------------------+
###Markdown
CFG
###Code
CONFIG_NAME = 'config08.yml'
TITLE = '08t-resnext50-512'
! git clone https://github.com/raijin0704/cassava.git
# ====================================================
# CFG
# ====================================================
import yaml
CONFIG_PATH = f'./cassava/config/{CONFIG_NAME}'
with open(CONFIG_PATH) as f:
config = yaml.load(f)
INFO = config['info']
TAG = config['tag']
CFG = config['cfg']
CFG['train'] = True
CFG['inference'] = False
# CFG['debug'] = True
if CFG['debug']:
CFG['epochs'] = 1
assert INFO['TITLE'] == TITLE
###Output
Cloning into 'cassava'...
remote: Enumerating objects: 55, done.
remote: Counting objects: 100% (55/55), done.
remote: Compressing objects: 100% (48/48), done.
remote: Total 55 (delta 33), reused 10 (delta 5), pack-reused 0
Unpacking objects: 100% (55/55), done.
###Markdown
Environment-specific setup for Colab & Kaggle notebooks colab
###Code
def _colab_kaggle_authority():
from googleapiclient.discovery import build
import io, os
from googleapiclient.http import MediaIoBaseDownload
drive_service = build('drive', 'v3')
results = drive_service.files().list(
q="name = 'kaggle.json'", fields="files(id)").execute()
kaggle_api_key = results.get('files', [])
filename = "/root/.kaggle/kaggle.json"
os.makedirs(os.path.dirname(filename), exist_ok=True)
request = drive_service.files().get_media(fileId=kaggle_api_key[0]['id'])
fh = io.FileIO(filename, 'wb')
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
print("Download %d%%." % int(status.progress() * 100))
os.chmod(filename, 600)
def _install_apex():
import os
import subprocess
import sys
# import time
subprocess.run('git clone https://github.com/NVIDIA/apex'.split(' '))
# time.sleep(10)
os.chdir('apex')
subprocess.run('pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" .'.split(' '))
os.chdir('..')
def process_colab():
import subprocess
    # mount Google Drive
from google.colab import drive
drive.mount('/content/drive')
    # set up Google Cloud credentials
from google.colab import auth
auth.authenticate_user()
    # Kaggle settings
# _colab_kaggle_authority()
# subprocess.run('pip install --upgrade --force-reinstall --no-deps kaggle'.split(' '))
    # install libraries
subprocess.run('pip install --upgrade opencv-python'.split(' '))
subprocess.run('pip install --upgrade albumentations'.split(' '))
subprocess.run('pip install timm'.split(' '))
# if CFG['apex']:
# print('installing apex')
# _install_apex()
# print('done')
    # set the various paths
DATA_PATH = '/content/drive/Shareddrives/便利用/kaggle/cassava/input/'
OUTPUT_DIR = './output/'
NOTEBOOK_PATH = f'/content/drive/Shareddrives/便利用/kaggle/cassava/notebook/{TITLE}.ipynb'
return DATA_PATH, OUTPUT_DIR, NOTEBOOK_PATH
###Output
_____no_output_____
###Markdown
kaggle notebook
###Code
def _kaggle_gcp_authority():
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
user_credential = user_secrets.get_gcloud_credential()
user_secrets.set_tensorflow_credential(user_credential)
def process_kaggle():
    # GCP settings
_kaggle_gcp_authority()
    # set the various paths
DATA_PATH = '../input/cassava-leaf-disease-classification/'
OUTPUT_DIR = './'
NOTEBOOK_PATH = './__notebook__.ipynb'
# system path
import sys
sys.path.append('../input/pytorch-image-models/pytorch-image-models-master')
return DATA_PATH, OUTPUT_DIR, NOTEBOOK_PATH
###Output
_____no_output_____
###Markdown
Common
###Code
def process_common():
    # install libraries
import subprocess
subprocess.run('pip install mlflow'.split(' '))
    # environment variables
import os
os.environ["GCLOUD_PROJECT"] = INFO['PROJECT_ID']
try:
from google.colab import auth
except ImportError:
DATA_PATH, OUTPUT_DIR, NOTEBOOK_PATH = process_kaggle()
else:
DATA_PATH, OUTPUT_DIR, NOTEBOOK_PATH = process_colab()
finally:
process_common()
###Output
Mounted at /content/drive
###Markdown
install apex
###Code
if CFG['apex']:
try:
import apex
except Exception:
! git clone https://github.com/NVIDIA/apex.git
% cd apex
!pip install --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" .
%cd ..
###Output
Cloning into 'apex'...
remote: Enumerating objects: 7872, done.
remote: Total 7872 (delta 0), reused 0 (delta 0), pack-reused 7872
Receiving objects: 100% (7872/7872), 13.98 MiB | 29.04 MiB/s, done.
Resolving deltas: 100% (5374/5374), done.
/content/apex
/usr/local/lib/python3.6/dist-packages/pip/_internal/commands/install.py:283: UserWarning: Disabling all use of wheels due to the use of --build-options / --global-options / --install-options.
cmdoptions.check_install_build_global(options)
Processing /content/apex
Skipping wheel build for apex, due to binaries being disabled for it.
Installing collected packages: apex
    Running setup.py install for apex ... done
Successfully installed apex-0.1
/content
###Markdown
Library
###Code
# ====================================================
# Library
# ====================================================
import os
import datetime
import math
import time
import random
import glob
import shutil
from pathlib import Path
from contextlib import contextmanager
from collections import defaultdict, Counter
import scipy as sp
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedKFold
from tqdm.auto import tqdm
from functools import partial
import cv2
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam, SGD
import torchvision.models as models
from torch.nn.parameter import Parameter
from torch.utils.data import DataLoader, Dataset
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, CosineAnnealingLR, ReduceLROnPlateau
from albumentations import (
Compose, OneOf, Normalize, Resize, RandomResizedCrop, RandomCrop, HorizontalFlip, VerticalFlip,
RandomBrightness, RandomContrast, RandomBrightnessContrast, Rotate, ShiftScaleRotate, Cutout,
IAAAdditiveGaussianNoise, Transpose
)
from albumentations.pytorch import ToTensorV2
from albumentations import ImageOnlyTransform
import timm
import mlflow
import warnings
warnings.filterwarnings('ignore')
if CFG['apex']:
from apex import amp
if CFG['debug']:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
else:
device = torch.device('cuda')
start_time = datetime.datetime.now()
start_time_str = start_time.strftime('%m%d%H%M')
###Output
_____no_output_____
###Markdown
Directory settings
###Code
# ====================================================
# Directory settings
# ====================================================
if os.path.exists(OUTPUT_DIR):
shutil.rmtree(OUTPUT_DIR)
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
###Output
_____no_output_____
###Markdown
save basic files
###Code
# with open(f'{OUTPUT_DIR}/{start_time_str}_TAG.json', 'w') as f:
# json.dump(TAG, f, indent=4)
# with open(f'{OUTPUT_DIR}/{start_time_str}_CFG.json', 'w') as f:
# json.dump(CFG, f, indent=4)
import shutil
notebook_path = f'{OUTPUT_DIR}/{start_time_str}_{TITLE}.ipynb'
shutil.copy2(NOTEBOOK_PATH, notebook_path)
###Output
_____no_output_____
###Markdown
Data Loading
###Code
train = pd.read_csv(f'{DATA_PATH}/train.csv')
test = pd.read_csv(f'{DATA_PATH}/sample_submission.csv')
label_map = pd.read_json(f'{DATA_PATH}/label_num_to_disease_map.json',
orient='index')
if CFG['debug']:
train = train.sample(n=1000, random_state=CFG['seed']).reset_index(drop=True)
###Output
_____no_output_____
###Markdown
Utils
###Code
# ====================================================
# Utils
# ====================================================
def get_score(y_true, y_pred):
return accuracy_score(y_true, y_pred)
@contextmanager
def timer(name):
t0 = time.time()
LOGGER.info(f'[{name}] start')
yield
LOGGER.info(f'[{name}] done in {time.time() - t0:.0f} s.')
def init_logger(log_file=OUTPUT_DIR+'train.log'):
from logging import getLogger, FileHandler, Formatter, StreamHandler
from logging import INFO as INFO_
logger = getLogger(__name__)
logger.setLevel(INFO_)
handler1 = StreamHandler()
handler1.setFormatter(Formatter("%(message)s"))
handler2 = FileHandler(filename=log_file)
handler2.setFormatter(Formatter("%(message)s"))
logger.addHandler(handler1)
logger.addHandler(handler2)
return logger
logger_path = OUTPUT_DIR+f'{start_time_str}_train.log'
LOGGER = init_logger(logger_path)
def seed_torch(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_torch(seed=CFG['seed'])
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
def __init__(self, patience=7, verbose=False, save_path='checkpoint.pt',
counter=0, best_score=None, save_latest_path=None):
"""
Args:
patience (int): How long to wait after last time validation loss improved.
Default: 7
verbose (bool): If True, prints a message for each validation loss improvement.
Default: False
save_path (str): Directory for saving a model.
Default: "'checkpoint.pt'"
"""
self.patience = patience
self.verbose = verbose
self.save_path = save_path
self.counter = counter
self.best_score = best_score
self.save_latest_path = save_latest_path
self.early_stop = False
self.val_loss_min = np.Inf
def __call__(self, val_loss, model, preds, epoch):
score = -val_loss
if self.best_score is None:
self.best_score = score
self.save_checkpoint(val_loss, model, preds, epoch)
elif score >= self.best_score:
self.best_score = score
self.save_checkpoint(val_loss, model, preds, epoch)
self.counter = 0
        # stop training if the score becomes NaN
elif math.isnan(score):
self.early_stop = True
else:
self.counter += 1
if self.save_latest_path is not None:
self.save_latest(val_loss, model, preds, epoch, score)
if self.verbose:
print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
def save_checkpoint(self, val_loss, model, preds, epoch):
'''Saves model when validation loss decrease.'''
if self.verbose:
print(f'Validation loss decreased ({self.val_loss_min:.10f} --> {val_loss:.10f}). Saving model ...')
torch.save({'model': model.state_dict(), 'preds': preds,
'epoch' : epoch, 'best_score' : self.best_score, 'counter' : self.counter},
self.save_path)
self.val_loss_min = val_loss
def save_latest(self, val_loss, model, preds, epoch, score):
'''Saves latest model.'''
torch.save({'model': model.state_dict(), 'preds': preds,
'epoch' : epoch, 'score' : score, 'counter' : self.counter},
self.save_latest_path)
self.val_loss_min = val_loss
###Output
_____no_output_____
###Markdown
CV split
###Code
folds = train.copy()
Fold = StratifiedKFold(n_splits=CFG['n_fold'], shuffle=True, random_state=CFG['seed'])
for n, (train_index, val_index) in enumerate(Fold.split(folds, folds[CFG['target_col']])):
folds.loc[val_index, 'fold'] = int(n)
folds['fold'] = folds['fold'].astype(int)
print(folds.groupby(['fold', CFG['target_col']]).size())
###Output
fold label
0 0 218
1 438
2 477
3 2631
4 516
1 0 218
1 438
2 477
3 2631
4 516
2 0 217
1 438
2 477
3 2632
4 515
3 0 217
1 438
2 477
3 2632
4 515
4 0 217
1 437
2 478
3 2632
4 515
dtype: int64
###Markdown
Dataset
###Code
# ====================================================
# Dataset
# ====================================================
class TrainDataset(Dataset):
def __init__(self, df, transform=None):
self.df = df
self.file_names = df['image_id'].values
self.labels = df['label'].values
self.transform = transform
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
file_name = self.file_names[idx]
file_path = f'{DATA_PATH}/train_images/{file_name}'
image = cv2.imread(file_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if self.transform:
augmented = self.transform(image=image)
image = augmented['image']
label = torch.tensor(self.labels[idx]).long()
return image, label
class TestDataset(Dataset):
def __init__(self, df, transform=None):
self.df = df
self.file_names = df['image_id'].values
self.transform = transform
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
file_name = self.file_names[idx]
file_path = f'{DATA_PATH}/test_images/{file_name}'
image = cv2.imread(file_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if self.transform:
augmented = self.transform(image=image)
image = augmented['image']
return image
# train_dataset = TrainDataset(train, transform=None)
# for i in range(1):
# image, label = train_dataset[i]
# plt.imshow(image)
# plt.title(f'label: {label}')
# plt.show()
###Output
_____no_output_____
###Markdown
Transforms
###Code
def _get_augmentations(aug_list):
process = []
for aug in aug_list:
if aug == 'Resize':
process.append(Resize(CFG['size'], CFG['size']))
elif aug == 'RandomResizedCrop':
process.append(RandomResizedCrop(CFG['size'], CFG['size']))
elif aug == 'Transpose':
process.append(Transpose(p=0.5))
elif aug == 'HorizontalFlip':
process.append(HorizontalFlip(p=0.5))
elif aug == 'VerticalFlip':
process.append(VerticalFlip(p=0.5))
elif aug == 'ShiftScaleRotate':
process.append(ShiftScaleRotate(p=0.5))
elif aug == 'Normalize':
process.append(Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
))
else:
raise ValueError(f'{aug} is not suitable')
process.append(ToTensorV2())
return process
# ====================================================
# Transforms
# ====================================================
def get_transforms(*, data):
if data == 'train':
return Compose(
_get_augmentations(TAG['augmentation'])
)
elif data == 'valid':
return Compose(
_get_augmentations(['Resize', 'Normalize'])
)
# train_dataset = TrainDataset(train, transform=get_transforms(data='train'))
# for i in range(1):
# image, label = train_dataset[i]
# plt.imshow(image[0])
# plt.title(f'label: {label}')
# plt.show()
###Output
_____no_output_____
###Markdown
Bi-tempered logistic loss
###Code
def log_t(u, t):
"""Compute log_t for `u'."""
if t==1.0:
return u.log()
else:
return (u.pow(1.0 - t) - 1.0) / (1.0 - t)
def exp_t(u, t):
"""Compute exp_t for `u'."""
if t==1:
return u.exp()
else:
return (1.0 + (1.0-t)*u).relu().pow(1.0 / (1.0 - t))
def compute_normalization_fixed_point(activations, t, num_iters):
"""Returns the normalization value for each example (t > 1.0).
Args:
activations: A multi-dimensional tensor with last dimension `num_classes`.
t: Temperature 2 (> 1.0 for tail heaviness).
num_iters: Number of iterations to run the method.
Return: A tensor of same shape as activation with the last dimension being 1.
"""
mu, _ = torch.max(activations, -1, keepdim=True)
normalized_activations_step_0 = activations - mu
normalized_activations = normalized_activations_step_0
for _ in range(num_iters):
logt_partition = torch.sum(
exp_t(normalized_activations, t), -1, keepdim=True)
normalized_activations = normalized_activations_step_0 * \
logt_partition.pow(1.0-t)
logt_partition = torch.sum(
exp_t(normalized_activations, t), -1, keepdim=True)
normalization_constants = - log_t(1.0 / logt_partition, t) + mu
return normalization_constants
def compute_normalization_binary_search(activations, t, num_iters):
"""Returns the normalization value for each example (t < 1.0).
Args:
activations: A multi-dimensional tensor with last dimension `num_classes`.
t: Temperature 2 (< 1.0 for finite support).
num_iters: Number of iterations to run the method.
Return: A tensor of same rank as activation with the last dimension being 1.
"""
mu, _ = torch.max(activations, -1, keepdim=True)
normalized_activations = activations - mu
effective_dim = \
torch.sum(
(normalized_activations > -1.0 / (1.0-t)).to(torch.int32),
dim=-1, keepdim=True).to(activations.dtype)
shape_partition = activations.shape[:-1] + (1,)
lower = torch.zeros(shape_partition, dtype=activations.dtype, device=activations.device)
upper = -log_t(1.0/effective_dim, t) * torch.ones_like(lower)
for _ in range(num_iters):
logt_partition = (upper + lower)/2.0
sum_probs = torch.sum(
exp_t(normalized_activations - logt_partition, t),
dim=-1, keepdim=True)
update = (sum_probs < 1.0).to(activations.dtype)
lower = torch.reshape(
lower * update + (1.0-update) * logt_partition,
shape_partition)
upper = torch.reshape(
upper * (1.0 - update) + update * logt_partition,
shape_partition)
logt_partition = (upper + lower)/2.0
return logt_partition + mu
class ComputeNormalization(torch.autograd.Function):
"""
Class implementing custom backward pass for compute_normalization. See compute_normalization.
"""
@staticmethod
def forward(ctx, activations, t, num_iters):
if t < 1.0:
normalization_constants = compute_normalization_binary_search(activations, t, num_iters)
else:
normalization_constants = compute_normalization_fixed_point(activations, t, num_iters)
ctx.save_for_backward(activations, normalization_constants)
ctx.t=t
return normalization_constants
@staticmethod
def backward(ctx, grad_output):
activations, normalization_constants = ctx.saved_tensors
t = ctx.t
normalized_activations = activations - normalization_constants
probabilities = exp_t(normalized_activations, t)
escorts = probabilities.pow(t)
escorts = escorts / escorts.sum(dim=-1, keepdim=True)
grad_input = escorts * grad_output
return grad_input, None, None
def compute_normalization(activations, t, num_iters=5):
"""Returns the normalization value for each example.
Backward pass is implemented.
Args:
activations: A multi-dimensional tensor with last dimension `num_classes`.
t: Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).
num_iters: Number of iterations to run the method.
Return: A tensor of same rank as activation with the last dimension being 1.
"""
return ComputeNormalization.apply(activations, t, num_iters)
def tempered_sigmoid(activations, t, num_iters = 5):
"""Tempered sigmoid function.
Args:
activations: Activations for the positive class for binary classification.
t: Temperature tensor > 0.0.
num_iters: Number of iterations to run the method.
Returns:
A probabilities tensor.
"""
internal_activations = torch.stack([activations,
torch.zeros_like(activations)],
dim=-1)
internal_probabilities = tempered_softmax(internal_activations, t, num_iters)
return internal_probabilities[..., 0]
def tempered_softmax(activations, t, num_iters=5):
"""Tempered softmax function.
Args:
activations: A multi-dimensional tensor with last dimension `num_classes`.
t: Temperature > 1.0.
num_iters: Number of iterations to run the method.
Returns:
A probabilities tensor.
"""
if t == 1.0:
return activations.softmax(dim=-1)
normalization_constants = compute_normalization(activations, t, num_iters)
return exp_t(activations - normalization_constants, t)
def bi_tempered_binary_logistic_loss(activations,
labels,
t1,
t2,
label_smoothing = 0.0,
num_iters=5,
reduction='mean'):
"""Bi-Tempered binary logistic loss.
Args:
activations: A tensor containing activations for class 1.
labels: A tensor with shape as activations, containing probabilities for class 1
t1: Temperature 1 (< 1.0 for boundedness).
t2: Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).
label_smoothing: Label smoothing
num_iters: Number of iterations to run the method.
Returns:
A loss tensor.
"""
internal_activations = torch.stack([activations,
torch.zeros_like(activations)],
dim=-1)
internal_labels = torch.stack([labels.to(activations.dtype),
1.0 - labels.to(activations.dtype)],
dim=-1)
return bi_tempered_logistic_loss(internal_activations,
internal_labels,
t1,
t2,
label_smoothing = label_smoothing,
num_iters = num_iters,
reduction = reduction)
def bi_tempered_logistic_loss(activations,
labels,
t1,
t2,
label_smoothing=0.0,
num_iters=5,
reduction = 'mean'):
"""Bi-Tempered Logistic Loss.
Args:
activations: A multi-dimensional tensor with last dimension `num_classes`.
labels: A tensor with shape and dtype as activations (onehot),
or a long tensor of one dimension less than activations (pytorch standard)
t1: Temperature 1 (< 1.0 for boundedness).
t2: Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).
label_smoothing: Label smoothing parameter between [0, 1). Default 0.0.
num_iters: Number of iterations to run the method. Default 5.
reduction: ``'none'`` | ``'mean'`` | ``'sum'``. Default ``'mean'``.
``'none'``: No reduction is applied, return shape is shape of
activations without the last dimension.
``'mean'``: Loss is averaged over minibatch. Return shape (1,)
``'sum'``: Loss is summed over minibatch. Return shape (1,)
Returns:
A loss tensor.
"""
if len(labels.shape)<len(activations.shape): #not one-hot
labels_onehot = torch.zeros_like(activations)
labels_onehot.scatter_(1, labels[..., None], 1)
else:
labels_onehot = labels
if label_smoothing > 0:
num_classes = labels_onehot.shape[-1]
labels_onehot = ( 1 - label_smoothing * num_classes / (num_classes - 1) ) \
* labels_onehot + \
label_smoothing / (num_classes - 1)
probabilities = tempered_softmax(activations, t2, num_iters)
loss_values = labels_onehot * log_t(labels_onehot + 1e-10, t1) \
- labels_onehot * log_t(probabilities, t1) \
- labels_onehot.pow(2.0 - t1) / (2.0 - t1) \
+ probabilities.pow(2.0 - t1) / (2.0 - t1)
loss_values = loss_values.sum(dim = -1) #sum over classes
if reduction == 'none':
return loss_values
if reduction == 'sum':
return loss_values.sum()
if reduction == 'mean':
return loss_values.mean()
###Output
_____no_output_____
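###Markdown
Quick sanity check of the loss functions above (a hedged sketch, not part of the original pipeline; the t1 / t2 / label_smoothing values here are illustrative, the real ones come from CFG):
###Code
# call the bi-tempered loss on dummy logits and integer labels to confirm shapes and gradients
_logits = torch.randn(4, CFG['target_size'], requires_grad=True)
_labels = torch.randint(0, CFG['target_size'], (4,))
_loss = bi_tempered_logistic_loss(_logits, _labels, t1=0.8, t2=1.2, label_smoothing=0.05)
_loss.backward()  # gradients flow through ComputeNormalization's custom backward
###Output
_____no_output_____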
###Markdown
MODEL
###Code
# ====================================================
# MODEL
# ====================================================
class CustomModel(nn.Module):
def __init__(self, model_name, pretrained=False):
super().__init__()
self.model = timm.create_model(model_name, pretrained=pretrained)
if hasattr(self.model, 'classifier'):
n_features = self.model.classifier.in_features
self.model.classifier = nn.Linear(n_features, CFG['target_size'])
elif hasattr(self.model, 'fc'):
n_features = self.model.fc.in_features
self.model.fc = nn.Linear(n_features, CFG['target_size'])
def forward(self, x):
x = self.model(x)
return x
model = CustomModel(model_name=TAG['model_name'], pretrained=False)
train_dataset = TrainDataset(train, transform=get_transforms(data='train'))
train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True,
num_workers=4, pin_memory=True, drop_last=True)
for image, label in train_loader:
output = model(image)
print(output)
break
###Output
tensor([[ 0.1942, -0.0228, 0.0547, 0.3079, -0.0856],
[ 0.1010, 0.0974, 0.0921, 0.5386, 0.1265],
[ 0.1124, 0.0810, 0.0548, 0.4364, -0.0349],
[ 0.1089, 0.0594, 0.0561, 0.5599, 0.1196]],
grad_fn=<AddmmBackward>)
###Markdown
Helper functions
###Code
# ====================================================
# Helper functions
# ====================================================
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (remain %s)' % (asMinutes(s), asMinutes(rs))
# ====================================================
# loss
# ====================================================
def get_loss(criterion, y_preds, labels):
if TAG['criterion']=='CrossEntropyLoss':
loss = criterion(y_preds, labels)
elif TAG['criterion'] == 'bi_tempered_logistic_loss':
loss = criterion(y_preds, labels, t1=CFG['bi_tempered_loss_t1'], t2=CFG['bi_tempered_loss_t2'])
return loss
# ====================================================
# Helper functions
# ====================================================
def train_fn(train_loader, model, criterion, optimizer, epoch, scheduler, device):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
scores = AverageMeter()
# switch to train mode
model.train()
start = end = time.time()
global_step = 0
for step, (images, labels) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
images = images.to(device)
labels = labels.to(device)
batch_size = labels.size(0)
y_preds = model(images)
loss = get_loss(criterion, y_preds, labels)
# record loss
losses.update(loss.item(), batch_size)
if CFG['gradient_accumulation_steps'] > 1:
loss = loss / CFG['gradient_accumulation_steps']
if CFG['apex']:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), CFG['max_grad_norm'])
if (step + 1) % CFG['gradient_accumulation_steps'] == 0:
optimizer.step()
optimizer.zero_grad()
global_step += 1
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if step % CFG['print_freq'] == 0 or step == (len(train_loader)-1):
print('Epoch: [{0}][{1}/{2}] '
'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
'Elapsed {remain:s} '
'Loss: {loss.val:.4f}({loss.avg:.4f}) '
'Grad: {grad_norm:.4f} '
#'LR: {lr:.6f} '
.format(
epoch+1, step, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses,
remain=timeSince(start, float(step+1)/len(train_loader)),
grad_norm=grad_norm,
#lr=scheduler.get_lr()[0],
))
return losses.avg
def valid_fn(valid_loader, model, criterion, device):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
scores = AverageMeter()
# switch to evaluation mode
model.eval()
preds = []
start = end = time.time()
for step, (images, labels) in enumerate(valid_loader):
# measure data loading time
data_time.update(time.time() - end)
images = images.to(device)
labels = labels.to(device)
batch_size = labels.size(0)
# compute loss
with torch.no_grad():
y_preds = model(images)
loss = get_loss(criterion, y_preds, labels)
losses.update(loss.item(), batch_size)
# record accuracy
preds.append(y_preds.softmax(1).to('cpu').numpy())
if CFG['gradient_accumulation_steps'] > 1:
loss = loss / CFG['gradient_accumulation_steps']
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if step % CFG['print_freq'] == 0 or step == (len(valid_loader)-1):
print('EVAL: [{0}/{1}] '
'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
'Elapsed {remain:s} '
'Loss: {loss.val:.4f}({loss.avg:.4f}) '
.format(
step, len(valid_loader), batch_time=batch_time,
data_time=data_time, loss=losses,
remain=timeSince(start, float(step+1)/len(valid_loader)),
))
predictions = np.concatenate(preds)
return losses.avg, predictions
def inference(model, states, test_loader, device):
model.to(device)
tk0 = tqdm(enumerate(test_loader), total=len(test_loader))
probs = []
for i, (images) in tk0:
images = images.to(device)
avg_preds = []
for state in states:
# model.load_state_dict(state['model'])
model.load_state_dict(state)
model.eval()
with torch.no_grad():
y_preds = model(images)
avg_preds.append(y_preds.softmax(1).to('cpu').numpy())
avg_preds = np.mean(avg_preds, axis=0)
probs.append(avg_preds)
probs = np.concatenate(probs)
return probs
###Output
_____no_output_____
###Markdown
Train loop
###Code
# ====================================================
# scheduler
# ====================================================
def get_scheduler(optimizer):
if TAG['scheduler']=='ReduceLROnPlateau':
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=CFG['factor'], patience=CFG['patience'], verbose=True, eps=CFG['eps'])
elif TAG['scheduler']=='CosineAnnealingLR':
scheduler = CosineAnnealingLR(optimizer, T_max=CFG['T_max'], eta_min=CFG['min_lr'], last_epoch=-1)
elif TAG['scheduler']=='CosineAnnealingWarmRestarts':
scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=CFG['T_0'], T_mult=1, eta_min=CFG['min_lr'], last_epoch=-1)
return scheduler
# ====================================================
# criterion
# ====================================================
def get_criterion():
if TAG['criterion']=='CrossEntropyLoss':
criterion = nn.CrossEntropyLoss()
elif TAG['criterion'] == 'bi_tempered_logistic_loss':
criterion = bi_tempered_logistic_loss
return criterion
# ====================================================
# Train loop
# ====================================================
def train_loop(folds, fold):
LOGGER.info(f"========== fold: {fold} training ==========")
if not CFG['debug']:
mlflow.set_tag('running.fold', str(fold))
# ====================================================
# loader
# ====================================================
trn_idx = folds[folds['fold'] != fold].index
val_idx = folds[folds['fold'] == fold].index
train_folds = folds.loc[trn_idx].reset_index(drop=True)
valid_folds = folds.loc[val_idx].reset_index(drop=True)
train_dataset = TrainDataset(train_folds,
transform=get_transforms(data='train'))
valid_dataset = TrainDataset(valid_folds,
transform=get_transforms(data='valid'))
train_loader = DataLoader(train_dataset,
batch_size=CFG['batch_size'],
shuffle=True,
num_workers=CFG['num_workers'], pin_memory=True, drop_last=True)
valid_loader = DataLoader(valid_dataset,
batch_size=CFG['batch_size'],
shuffle=False,
num_workers=CFG['num_workers'], pin_memory=True, drop_last=False)
# ====================================================
# model & optimizer & criterion
# ====================================================
best_model_path = OUTPUT_DIR+f'{TAG["model_name"]}_fold{fold}_best.pth'
latest_model_path = OUTPUT_DIR+f'{TAG["model_name"]}_fold{fold}_latest.pth'
model = CustomModel(TAG['model_name'], pretrained=True)
model.to(device)
    # resume from previously saved weights if a checkpoint exists
if os.path.isfile(latest_model_path):
state_latest = torch.load(latest_model_path)
state_best = torch.load(best_model_path)
model.load_state_dict(state_latest['model'])
epoch_start = state_latest['epoch']+1
# er_best_score = state_latest['score']
er_counter = state_latest['counter']
er_best_score = state_best['best_score']
LOGGER.info(f'Retrain model in epoch:{epoch_start}, best_score:{er_best_score:.3f}, counter:{er_counter}')
else:
epoch_start = 0
er_best_score = None
er_counter = 0
optimizer = Adam(model.parameters(), lr=CFG['lr'], weight_decay=CFG['weight_decay'], amsgrad=False)
scheduler = get_scheduler(optimizer)
criterion = get_criterion()
# ====================================================
# apex
# ====================================================
if CFG['apex']:
model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)
# ====================================================
# loop
# ====================================================
# best_score = 0.
# best_loss = np.inf
early_stopping = EarlyStopping(
patience=CFG['early_stopping_round'],
verbose=True,
save_path=best_model_path,
counter=er_counter, best_score=er_best_score,
save_latest_path=latest_model_path)
for epoch in range(epoch_start, CFG['epochs']):
start_time = time.time()
# train
avg_loss = train_fn(train_loader, model, criterion, optimizer, epoch, scheduler, device)
# eval
avg_val_loss, preds = valid_fn(valid_loader, model, criterion, device)
valid_labels = valid_folds[CFG['target_col']].values
# early stopping
early_stopping(avg_val_loss, model, preds, epoch)
if early_stopping.early_stop:
print(f'Epoch {epoch+1} - early stopping')
break
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(avg_val_loss)
elif isinstance(scheduler, CosineAnnealingLR):
scheduler.step()
elif isinstance(scheduler, CosineAnnealingWarmRestarts):
scheduler.step()
# scoring
score = get_score(valid_labels, preds.argmax(1))
elapsed = time.time() - start_time
LOGGER.info(f'Epoch {epoch+1} - avg_train_loss: {avg_loss:.4f} avg_val_loss: {avg_val_loss:.4f} time: {elapsed:.0f}s')
LOGGER.info(f'Epoch {epoch+1} - Accuracy: {score}')
# log mlflow
if not CFG['debug']:
mlflow.log_metric(f"fold{fold} avg_train_loss", avg_loss, step=epoch)
mlflow.log_metric(f"fold{fold} avg_valid_loss", avg_val_loss, step=epoch)
mlflow.log_metric(f"fold{fold} score", score, step=epoch)
mlflow.log_metric(f"fold{fold} lr", scheduler.get_last_lr()[0], step=epoch)
mlflow.log_artifact(best_model_path)
if os.path.isfile(latest_model_path):
mlflow.log_artifact(latest_model_path)
check_point = torch.load(best_model_path)
valid_folds[[str(c) for c in range(5)]] = check_point['preds']
valid_folds['preds'] = check_point['preds'].argmax(1)
return valid_folds
# ====================================================
# main
# ====================================================
def get_result(result_df):
preds = result_df['preds'].values
labels = result_df[CFG['target_col']].values
score = get_score(labels, preds)
LOGGER.info(f'Score: {score:<.5f}')
return score
def main():
"""
Prepare: 1.train 2.test 3.submission 4.folds
"""
if CFG['train']:
# train
oof_df = pd.DataFrame()
for fold in range(CFG['n_fold']):
if fold in CFG['trn_fold']:
_oof_df = train_loop(folds, fold)
oof_df = pd.concat([oof_df, _oof_df])
LOGGER.info(f"========== fold: {fold} result ==========")
_ = get_result(_oof_df)
# CV result
LOGGER.info(f"========== CV ==========")
score = get_result(oof_df)
# save result
oof_df.to_csv(OUTPUT_DIR+'oof_df.csv', index=False)
# log mlflow
if not CFG['debug']:
mlflow.log_metric('oof score', score)
mlflow.delete_tag('running.fold')
mlflow.log_artifact(OUTPUT_DIR+'oof_df.csv')
if CFG['inference']:
# inference
model = CustomModel(TAG['model_name'], pretrained=False)
states = [torch.load(OUTPUT_DIR+f'{TAG["model_name"]}_fold{fold}_best.pth') for fold in CFG['trn_fold']]
test_dataset = TestDataset(test, transform=get_transforms(data='valid'))
test_loader = DataLoader(test_dataset, batch_size=CFG['batch_size'], shuffle=False,
num_workers=CFG['num_workers'], pin_memory=True)
predictions = inference(model, states, test_loader, device)
# submission
test['label'] = predictions.argmax(1)
test[['image_id', 'label']].to_csv(OUTPUT_DIR+'submission.csv', index=False)
###Output
_____no_output_____
###Markdown
rerun
###Code
def _load_save_point(run_id):
    # get the fold at which the previous run stopped
stop_fold = int(mlflow.get_run(run_id=run_id).to_dictionary()['data']['tags']['running.fold'])
    # restrict training to the remaining folds
CFG['trn_fold'] = [fold for fold in CFG['trn_fold'] if fold>=stop_fold]
    # download any saved .pth model files (including ones from an unfinished run)
client = mlflow.tracking.MlflowClient()
artifacts = [artifact for artifact in client.list_artifacts(run_id) if ".pth" in artifact.path]
for artifact in artifacts:
client.download_artifacts(run_id, artifact.path, OUTPUT_DIR)
def check_have_run():
results = mlflow.search_runs(INFO['EXPERIMENT_ID'])
run_id_list = results[results['tags.mlflow.runName']==TITLE]['run_id'].tolist()
    # first run: no previous run exists
if len(run_id_list) == 0:
run_id = None
    # a previous run exists: resume from its artifacts
else:
assert len(run_id_list)==1
run_id = run_id_list[0]
_load_save_point(run_id)
return run_id
if __name__ == '__main__':
if CFG['debug']:
main()
else:
mlflow.set_tracking_uri(INFO['TRACKING_URI'])
mlflow.set_experiment('single model')
        # if this experiment was already run, resume from where it left off
run_id = check_have_run()
with mlflow.start_run(run_id=run_id, run_name=TITLE):
if run_id is None:
mlflow.log_artifact(CONFIG_PATH)
mlflow.log_param('device', device)
mlflow.set_tags(TAG)
mlflow.log_params(CFG)
mlflow.log_artifact(notebook_path)
main()
mlflow.log_artifacts(OUTPUT_DIR)
shutil.copytree(OUTPUT_DIR, f'{INFO["SHARE_DRIVE_PATH"]}/{TITLE}')
shutil.copy2(CONFIG_PATH, f'{INFO["SHARE_DRIVE_PATH"]}/{TITLE}/{CONFIG_NAME}')
###Output
========== fold: 0 training ==========
|
example/multiple_figure/multiple_figure.ipynb | ###Markdown
Import modules
###Code
import matplotlib.pyplot as plt
from ternary_diagram import TernaryDiagram
import pandas as pd
import os
###Output
_____no_output_____
###Markdown
If you want to arrange them horizontally, you may be able to solve the problem of broken layout by specifying a figsize that is large enough.
###Code
fig, axes = plt.subplots(1, 2, dpi=72, facecolor='white', figsize=(10, 3.6))
fpath_example = '..'
fpath_contour = os.path.join(fpath_example, 'contour', 'example_contour.csv')
fpath_mono_scatter = os.path.join(fpath_example, 'mono_scatter', 'example_mono_scatter.csv')
df_contour = pd.read_csv(fpath_contour)
df_mono_scatter = pd.read_csv(fpath_mono_scatter)
display(df_contour.head(), df_mono_scatter.head())
materials = df_contour.columns[0:3]
td1 = TernaryDiagram(materials=materials, ax=axes[0])
td1.contour(vector=df_contour[materials], z=df_contour['z'])
fig
materials = df_mono_scatter.columns[0:3]
td2 = TernaryDiagram(materials=materials, ax=axes[1])
td2.scatter(vector=df_mono_scatter[materials])
fig
fig.savefig('multiple_figure.png', dpi=144)
###Output
_____no_output_____
###Markdown
Import modules
###Code
import matplotlib.pyplot as plt
from ternary_diagram import TernaryDiagram
import pandas as pd
import os
###Output
_____no_output_____
###Markdown
If you want to arrange them horizontally, you may be able to solve the problem of broken layout by specifying a figsize that is large enough.
###Code
fig, axes = plt.subplots(1, 2, dpi=72, facecolor='white', figsize=(10, 3.6))
fpath_example = '..'
fpath_contour = os.path.join(fpath_example, 'contour', 'example_contour.csv')
fpath_mono_scatter = os.path.join(fpath_example, 'mono_scatter', 'example_mono_scatter.csv')
df_contour = pd.read_csv(fpath_contour)
df_mono_scatter = pd.read_csv(fpath_mono_scatter)
display(df_contour.head(), df_mono_scatter.head())
materials = df_contour.columns[0:3]
td1 = TernaryDiagram(materials=materials, ax=axes[0])
td1.contour(vector=df_contour[materials], z=df_contour['z'])
fig
materials = df_mono_scatter.columns[0:3]
td2 = TernaryDiagram(materials=materials, ax=axes[1])
td2.scatter(vector=df_mono_scatter[materials])
fig
td2.fig.savefig('multiple_figure.png', dpi=144)
###Output
_____no_output_____ |
Tree-based_IDS_GlobeCom19.ipynb | ###Markdown
Tree-Based Intelligent Intrusion Detection System in Internet of Vehicles This is the code for the paper entitled "[**Tree-Based Intelligent Intrusion Detection System in Internet of Vehicles**](https://arxiv.org/pdf/1910.08635.pdf)" published in IEEE GlobeCom 2019. Authors: Li Yang ([email protected]), Abdallah Moubayed, Ismail Hamieh, and Abdallah Shami. Organization: The Optimized Computing and Communications (OC2) Lab, ECE Department, Western University. L. Yang, A. Moubayed, I. Hamieh and A. Shami, "Tree-Based Intelligent Intrusion Detection System in Internet of Vehicles," 2019 IEEE Global Communications Conference (GLOBECOM), 2019, pp. 1-6, doi: 10.1109/GLOBECOM38437.2019.9013892. Import libraries
###Code
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder,Imputer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score,precision_recall_fscore_support
from sklearn.metrics import f1_score
from sklearn.ensemble import RandomForestClassifier,ExtraTreesClassifier
from sklearn.tree import DecisionTreeClassifier
import xgboost as xgb
from xgboost import plot_importance
###Output
_____no_output_____
###Markdown
Read the CICIDS2017 dataset. The CICIDS2017 dataset is publicly available at: https://www.unb.ca/cic/datasets/ids-2017.html Due to the large size of this dataset, the sampled subsets of CICIDS2017 are used. The subsets are in the "data" folder.
###Code
#Read dataset
df = pd.read_csv('./data/CICIDS2017.csv')
df
df.Label.value_counts()
###Output
_____no_output_____
###Markdown
Data sampling. Due to the space limit of GitHub files, we sample a small-sized subset for model learning using random sampling.
###Code
# Randomly sample instances from majority classes
df_minor = df[(df['Label']=='WebAttack')|(df['Label']=='Bot')|(df['Label']=='Infiltration')]
df_BENIGN = df[(df['Label']=='BENIGN')]
df_BENIGN = df_BENIGN.sample(n=None, frac=0.01, replace=False, weights=None, random_state=None, axis=0)
df_DoS = df[(df['Label']=='DoS')]
df_DoS = df_DoS.sample(n=None, frac=0.05, replace=False, weights=None, random_state=None, axis=0)
df_PortScan = df[(df['Label']=='PortScan')]
df_PortScan = df_PortScan.sample(n=None, frac=0.05, replace=False, weights=None, random_state=None, axis=0)
df_BruteForce = df[(df['Label']=='BruteForce')]
df_BruteForce = df_BruteForce.sample(n=None, frac=0.2, replace=False, weights=None, random_state=None, axis=0)
df_s = df_BENIGN.append(df_DoS).append(df_PortScan).append(df_BruteForce).append(df_minor)
df_s = df_s.sort_index()
# Save the sampled dataset
df_s.to_csv('./data/CICIDS2017_sample.csv',index=0)
###Output
_____no_output_____
###Markdown
Preprocessing (normalization and padding values)
###Code
df = pd.read_csv('./data/CICIDS2017_sample.csv')
# Min-max normalization
numeric_features = df.dtypes[df.dtypes != 'object'].index
df[numeric_features] = df[numeric_features].apply(
lambda x: (x - x.min()) / (x.max()-x.min()))
# Fill empty values by 0
df = df.fillna(0)
###Output
C:\Users\41364\AppData\Roaming\Python\Python35\site-packages\pandas\compat\_optional.py:106: UserWarning: Pandas requires version '2.6.2' or newer of 'numexpr' (version '2.6.1' currently installed).
warnings.warn(msg, UserWarning)
###Markdown
split train set and test set
###Code
labelencoder = LabelEncoder()
df.iloc[:, -1] = labelencoder.fit_transform(df.iloc[:, -1])
X = df.drop(['Label'],axis=1).values
y = df.iloc[:, -1].values.reshape(-1,1)
y=np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X,y, train_size = 0.8, test_size = 0.2, random_state = 0,stratify = y)
X_train.shape
pd.Series(y_train).value_counts()
###Output
_____no_output_____
###Markdown
Oversampling by SMOTE
###Code
from imblearn.over_sampling import SMOTE
smote=SMOTE(n_jobs=-1,sampling_strategy={4:1500}) # Create 1500 samples for the minority class "4"
X_train, y_train = smote.fit_sample(X_train, y_train)
pd.Series(y_train).value_counts()
###Output
_____no_output_____
###Markdown
Machine learning model training Training four base learners: decision tree, random forest, extra trees, XGBoost
###Code
# Decision tree training and prediction
dt = DecisionTreeClassifier(random_state = 0)
dt.fit(X_train,y_train)
dt_score=dt.score(X_test,y_test)
y_predict=dt.predict(X_test)
y_true=y_test
print('Accuracy of DT: '+ str(dt_score))
precision,recall,fscore,none= precision_recall_fscore_support(y_true, y_predict, average='weighted')
print('Precision of DT: '+(str(precision)))
print('Recall of DT: '+(str(recall)))
print('F1-score of DT: '+(str(fscore)))
print(classification_report(y_true,y_predict))
cm=confusion_matrix(y_true,y_predict)
f,ax=plt.subplots(figsize=(5,5))
sns.heatmap(cm,annot=True,linewidth=0.5,linecolor="red",fmt=".0f",ax=ax)
plt.xlabel("y_pred")
plt.ylabel("y_true")
plt.show()
dt_train=dt.predict(X_train)
dt_test=dt.predict(X_test)
# Random Forest training and prediction
rf = RandomForestClassifier(random_state = 0)
rf.fit(X_train,y_train)
rf_score=rf.score(X_test,y_test)
y_predict=rf.predict(X_test)
y_true=y_test
print('Accuracy of RF: '+ str(rf_score))
precision,recall,fscore,none= precision_recall_fscore_support(y_true, y_predict, average='weighted')
print('Precision of RF: '+(str(precision)))
print('Recall of RF: '+(str(recall)))
print('F1-score of RF: '+(str(fscore)))
print(classification_report(y_true,y_predict))
cm=confusion_matrix(y_true,y_predict)
f,ax=plt.subplots(figsize=(5,5))
sns.heatmap(cm,annot=True,linewidth=0.5,linecolor="red",fmt=".0f",ax=ax)
plt.xlabel("y_pred")
plt.ylabel("y_true")
plt.show()
rf_train=rf.predict(X_train)
rf_test=rf.predict(X_test)
# Extra trees training and prediction
et = ExtraTreesClassifier(random_state = 0)
et.fit(X_train,y_train)
et_score=et.score(X_test,y_test)
y_predict=et.predict(X_test)
y_true=y_test
print('Accuracy of ET: '+ str(et_score))
precision,recall,fscore,none= precision_recall_fscore_support(y_true, y_predict, average='weighted')
print('Precision of ET: '+(str(precision)))
print('Recall of ET: '+(str(recall)))
print('F1-score of ET: '+(str(fscore)))
print(classification_report(y_true,y_predict))
cm=confusion_matrix(y_true,y_predict)
f,ax=plt.subplots(figsize=(5,5))
sns.heatmap(cm,annot=True,linewidth=0.5,linecolor="red",fmt=".0f",ax=ax)
plt.xlabel("y_pred")
plt.ylabel("y_true")
plt.show()
et_train=et.predict(X_train)
et_test=et.predict(X_test)
# XGboost training and prediction
xg = xgb.XGBClassifier(n_estimators = 10)
xg.fit(X_train,y_train)
xg_score=xg.score(X_test,y_test)
y_predict=xg.predict(X_test)
y_true=y_test
print('Accuracy of XGBoost: '+ str(xg_score))
precision,recall,fscore,none= precision_recall_fscore_support(y_true, y_predict, average='weighted')
print('Precision of XGBoost: '+(str(precision)))
print('Recall of XGBoost: '+(str(recall)))
print('F1-score of XGBoost: '+(str(fscore)))
print(classification_report(y_true,y_predict))
cm=confusion_matrix(y_true,y_predict)
f,ax=plt.subplots(figsize=(5,5))
sns.heatmap(cm,annot=True,linewidth=0.5,linecolor="red",fmt=".0f",ax=ax)
plt.xlabel("y_pred")
plt.ylabel("y_true")
plt.show()
xg_train=xg.predict(X_train)
xg_test=xg.predict(X_test)
###Output
_____no_output_____
###Markdown
Stacking model construction (ensemble for 4 base learners)
###Code
# Use the outputs of 4 base models to construct a new ensemble model
base_predictions_train = pd.DataFrame( {
'DecisionTree': dt_train.ravel(),
'RandomForest': rf_train.ravel(),
'ExtraTrees': et_train.ravel(),
'XgBoost': xg_train.ravel(),
})
base_predictions_train.head(5)
dt_train=dt_train.reshape(-1, 1)
et_train=et_train.reshape(-1, 1)
rf_train=rf_train.reshape(-1, 1)
xg_train=xg_train.reshape(-1, 1)
dt_test=dt_test.reshape(-1, 1)
et_test=et_test.reshape(-1, 1)
rf_test=rf_test.reshape(-1, 1)
xg_test=xg_test.reshape(-1, 1)
x_train = np.concatenate(( dt_train, et_train, rf_train, xg_train), axis=1)
x_test = np.concatenate(( dt_test, et_test, rf_test, xg_test), axis=1)
stk = xgb.XGBClassifier().fit(x_train, y_train)
y_predict=stk.predict(x_test)
y_true=y_test
stk_score=accuracy_score(y_true,y_predict)
print('Accuracy of Stacking: '+ str(stk_score))
precision,recall,fscore,none= precision_recall_fscore_support(y_true, y_predict, average='weighted')
print('Precision of Stacking: '+(str(precision)))
print('Recall of Stacking: '+(str(recall)))
print('F1-score of Stacking: '+(str(fscore)))
print(classification_report(y_true,y_predict))
cm=confusion_matrix(y_true,y_predict)
f,ax=plt.subplots(figsize=(5,5))
sns.heatmap(cm,annot=True,linewidth=0.5,linecolor="red",fmt=".0f",ax=ax)
plt.xlabel("y_pred")
plt.ylabel("y_true")
plt.show()
###Output
Accuracy of Stacking: 0.9960292949792641
Precision of Stacking: 0.9960126519428796
Recall of Stacking: 0.9960292949792641
F1-score of Stacking: 0.9960148981765187
precision recall f1-score support
0 1.00 0.99 1.00 4547
1 0.99 0.98 0.98 393
2 0.99 1.00 1.00 554
3 1.00 1.00 1.00 3807
4 0.83 0.71 0.77 7
5 1.00 1.00 1.00 1589
6 0.99 0.99 0.99 436
accuracy 1.00 11333
macro avg 0.97 0.95 0.96 11333
weighted avg 1.00 1.00 1.00 11333
###Markdown
Feature Selection Feature importance
###Code
# Save the feature importance lists generated by four tree-based algorithms
dt_feature = dt.feature_importances_
rf_feature = rf.feature_importances_
et_feature = et.feature_importances_
xgb_feature = xg.feature_importances_
# calculate the average importance value of each feature
avg_feature = (dt_feature + rf_feature + et_feature + xgb_feature)/4
feature=(df.drop(['Label'],axis=1)).columns.values
print ("Features sorted by their score:")
print (sorted(zip(map(lambda x: round(x, 4), avg_feature), feature), reverse=True))
f_list = sorted(zip(map(lambda x: round(x, 4), avg_feature), feature), reverse=True)
len(f_list)
# Select the important features from top-importance to bottom-importance until the accumulated importance reaches 0.9 (out of 1)
Sum = 0
fs = []
for i in range(0, len(f_list)):
Sum = Sum + f_list[i][0]
fs.append(f_list[i][1])
if Sum>=0.9:
break
X_fs = df[fs].values
X_train, X_test, y_train, y_test = train_test_split(X_fs,y, train_size = 0.8, test_size = 0.2, random_state = 0,stratify = y)
X_train.shape
pd.Series(y_train).value_counts()
###Output
_____no_output_____
###Markdown
Oversampling by SMOTE
###Code
from imblearn.over_sampling import SMOTE
smote=SMOTE(n_jobs=-1,sampling_strategy={4:1500})
X_train, y_train = smote.fit_sample(X_train, y_train)
pd.Series(y_train).value_counts()
###Output
_____no_output_____
###Markdown
Machine learning model training after feature selection
###Code
dt = DecisionTreeClassifier(random_state = 0)
dt.fit(X_train,y_train)
dt_score=dt.score(X_test,y_test)
y_predict=dt.predict(X_test)
y_true=y_test
print('Accuracy of DT: '+ str(dt_score))
precision,recall,fscore,none= precision_recall_fscore_support(y_true, y_predict, average='weighted')
print('Precision of DT: '+(str(precision)))
print('Recall of DT: '+(str(recall)))
print('F1-score of DT: '+(str(fscore)))
print(classification_report(y_true,y_predict))
cm=confusion_matrix(y_true,y_predict)
f,ax=plt.subplots(figsize=(5,5))
sns.heatmap(cm,annot=True,linewidth=0.5,linecolor="red",fmt=".0f",ax=ax)
plt.xlabel("y_pred")
plt.ylabel("y_true")
plt.show()
dt_train=dt.predict(X_train)
dt_test=dt.predict(X_test)
rf = RandomForestClassifier(random_state = 0)
rf.fit(X_train,y_train) # the model is trained on the data via the fit function
rf_score=rf.score(X_test,y_test)
y_predict=rf.predict(X_test)
y_true=y_test
print('Accuracy of RF: '+ str(rf_score))
precision,recall,fscore,none= precision_recall_fscore_support(y_true, y_predict, average='weighted')
print('Precision of RF: '+(str(precision)))
print('Recall of RF: '+(str(recall)))
print('F1-score of RF: '+(str(fscore)))
print(classification_report(y_true,y_predict))
cm=confusion_matrix(y_true,y_predict)
f,ax=plt.subplots(figsize=(5,5))
sns.heatmap(cm,annot=True,linewidth=0.5,linecolor="red",fmt=".0f",ax=ax)
plt.xlabel("y_pred")
plt.ylabel("y_true")
plt.show()
rf_train=rf.predict(X_train)
rf_test=rf.predict(X_test)
et = ExtraTreesClassifier(random_state = 0)
et.fit(X_train,y_train)
et_score=et.score(X_test,y_test)
y_predict=et.predict(X_test)
y_true=y_test
print('Accuracy of ET: '+ str(et_score))
precision,recall,fscore,none= precision_recall_fscore_support(y_true, y_predict, average='weighted')
print('Precision of ET: '+(str(precision)))
print('Recall of ET: '+(str(recall)))
print('F1-score of ET: '+(str(fscore)))
print(classification_report(y_true,y_predict))
cm=confusion_matrix(y_true,y_predict)
f,ax=plt.subplots(figsize=(5,5))
sns.heatmap(cm,annot=True,linewidth=0.5,linecolor="red",fmt=".0f",ax=ax)
plt.xlabel("y_pred")
plt.ylabel("y_true")
plt.show()
et_train=et.predict(X_train)
et_test=et.predict(X_test)
xg = xgb.XGBClassifier(n_estimators = 10)
xg.fit(X_train,y_train)
xg_score=xg.score(X_test,y_test)
y_predict=xg.predict(X_test)
y_true=y_test
print('Accuracy of XGBoost: '+ str(xg_score))
precision,recall,fscore,none= precision_recall_fscore_support(y_true, y_predict, average='weighted')
print('Precision of XGBoost: '+(str(precision)))
print('Recall of XGBoost: '+(str(recall)))
print('F1-score of XGBoost: '+(str(fscore)))
print(classification_report(y_true,y_predict))
cm=confusion_matrix(y_true,y_predict)
f,ax=plt.subplots(figsize=(5,5))
sns.heatmap(cm,annot=True,linewidth=0.5,linecolor="red",fmt=".0f",ax=ax)
plt.xlabel("y_pred")
plt.ylabel("y_true")
plt.show()
xg_train=xg.predict(X_train)
xg_test=xg.predict(X_test)
###Output
_____no_output_____
###Markdown
Stacking model construction
###Code
base_predictions_train = pd.DataFrame( {
'DecisionTree': dt_train.ravel(),
'RandomForest': rf_train.ravel(),
'ExtraTrees': et_train.ravel(),
'XgBoost': xg_train.ravel(),
})
base_predictions_train.head(5)
dt_train=dt_train.reshape(-1, 1)
et_train=et_train.reshape(-1, 1)
rf_train=rf_train.reshape(-1, 1)
xg_train=xg_train.reshape(-1, 1)
dt_test=dt_test.reshape(-1, 1)
et_test=et_test.reshape(-1, 1)
rf_test=rf_test.reshape(-1, 1)
xg_test=xg_test.reshape(-1, 1)
x_train = np.concatenate(( dt_train, et_train, rf_train, xg_train), axis=1)
x_test = np.concatenate(( dt_test, et_test, rf_test, xg_test), axis=1)
stk = xgb.XGBClassifier().fit(x_train, y_train)
y_predict=stk.predict(x_test)
y_true=y_test
stk_score=accuracy_score(y_true,y_predict)
print('Accuracy of Stacking: '+ str(stk_score))
precision,recall,fscore,none= precision_recall_fscore_support(y_true, y_predict, average='weighted')
print('Precision of Stacking: '+(str(precision)))
print('Recall of Stacking: '+(str(recall)))
print('F1-score of Stacking: '+(str(fscore)))
print(classification_report(y_true,y_predict))
cm=confusion_matrix(y_true,y_predict)
f,ax=plt.subplots(figsize=(5,5))
sns.heatmap(cm,annot=True,linewidth=0.5,linecolor="red",fmt=".0f",ax=ax)
plt.xlabel("y_pred")
plt.ylabel("y_true")
plt.show()
###Output
Accuracy of Stacking: 0.9955881055325156
Precision of Stacking: 0.9955944258266153
Recall of Stacking: 0.9955881055325156
F1-score of Stacking: 0.9955625491897051
precision recall f1-score support
0 0.99 1.00 1.00 4547
1 1.00 0.97 0.98 393
2 0.99 1.00 1.00 554
3 1.00 1.00 1.00 3807
4 1.00 0.71 0.83 7
5 0.99 1.00 1.00 1589
6 1.00 0.98 0.99 436
accuracy 1.00 11333
macro avg 1.00 0.95 0.97 11333
weighted avg 1.00 1.00 1.00 11333
|
Model/Test Models/GRU with Attentionn_4,000_records_40_Epochs.ipynb | ###Markdown
Italian to English Language Translation (NMT) using GRU & Attention**Import all the necessary libraries which we will be requiring to run this notebook and the project.** This is a seq2seq model using a GRU and attention. Let us import all the necessary libraries needed to run this notebook. The TensorFlow GPU version must be updated before running this notebook, and TensorFlow's eager execution should be enabled.
###Code
from __future__ import absolute_import, division, print_function
import tensorflow as tf
tf.enable_eager_execution()
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import unicodedata
import re
import numpy as np
import os
from numpy import array, argmax, random, take
import time
import pandas as pd
import string
print(tf.__version__)
###Output
1.13.1
###Markdown
Creating a function which will read the file, encode it and save it; the function to_lines will then split the data into the Italian and English parts separately by '\n' and build it in the form of sentences.
###Code
def read_text(filename):
file = open(filename, mode='rt', encoding='utf-8')
text = file.read()
file.close()
return text
# split a text into sentences
def to_lines(text):
sents = text.strip().split('\n')
sents = [i.split('\t') for i in sents]
return sents
from google.colab import files
uploaded = files.upload()
for fn in uploaded.keys():
print('User uploaded file "{name}" with length {length} bytes'.format(
name=fn, length=len(uploaded[fn])))
###Output
User uploaded file "ita.txt" with length 19184787 bytes
###Markdown
Using the functions we wrote, the text file is imported into the Jupyter notebook. Once the file has been opened and read with read_text, we pass the result to to_lines to split it and build the sentences.
###Code
data = read_text(uploaded['ita.txt'])
ItalianNEng = to_lines(data)
ItalianNEng
###Output
_____no_output_____
###Markdown
ItalianNEng holds the data which we just imported and ran through the function; it is now converted to an array using NumPy's array function.
###Code
data = read_text("ita.txt")
ItalianNEng = to_lines(data)
ItalianNEng = array(ItalianNEng)
###Output
_____no_output_____
###Markdown
PreProcessing / Cleaning the data**Remove all the punctuation and then change the case of every word to lower case.** We remove the punctuation in the code below by going through each line of the data and storing the result back in the same variable, ItalianNEng.
###Code
# Remove punctuation
ItalianNEng[:,0] = [s.translate(str.maketrans('', '', string.punctuation)) for s in ItalianNEng[:,0]]
ItalianNEng[:,1] = [s.translate(str.maketrans('', '', string.punctuation)) for s in ItalianNEng[:,1]]
###Output
_____no_output_____
###Markdown
Here all the punctuation is removed/cleaned and the data looks like below
###Code
ItalianNEng
###Output
_____no_output_____
###Markdown
Now we will convert the data to lower case using Python's built-in lower() method and save it back to the original variable ItalianNEng.
###Code
# convert to lowercase
for i in range(len(ItalianNEng)):
ItalianNEng[i,0] = ItalianNEng[i,0].lower()
ItalianNEng[i,1] = ItalianNEng[i,1].lower()
ItalianNEng
ItalianNEng[0][0]
###Output
_____no_output_____
###Markdown
We convert the data into a pandas DataFrame and save it in the variable ita_eng_df.
###Code
ita_eng_df = pd.DataFrame(ItalianNEng.tolist(), columns = ['English', 'Italian'])
ita_eng_df.head()
###Output
_____no_output_____
###Markdown
Let us split the data into the English and Italian parts separately and save them in their respective variables.
###Code
english_sentences = ita_eng_df.iloc[:,0:1]
italian_sentences = ita_eng_df.iloc[:,1:2]
print(english_sentences.columns)
print(italian_sentences.columns)
###Output
Index(['English'], dtype='object')
Index(['Italian'], dtype='object')
###Markdown
As the dataset is very large, we will sample only 4,000 rows for the model; later we can play around with the data size and see how the model performs.
###Code
n = 4000
italian_sample = italian_sentences.iloc[:n,:]
english_sample = english_sentences.iloc[:n,:]
###Output
_____no_output_____
###Markdown
We will now check every row and, if any row is not of string type, convert it into a string so that everything has the same datatype. We will do this for both the English and Italian data.
###Code
english_sample.columns
english_sample['English']
for i in range(len(english_sample['English'].index)):
if type(english_sample['English'][i]) != str:
english_sample['English'][i] = str(english_sample['English'][i])
italian_sample['Italian']
for i in range(len(italian_sample['Italian'].index)):
if type(italian_sample['Italian'][i]) != str:
italian_sample['Italian'][i] = str(italian_sample['Italian'][i])
###Output
_____no_output_____
###Markdown
Create a function to add start and end tokens to each row as identifiers for the model, and convert the data into lower case.
###Code
def preprocess_sentence(w):
w = w.lower().strip()
w = '<start> ' + w + ' <end>'
return w
def cleaning_sentence(data, column):
sentence = [preprocess_sentence(i) for i in data[column]]
return sentence
###Output
_____no_output_____
###Markdown
We will continue by creating a class with functions that build the word-to-index and index-to-word mappings.
###Code
class LanguageIndex():
def __init__(self, lang):
self.lang = lang
self.word2idx = {}
self.idx2word = {}
self.vocab = set()
self.create_index()
def create_index(self):
for phrase in self.lang:
self.vocab.update(phrase.split(' '))
self.vocab = sorted(self.vocab)
self.word2idx['<pad>'] = 0
for index, word in enumerate(self.vocab):
self.word2idx[word] = index + 1
for word, index in self.word2idx.items():
self.idx2word[index] = word
def max_length(tensor):
return max(len(t) for t in tensor)
###Output
_____no_output_____
###Markdown
**Cleaning the data** We will make use of the functions we have written, such as cleaning_sentence, LanguageIndex, and word2idx, to clean, process, index, and pad the data. We use Keras's built-in sequence-padding function to pad each sentence to the maximum data length.
###Code
#Cleaning Sentences
italian_sent = cleaning_sentence(italian_sample, 'Italian')
english_sent = cleaning_sentence(english_sample, 'English')
# index language using the class defined above
inp_lang = LanguageIndex(it for it in italian_sent)
targ_lang = LanguageIndex(en for en in english_sent)
#Italian Sentences which will be indexed
input_tensor = [[inp_lang.word2idx[s] for s in it.split(' ')] for it in italian_sent]
#English sentences which will be indexed
target_tensor = [[targ_lang.word2idx[s] for s in en.split(' ')] for en in english_sent]
max_length_inp, max_length_tar = max_length(input_tensor), max_length(target_tensor)
# Padding the input and output tensor to the maximum length
input_tensor = tf.keras.preprocessing.sequence.pad_sequences(input_tensor,
maxlen=max_length_inp,
padding='post')
target_tensor = tf.keras.preprocessing.sequence.pad_sequences(target_tensor,
maxlen=max_length_tar,
padding='post')
###Output
_____no_output_____
###Markdown
**Split the data into training data and test data by using sklearn.model_selection's train_test_split function.** Split the input and target tensors built above into 80% for training and 20% for testing.
###Code
ax_train, ay_train, ax_test, ay_test = train_test_split(input_tensor, target_tensor, test_size=0.2)
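# NOTE: train_test_split returns (input_train, input_test, target_train, target_test), so despite the names,
# ay_train holds the input *test* split and ax_test holds the *target* training split; the (ax_train, ax_test)
# pair used to build the tf.data.Dataset below is therefore (input_train, target_train).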
###Output
_____no_output_____
###Markdown
After we split the data into training and testing sets, we check the lengths and inspect the data. As the output below shows, the data is vectorized (which we did using word2idx and Keras's sequence padding) and is in array format.
###Code
ax_train
# Show length
len(ax_train), len(ay_train), len(ax_test), len(ay_test)
###Output
_____no_output_____
###Markdown
Create the parameters that we can modify and pass later to tune the model, and create the TensorFlow dataset using tf.data.Dataset.
###Code
BUFFER_SIZE = len(ax_train)
BATCH_SIZE = 64
N_BATCH = BUFFER_SIZE//BATCH_SIZE
embedding_dim = 256
units = 1024
vocab_inp_size = len(inp_lang.word2idx)
vocab_tar_size = len(targ_lang.word2idx)
print(BUFFER_SIZE)
print(BATCH_SIZE)
print(N_BATCH)
print(vocab_inp_size)
print(vocab_tar_size)
dataset = tf.data.Dataset.from_tensor_slices((ax_train, ax_test)).shuffle(BUFFER_SIZE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
dataset
###Output
_____no_output_____
###Markdown
We will build the encoder and decoder models by implementing the "attention equation". Attention overcomes a limitation of the plain encoder-decoder architecture by allowing the network to learn where to pay attention in the input for each item in the output sequence. This approach has been used across different types of sequence prediction problems, including text translation, speech recognition, and more.
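For reference, the additive (Bahdanau-style) attention implemented in the decoder below can be written as (the notation here is ours, mirroring the in-code comments):

$$\text{score} = V^\top \tanh(W_1\,EO + W_2\,h), \qquad \alpha = \text{softmax}(\text{score}), \qquad c = \sum_t \alpha_t\,EO_t$$

where $EO$ are the encoder outputs, $h$ is the previous decoder hidden state, $\alpha$ are the attention weights, and $c$ is the context vector that is concatenated with the embedded decoder input before the GRU.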
###Code
def gru(units):
return tf.keras.layers.GRU(units,
return_sequences=True,
return_state=True,
recurrent_activation='sigmoid',
recurrent_initializer='glorot_uniform')
class Encoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
super(Encoder, self).__init__()
self.batch_sz = batch_sz
self.enc_units = enc_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = gru(self.enc_units)
def call(self, x, hidden):
x = self.embedding(x)
output, state = self.gru(x, initial_state = hidden)
return output, state
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.enc_units))
class Decoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
super(Decoder, self).__init__()
self.batch_sz = batch_sz
self.dec_units = dec_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = gru(self.dec_units)
self.fc = tf.keras.layers.Dense(vocab_size)
# used for attention
self.W1 = tf.keras.layers.Dense(self.dec_units)
self.W2 = tf.keras.layers.Dense(self.dec_units)
self.V = tf.keras.layers.Dense(1)
def call(self, x, hidden, enc_output):
# enc_output shape == (batch_size, max_length, hidden_size)
# hidden shape == (batch_size, hidden size)
# hidden_with_time_axis shape == (batch_size, 1, hidden size)
# we are doing this to perform addition to calculate the score
hidden_with_time_axis = tf.expand_dims(hidden, 1)
# score shape == (batch_size, max_length, 1)
# we get 1 at the last axis because we are applying tanh(FC(EO) + FC(H)) to self.V
score = self.V(tf.nn.tanh(self.W1(enc_output) + self.W2(hidden_with_time_axis)))
# attention_weights shape == (batch_size, max_length, 1)
attention_weights = tf.nn.softmax(score, axis=1)
# context_vector shape after sum == (batch_size, hidden_size)
context_vector = attention_weights * enc_output
context_vector = tf.reduce_sum(context_vector, axis=1)
# x shape after passing through embedding == (batch_size, 1, embedding_dim)
x = self.embedding(x)
# x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)
x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
# passing the concatenated vector to the GRU
output, state = self.gru(x)
# output shape == (batch_size * 1, hidden_size)
output = tf.reshape(output, (-1, output.shape[2]))
# output shape == (batch_size * 1, vocab)
x = self.fc(output)
return x, state, attention_weights
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.dec_units))
encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)
###Output
_____no_output_____
###Markdown
**Optimizer and Loss Function** We are using "Adam" as the optimizer for the model and sparse_softmax_cross_entropy as the loss function. We can experiment with these to see which is better suited to our model, but to train the model we will go with the optimizer and loss function mentioned above.
###Code
optimizer = tf.train.AdamOptimizer()
def loss_function(real, pred):
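# mask out padded positions (token id 0) so they do not contribute to the loss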
mask = 1 - np.equal(real, 0)
loss_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=real, logits=pred) * mask
return tf.reduce_mean(loss_)
###Output
_____no_output_____
###Markdown
Training the model: we will train the model by passing in the training data that we split and preprocessed above. The loss is printed for each epoch. We will be running this model with **40 epochs**; we can play around with the number of epochs to see what works best for the model.
###Code
EPOCHS = 40
for epoch in range(EPOCHS):
start = time.time()
hidden = encoder.initialize_hidden_state()
total_loss = 0
for (batch, (inp, targ)) in enumerate(dataset):
loss = 0
with tf.GradientTape() as tape:
enc_output, enc_hidden = encoder(inp, hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word2idx['<start>']] * BATCH_SIZE, 1)
# Teacher forcing - feeding the target as the next input
for t in range(1, targ.shape[1]):
# passing enc_output to the decoder
predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)
loss += loss_function(targ[:, t], predictions)
# using teacher forcing
dec_input = tf.expand_dims(targ[:, t], 1)
batch_loss = (loss / int(targ.shape[1]))
total_loss += batch_loss
variables = encoder.variables + decoder.variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
if batch % 100 == 0:
print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1,
batch,
batch_loss.numpy()))
print('Epoch {} Loss {:.4f}'.format(epoch + 1,
total_loss / N_BATCH))
print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
###Output
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/data/ops/iterator_ops.py:532: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
Epoch 1 Batch 0 Loss 3.6454
Epoch 1 Loss 2.5360
Time taken for 1 epoch 9.944748163223267 sec
Epoch 2 Batch 0 Loss 1.9223
Epoch 2 Loss 1.8720
Time taken for 1 epoch 8.805859565734863 sec
Epoch 3 Batch 0 Loss 1.7307
Epoch 3 Loss 1.7082
Time taken for 1 epoch 9.48662543296814 sec
Epoch 4 Batch 0 Loss 1.5072
Epoch 4 Loss 1.5026
Time taken for 1 epoch 8.954044580459595 sec
Epoch 5 Batch 0 Loss 1.4262
Epoch 5 Loss 1.2997
Time taken for 1 epoch 8.833491802215576 sec
Epoch 6 Batch 0 Loss 1.1589
Epoch 6 Loss 1.1484
Time taken for 1 epoch 8.69847846031189 sec
Epoch 7 Batch 0 Loss 1.0229
Epoch 7 Loss 1.0363
Time taken for 1 epoch 8.853046417236328 sec
Epoch 8 Batch 0 Loss 0.9543
Epoch 8 Loss 0.9238
Time taken for 1 epoch 8.750823974609375 sec
Epoch 9 Batch 0 Loss 0.7996
Epoch 9 Loss 0.8197
Time taken for 1 epoch 8.902754068374634 sec
Epoch 10 Batch 0 Loss 0.7619
Epoch 10 Loss 0.7445
Time taken for 1 epoch 8.692206859588623 sec
Epoch 11 Batch 0 Loss 0.6503
Epoch 11 Loss 0.6723
Time taken for 1 epoch 8.646382093429565 sec
Epoch 12 Batch 0 Loss 0.5428
Epoch 12 Loss 0.6044
Time taken for 1 epoch 9.473032712936401 sec
Epoch 13 Batch 0 Loss 0.5566
Epoch 13 Loss 0.5469
Time taken for 1 epoch 8.906400442123413 sec
Epoch 14 Batch 0 Loss 0.4571
Epoch 14 Loss 0.4824
Time taken for 1 epoch 8.779704809188843 sec
Epoch 15 Batch 0 Loss 0.4145
Epoch 15 Loss 0.4292
Time taken for 1 epoch 8.7562735080719 sec
Epoch 16 Batch 0 Loss 0.3186
Epoch 16 Loss 0.3694
Time taken for 1 epoch 9.722482442855835 sec
Epoch 17 Batch 0 Loss 0.2717
Epoch 17 Loss 0.3141
Time taken for 1 epoch 9.086331129074097 sec
Epoch 18 Batch 0 Loss 0.2053
Epoch 18 Loss 0.2676
Time taken for 1 epoch 8.70476484298706 sec
Epoch 19 Batch 0 Loss 0.2077
Epoch 19 Loss 0.2372
Time taken for 1 epoch 8.663342237472534 sec
Epoch 20 Batch 0 Loss 0.2257
Epoch 20 Loss 0.1914
Time taken for 1 epoch 8.741352319717407 sec
Epoch 21 Batch 0 Loss 0.1347
Epoch 21 Loss 0.1585
Time taken for 1 epoch 9.414800882339478 sec
Epoch 22 Batch 0 Loss 0.1262
Epoch 22 Loss 0.1400
Time taken for 1 epoch 8.973430871963501 sec
Epoch 23 Batch 0 Loss 0.0934
Epoch 23 Loss 0.1284
Time taken for 1 epoch 8.72501254081726 sec
Epoch 24 Batch 0 Loss 0.1024
Epoch 24 Loss 0.1261
Time taken for 1 epoch 8.555520296096802 sec
Epoch 25 Batch 0 Loss 0.1167
Epoch 25 Loss 0.1142
Time taken for 1 epoch 8.650434017181396 sec
Epoch 26 Batch 0 Loss 0.0614
Epoch 26 Loss 0.1074
Time taken for 1 epoch 8.618392705917358 sec
Epoch 27 Batch 0 Loss 0.0791
Epoch 27 Loss 0.0931
Time taken for 1 epoch 8.639946460723877 sec
Epoch 28 Batch 0 Loss 0.0854
Epoch 28 Loss 0.0784
Time taken for 1 epoch 8.617584466934204 sec
Epoch 29 Batch 0 Loss 0.0498
Epoch 29 Loss 0.0723
Time taken for 1 epoch 8.559330224990845 sec
Epoch 30 Batch 0 Loss 0.0810
Epoch 30 Loss 0.0688
Time taken for 1 epoch 9.288172245025635 sec
Epoch 31 Batch 0 Loss 0.0751
Epoch 31 Loss 0.0665
Time taken for 1 epoch 9.042842388153076 sec
Epoch 32 Batch 0 Loss 0.0479
Epoch 32 Loss 0.0616
Time taken for 1 epoch 8.523797512054443 sec
Epoch 33 Batch 0 Loss 0.0442
Epoch 33 Loss 0.0621
Time taken for 1 epoch 9.351551055908203 sec
Epoch 34 Batch 0 Loss 0.0479
Epoch 34 Loss 0.0609
Time taken for 1 epoch 8.494425773620605 sec
Epoch 35 Batch 0 Loss 0.0912
Epoch 35 Loss 0.0610
Time taken for 1 epoch 8.608481407165527 sec
Epoch 36 Batch 0 Loss 0.0423
Epoch 36 Loss 0.0576
Time taken for 1 epoch 8.638583660125732 sec
Epoch 37 Batch 0 Loss 0.0692
Epoch 37 Loss 0.0570
Time taken for 1 epoch 8.421333312988281 sec
Epoch 38 Batch 0 Loss 0.0477
Epoch 38 Loss 0.0552
Time taken for 1 epoch 8.54841160774231 sec
Epoch 39 Batch 0 Loss 0.0571
Epoch 39 Loss 0.0553
Time taken for 1 epoch 8.925658226013184 sec
Epoch 40 Batch 0 Loss 0.0605
Epoch 40 Loss 0.0551
Time taken for 1 epoch 9.23789644241333 sec
###Markdown
Prediction: once the model is trained, we can translate Italian sentences by passing them to the function below.
###Code
def Prediction_eval(sentence, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ):
attention_plot = np.zeros((max_length_targ, max_length_inp))
sentence = preprocess_sentence(sentence)
inputs = [inp_lang.word2idx[i] for i in sentence.split(' ')]
inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs], maxlen=max_length_inp, padding='post')
inputs = tf.convert_to_tensor(inputs)
result = ''
hidden = [tf.zeros((1, units))]
enc_out, enc_hidden = encoder(inputs, hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word2idx['<start>']], 0)
for t in range(max_length_targ):
predictions, dec_hidden, attention_weights = decoder(dec_input, dec_hidden, enc_out)
# storing the attention weigths to plot later on
attention_weights = tf.reshape(attention_weights, (-1, ))
attention_plot[t] = attention_weights.numpy()
predicted_id = tf.argmax(predictions[0]).numpy()
result += targ_lang.idx2word[predicted_id] + ' '
if targ_lang.idx2word[predicted_id] == '<end>':
return result, sentence, attention_plot
# the predicted ID is fed back into the model
dec_input = tf.expand_dims([predicted_id], 0)
return result, sentence, attention_plot
def translate(sentence, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ):
result, sentence, attention_plot = Prediction_eval(sentence, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
print('Input: {}'.format(sentence))
print('Predicted translation: {}'.format(result))
english_sample.iloc[:1,:].values[0][0]
translate(italian_sample.iloc[:1,:].values[0][0], encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_tar)
test = 'venire'
translate(sentence=test, encoder=encoder, decoder=decoder, inp_lang= inp_lang, targ_lang= targ_lang, max_length_inp= max_length_inp, max_length_targ= max_length_tar )
###Output
Input: <start> venire <end>
Predicted translation: come over <end>
|
ETL Pipelines/10_imputation_exercise/10_imputations_exercise.ipynb | ###Markdown
Imputing DataWhen a dataset has missing values, you can either remove those values or fill them in. In this exercise, you'll work with World Bank GDP (Gross Domestic Product) data to fill in missing values.
###Code
# run this code cell to read in the data set
import pandas as pd
df = pd.read_csv('../data/gdp_data.csv', skiprows=4)
df.drop('Unnamed: 62', axis=1, inplace=True)
# run this code cell to see what the data looks like
df.head()
# Run this code cell to check how many null values are in the data set
df.isnull().sum()
###Output
_____no_output_____
###Markdown
There are quite a few null values. Run the code below to plot the data for a few countries in the data set.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# put the data set into long form instead of wide
df_melt = pd.melt(df, id_vars=['Country Name', 'Country Code', 'Indicator Name', 'Indicator Code'], var_name='year', value_name='GDP')
# convert year to a date time
df_melt['year'] = pd.to_datetime(df_melt['year'])
def plot_results(column_name):
# plot the results for Afghanistan, Albania, and Honduras
fig, ax = plt.subplots(figsize=(8,6))
df_melt[(df_melt['Country Name'] == 'Afghanistan') |
(df_melt['Country Name'] == 'Albania') |
(df_melt['Country Name'] == 'Honduras')].groupby('Country Name').plot('year', column_name, legend=True, ax=ax)
ax.legend(labels=['Afghanistan', 'Albania', 'Honduras'])
plot_results('GDP')
###Output
_____no_output_____
###Markdown
Afghanistan and Albania are missing data, which show up as gaps in the results. Exercise - Part 1
Your first task is to calculate mean GDP for each country and fill in missing values with the country mean. This is a bit tricky to do in pandas. Here are a few links that should be helpful:
* https://pandas.pydata.org/pandas-docs/version/0.23/generated/pandas.DataFrame.groupby.html
* https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.transform.html
* https://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.DataFrame.fillna.html
###Code
# TODO: Use the df_melt dataframe and fill in missing values with a country's mean GDP
# If you aren't sure how to do this,
# look up something like "how to group data and fill in nan values in pandas" in a search engine
# Put the results in a new column called 'GDP_filled'.
# HINT: You can do this with these methods: groupby(), transform(), a lambda function, fillna(), and mean()
df_melt['GDP_filled'] = df_melt.groupby(['Country Name'])['GDP'].transform(lambda x: x.fillna(x.mean()))
# Plot the results
plot_results('GDP_filled')
###Output
_____no_output_____
###Markdown
This is somewhat of an improvement. At least there is no missing data; however, because GDP tends to increase over time, the mean GDP is probably not the best way to fill in missing values for this particular case. Next, try using forward fill to deal with any missing values. Exercise - Part 2
Use the fillna forward fill method to fill in the missing data. Here is the [documentation](https://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.DataFrame.fillna.html). As explained in the course video, forward fill takes previous values to fill in nulls. The pandas fillna method has a forward fill option. For example, if you wanted to use forward fill on the GDP dataset, you could execute `df_melt['GDP'].fillna(method='ffill')`. However, there are two issues with that code.
1. You want to first make sure the data is sorted by year
2. You need to group the data by country name so that the forward fill stays within each country
Write code to first sort the df_melt dataframe by year, then group by 'Country Name', and finally use the forward fill method.
###Code
# TODO: Use forward fill to fill in missing GDP values
# HINTS: use the sort_values(), groupby(), and fillna() methods
df_melt['GDP_ffill'] = df_melt.sort_values('year').groupby(['Country Name'])['GDP'].fillna(method='ffill')
# plot the results
plot_results('GDP_ffill')
###Output
_____no_output_____
###Markdown
This looks better at least for the Afghanistan data; however, the Albania data is still missing values. You can fill in the Albania data using back fill. That is what you'll do next. Exercise - Part 3
This part is similar to Part 2, but now you will use backfill. Write code that backfills the missing GDP data.
###Code
# TODO: Use back fill to fill in missing GDP values
# HINTS: use the sort_values(), groupby(), and fillna() methods
df_melt['GDP_bfill'] = df_melt.sort_values('year').groupby(['Country Name'])['GDP'].fillna(method='bfill')
# plot the results
plot_results('GDP_bfill')
###Output
_____no_output_____
###Markdown
Conclusion In this case, the GDP data for all three countries is now complete. Note that forward fill did not fill all the Albania data because the first data entry in 1960 was NaN. Forward fill would try to fill the 1961 value with the NaN value from 1960.To completely fill the entire GDP data for all countries, you might have to run both forward fill and back fill. Note as well that the results will be slightly different depending on if you run forward fill first or back fill first. Afghanistan, for example, is missing data in the middle of the data set. Hence forward fill and back fill will have slightly different results.Run this next code cell to see if running both forward fill and back fill end up filling all the GDP NaN values.
###Code
# Run forward fill and backward fill on the GDP data
df_melt['GDP_ff_bf'] = df_melt.sort_values('year').groupby('Country Name')['GDP'].fillna(method='ffill').fillna(method='bfill')
# Check if any GDP values are null
df_melt['GDP_ff_bf'].isnull().sum()
###Output
_____no_output_____ |
notebooks/GravLook.ipynb | ###Markdown
First Look at Gravity Data Import Data Files
###Code
%reset -f
import pandas as pd
import numpy as np
import matplotlib.dates as dates
import warnings
warnings.filterwarnings('ignore')
import glob, os
from time import sleep
def inc(x):
sleep(1)
return x + 1
def add(x, y):
sleep(1)
return x + y
import datetime
def dateparse (date_string):
return datetime.datetime.strptime(date_string, '%d-%m-%Y %H:%M:%S')
def dateparseSPAIN (date_string):
return datetime.datetime.strptime(date_string, '%Y-%m-%d-%H:%M:%S')
!head ~jovyan/data/bravoseis/gravity/gravimetro_bruto/21012019.gravimetro_bruto.proc
!ls /home/jovyan/data/bravoseis_data/SADO/jan_2019/gravimetro_bruto.proc/
!head /home/jovyan/data/bravoseis_data/SADO/jan_2019/gravimetro_bruto.proc/17012019.gravimetro_bruto.proc
###Output
fecha,status,gravimetria_bruta,spring_tension,longitud,latitud,velocidad,rumbo,fecha_telegrama
17-01-2019 00:00:00,0,15303.49,15305.59,-58.905947,-62.2022067,0.12,177.08,17-01-2019 00:00:00
17-01-2019 00:00:01,0,15303.51,15305.58,-58.905947,-62.2022068,0.12,177.43,17-01-2019 00:00:01
17-01-2019 00:00:02,0,15303.53,15305.58,-58.905947,-62.2022069,0.12,177.759,17-01-2019 00:00:02
17-01-2019 00:00:03,0,15303.55,15305.57,-58.9059471,-62.202207,0.12,178.068,17-01-2019 00:00:03
17-01-2019 00:00:04,0,15303.57,15305.57,-58.9059471,-62.2022072,0.12,178.356,17-01-2019 00:00:04
17-01-2019 00:00:05,0,15303.59,15305.56,-58.9059471,-62.2022072,0.12,178.624,17-01-2019 00:00:05
17-01-2019 00:00:06,0,15303.61,15305.56,-58.9059471,-62.2022073,0.12,178.873,17-01-2019 00:00:06
17-01-2019 00:00:07,0,15303.63,15305.55,-58.9059471,-62.2022074,0.12,179.103,17-01-2019 00:00:07
17-01-2019 00:00:08,0,15303.65,15305.55,-58.9059471,-62.2022075,0.12,179.315,17-01-2019 00:00:08
###Markdown
Read Gravity Files
###Code
%%time
path = '/home/jovyan/data/bravoseis_data/SADO/jan_2019/gravimetro_bruto.proc/' # use your path
all_files = glob.glob(os.path.join(path, "*.proc")) # advisable to use os.path.join as this makes concatenation OS independent
df_from_each_file = (pd.read_csv(f, parse_dates=True, date_parser=dateparse, index_col='fecha',
dtype = {'Date': object,'status': np.float64,
'gravimetria_bruta': np.float64, 'spring_tension': np.float64,
'longitud': np.float64, 'latitud': np.float64,
'velocidad': np.float64,'rumbo': np.float64 }) for f in all_files)
concatenated_df = pd.concat(df_from_each_file, ignore_index=False)
df_grav = concatenated_df.sort_values(by='fecha_telegrama')
df_grav.head()
del df_grav['fecha_telegrama']
del df_grav['rumbo']
del df_grav['velocidad']
del df_grav['spring_tension']
del df_grav['status']
df_grav = df_grav.resample('s').mean()
df_grav.head()
###Output
_____no_output_____
###Markdown
Read Bathy Files
###Code
#!head /home/jovyan/data/bravoseis_data/SADO/jan_2019/posicion.proc/01012019.posicion.proc
%%time
path = '/home/jovyan/data/bravoseis_data/SADO/jan_2019/posicion.proc/' # use your path
all_files = glob.glob(os.path.join(path, "*.proc")) # advisable to use os.path.join as this makes concatenation OS independent
df_from_each_Bath = (pd.read_csv(f, parse_dates=True, date_parser=dateparse, index_col='fecha',
dtype = {'Date': object,'longitud': np.float64,
'latitud': np.float64, 'rumbo': np.float64,
'velocidad': np.float64, 'profundidad': np.float64,
'cog': np.float64,'sog': np.float64 }) for f in all_files)
concatBathy_df = pd.concat(df_from_each_Bath, ignore_index=False)
df_bath = concatBathy_df.sort_values(by='fecha_telegrama')
df_bath.head()
del df_bath['fecha_telegrama']
del df_bath['rumbo']
del df_bath['velocidad']
df_bath = df_bath.resample('s').mean()
df_bath.head()
###Output
_____no_output_____
###Markdown
Merge Dataframes
###Code
test = pd.merge(df_bath, df_grav,how='inner', indicator=True, left_index=True, right_index=True, suffixes=('_B', '_G'))
del df_bath
del df_grav
df_gravMerge = pd.DataFrame()
df_gravMerge = test[test['_merge'] == 'both']
del df_gravMerge['_merge']
df_gravMerge['longitud'] = df_gravMerge['longitud_G']
df_gravMerge['latitud'] = df_gravMerge['latitud_G']
del df_gravMerge['longitud_B']
del df_gravMerge['latitud_B']
del df_gravMerge['longitud_G']
del df_gravMerge['latitud_G']
df_gravMerge.head()
df_gravMerge['survey']= np.nan # Survey Part
df_gravMerge['loca'] = np.nan # Survey Location
df_gravMerge['line']= np.nan # Line Number
df_gravMerge['date']=df_gravMerge.index.date
df_gravMerge['time']=df_gravMerge.index.time
del test
!tail bravoseis_tables.csv
df_lineNumbers = (pd.read_csv('bravoseis_tables.csv', parse_dates=[3, 4], date_parser=dateparseSPAIN))
df_lineNumbers.columns = ['survey','loca','line',
's_time','e_time']
#df_lineNumbers.head(50)
df_lineNumbers.dtypes
pd.to_datetime(df_lineNumbers.e_time, format = "%Y-%m-%d-%H:%M:%S");
for index, row in df_lineNumbers.iterrows():
mask = (df_gravMerge.index > row.s_time) & (df_gravMerge.index <= row.e_time)
df_gravMerge.survey[mask]= row.survey
df_gravMerge.loca[mask]= row.loca
df_gravMerge.line[mask]= row.line
gravimetria = df_gravMerge.dropna()
del df_gravMerge
gravimetria['profundidad']= gravimetria.profundidad.round(2)
gravimetria['cog']= gravimetria.cog.round(2)
gravimetria['sog']= gravimetria.sog.round(2)
gravimetria['gravimetria_bruta']= gravimetria.gravimetria_bruta.round(3)
gravimetria.head()
gravimetria = gravimetria[gravimetria.loca != 'EdifaceA'];
gravimetria = gravimetria[gravimetria.loca != 'Transit'];
gravimetria = gravimetria[gravimetria.line != 'RIF12'];
gravimetria = gravimetria[gravimetria.line != 'RIF11'];
gravimetria = gravimetria[gravimetria.line != 'RIF11b'];
gravimetria = gravimetria[gravimetria.line != 'RIF10'];
gravimetria = gravimetria[gravimetria.line != 'RIF04'];
gravimetria = gravimetria[gravimetria.line != 'RIF03'];
gravimetria = gravimetria[gravimetria.line != 'RIF02'];
gravimetria = gravimetria[gravimetria.line != 'ORK10'];
gravimetria = gravimetria[gravimetria.line != 'ORK17'];
gravimetria = gravimetria[gravimetria.line != 'ORK02']; #Start of line for OR_2 at 08:59 UTC. The streamer is completely outside the line (Feather angle of -13 º in the good part of the line). At 1300 UTC end of line for OR_2
gravimetria = gravimetria[gravimetria.line != 'ORK05'];
gravimetria = gravimetria[gravimetria.line != 'ORK18b'];#We begin the Turn B with the line OR18. There is a large iceberg 4 km away, at the moment we will not deviate from the line. At 15:10 (UTC) the guns have tangled with the streamer. They stopped the acquisition of data. At 18:05 we returned to the initial point to redo the survey of the line 2300 UTC bad weather, large waves make multibeam data poor quality.
gravimetria = gravimetria[gravimetria.line != 'T10'];
gravimetria = gravimetria.rename(columns={"profundidad": "depth", "longitud": "Longitude", "latitud": "Latitude"});
###Output
_____no_output_____
###Markdown
Calculate Local Gravity for Reduction http://the-mostly.ru/misc/local_gravity_online_calculator.htmlγ = 9.7803267714*(1 + 0.00193185138639*sin2θ)/(1 - 0.00669437999013*sin2θ)1/2 * (1 + z/a)-2
###Code
gravimetria['depth_na']=gravimetria.depth * -1
gravimetria['normal_grav']=(9.7803267714*(1 + 0.00193185138639*np.sin(2*gravimetria.Latitude.values))\
/(1 - 0.00669437999013*np.sin(2*gravimetria.Latitude.values))**(1/2) * (1 + -0.5/6371000)**-2)*100000
gravimetria['normal_grav']= gravimetria.normal_grav.round(2)
gravimetria['normal_mean']= gravimetria.normal_grav.mean()
gravimetria['normal_mean']=gravimetria.normal_mean.round(0)
gravimetria['Normal_Geoff'] = 982104
gravimetria['elevation']= -0.5
gravimetria['FaCor']= (0.3087691 - 0.0004398)*np.sin(gravimetria.Latitude.values)**2 * gravimetria.elevation.values + (7.2125e-8 * gravimetria.elevation.values**2)
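# Note: the standard second-order free-air correction is usually written
# (0.3087691 - 0.0004398*sin(lat_rad)**2)*h + 7.2125e-8*h**2 with latitude in radians;
# the line above instead groups the constants as (0.3087691 - 0.0004398)*sin(lat)**2*h
# with latitude in degrees, which may not be the intended formula.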
gravimetria['relGrav'] = gravimetria.gravimetria_bruta.values.astype(float)- gravimetria.gravimetria_bruta.values.mean()
gravimetria['relGrav']= gravimetria.relGrav.round(2)
gravimetria['Abs_Gravity']= gravimetria['normal_grav'] + gravimetria['relGrav']
gravimetria['Abs_Gravity']= gravimetria.Abs_Gravity.round(2)
gravimetria['Abs_Geoff']= gravimetria['Normal_Geoff'] + gravimetria['relGrav']
gravimetria['Abs_Geoff']= gravimetria.Abs_Geoff.round(2)
gravimetria = gravimetria.rename(columns={"gravimetria_bruta": "raw_grav"});
gravimetria.head()
###Output
_____no_output_____
###Markdown
Exact: E = (-2*Ws*ve*cos(lat)) - (ve^2/ra)*(1 - (h/ra) - ( )) - ( )

Where:
* E: Eötvös correction
* V: velocity in knots = sog
* α: heading = cog
* φ: latitude
* e: correction for Earth's flattening towards the poles = 0.0818191908426
* ra: Earth's major axis = 6378137.0 m
* rb: Earth's minor axis = 6356752.3141 m
* ε: Earth's eccentricity = (ra - rb)/ra
* Ws: angular velocity of Earth's rotation = 7.2921158533e-5 rad/sec
* νe & νn: velocities in easting & northing directions calculated from the heading and velocity channels
###Code
# e= 0.0818191908426 # Correction for Earth’s flattening towards the poles
# ra = 6378137.0 # (m) earth's major axis
# rb = 6356752.3141 # (m) earth's minor axis
# ecc = (ra - rb)/ ra
# Ws = 7.2921158533e-5 # Angular velocity of Earth’s rotation rad/sec
gravimetria.to_csv('gravimetria13.csv');
#gravimetria.sample(100)
gravimetria.hvplot.points('fecha', 'gravimetria_bruta', color='gravimetria_bruta',
cmap='colorwheel', size=.5,
hover_cols=['cog', 'line'], title= 'proc_gravity')
gravimetria.hvplot.scatter('Longitude', 'Latitude',
height=500,
color ='gravimetria_bruta',
cmap='colorwheel',
size=50,
hover_cols=['line'], title= 'proc_gravity subset')
df_minuteGrav2.hvplot.points('index', 'proc_gravity', color='proc_gravity',
cmap='colorwheel', size=.5,
hover_cols=['cog'], title= 'proc_gravity')
df_minuteGrav.hvplot.scatter('lon ', 'lat',
height=500,
color ='gravimetria_bruta',
cmap='colorwheel',
size=50,
hover_cols=['depth'], title= 'proc_gravity subset')
###Output
_____no_output_____
###Markdown
Gravity is measured in mGal (milligals)
###Code
df_gravMerge['eotvos'] = 4.040 * df_gravMerge['sog'].values * df_gravMerge['cog'].apply(np.sin)* df_gravMerge['latitud'].apply(np.cos) + (0.001211 * df_gravMerge['sog']**2 )
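# Note: the 4.040 and 0.001211 coefficients correspond approximately to speed in km/h
# (the knot-based form of the Eotvos correction is about 7.503*V*cos(lat)*sin(heading) + 0.004154*V**2 mGal),
# so check the units of sog; also np.sin/np.cos expect radians, while cog and latitud here are in degrees.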
df_gravMerge.head()
###Output
_____no_output_____
###Markdown
Latitude Correction https://rallen.berkeley.edu/teaching/F04_GEO594_IntroAppGeophys/Lectures/L03_GravCorrAnalysis.pdf

Geodetic Reference System (GRS-1967) formula: gφ = 9.780318(1 + 0.0053024·sin²φ − 0.0000059·sin²2φ) m/s²

Bouguer correction: accounts for rock thickness between the current and base station elevation. Treat the rock as an infinite horizontal slab: CB = 0.000419·∆h·ρ, where ∆h is in m and ρ is in kg/m³
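A one-line sketch of that slab correction (names are ours; ∆h in metres and ρ in kg/m³ as above):

```python
def bouguer_slab_correction(dh_m, rho_kg_m3):
    # infinite horizontal slab: CB = 0.000419 * dh * rho, as stated above
    return 0.000419 * dh_m * rho_kg_m3
```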
###Code
#df_gravMerge.size
df_minuteGrav.size
df_minuteGrav2=df_minuteGrav.loc['2019-01-20 00:00:00':'2019-01-24 00:00:00']
df_temp=df_minuteGrav.loc['2019-01-26 21:00:00':'2019-02-05 23:58:00']
df_minuteGrav2=df_minuteGrav2.append(df_temp)
df_minuteGrav2.hvplot.points('lon', 'lat',
height=500,
color='proc_gravity',
cmap='colorwheel',
size=3,
hover_cols=['depth'], title= 'proc_gravity',
fontsize={'title': 16, 'labels': 14, 'xticks': 12, 'yticks': 12})
#df_minuteGrav2.hvplot.heatmap(x='lon', y='lat', C='proc_gravity', reduce_function=np.mean, colorbar=True)
###Output
_____no_output_____
###Markdown
Things to notice:
1. The depth signature is visible.
2. Examine crossing paths... there is a directional dependence in our readings related to ship direction.
3. Is the difference between these lines just the Eötvös correction, or are there other corrections that need to be applied?
4. Would you please share the processing stream?
###Code
df_minuteGrav2.hvplot.points('index', 'proc_gravity', color='proc_gravity',
cmap='colorwheel', size=.5,
hover_cols=['cog'], title= 'proc_gravity')
df_minuteGrav2.head(1)
cond1 = df_minuteGrav2["lat"] < -62.44
cond2 = df_minuteGrav2["lat"] > -62.45
cond3 = df_minuteGrav2["lon"] > -58.42
cond4 = df_minuteGrav2["lon"] < -58.36
df_minuteGrav3 = df_minuteGrav2[cond1 & cond2 & cond3 & cond4]
del df_minuteGrav3['eotvos']
del df_minuteGrav3['grav_corr']
df_minuteGrav3.head()
df_minuteGrav3.hvplot.scatter('lon', 'lat',
height=500,
color='proc_gravity',
cmap='colorwheel',
size=50,
hover_cols=['depth'], title= 'proc_gravity subset').opts(bgcolor='grey')
df_minuteGrav3.to_csv('proc_gravity_subset.csv')
###Output
_____no_output_____
###Markdown
The gravitational constant in SI units :math:`m^3 kg^{-1} s^{-2}`: GRAVITATIONAL_CONST = 0.00000000006673. If terrain corrections (see below) are not applied, the term simple Bouguer anomaly is used. If they have been, the term complete Bouguer anomaly is used. A second-order correction to account for the curvature of the Earth is often added to this calculation.
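For reference, the usual combination of these corrections (standard convention, not taken from this notebook) is

$$\Delta g_{\text{simple Bouguer}} = g_{\text{obs}} - \gamma(\varphi) + \delta g_{\text{FA}} - \delta g_{\text{B}},$$

with terrain corrections added on top of this to obtain the complete Bouguer anomaly.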
###Code
# Fragment of a geodetic-to-geocentric conversion (the get_ellipsoid helper is assumed to come
# from the Harmonica package); wrapped in a function so the cell is self-contained.
def geodetic_to_geocentric(latitude, height):
ellipsoid = get_ellipsoid()
# Convert latitude to radians
latitude_rad = np.radians(latitude)
prime_vertical_radius = ellipsoid.semimajor_axis / np.sqrt(1 - ellipsoid.first_eccentricity ** 2 * np.sin(latitude_rad) ** 2)
# Instead of computing X and Y, we only compute the projection on the XY plane:
# xy_projection = sqrt( X**2 + Y**2 )
xy_projection = (height + prime_vertical_radius) * np.cos(latitude_rad)
z_cartesian = (height + (1 - ellipsoid.first_eccentricity ** 2) * prime_vertical_radius) * np.sin(latitude_rad)
radius = np.sqrt(xy_projection ** 2 + z_cartesian ** 2)
geocentric_latitude = 180 / np.pi * np.arcsin(z_cartesian / radius)
return geocentric_latitude, radius
###Output
_____no_output_____ |
naukri-scrapper.ipynb | ###Markdown
To get all search-result jobs rather than only the top 10 pages (if they exist), follow this: get the total number of search results, then keep a counter in the inner for loop and stop when the counter passes the total search-result count, as sketched below.
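A hypothetical sketch of that counter pattern (all names and values here are placeholders, not from this notebook):

```python
# Hypothetical pagination sketch: stop once we've collected `total_results` jobs.
total_results = 47          # e.g. parsed from the search-results header
jobs_per_page = 10
pages = range(1, total_results // jobs_per_page + 2)

jobs = []
counter = 0
for page in pages:
    for slot in range(jobs_per_page):      # stand-in for "each job card on this page"
        jobs.append((page, slot))          # real code would append the scraped job here
        counter += 1
        if counter >= total_results:       # stop when the counter passes the total count
            break
    if counter >= total_results:
        break

print(len(jobs))                           # 47
```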
###Code
# store in whatever formate you want to for further implementation of methos
# json, csv, or .....
###Output
_____no_output_____ |
answers/Worksheet 1.2 - Exploring One Dimensional Data - Answers.ipynb | ###Markdown
Worksheet 1.2: Exploring One Dimensional Data - AnswersThis worksheet covers concepts covered in the first half of Module 1 - Exploratory Data Analysis in One Dimension. It should take no more than 20-30 minutes to complete. Please raise your hand if you get stuck. There are many ways to accomplish the tasks that you are presented with, however you will find that by using the techniques covered in class, the exercises should be relatively simple. Import the LibrariesFor this exercise, we will be using:* Pandas (http://pandas.pydata.org/pandas-docs/stable/)* Numpy (https://docs.scipy.org/doc/numpy/reference/)* Matplotlib (http://matplotlib.org/api/pyplot_api.html)
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
%pylab inline
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
Exercise 1: Summarize the Data
For this exercise, you are given a Series of random numbers creatively named `random_numbers`. For the first exercise please do the following:
1. Remove all the numbers less than 10
2. Sort the series
3. Calculate the Tukey 5 number summary for this dataset
4. Count the number of even and odd numbers
5. Find the five largest and 5 smallest numbers in the series
###Code
#Generate a series of random numbers between 1 and 100.
random_numbers = pd.Series( np.random.randint(1, 100, 50) )
# Your code here...
#Filter the Series
random_numbers = random_numbers[random_numbers >= 10]
#Sort the Series
random_numbers.sort_values(inplace=True)
#Calculate the Tukey 5 Number Summary
random_numbers.describe()
#Count the number of even and odd numbers
even_numbers = random_numbers[random_numbers % 2 == 0].count()
odd_numbers = random_numbers[random_numbers % 2 != 0].count()
print( "Even numbers: " + str(even_numbers))
print( "Odd numbers: " + str(odd_numbers))
#Find the five largest and smallest numbers
print( "Smallest Numbers:")
print( random_numbers.head(5))
print( "Largest Numbers:")
print( random_numbers.tail(5))
###Output
Even numbers: 17
Odd numbers: 27
Smallest Numbers:
11 13
3 15
4 16
48 19
47 19
dtype: int64
Largest Numbers:
38 94
46 97
43 98
32 99
20 99
dtype: int64
###Markdown
Exercise 2: Using the random number Series create a histogram with 8 bins.
###Code
random_numbers.hist(bins=8)
###Output
_____no_output_____
###Markdown
Exercise 3: You have been given a list of US phone numbers. The area code is the first three digits. Your task is to produce a summary of how many times each area code appears in the list. To do this you will need to:
1. Extract the area code from each phone number
2. Count the unique occurrences.
###Code
phone_numbers = [
'(833) 759-6854',
'(811) 268-9951',
'(855) 449-4648',
'(833) 212-2929',
'(833) 893-7475',
'(822) 346-3086',
'(844) 259-9074',
'(855) 975-8945',
'(811) 385-8515',
'(811) 523-5090',
'(844) 593-5677',
'(833) 534-5793',
'(899) 898-3043',
'(833) 662-7621',
'(899) 146-8244',
'(822) 793-4965',
'(822) 641-7853',
'(833) 153-7848',
'(811) 958-2930',
'(822) 332-3070',
'(833) 223-1776',
'(811) 397-1451',
'(844) 096-0377',
'(822) 000-0717',
'(899) 311-1880']
phone_number_series = pd.Series(phone_numbers)
area_codes = phone_number_series.str.slice(1,4)
area_codes2 = phone_number_series.str.extract( '\((\d{3})\)', expand=False)
area_codes3 = phone_number_series.str.split(')').str[0].str.replace('(','')
area_codes.value_counts()
area_codes2.value_counts()
area_codes3.value_counts()
###Output
_____no_output_____
notebooks/neuron_view_gpt2.ipynb | ###Markdown
Neuron ViewThe attention-head view visualizes attention, as well as query and key values, in a particuler attention head.
###Code
from bertviz.transformers_neuron_view import GPT2Model, GPT2Tokenizer
from bertviz.neuron_view import show
###Output
_____no_output_____
###Markdown
Usage
* **Hover** over any of the tokens on the left side of the visualization to filter attention from that token.
* Then **click** on the **plus** icon that is revealed when hovering. This shows the query vectors, key vectors, and intermediate computations for the attention weights (blue=positive, orange=negative).
* Once in the expanded view, **hover** over any other **token** on the left to see the associated attention computations.
* **Click** on the **Layer** or **Head** drop-downs to change the model layer or head (zero-indexed).
###Code
model_type = 'gpt2'
model_version = 'gpt2'
model = GPT2Model.from_pretrained(model_version)
tokenizer = GPT2Tokenizer.from_pretrained(model_version)
text = "At the store, she bought apples, oranges, bananas,"
show(model, model_type, tokenizer, text, display_mode='dark')
###Output
_____no_output_____
finmath/termstructure/curve_bootstrap_example.ipynb | ###Markdown
Brazilian bond and the Curve Bootstrap class Author: Gustavo Soares
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from finmath.brazilian_bonds.government_bonds import LTN, NTNF
from finmath.termstructure.curve_models import CurveBootstrap as CB
###Output
_____no_output_____
###Markdown
LTNs (zero coupon bonds)
###Code
ref_date = pd.to_datetime('2021-02-05').date()
ltn_expires = [
'2021-04-01',
'2021-07-01',
'2021-10-01',
'2022-01-01',
'2022-04-01',
'2022-07-01',
'2022-10-01',
'2023-01-01',
'2023-07-01',
'2024-01-01',
'2024-07-01',
]
ltn_yields = [
0.020580,
0.023885,
0.029904,
0.034463,
0.040148,
0.044847,
0.049137,
0.052500,
0.057519,
0.061150,
0.064247,
]
ltn_prices = []
ltn_cash_flows = []
for T, y in zip(ltn_expires, ltn_yields):
ltn = LTN(expiry=T, rate=y, ref_date=ref_date)
ltn_prices += [ltn.price]
ltn_cash_flows += [pd.Series(index=[pd.to_datetime(T)], data=[ltn.principal])]
###Output
_____no_output_____
###Markdown
NTNFs (coupon paying bonds)
###Code
ntnf_expires = [
'2023-01-01',
'2025-01-01',
'2027-01-01',
'2029-01-01',
'2031-01-01',
]
ntnf_yields = [
0.05113,
0.06215,
0.06869,
0.07317,
0.07639,
]
ntnf_prices = []
ntnf_cash_flows = []
for T, y in zip(ntnf_expires, ntnf_yields):
ntnf = NTNF(expiry=T, rate=y, ref_date=ref_date)
ntnf_prices += [ntnf.price]
ntnf_cash_flows += [ntnf.cash_flows]
###Output
_____no_output_____
###Markdown
Curve Bootstrap
###Code
all_bond_prices = ltn_prices + ntnf_prices
all_bond_cash_flows = ltn_cash_flows + ntnf_cash_flows
cb = CB(prices=all_bond_prices, cash_flows=all_bond_cash_flows, ref_date=ref_date)
###Output
_____no_output_____
###Markdown
Plot curves
###Code
ntnf_curve = pd.DataFrame(index=[cb.dc.tf(cb.ref_date, x) for x in pd.to_datetime(ntnf_expires)],
columns=['ntnfs'],
data=ntnf_yields).sort_index()
ltn_curve = pd.DataFrame(index=[cb.dc.tf(cb.ref_date, x) for x in pd.to_datetime(ltn_expires)],
columns=['ltns'],
data=ltn_yields).sort_index()
zero_curve = (cb.zero_curve).to_frame('zero')
curves = pd.concat([zero_curve, ltn_curve, ntnf_curve], join='outer', axis=1, sort=True)
curves.plot(figsize=(15,10), fontsize=16, marker='o')
plt.title('Bootstrap (using flat-forward) curves on %s' % cb.ref_date.strftime('%d-%b-%y'),fontsize=20)
plt.legend(fontsize=20)
plt.show()
###Output
_____no_output_____ |
docs/source/tutorials/02-recover-a-planet.ipynb | ###Markdown
How to recover a known planet in Kepler data? This tutorial will demonstrate the basic steps required to recover the signal of [Kepler-10b](https://en.wikipedia.org/wiki/Kepler-10b), the first rocky planet that was discovered by Kepler!Let's start by downloading the pixel data for this target for one of Kepler's observing quarters:
###Code
import lightkurve as lk
tpf = lk.search_targetpixelfile("Kepler-10", quarter=3).download()
###Output
_____no_output_____
###Markdown
Let's use the `plot` method to show the pixel data at one point in time (frame index 100). We'll also pass along a few plotting arguments.
###Code
tpf.plot(frame=100, scale='log', show_colorbar=True);
###Output
_____no_output_____
###Markdown
The target pixel file appears to show one bright star with a core brightness of approximately 50,000 electrons/second. Now, we will use the ``to_lightcurve`` method to create a simple aperture photometry lightcurve using the mask defined by the pipeline, which is stored in `tpf.pipeline_mask`.
###Code
lc = tpf.to_lightcurve(aperture_mask=tpf.pipeline_mask)
###Output
_____no_output_____
###Markdown
Let's take a look at the output lightcurve.
###Code
lc.plot();
###Output
_____no_output_____
###Markdown
Now let's use the `flatten` method, which removes long-term variability that we are not interested in using a high-pass filter called *Savitzky-Golay*.
###Code
flat, trend = lc.flatten(window_length=301, return_trend=True)
###Output
_____no_output_____
###Markdown
Let's plot the trend estimated in red:
###Code
ax = lc.errorbar(label="Kepler-10") # plot() returns a matplotlib axes ...
trend.plot(ax=ax, color='red', lw=2, label='Trend'); # which we can pass to the next plot() to use the same axes
###Output
_____no_output_____
###Markdown
and the flat lightcurve:
###Code
flat.errorbar(label="Kepler-10");
###Output
_____no_output_____
###Markdown
Now, let's run a period search function using the well-known Box-Least Squares algorithm (BLS), which was added to the [AstroPy package](http://docs.astropy.org) in version 3.1.We will use the BLS algorithm to search a pre-defined grid of transit periods:
###Code
import numpy as np
periodogram = flat.to_periodogram(method="bls", period=np.arange(0.3, 1.5, 0.001))
periodogram.plot();
###Output
_____no_output_____
###Markdown
It looks like we found a strong signal with a periodicity of 0.8 days!
###Code
best_fit_period = periodogram.period_at_max_power
print('Best fit period: {:.3f}'.format(best_fit_period))
flat.fold(period=best_fit_period, t0=periodogram.transit_time_at_max_power).errorbar();
###Output
_____no_output_____
###Markdown
How to recover a known planet in Kepler data? This tutorial demonstrates the basic steps required to recover a transiting planet candidate in the Kepler data.We will show how you can recover the signal of [Kepler-10b](https://en.wikipedia.org/wiki/Kepler-10b), the first rocky planet that was discovered by Kepler!
###Code
from lightkurve import search_targetpixelfile
tpf = search_targetpixelfile("Kepler-10", quarter=3).download()
tpf.shape
###Output
_____no_output_____
###Markdown
Let's use the `plot` method and pass along an aperture mask and a few plotting arguments.
###Code
tpf.plot(scale='log');
###Output
_____no_output_____
###Markdown
The target pixel file contains one bright star with approximately 50,000 counts. Now, we will use the ``to_lightcurve`` method to create a simple aperture photometry lightcurve using the mask defined by the pipeline which is stored in `tpf.pipeline_mask`.
###Code
lc = tpf.to_lightcurve(aperture_mask=tpf.pipeline_mask)
###Output
_____no_output_____
###Markdown
Let's take a look at the output lightcurve.
###Code
lc.plot();
###Output
_____no_output_____
###Markdown
Now let's use the `flatten` method, which removes long-term variability that we are not interested in.
###Code
flat, trend = lc.flatten(window_length=301, return_trend=True)
###Output
_____no_output_____
###Markdown
Let's plot the trend estimated by the Savitzky-Golay filter:
###Code
ax = lc.errorbar() # errorbar() returns a matplotlib axes ...
trend.plot(ax=ax, color='red', label='Trend'); # which we can pass to the next plot() to use the same axes
###Output
_____no_output_____
###Markdown
and the flat lightcurve:
###Code
flat.errorbar();
###Output
_____no_output_____
###Markdown
Now, let's run a period search function using the [Box-Least Squares algorithm](http://docs.astropy.org/en/latest/stats/bls.html), which was added to the [AstroPy package](http://docs.astropy.org) in version 3.1.
###Code
from astropy.stats import BoxLeastSquares
bls = BoxLeastSquares(flat.time, flat.flux, flat.flux_err)
###Output
_____no_output_____
###Markdown
We will use the BLS algorithm to search a pre-defined grid of transit periods and durations:
###Code
import numpy as np
periods = np.arange(0.3, 1.5, 0.001)
durations = np.arange(0.005, 0.15, 0.001)
periodogram = bls.power(periods, durations)
import matplotlib.pyplot as plt
plt.plot(periodogram.period, periodogram.power)
plt.ylabel("Power")
plt.xlabel("Period [day]");
best_fit = periods[np.argmax(periodogram.power)]
print('Best Fit Period: {:0.4f} days'.format(best_fit))
flat.fold(best_fit).errorbar();
###Output
_____no_output_____
9-dictionary-and-set/dictionary.ipynb | ###Markdown
Dictionaries. A dictionary is an unordered collection of elements where each element has two parts: a key and a value. In other words, an element is a key-value pair. Because a dictionary is unordered, the order of elements is not guaranteed when you iterate over or print it, and it may change when elements are added or removed. The key can be any object as long as it is immutable; common key types include `int` and `string`. People use dictionaries to store key-value pairs so that it is easy to look up a value. For example, you use a `student_id` to retrieve a student object. 1 Creating a Dictionary. You use `{}` to create a dictionary; an empty `{}` creates an empty dictionary. To create elements, write a sequence of `key: value` pairs separated by `,`.
###Code
empty_dict = {}
print(empty_dict)
students = {90: 'Alice', 27: 'Bob', 50: 'Cindy'}
print(students)
more_students = {90: 'Alice', 27: 'Bob', 90: 'Cindy', 200: 'Mike'}
print(more_students)
###Output
_____no_output_____
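###Markdown
As a quick illustration (a minimal added sketch), any immutable object can serve as a key, while a mutable object such as a list cannot:
###Code
# immutable objects such as strings, numbers and tuples work as keys
point_names = {(0, 0): 'origin', (1, 0): 'unit x'}
print(point_names[(0, 0)])
# a mutable object such as a list is not hashable and raises a TypeError
try:
    bad_dict = {[1, 2]: 'not allowed'}
except TypeError as error:
    print('TypeError:', error)
###Output
_____no_output_____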
###Markdown
In the above examples, `more_students` has two elements that share the key `90`. Python only stores the last element with that key. 2 Basic Operations. You can use a dictionary variable as a boolean expression to check whether it is empty. The built-in `len` function tells how many elements are in a dictionary.
###Code
if empty_dict:
print('empty_dict is NOT empty')
else:
print('empty_dict is empty')
print(f'empty_dict has {len(empty_dict)} elements')
if students:
print('students is NOT empty')
else:
print('students is empty')
print(f'students has {len(students)} elements')
###Output
_____no_output_____
###Markdown
The `in` and `not in` operators test whether a key exists in a dictionary.
###Code
month_days = {'Jan': 31, 'Apr': 30, 'Jul': 31}
if 'Jan' in month_days:
print('Jan is in the dictionary')
if 'Feb' not in month_days:
print('Feb is not in the dictionary')
###Output
_____no_output_____
###Markdown
The `del` operator deletes a key-value pair from a dictionary if the specified key exists; otherwise, it throws a `KeyError` exception. The syntax is `del dictionary_name[key]`. To avoid the exception, use `in` to make sure the key is there before `del`.
###Code
month_days = {'Jan': 31, 'Apr': 30, 'Jul': 31}
if 'Jan' in month_days:
del month_days['Jan']
print(month_days)
# throw a KeyError exception because the key doesn't exist
del month_days['Jan']
print(month_days)
###Output
_____no_output_____
###Markdown
3 Reading or Writing a Dictionary Element. You use `dictionary_name[key]` to access an individual element. You can read or update the value in the key-value pair. There is no way to change the key because it is immutable; however, you can delete an element and insert another one if that's what you want.
###Code
students = {90: 'Alice', 27: 'Bob', 50: 'Cindy'}
# read a value for a key
name_with_id_90 = students[90]
print(name_with_id_90)
# change a value for a key
students[90] = 'Mike'
print(students[90])
# add a new key-value pair because 97 doesn't exist
students[97] = 'Bill'
print(students)
# reading a value for a non-exist key throws a KeyError exception
name_nobody = students[404]
###Output
_____no_output_____
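###Markdown
To avoid the `KeyError` above, the `get` method returns a specified default value when the key is missing — a minimal sketch:
###Code
students = {90: 'Alice', 27: 'Bob', 50: 'Cindy'}
print(students.get(90, 'Unknown'))   # the key exists, so 'Alice' is returned
print(students.get(404, 'Unknown'))  # the key is missing, so the default 'Unknown' is returned
###Output
_____no_output_____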
###Markdown
Be careful: there are two cases that can go wrong when using dictionaries: - A non-existent key throws a `KeyError` exception. To avoid it, use the `get` method with a specified default value, for example `students.get(42, 'Unknown')`. - When `dictionary_name[key]` is on the left-hand side, you set a new value for an existing key or create a new key-value pair if the key doesn't exist, so any typo in the key name could be a big bug. 4 Iterating a Dictionary. You can use `for key in dictionary_name:` to iterate over all keys of a dictionary. Then you use `dictionary_name[key]` to access each value.
###Code
month_days = {'Jan': 31, 'Apr': 30, 'Jul': 31}
for month in month_days:
print(f'{month} has {month_days[month]} days')
###Output
_____no_output_____
###Markdown
The `items` method returns a sequence of key-value pairs. Therefore, you can use `for key, value in dictionary_name.items():` to iterate over a dictionary.
###Code
month_days = {'Jan': 31, 'Apr': 30, 'Jul': 31}
for month, days in month_days.items():
print(f'{month} has {days} days')
###Output
_____no_output_____
###Markdown
The `values()` method returns all the values. Don't assume any order of the returned values!
###Code
month_days = {'Jan': 31, 'Apr': 30, 'Jul': 31}
days_sequence = month_days.values()
for days in days_sequence:
print(days, end=' ')
###Output
_____no_output_____
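###Markdown
If you need a deterministic order, you can sort the keys explicitly — a small added example:
###Code
month_days = {'Jan': 31, 'Apr': 30, 'Jul': 31}
for month in sorted(month_days):  # iterate over the keys in alphabetical order
    print(f'{month} has {month_days[month]} days')
###Output
_____no_output_____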
###Markdown
The `items()`, `keys()` and `values()` methods each return an iterable collection (a built-in object). You can convert an iterable collection to a list using the `list(iterable)` built-in function. In the following example, each key-value pair returned by the `items()` method is a tuple of `(key, value)`.
###Code
month_days = {'Jan': 31, 'Apr': 30, 'Jul': 31}
item_list = list(month_days.items())
print(item_list)
key_list = list(month_days.keys())
print(key_list)
value_list = list(month_days.values())
print(value_list)
###Output
_____no_output_____ |
python/.ipynb_checkpoints/Grafos-checkpoint.ipynb | ###Markdown
Solving computational problems with Graphs. Vertices are the points, edges are the lines. $$ F + V = A + C$$ Adjacency matrix
###Code
matriz = [[0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0]]
def calc_vertice(matriz):
    # number of vertices = number of columns in the adjacency matrix
    return len(matriz[0])
def calc_aresta(matriz):
    # number of edges = sum of all entries (each nonzero entry is one directed edge)
    if matriz == []:
        return 0
    return sum(matriz[0]) + calc_aresta(matriz[1:])
def vertices_arestas(matriz):
    # returns (number of vertices, number of edges)
    return (calc_vertice(matriz), calc_aresta(matriz))
vertices_arestas(matriz)
matriz
matriz[1:]
###Output
_____no_output_____ |
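###Markdown
As a small aside: for an undirected graph the adjacency matrix is symmetric and every edge is stored twice, so the edge count is half the sum of all entries. The helper below is a hypothetical addition, separate from the functions above.
###Code
# hypothetical helper, assuming a symmetric 0/1 adjacency matrix
def calc_aresta_nao_dirigida(matriz):
    return sum(sum(linha) for linha in matriz) // 2

simetrica = [[0, 1, 1],
             [1, 0, 0],
             [1, 0, 0]]
print(calc_aresta_nao_dirigida(simetrica))  # prints 2
###Output
_____no_output_____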
sem06-asm-x86/asm_x86.ipynb | ###Markdown
Assembler x86* Few registers* Many instructions* A lot of legacy* Many calling conventions* Different syntaxes Syntaxes AT&T
###Code
%%cpp att_example.c
%run gcc -m32 -masm=att -O3 att_example.c -S -o att_example.S
%run cat att_example.S | grep -v "^\s*\."
#include <stdint.h>
int32_t sum(int32_t a, int32_t b) {
return a + b;
}
###Output
_____no_output_____
###Markdown
Intel. DWORD PTR is a variable of the double-word type. A word is 16 bits. The term became widespread in the era of 16-bit processors, when a register held exactly 16 bits; that amount of information came to be called a word. So in our case a dword (double word) is 2*16 = 32 bits = 4 bytes (a regular int). https://habr.com/ru/post/344896/
###Code
%%cpp att_example.c
%run gcc -m32 -masm=intel -O3 att_example.c -S -o att_example.S
%run cat att_example.S | grep -v "^\s*\."
#include <stdint.h>
int32_t sum(int32_t a, int32_t b) {
return a + b;
}
###Output
_____no_output_____
###Markdown
Writing the clamp function in three ways
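For reference, a tiny Python sketch of the behaviour all three assembly versions implement (an illustrative addition):
###Code
# clamp(x, a, b): confine x to the closed interval [a, b]
def clamp(x, a, b):
    return max(a, min(x, b))

assert clamp(1, 10, 20) == 10
assert clamp(100, 10, 20) == 20
assert clamp(15, 10, 20) == 15
###Output
_____no_output_____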
###Code
%%asm clamp_disasm.S
.intel_syntax noprefix
.text
.globl clamp
clamp:
mov edx, DWORD PTR [esp+4]
mov eax, DWORD PTR [esp+8]
cmp edx, eax
jl .L2
cmp edx, DWORD PTR [esp+12]
mov eax, edx
cmovg eax, DWORD PTR [esp+12]
.L2:
rep ret
%%asm clamp_if.S
.intel_syntax noprefix
.text
.globl clamp
clamp:
mov edx, DWORD PTR [esp + 4] // X
mov eax, DWORD PTR [esp + 8] // A
cmp edx, eax
jl return_eax // return A if X < A
mov eax, DWORD PTR [esp + 12] // B
cmp edx, eax
jg return_eax // return B if X > B
mov eax, edx
return_eax:
ret
%%asm clamp_cmov.S
.intel_syntax noprefix
.text
.globl clamp
clamp:
mov eax, DWORD PTR [esp + 4] // X
mov edx, DWORD PTR [esp + 8] // A
cmp eax, edx
cmovl eax, edx // if (X < A) X = A
mov edx, DWORD PTR [esp + 12] // B
cmp eax, edx
cmovg eax, edx // if (X > B) X = B
ret
%%cpp clamp_test.c
// compile and test using all three asm clamp implementations
%run gcc -m32 -masm=intel -O2 clamp_disasm.S clamp_test.c -o clamp_test.exe
%run ./clamp_test.exe
%run gcc -m32 -masm=intel -O2 clamp_if.S clamp_test.c -o clamp_if_test.exe
%run ./clamp_if_test.exe
%run gcc -m32 -masm=intel -O2 clamp_cmov.S clamp_test.c -o clamp_cmov_test.exe
%run ./clamp_cmov_test.exe
#include <stdint.h>
#include <stdio.h>
#include <assert.h>
int32_t clamp(int32_t a, int32_t b, int32_t c);
int main() {
assert(clamp(1, 10, 20) == 10);
assert(clamp(100, 10, 20) == 20);
assert(clamp(15, 10, 20) == 15);
fprintf(stderr, "All is OK");
return 0;
}
###Output
_____no_output_____
###Markdown
Inline ASM http://asm.sourceforge.net/articles/linasm.html
###Code
%%cpp clamp_inline_test.c
%run gcc -m32 -masm=intel -O2 clamp_inline_test.c -o clamp_inline_test.exe
%run ./clamp_inline_test.exe
#include <stdint.h>
#include <stdio.h>
#include <assert.h>
int32_t clamp(int32_t a, int32_t b, int32_t c);
__asm__(R"(
clamp:
mov eax, DWORD PTR [esp + 4]
mov edx, DWORD PTR [esp + 8]
cmp eax, edx
cmovl eax, edx
mov edx, DWORD PTR [esp + 12]
cmp eax, edx
cmovg eax, edx
ret
)");
int main() {
assert(clamp(1, 10, 20) == 10);
assert(clamp(100, 10, 20) == 20);
assert(clamp(15, 10, 20) == 15);
fprintf(stderr, "All is OK");
return 0;
}
###Output
_____no_output_____
###Markdown
Working with memory. Given n and x, compute $\sum_{i=0}^{n - 1} (-1)^i \cdot x[i]$
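A minimal Python sketch of this alternating sum first (an illustrative addition):
###Code
def alternating_sum(x):
    # sum of (-1)**i * x[i]
    return sum((-1) ** i * value for i, value in enumerate(x))

print(alternating_sum([100, 2, 200, 3]))  # 100 - 2 + 200 - 3 = 295
###Output
_____no_output_____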
###Code
%%asm my_sum.S
.intel_syntax noprefix
.text
.globl my_sum
my_sum:
    push ebx                          // save the callee-saved register used below
    mov eax, 0                        // eax: accumulator for the result
    mov edx, DWORD PTR [esp + 8]      // edx: n (offsets shifted by 4 because of the push)
    mov ebx, DWORD PTR [esp + 12]     // ebx: pointer to x
start_loop:
    cmp edx, 0
    jle return_eax                    // done when no elements remain
    add eax, DWORD PTR [ebx]          // even index: accumulator += x[i]
    add ebx, 4
    dec edx
    cmp edx, 0
    jle return_eax
    sub eax, DWORD PTR [ebx]          // odd index: accumulator -= x[i]
    add ebx, 4
    dec edx
    jmp start_loop
return_eax:
    pop ebx
    ret
%%cpp my_sum_test.c
%run gcc -g3 -m32 -masm=intel my_sum_test.c my_sum.S -o my_sum_test.exe
%run ./my_sum_test.exe
#include <stdint.h>
#include <stdio.h>
#include <assert.h>
int32_t my_sum(int32_t n, int32_t* x);
int main() {
int32_t x[] = {100, 2, 200, 3};
assert(my_sum(sizeof(x) / sizeof(int32_t), x) == 100 - 2 + 200 - 3);
int32_t y[] = {100, 2, 200};
assert(my_sum(sizeof(y) / sizeof(int32_t), y) == 100 - 2 + 200);
return 0;
}
###Output
_____no_output_____
###Markdown
An entertaining and educational part
###Code
%%cpp mul.c
%run gcc -m32 -masm=intel -O3 mul.c -S -o mul.S
%run cat mul.S | grep -v "^\s*\."
#include <stdint.h>
int32_t mul(int32_t a) {
return a * 13;
}
%%cpp div.c
%run gcc -m32 -masm=intel -O3 div.c -S -o div.S
%run cat div.S | grep -v "^\s*\." | grep -v "^\s*\#"
#include <stdint.h>
int32_t div(int32_t a) {
return a / 4;
}
uint32_t udiv(uint32_t a) {
return a / 2;
}
%%cpp simdiv.c
%run gcc -m32 -masm=intel -O3 simdiv.c -o simdiv.exe
%run ./simdiv.exe
#include <stdint.h>
#include <assert.h>
int32_t simdiv(int32_t a) {
uint32_t eax = ((uint32_t)a >> 31) + a;
__asm__("sar %0" : "=a"(eax) : "a"(eax));
return eax;
}
int main() {
assert(simdiv(1) == 0);
assert(simdiv(5) == 2);
assert(simdiv(-1) == 0);
assert(simdiv(-5) == -2);
}
###Output
_____no_output_____
###Markdown
Assembler x86* Few registers* Many instructions* A lot of legacy* Many calling conventions* Different syntaxes Syntaxes AT&T
###Code
%%cpp att_example.c
%run gcc -m32 -masm=att -O3 att_example.c -S -o att_example.S
%run cat att_example.S | grep -v "^\s*\."
#include <stdint.h>
int32_t sum(int32_t a, int32_t b) {
return a + b;
}
###Output
_____no_output_____
###Markdown
Intel. DWORD PTR is a variable of the double-word type. A word is 16 bits. The term became widespread in the era of 16-bit processors, when a register held exactly 16 bits; that amount of information came to be called a word. So in our case a dword (double word) is 2*16 = 32 bits = 4 bytes (a regular int). https://habr.com/ru/post/344896/
###Code
%%cpp att_example.c
%run gcc -m32 -masm=intel -O3 att_example.c -S -o att_example.S
%run cat att_example.S | grep -v "^\s*\."
#include <stdint.h>
int32_t sum(int32_t a, int32_t b) {
return a + b;
}
###Output
_____no_output_____
###Markdown
Writing the clamp function in three ways
###Code
%%asm clamp_disasm.S
.intel_syntax noprefix
.text
.globl clamp
clamp:
mov edx, DWORD PTR [esp+4]
mov eax, DWORD PTR [esp+8]
cmp edx, eax
jl .L2
cmp edx, DWORD PTR [esp+12]
mov eax, edx
cmovg eax, DWORD PTR [esp+12]
.L2:
rep ret
%%asm clamp_if.S
.intel_syntax noprefix
.text
.globl clamp
clamp:
mov edx, DWORD PTR [esp + 4] // X
mov eax, DWORD PTR [esp + 8] // A
cmp edx, eax
jl return_eax // return A if X < A
mov eax, DWORD PTR [esp + 12] // B
cmp edx, eax
jg return_eax // return B if X > B
mov eax, edx
return_eax:
ret
%%asm clamp_cmov.S
.intel_syntax noprefix
.text
.globl clamp
clamp:
mov eax, DWORD PTR [esp + 4] // X
mov edx, DWORD PTR [esp + 8] // A
cmp eax, edx
cmovl eax, edx // if (X < A) X = A
mov edx, DWORD PTR [esp + 12] // B
cmp eax, edx
cmovg eax, edx // if (X > B) X = B
ret
%%cpp clamp_test.c
// compile and test using all three asm clamp implementations
%run gcc -m32 -masm=intel -O2 clamp_disasm.S clamp_test.c -o clamp_test.exe
%run ./clamp_test.exe
%run gcc -m32 -masm=intel -O2 clamp_if.S clamp_test.c -o clamp_if_test.exe
%run ./clamp_if_test.exe
%run gcc -m32 -masm=intel -O2 clamp_cmov.S clamp_test.c -o clamp_cmov_test.exe
%run ./clamp_cmov_test.exe
#include <stdint.h>
#include <stdio.h>
#include <assert.h>
int32_t clamp(int32_t a, int32_t b, int32_t c);
int main() {
assert(clamp(1, 10, 20) == 10);
assert(clamp(100, 10, 20) == 20);
assert(clamp(15, 10, 20) == 15);
fprintf(stderr, "All is OK");
return 0;
}
###Output
_____no_output_____
###Markdown
Inline ASM http://asm.sourceforge.net/articles/linasm.html
###Code
%%cpp clamp_inline_test.c
%run gcc -m32 -masm=intel -O2 clamp_inline_test.c -o clamp_inline_test.exe
%run ./clamp_inline_test.exe
#include <stdint.h>
#include <stdio.h>
#include <assert.h>
int32_t clamp(int32_t a, int32_t b, int32_t c);
__asm__(R"(
clamp:
mov eax, DWORD PTR [esp + 4]
mov edx, DWORD PTR [esp + 8]
cmp eax, edx
cmovl eax, edx
mov edx, DWORD PTR [esp + 12]
cmp eax, edx
cmovg eax, edx
ret
)");
int main() {
assert(clamp(1, 10, 20) == 10);
assert(clamp(100, 10, 20) == 20);
assert(clamp(15, 10, 20) == 15);
fprintf(stderr, "All is OK");
return 0;
}
###Output
_____no_output_____
###Markdown
Working with memory. Given n and x, compute $\sum_{i=0}^{n - 1} (-1)^i \cdot x[i]$
###Code
%%asm my_sum.S
.intel_syntax noprefix
.text
.globl my_sum
my_sum:
push ebx
mov eax, 0
mov edx, DWORD PTR [esp + 8]
mov ebx, DWORD PTR [esp + 12]
start_loop:
cmp edx, 0
jle return_eax
add eax, DWORD PTR [ebx]
add ebx, 4
dec edx
cmp edx, 0
jle return_eax
sub eax, DWORD PTR [ebx]
add ebx, 4
dec edx
jmp start_loop
return_eax:
pop ebx
ret
%%cpp my_sum_test.c
%run gcc -g3 -m32 -masm=intel my_sum_test.c my_sum.S -o my_sum_test.exe
%run ./my_sum_test.exe
#include <stdint.h>
#include <stdio.h>
#include <assert.h>
int32_t my_sum(int32_t n, int32_t* x);
int main() {
int32_t x[] = {100, 2, 200, 3};
assert(my_sum(sizeof(x) / sizeof(int32_t), x) == 100 - 2 + 200 - 3);
int32_t y[] = {100, 2, 200};
assert(my_sum(sizeof(y) / sizeof(int32_t), y) == 100 - 2 + 200);
return 0;
}
###Output
_____no_output_____
###Markdown
An entertaining and educational part
###Code
%%cpp mul.c
%run gcc -m32 -masm=intel -O3 mul.c -S -o mul.S
%run cat mul.S | grep -v "^\s*\."
#include <stdint.h>
int32_t mul(int32_t a) {
return a * 14;
}
%%cpp div_0.c
%run gcc -m64 -masm=intel -O3 div_0.c -S -o div_0.S
%run cat div_0.S | grep -v "^\s*\."
#include <stdint.h>
uint32_t div(uint32_t a) {
return a / 11;
}
uint32_t div2(uint32_t a, uint32_t b) {
return a / b;
}
%%cpp div.c
%run gcc -m32 -masm=intel -O3 div.c -S -o div.S
%run cat div.S | grep -v "^\s*\." | grep -v "^\s*\#"
#include <stdint.h>
int32_t div(int32_t a) {
return a / 4;
}
uint32_t udiv(uint32_t a) {
return a / 2;
}
%%cpp simdiv.c
%run gcc -m32 -masm=intel -O3 simdiv.c -o simdiv.exe
%run ./simdiv.exe
#include <stdint.h>
#include <assert.h>
int32_t simdiv(int32_t a) {
uint32_t eax = ((uint32_t)a >> 31) + a;
__asm__("sar %0" : "=a"(eax) : "a"(eax));
return eax;
}
int main() {
assert(simdiv(1) == 0);
assert(simdiv(5) == 2);
assert(simdiv(-1) == 0);
assert(simdiv(-5) == -2);
}
###Output
_____no_output_____ |
res/Python-for-Data-Visualization/Seaborn/Grids.ipynb | ###Markdown
___ ___ Grids. Grids are general types of plots that allow you to map plot types to rows and columns of a grid; this helps you create similar plots separated by features.
###Code
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
iris = sns.load_dataset('iris')
iris.head()
###Output
_____no_output_____
###Markdown
PairGrid. PairGrid is a subplot grid for plotting pairwise relationships in a dataset.
###Code
# Just the Grid
sns.PairGrid(iris)
# Then you map to the grid
g = sns.PairGrid(iris)
g.map(plt.scatter)
# Map to upper,lower, and diagonal
g = sns.PairGrid(iris)
g.map_diag(plt.hist)
g.map_upper(plt.scatter)
g.map_lower(sns.kdeplot)
###Output
_____no_output_____
###Markdown
pairplot. pairplot is a simpler version of PairGrid (you'll use it quite often).
###Code
sns.pairplot(iris)
sns.pairplot(iris,hue='species',palette='rainbow')
###Output
_____no_output_____
###Markdown
Facet Grid. FacetGrid is the general way to create grids of plots based on a feature:
###Code
tips = sns.load_dataset('tips')
tips.head()
# Just the Grid
g = sns.FacetGrid(tips, col="time", row="smoker")
g = sns.FacetGrid(tips, col="time", row="smoker")
g = g.map(plt.hist, "total_bill")
g = sns.FacetGrid(tips, col="time", row="smoker",hue='sex')
# Notice how the arguments come after the plt.scatter call
g = g.map(plt.scatter, "total_bill", "tip").add_legend()
###Output
_____no_output_____
###Markdown
JointGrid. JointGrid is the general version for jointplot() type grids; for a quick example:
###Code
g = sns.JointGrid(x="total_bill", y="tip", data=tips)
g = sns.JointGrid(x="total_bill", y="tip", data=tips)
g = g.plot(sns.regplot, sns.distplot)
###Output
/Users/marci/anaconda/lib/python3.5/site-packages/statsmodels/nonparametric/kdetools.py:20: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
y = X[:m/2+1] + np.r_[0,X[m/2+1:],0]*1j
|
data/Data to MongoDB.ipynb | ###Markdown
Populate the DB with CSV dataset Modules needed and config. Make sure you have a valid .env file to load the correct MONGO_URL.
###Code
import os
from dotenv import load_dotenv
load_dotenv()
import pandas as pd
from pymongo import MongoClient
client = MongoClient(os.getenv("MONGO_URL"))
from datetime import datetime
###Output
_____no_output_____
###Markdown
Import Dataset
###Code
df_lter = pd.read_csv("./source/penguins_lter.csv")
###Output
_____no_output_____
###Markdown
Setup MongoDB client and collection for data
###Code
db = client["palmer-penguins"]
collection_kpl = db["kaggle-penguins-lter"]
###Output
_____no_output_____
###Markdown
Dataset cleaning and formatting
###Code
df_lter["Date Egg"]=df_lter["Date Egg"].apply(lambda e: datetime.strptime(e,'%m/%d/%y'))
df_lter = df_lter.drop("Comments", 1)
df_lter = df_lter.rename(columns=lambda x: x.split(
"(")[0].strip().replace(" ", "_").lower())
df_lter["clutch_completion"] = df_lter["clutch_completion"].apply(
lambda x: True if x == "Yes" else False)
###Output
_____no_output_____
###Markdown
Transform DataFrame to MongoDB documents and insert. The output is the number of documents inserted into the database.
###Code
documents = df_lter.to_dict("records")
collection_kpl.drop()
result = collection_kpl.insert_many(documents)
len(result.inserted_ids)
client.close()
###Output
_____no_output_____ |
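###Markdown
As an optional sanity check (a hedged sketch, not required by the workflow above), you can reconnect and read one document back; the projected field names assume the lower-case renaming applied earlier.
###Code
import os
from pymongo import MongoClient
# reconnect, because the client above was already closed
client = MongoClient(os.getenv("MONGO_URL"))
collection = client["palmer-penguins"]["kaggle-penguins-lter"]
print(collection.count_documents({}))
print(collection.find_one({}, {"_id": 0, "species": 1, "island": 1}))
client.close()
###Output
_____no_output_____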
slides/04_scikit_learn/transformers.ipynb | ###Markdown
Transformers are stateful as well
###Code
transformer.categories_
race
###Output
_____no_output_____
###Markdown
Scikit-learn provides utilities to transform a whole dataframe
###Code
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import OneHotEncoder
categorical_columns = ['RACE', 'OCCUPATION', 'SECTOR',
'MARR', 'UNION', 'SEX', 'SOUTH']
numerical_columns = ['EDUCATION', 'EXPERIENCE', 'AGE']
preprocessor = make_column_transformer(
(OneHotEncoder(), categorical_columns),
remainder='passthrough'
)
preprocessor.fit(survey['data'])
X = preprocessor.transform(survey['data'])
###Output
_____no_output_____
###Markdown
We may now feed this to an estimator
###Code
X
###Output
_____no_output_____ |
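###Markdown
For instance, a hedged sketch that chains the preprocessor with a linear model; treating `survey['target']` as the regression target is an assumption about this dataset.
###Code
# assumes survey['target'] holds the target values for survey['data']
from sklearn.linear_model import Ridge
from sklearn.pipeline import make_pipeline

model = make_pipeline(preprocessor, Ridge(alpha=1.0))
model.fit(survey['data'], survey['target'])
print(model.score(survey['data'], survey['target']))
###Output
_____no_output_____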
tutorials/Certification_Trainings/Healthcare/6.Clinical_Context_Spell_Checker.ipynb | ###Markdown
 [](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/6.Clinical_Context_Spell_Checker.ipynb) 6. Context Spell Checker - Medical
###Code
import json
from google.colab import files
license_keys = files.upload()
with open(list(license_keys.keys())[0]) as f:
license_keys = json.load(f)
license_keys.keys()
license_keys['JSL_VERSION']
import os
# Install java
! apt-get update -qq
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
! java -version
secret = license_keys['SECRET']
os.environ['SPARK_NLP_LICENSE'] = license_keys['SPARK_NLP_LICENSE']
os.environ['AWS_ACCESS_KEY_ID']= license_keys['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY'] = license_keys['AWS_SECRET_ACCESS_KEY']
jsl_version = license_keys['JSL_VERSION']
version = license_keys['PUBLIC_VERSION']
! pip install --ignore-installed -q pyspark==2.4.4
! python -m pip install --upgrade spark-nlp-jsl==$jsl_version --extra-index-url https://pypi.johnsnowlabs.com/$secret
! pip install --ignore-installed -q spark-nlp==$version
import sparknlp
print (sparknlp.version())
import json
import os
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
import sparknlp_jsl
params = {"spark.driver.memory":"16G",
"spark.kryoserializer.buffer.max":"2000M",
"spark.driver.maxResultSize":"2000M"}
spark = sparknlp_jsl.start(secret, params=params)
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
tokenizer = RecursiveTokenizer()\
.setInputCols(["document"])\
.setOutputCol("token")\
.setPrefixes(["\"", "(", "[", "\n"])\
.setSuffixes([".", ",", "?", ")","!", "'s"])
spellModel = ContextSpellCheckerModel\
.pretrained('spellcheck_clinical', 'en', 'clinical/models')\
.setInputCols("token")\
.setOutputCol("checked")
pipeline = Pipeline(
stages = [
documentAssembler,
tokenizer,
spellModel
])
empty_ds = spark.createDataFrame([[""]]).toDF("text")
lp = LightPipeline(pipeline.fit(empty_ds))
###Output
_____no_output_____
###Markdown
Ok!, at this point we have our spell checking pipeline as expected. Let's see what we can do with it, see these errors,___Witth__ the __hell__ of __phisical__ __terapy__ the patient was __imbulated__ and on posoperative, the __impatient__ tolerating a post __curgical__ soft diet.__With __paint__ __wel__ controlled on __orall__ pain medications, she was discharged __too__ __reihabilitation__ __facilitay__.__She is to also call the __ofice__ if she has any __ever__ greater than 101, or __leeding__ __form__ the surgical wounds.__Abdomen is __sort__, nontender, and __nonintended__.__Patient not showing pain or any __wealth__ problems._ _No __cute__ distress_Check that some of the errors are valid English words, only by considering the context the right choice can be made.
###Code
example = ["Witth the hell of phisical terapy the patient was imbulated and on posoperative, the impatient tolerating a post curgical soft diet.",
"With paint wel controlled on orall pain medications, she was discharged too reihabilitation facilitay.",
"She is to also call the ofice if she has any ever greater than 101, or leeding form the surgical wounds.",
"Abdomen is sort, nontender, and nonintended.",
"Patient not showing pain or any wealth problems.",
"No cute distress"
]
for pairs in lp.annotate(example):
print (list(zip(pairs['token'],pairs['checked'])))
###Output
[('Witth', 'With'), ('the', 'the'), ('hell', 'cell'), ('of', 'of'), ('phisical', 'physical'), ('terapy', 'therapy'), ('the', 'the'), ('patient', 'patient'), ('was', 'was'), ('imbulated', 'ambulated'), ('and', 'and'), ('on', 'on'), ('posoperative', 'postoperative'), (',', ','), ('the', 'the'), ('impatient', 'patient'), ('tolerating', 'tolerating'), ('a', 'a'), ('post', 'post'), ('curgical', 'surgical'), ('soft', 'soft'), ('diet', 'diet'), ('.', '.')]
[('With', 'With'), ('paint', 'pain'), ('wel', 'well'), ('controlled', 'controlled'), ('on', 'on'), ('orall', 'oral'), ('pain', 'pain'), ('medications', 'medications'), (',', ','), ('she', 'she'), ('was', 'was'), ('discharged', 'discharged'), ('too', 'to'), ('reihabilitation', 'rehabilitation'), ('facilitay', 'facility'), ('.', '.')]
[('She', 'She'), ('is', 'is'), ('to', 'to'), ('also', 'also'), ('call', 'call'), ('the', 'the'), ('ofice', 'once'), ('if', 'if'), ('she', 'she'), ('has', 'has'), ('any', 'any'), ('ever', 'fever'), ('greater', 'greater'), ('than', 'than'), ('101', '101'), (',', ','), ('or', 'or'), ('leeding', 'leading'), ('form', 'from'), ('the', 'the'), ('surgical', 'surgical'), ('wounds', 'wounds'), ('.', '.')]
[('Abdomen', 'Abdomen'), ('is', 'is'), ('sort', 'sort'), (',', ','), ('nontender', 'nontender'), (',', ','), ('and', 'and'), ('nonintended', 'unintended'), ('.', '.')]
[('Patient', 'Patient'), ('not', 'not'), ('showing', 'showing'), ('pain', 'pain'), ('or', 'or'), ('any', 'any'), ('wealth', 'health'), ('problems', 'problems'), ('.', '.')]
[('No', 'No'), ('cute', 'acute'), ('distress', 'distress')]
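###Markdown
As an alternative to the `LightPipeline`, a hedged sketch that runs the same stages on a Spark DataFrame:
###Code
# build a one-column DataFrame named "text", as expected by the DocumentAssembler above
example_df = spark.createDataFrame([(t,) for t in example], ["text"])
checked_df = pipeline.fit(example_df).transform(example_df)
checked_df.select("checked.result").show(truncate=False)
###Output
_____no_output_____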
###Markdown
 [](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/6.Clinical_Context_Spell_Checker.ipynb) 6. Context Spell Checker - Medical
###Code
import json
from google.colab import files
license_keys = files.upload()
with open(list(license_keys.keys())[0]) as f:
license_keys = json.load(f)
%%capture
for k,v in license_keys.items():
%set_env $k=$v
!wget https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/jsl_colab_setup.sh
!bash jsl_colab_setup.sh
import json
import os
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
import sparknlp_jsl
import sparknlp
params = {"spark.driver.memory":"16G",
"spark.kryoserializer.buffer.max":"2000M",
"spark.driver.maxResultSize":"2000M"}
spark = sparknlp_jsl.start(license_keys['SECRET'],params=params)
print ("Spark NLP Version :", sparknlp.version())
print ("Spark NLP_JSL Version :", sparknlp_jsl.version())
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
tokenizer = RecursiveTokenizer()\
.setInputCols(["document"])\
.setOutputCol("token")\
.setPrefixes(["\"", "(", "[", "\n"])\
.setSuffixes([".", ",", "?", ")","!", "'s"])
spellModel = ContextSpellCheckerModel\
.pretrained('spellcheck_clinical', 'en', 'clinical/models')\
.setInputCols("token")\
.setOutputCol("checked")
pipeline = Pipeline(
stages = [
documentAssembler,
tokenizer,
spellModel
])
empty_ds = spark.createDataFrame([[""]]).toDF("text")
lp = LightPipeline(pipeline.fit(empty_ds))
###Output
_____no_output_____
###Markdown
Ok!, at this point we have our spell checking pipeline as expected. Let's see what we can do with it, see these errors,___Witth__ the __hell__ of __phisical__ __terapy__ the patient was __imbulated__ and on posoperative, the __impatient__ tolerating a post __curgical__ soft diet.__With __paint__ __wel__ controlled on __orall__ pain medications, she was discharged __too__ __reihabilitation__ __facilitay__.__She is to also call the __ofice__ if she has any __ever__ greater than 101, or __leeding__ __form__ the surgical wounds.__Abdomen is __sort__, nontender, and __nonintended__._ _No __cute__ distress_Check that some of the errors are valid English words, only by considering the context the right choice can be made.
###Code
example = ["Witth the hell of phisical terapy the patient was imbulated and on posoperative, the impatient tolerating a post curgical soft diet.",
"With paint wel controlled on orall pain medications, she was discharged too reihabilitation facilitay.",
"She is to also call the ofice if she has any ever greater than 101, or leeding form the surgical wounds.",
"Abdomen is sort, nontender, and nonintended.",
"Patient not showing pain or any wealth problems.",
"No cute distress"
]
for pairs in lp.annotate(example):
print (list(zip(pairs['token'],pairs['checked'])))
###Output
[('Witth', 'With'), ('the', 'the'), ('hell', 'cell'), ('of', 'of'), ('phisical', 'physical'), ('terapy', 'therapy'), ('the', 'the'), ('patient', 'patient'), ('was', 'was'), ('imbulated', 'ambulated'), ('and', 'and'), ('on', 'on'), ('posoperative', 'postoperative'), (',', ','), ('the', 'the'), ('impatient', 'inpatient'), ('tolerating', 'tolerating'), ('a', 'a'), ('post', 'post'), ('curgical', 'surgical'), ('soft', 'soft'), ('diet', 'diet'), ('.', '.')]
[('With', 'With'), ('paint', 'paint'), ('wel', 'well'), ('controlled', 'controlled'), ('on', 'on'), ('orall', 'oral'), ('pain', 'pain'), ('medications', 'medications'), (',', ','), ('she', 'she'), ('was', 'was'), ('discharged', 'discharged'), ('too', 'too'), ('reihabilitation', 'rehabilitation'), ('facilitay', 'facility'), ('.', '.')]
[('She', 'She'), ('is', 'is'), ('to', 'to'), ('also', 'also'), ('call', 'call'), ('the', 'the'), ('ofice', 'office'), ('if', 'if'), ('she', 'she'), ('has', 'has'), ('any', 'any'), ('ever', 'ever'), ('greater', 'greater'), ('than', 'than'), ('101', '101'), (',', ','), ('or', 'or'), ('leeding', 'leading'), ('form', 'form'), ('the', 'the'), ('surgical', 'surgical'), ('wounds', 'wounds'), ('.', '.')]
[('Abdomen', 'Abdomen'), ('is', 'is'), ('sort', 'sort'), (',', ','), ('nontender', 'nontender'), (',', ','), ('and', 'and'), ('nonintended', 'unintended'), ('.', '.')]
[('Patient', 'Patient'), ('not', 'not'), ('showing', 'showing'), ('pain', 'pain'), ('or', 'or'), ('any', 'any'), ('wealth', 'wealth'), ('problems', 'problems'), ('.', '.')]
[('No', 'No'), ('cute', 'acute'), ('distress', 'distress')]
###Markdown
 [](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/6.Clinical_Context_Spell_Checker.ipynb) 6. Context Spell Checker - Medical
###Code
import json, os
from google.colab import files
license_keys = files.upload()
with open(list(license_keys.keys())[0]) as f:
license_keys = json.load(f)
# Defining license key-value pairs as local variables
locals().update(license_keys)
# Adding license key-value pairs to environment variables
os.environ.update(license_keys)
# Installing pyspark and spark-nlp
! pip install --upgrade -q pyspark==3.1.2 spark-nlp==$PUBLIC_VERSION
# Installing Spark NLP Healthcare
! pip install --upgrade -q spark-nlp-jsl==$JSL_VERSION --extra-index-url https://pypi.johnsnowlabs.com/$SECRET
import json
import os
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
import sparknlp_jsl
import sparknlp
params = {"spark.driver.memory":"16G",
"spark.kryoserializer.buffer.max":"2000M",
"spark.driver.maxResultSize":"2000M"}
spark = sparknlp_jsl.start(license_keys['SECRET'],params=params)
print ("Spark NLP Version :", sparknlp.version())
print ("Spark NLP_JSL Version :", sparknlp_jsl.version())
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
tokenizer = RecursiveTokenizer()\
.setInputCols(["document"])\
.setOutputCol("token")\
.setPrefixes(["\"", "(", "[", "\n"])\
.setSuffixes([".", ",", "?", ")","!", "'s"])
spellModel = ContextSpellCheckerModel\
.pretrained('spellcheck_clinical', 'en', 'clinical/models')\
.setInputCols("token")\
.setOutputCol("checked")
pipeline = Pipeline(
stages = [
documentAssembler,
tokenizer,
spellModel
])
empty_ds = spark.createDataFrame([[""]]).toDF("text")
lp = LightPipeline(pipeline.fit(empty_ds))
###Output
_____no_output_____
###Markdown
Ok!, at this point we have our spell checking pipeline as expected. Let's see what we can do with it, see these errors,___Witth__ the __hell__ of __phisical__ __terapy__ the patient was __imbulated__ and on posoperative, the __impatient__ tolerating a post __curgical__ soft diet.__With __paint__ __wel__ controlled on __orall__ pain medications, she was discharged __too__ __reihabilitation__ __facilitay__.__She is to also call the __ofice__ if she has any __ever__ greater than 101, or __leeding__ __form__ the surgical wounds.__Abdomen is __sort__, nontender, and __nonintended__._ _No __cute__ distress_Check that some of the errors are valid English words, only by considering the context the right choice can be made.
###Code
example = ["Witth the hell of phisical terapy the patient was imbulated and on posoperative, the impatient tolerating a post curgical soft diet.",
"With paint wel controlled on orall pain medications, she was discharged too reihabilitation facilitay.",
"She is to also call the ofice if she has any ever greater than 101, or leeding form the surgical wounds.",
"Abdomen is sort, nontender, and nonintended.",
"Patient not showing pain or any wealth problems.",
"No cute distress"
]
for pairs in lp.annotate(example):
print (list(zip(pairs['token'],pairs['checked'])))
###Output
[('Witth', 'With'), ('the', 'the'), ('hell', 'cell'), ('of', 'of'), ('phisical', 'physical'), ('terapy', 'therapy'), ('the', 'the'), ('patient', 'patient'), ('was', 'was'), ('imbulated', 'ambulated'), ('and', 'and'), ('on', 'on'), ('posoperative', 'postoperative'), (',', ','), ('the', 'the'), ('impatient', 'inpatient'), ('tolerating', 'tolerating'), ('a', 'a'), ('post', 'post'), ('curgical', 'surgical'), ('soft', 'soft'), ('diet', 'diet'), ('.', '.')]
[('With', 'With'), ('paint', 'paint'), ('wel', 'well'), ('controlled', 'controlled'), ('on', 'on'), ('orall', 'oral'), ('pain', 'pain'), ('medications', 'medications'), (',', ','), ('she', 'she'), ('was', 'was'), ('discharged', 'discharged'), ('too', 'too'), ('reihabilitation', 'rehabilitation'), ('facilitay', 'facility'), ('.', '.')]
[('She', 'She'), ('is', 'is'), ('to', 'to'), ('also', 'also'), ('call', 'call'), ('the', 'the'), ('ofice', 'office'), ('if', 'if'), ('she', 'she'), ('has', 'has'), ('any', 'any'), ('ever', 'ever'), ('greater', 'greater'), ('than', 'than'), ('101', '101'), (',', ','), ('or', 'or'), ('leeding', 'leading'), ('form', 'form'), ('the', 'the'), ('surgical', 'surgical'), ('wounds', 'wounds'), ('.', '.')]
[('Abdomen', 'Abdomen'), ('is', 'is'), ('sort', 'sort'), (',', ','), ('nontender', 'nontender'), (',', ','), ('and', 'and'), ('nonintended', 'unintended'), ('.', '.')]
[('Patient', 'Patient'), ('not', 'not'), ('showing', 'showing'), ('pain', 'pain'), ('or', 'or'), ('any', 'any'), ('wealth', 'wealth'), ('problems', 'problems'), ('.', '.')]
[('No', 'No'), ('cute', 'acute'), ('distress', 'distress')]
###Markdown
 [](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/6.Clinical_Context_Spell_Checker.ipynb) Context Spell Checker - Medical
###Code
import json
with open('workshop_license_keys_Aug2020.json') as f:
license_keys = json.load(f)
license_keys.keys()
license_keys['JSL_VERSION']
import os
# Install java
! apt-get update -qq
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
! java -version
secret = license_keys['SECRET']
os.environ['SPARK_NLP_LICENSE'] = license_keys['SPARK_NLP_LICENSE']
os.environ['AWS_ACCESS_KEY_ID']= license_keys['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY'] = license_keys['AWS_SECRET_ACCESS_KEY']
jsl_version = license_keys['JSL_VERSION']
version = license_keys['PUBLIC_VERSION']
! pip install --ignore-installed -q pyspark==2.4.4
! python -m pip install --upgrade spark-nlp-jsl==$jsl_version --extra-index-url https://pypi.johnsnowlabs.com/$secret
! pip install --ignore-installed -q spark-nlp==$version
import sparknlp
print (sparknlp.version())
import json
import os
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
import sparknlp_jsl
spark = sparknlp_jsl.start(secret)
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
tokenizer = RecursiveTokenizer()\
.setInputCols(["document"])\
.setOutputCol("token")\
.setPrefixes(["\"", "(", "[", "\n"])\
.setSuffixes([".", ",", "?", ")","!", "'s"])
spellModel = ContextSpellCheckerModel\
.pretrained('spellcheck_clinical', 'en', 'clinical/models')\
.setInputCols("token")\
.setOutputCol("checked")
pipeline = Pipeline(
stages = [
documentAssembler,
tokenizer,
spellModel
])
empty_ds = spark.createDataFrame([[""]]).toDF("text")
lp = LightPipeline(pipeline.fit(empty_ds))
###Output
_____no_output_____
###Markdown
Ok!, at this point we have our spell checking pipeline as expected. Let's see what we can do with it, see these errors,___Witth__ the __hell__ of __phisical__ __terapy__ the patient was __imbulated__ and on posoperative, the __impatient__ tolerating a post __curgical__ soft diet.__With __paint__ __wel__ controlled on __orall__ pain medications, she was discharged __too__ __reihabilitation__ __facilitay__.__She is to also call the __ofice__ if she has any __ever__ greater than 101, or __leeding__ __form__ the surgical wounds.__Abdomen is __sort__, nontender, and __nonintended__.__Patient not showing pain or any __wealth__ problems._ _No __cute__ distress_Check that some of the errors are valid English words, only by considering the context the right choice can be made.
###Code
example = ["Witth the hell of phisical terapy the patient was imbulated and on posoperative, the impatient tolerating a post curgical soft diet.",
"With paint wel controlled on orall pain medications, she was discharged too reihabilitation facilitay.",
"She is to also call the ofice if she has any ever greater than 101, or leeding form the surgical wounds.",
"Abdomen is sort, nontender, and nonintended.",
"Patient not showing pain or any wealth problems.",
"No cute distress"
]
for pairs in lp.annotate(example):
print (list(zip(pairs['token'],pairs['checked'])))
###Output
[('Witth', 'With'), ('the', 'the'), ('hell', 'cell'), ('of', 'of'), ('phisical', 'physical'), ('terapy', 'therapy'), ('the', 'the'), ('patient', 'patient'), ('was', 'was'), ('imbulated', 'ambulated'), ('and', 'and'), ('on', 'on'), ('posoperative', 'postoperative'), (',', ','), ('the', 'the'), ('impatient', 'patient'), ('tolerating', 'tolerating'), ('a', 'a'), ('post', 'post'), ('curgical', 'surgical'), ('soft', 'soft'), ('diet', 'diet'), ('.', '.')]
[('With', 'With'), ('paint', 'pain'), ('wel', 'well'), ('controlled', 'controlled'), ('on', 'on'), ('orall', 'oral'), ('pain', 'pain'), ('medications', 'medications'), (',', ','), ('she', 'she'), ('was', 'was'), ('discharged', 'discharged'), ('too', 'to'), ('reihabilitation', 'rehabilitation'), ('facilitay', 'facility'), ('.', '.')]
[('She', 'She'), ('is', 'is'), ('to', 'to'), ('also', 'also'), ('call', 'call'), ('the', 'the'), ('ofice', 'once'), ('if', 'if'), ('she', 'she'), ('has', 'has'), ('any', 'any'), ('ever', 'fever'), ('greater', 'greater'), ('than', 'than'), ('101', '101'), (',', ','), ('or', 'or'), ('leeding', 'leading'), ('form', 'from'), ('the', 'the'), ('surgical', 'surgical'), ('wounds', 'wounds'), ('.', '.')]
[('Abdomen', 'Abdomen'), ('is', 'is'), ('sort', 'sort'), (',', ','), ('nontender', 'nontender'), (',', ','), ('and', 'and'), ('nonintended', 'unintended'), ('.', '.')]
[('Patient', 'Patient'), ('not', 'not'), ('showing', 'showing'), ('pain', 'pain'), ('or', 'or'), ('any', 'any'), ('wealth', 'health'), ('problems', 'problems'), ('.', '.')]
[('No', 'No'), ('cute', 'acute'), ('distress', 'distress')]
###Markdown
 [](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/6.Clinical_Context_Spell_Checker.ipynb) Context Spell Checker - Medical
###Code
import json
with open('workshop_license_keys_Aug2020.json') as f:
license_keys = json.load(f)
license_keys.keys()
license_keys['JSL_VERSION']
import os
# Install java
! apt-get update -qq
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
! java -version
secret = license_keys['SECRET']
os.environ['SPARK_NLP_LICENSE'] = license_keys['SPARK_NLP_LICENSE']
os.environ['SPARK_OCR_LICENSE'] = license_keys['SPARK_OCR_LICENSE']
os.environ['AWS_ACCESS_KEY_ID']= license_keys['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY'] = license_keys['AWS_SECRET_ACCESS_KEY']
jsl_version = license_keys['JSL_VERSION']
version = license_keys['PUBLIC_VERSION']
! pip install --ignore-installed -q pyspark==2.4.4
! python -m pip install --upgrade spark-nlp-jsl==$jsl_version --extra-index-url https://pypi.johnsnowlabs.com/$secret
! pip install --ignore-installed -q spark-nlp==$version
import sparknlp
print (sparknlp.version())
import json
import os
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
import sparknlp_jsl
spark = sparknlp_jsl.start(secret)
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
tokenizer = RecursiveTokenizer()\
.setInputCols(["document"])\
.setOutputCol("token")\
.setPrefixes(["\"", "(", "[", "\n"])\
.setSuffixes([".", ",", "?", ")","!", "'s"])
spellModel = ContextSpellCheckerModel\
.pretrained('spellcheck_clinical', 'en', 'clinical/models')\
.setInputCols("token")\
.setOutputCol("checked")
pipeline = Pipeline(
stages = [
documentAssembler,
tokenizer,
spellModel
])
empty_ds = spark.createDataFrame([[""]]).toDF("text")
lp = LightPipeline(pipeline.fit(empty_ds))
###Output
_____no_output_____
###Markdown
Ok!, at this point we have our spell checking pipeline as expected. Let's see what we can do with it, see these errors,___Witth__ the __hell__ of __phisical__ __terapy__ the patient was __imbulated__ and on posoperative, the __impatient__ tolerating a post __curgical__ soft diet.__With __paint__ __wel__ controlled on __orall__ pain medications, she was discharged __too__ __reihabilitation__ __facilitay__.__She is to also call the __ofice__ if she has any __ever__ greater than 101, or __leeding__ __form__ the surgical wounds.__Abdomen is __sort__, nontender, and __nonintended__.__Patient not showing pain or any __wealth__ problems._ _No __cute__ distress_Check that some of the errors are valid English words, only by considering the context the right choice can be made.
###Code
example = ["Witth the hell of phisical terapy the patient was imbulated and on posoperative, the impatient tolerating a post curgical soft diet.",
"With paint wel controlled on orall pain medications, she was discharged too reihabilitation facilitay.",
"She is to also call the ofice if she has any ever greater than 101, or leeding form the surgical wounds.",
"Abdomen is sort, nontender, and nonintended.",
"Patient not showing pain or any wealth problems.",
"No cute distress"
]
for pairs in lp.annotate(example):
print (list(zip(pairs['token'],pairs['checked'])))
###Output
[('Witth', 'With'), ('the', 'the'), ('hell', 'cell'), ('of', 'of'), ('phisical', 'physical'), ('terapy', 'therapy'), ('the', 'the'), ('patient', 'patient'), ('was', 'was'), ('imbulated', 'ambulated'), ('and', 'and'), ('on', 'on'), ('posoperative', 'postoperative'), (',', ','), ('the', 'the'), ('impatient', 'patient'), ('tolerating', 'tolerating'), ('a', 'a'), ('post', 'post'), ('curgical', 'surgical'), ('soft', 'soft'), ('diet', 'diet'), ('.', '.')]
[('With', 'With'), ('paint', 'pain'), ('wel', 'well'), ('controlled', 'controlled'), ('on', 'on'), ('orall', 'oral'), ('pain', 'pain'), ('medications', 'medications'), (',', ','), ('she', 'she'), ('was', 'was'), ('discharged', 'discharged'), ('too', 'to'), ('reihabilitation', 'rehabilitation'), ('facilitay', 'facility'), ('.', '.')]
[('She', 'She'), ('is', 'is'), ('to', 'to'), ('also', 'also'), ('call', 'call'), ('the', 'the'), ('ofice', 'once'), ('if', 'if'), ('she', 'she'), ('has', 'has'), ('any', 'any'), ('ever', 'fever'), ('greater', 'greater'), ('than', 'than'), ('101', '101'), (',', ','), ('or', 'or'), ('leeding', 'leading'), ('form', 'from'), ('the', 'the'), ('surgical', 'surgical'), ('wounds', 'wounds'), ('.', '.')]
[('Abdomen', 'Abdomen'), ('is', 'is'), ('sort', 'sort'), (',', ','), ('nontender', 'nontender'), (',', ','), ('and', 'and'), ('nonintended', 'unintended'), ('.', '.')]
[('Patient', 'Patient'), ('not', 'not'), ('showing', 'showing'), ('pain', 'pain'), ('or', 'or'), ('any', 'any'), ('wealth', 'health'), ('problems', 'problems'), ('.', '.')]
[('No', 'No'), ('cute', 'acute'), ('distress', 'distress')]
###Markdown
 [](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/6.Clinical_Context_Spell_Checker.ipynb) 6. Context Spell Checker - Medical
###Code
import json, os
from google.colab import files
if 'spark_jsl.json' not in os.listdir():
license_keys = files.upload()
os.rename(list(license_keys.keys())[0], 'spark_jsl.json')
with open('spark_jsl.json') as f:
license_keys = json.load(f)
# Defining license key-value pairs as local variables
locals().update(license_keys)
os.environ.update(license_keys)
# Installing pyspark and spark-nlp
! pip install --upgrade -q pyspark==3.1.2 spark-nlp==$PUBLIC_VERSION
# Installing Spark NLP Healthcare
! pip install --upgrade -q spark-nlp-jsl==$JSL_VERSION --extra-index-url https://pypi.johnsnowlabs.com/$SECRET
import json
import os
import sparknlp
import sparknlp_jsl
from sparknlp.base import *
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
params = {"spark.driver.memory":"16G",
"spark.kryoserializer.buffer.max":"2000M",
"spark.driver.maxResultSize":"2000M"}
spark = sparknlp_jsl.start(license_keys['SECRET'],params=params)
print("Spark NLP Version :", sparknlp.version())
print("Spark NLP_JSL Version :", sparknlp_jsl.version())
spark
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
tokenizer = RecursiveTokenizer()\
.setInputCols(["document"])\
.setOutputCol("token")\
.setPrefixes(["\"", "(", "[", "\n"])\
.setSuffixes([".", ",", "?", ")","!", "'s"])
spellModel = ContextSpellCheckerModel\
.pretrained('spellcheck_clinical', 'en', 'clinical/models')\
.setInputCols("token")\
.setOutputCol("checked")
pipeline = Pipeline(
stages = [
documentAssembler,
tokenizer,
spellModel
])
empty_ds = spark.createDataFrame([[""]]).toDF("text")
lp = LightPipeline(pipeline.fit(empty_ds))
###Output
_____no_output_____
###Markdown
Ok!, at this point we have our spell checking pipeline as expected. Let's see what we can do with it, see these errors,_She was **treathed** with a five day course of **amoxicilin** for a **resperatory** **truct** infection.__With pain well controlled on **orall** **meditation**, she was discharged to **reihabilitation** **facilitay**.__Her **adominal** examination is soft, nontender, and **nonintended**__The patient was seen by the **entocrinology** service and she was discharged on 40 units of **unsilin** glargine at night_ _No __cute__ distress_Check that some of the errors are valid English words, only by considering the context the right choice can be made.
###Code
example = ["She was treathed with a five day course of amoxicilin for a resperatory truct infection . ",
"With pain well controlled on orall meditation, she was discharged to reihabilitation facilitay.",
"Her adominal examination is soft, nontender, and nonintended.",
"The patient was seen by the entocrinology service and she was discharged on 40 units of unsilin glargine at night",
"No cute distress",
]
for pairs in lp.annotate(example):
print(list(zip(pairs['token'],pairs['checked'])))
###Output
[('She', 'She'), ('was', 'was'), ('treathed', 'treated'), ('with', 'with'), ('a', 'a'), ('five', 'five'), ('day', 'day'), ('course', 'course'), ('of', 'of'), ('amoxicilin', 'amoxicillin'), ('for', 'for'), ('a', 'a'), ('resperatory', 'respiratory'), ('truct', 'tract'), ('infection', 'infection'), ('.', '.')]
[('With', 'With'), ('pain', 'pain'), ('well', 'well'), ('controlled', 'controlled'), ('on', 'on'), ('orall', 'oral'), ('meditation', 'medication'), (',', ','), ('she', 'she'), ('was', 'was'), ('discharged', 'discharged'), ('to', 'to'), ('reihabilitation', 'rehabilitation'), ('facilitay', 'facility'), ('.', '.')]
[('Her', 'Her'), ('adominal', 'abdominal'), ('examination', 'examination'), ('is', 'is'), ('soft', 'soft'), (',', ','), ('nontender', 'nontender'), (',', ','), ('and', 'and'), ('nonintended', 'nondistended'), ('.', '.')]
[('The', 'The'), ('patient', 'patient'), ('was', 'was'), ('seen', 'seen'), ('by', 'by'), ('the', 'the'), ('entocrinology', 'endocrinology'), ('service', 'service'), ('and', 'and'), ('she', 'she'), ('was', 'was'), ('discharged', 'discharged'), ('on', 'on'), ('40', '40'), ('units', 'units'), ('of', 'of'), ('unsilin', 'insulin'), ('glargine', 'glargine'), ('at', 'at'), ('night', 'night')]
[('No', 'No'), ('cute', 'acute'), ('distress', 'distress')]
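###Markdown
As a quick follow-up (a small added sketch, not part of the original tutorial): `lp.annotate` returns plain Python lists, so it is easy to keep only the tokens the checker actually changed, which makes longer results easier to scan.
###Code
# Sketch only: reuses the LightPipeline `lp` fitted above; the input sentence is made up for illustration.
result = lp.annotate("She was treathed with amoxicilin for a resperatory truct infection .")
corrections = [(tok, fix) for tok, fix in zip(result['token'], result['checked']) if tok != fix]
print(corrections)
###Output
_____no_output_____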
###Markdown
6. Context Spell Checker - Medical ([open this notebook in Colab](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/6.Clinical_Context_Spell_Checker.ipynb))
###Code
import json, os
from google.colab import files
license_keys = files.upload()
with open(list(license_keys.keys())[0]) as f:
license_keys = json.load(f)
# Defining license key-value pairs as local variables
locals().update(license_keys)
# Adding license key-value pairs to environment variables
os.environ.update(license_keys)
# Installing pyspark and spark-nlp
! pip install --upgrade -q pyspark==3.1.2 spark-nlp==$PUBLIC_VERSION
# Installing Spark NLP Healthcare
! pip install --upgrade -q spark-nlp-jsl==$JSL_VERSION --extra-index-url https://pypi.johnsnowlabs.com/$SECRET
import json
import os
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
import sparknlp_jsl
import sparknlp
params = {"spark.driver.memory":"16G",
"spark.kryoserializer.buffer.max":"2000M",
"spark.driver.maxResultSize":"2000M"}
spark = sparknlp_jsl.start(license_keys['SECRET'],params=params)
print ("Spark NLP Version :", sparknlp.version())
print ("Spark NLP_JSL Version :", sparknlp_jsl.version())
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
tokenizer = RecursiveTokenizer()\
.setInputCols(["document"])\
.setOutputCol("token")\
.setPrefixes(["\"", "(", "[", "\n"])\
.setSuffixes([".", ",", "?", ")","!", "'s"])
spellModel = ContextSpellCheckerModel\
.pretrained('spellcheck_clinical', 'en', 'clinical/models')\
.setInputCols("token")\
.setOutputCol("checked")
pipeline = Pipeline(
stages = [
documentAssembler,
tokenizer,
spellModel
])
empty_ds = spark.createDataFrame([[""]]).toDF("text")
lp = LightPipeline(pipeline.fit(empty_ds))
###Output
_____no_output_____
###Markdown
Ok! At this point we have our spell checking pipeline as expected. Let's see what we can do with it. Consider these errors:
_**Witth** the **hell** of **phisical** **terapy** the patient was **imbulated** and on posoperative, the **impatient** tolerating a post **curgical** soft diet._
_With **paint** **wel** controlled on **orall** pain medications, she was discharged **too** **reihabilitation** **facilitay**._
_She is to also call the **ofice** if she has any **ever** greater than 101, or **leeding** **form** the surgical wounds._
_Abdomen is **sort**, nontender, and **nonintended**._
_No **cute** distress._
Note that some of the errors are themselves valid English words; only by considering the context can the right choice be made.
###Code
example = ["Witth the hell of phisical terapy the patient was imbulated and on posoperative, the impatient tolerating a post curgical soft diet.",
"With paint wel controlled on orall pain medications, she was discharged too reihabilitation facilitay.",
"She is to also call the ofice if she has any ever greater than 101, or leeding form the surgical wounds.",
"Abdomen is sort, nontender, and nonintended.",
"Patient not showing pain or any wealth problems.",
"No cute distress"
]
for pairs in lp.annotate(example):
print (list(zip(pairs['token'],pairs['checked'])))
###Output
[('Witth', 'With'), ('the', 'the'), ('hell', 'cell'), ('of', 'of'), ('phisical', 'physical'), ('terapy', 'therapy'), ('the', 'the'), ('patient', 'patient'), ('was', 'was'), ('imbulated', 'ambulated'), ('and', 'and'), ('on', 'on'), ('posoperative', 'postoperative'), (',', ','), ('the', 'the'), ('impatient', 'inpatient'), ('tolerating', 'tolerating'), ('a', 'a'), ('post', 'post'), ('curgical', 'surgical'), ('soft', 'soft'), ('diet', 'diet'), ('.', '.')]
[('With', 'With'), ('paint', 'paint'), ('wel', 'well'), ('controlled', 'controlled'), ('on', 'on'), ('orall', 'oral'), ('pain', 'pain'), ('medications', 'medications'), (',', ','), ('she', 'she'), ('was', 'was'), ('discharged', 'discharged'), ('too', 'too'), ('reihabilitation', 'rehabilitation'), ('facilitay', 'facility'), ('.', '.')]
[('She', 'She'), ('is', 'is'), ('to', 'to'), ('also', 'also'), ('call', 'call'), ('the', 'the'), ('ofice', 'office'), ('if', 'if'), ('she', 'she'), ('has', 'has'), ('any', 'any'), ('ever', 'ever'), ('greater', 'greater'), ('than', 'than'), ('101', '101'), (',', ','), ('or', 'or'), ('leeding', 'leading'), ('form', 'form'), ('the', 'the'), ('surgical', 'surgical'), ('wounds', 'wounds'), ('.', '.')]
[('Abdomen', 'Abdomen'), ('is', 'is'), ('sort', 'sort'), (',', ','), ('nontender', 'nontender'), (',', ','), ('and', 'and'), ('nonintended', 'unintended'), ('.', '.')]
[('Patient', 'Patient'), ('not', 'not'), ('showing', 'showing'), ('pain', 'pain'), ('or', 'or'), ('any', 'any'), ('wealth', 'wealth'), ('problems', 'problems'), ('.', '.')]
[('No', 'No'), ('cute', 'acute'), ('distress', 'distress')]
###Markdown
Context Spell Checker - Medical ([open this notebook in Colab](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/6.Clinical_Context_Spell_Checker.ipynb))
###Code
import json
from google.colab import files
license_keys = files.upload()
with open(list(license_keys.keys())[0]) as f:
license_keys = json.load(f)
license_keys.keys()
license_keys['JSL_VERSION']
import os
# Install java
! apt-get update -qq
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
! java -version
secret = license_keys['SECRET']
os.environ['SPARK_NLP_LICENSE'] = license_keys['SPARK_NLP_LICENSE']
os.environ['AWS_ACCESS_KEY_ID']= license_keys['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY'] = license_keys['AWS_SECRET_ACCESS_KEY']
jsl_version = license_keys['JSL_VERSION']
version = license_keys['PUBLIC_VERSION']
! pip install --ignore-installed -q pyspark==2.4.4
! python -m pip install --upgrade spark-nlp-jsl==$jsl_version --extra-index-url https://pypi.johnsnowlabs.com/$secret
! pip install --ignore-installed -q spark-nlp==$version
import sparknlp
print (sparknlp.version())
import json
import os
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
import sparknlp_jsl
spark = sparknlp_jsl.start(secret)
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
tokenizer = RecursiveTokenizer()\
.setInputCols(["document"])\
.setOutputCol("token")\
.setPrefixes(["\"", "(", "[", "\n"])\
.setSuffixes([".", ",", "?", ")","!", "'s"])
spellModel = ContextSpellCheckerModel\
.pretrained('spellcheck_clinical', 'en', 'clinical/models')\
.setInputCols("token")\
.setOutputCol("checked")
pipeline = Pipeline(
stages = [
documentAssembler,
tokenizer,
spellModel
])
empty_ds = spark.createDataFrame([[""]]).toDF("text")
lp = LightPipeline(pipeline.fit(empty_ds))
###Output
_____no_output_____
###Markdown
Ok! At this point we have our spell checking pipeline as expected. Let's see what we can do with it. Consider these errors:
_**Witth** the **hell** of **phisical** **terapy** the patient was **imbulated** and on posoperative, the **impatient** tolerating a post **curgical** soft diet._
_With **paint** **wel** controlled on **orall** pain medications, she was discharged **too** **reihabilitation** **facilitay**._
_She is to also call the **ofice** if she has any **ever** greater than 101, or **leeding** **form** the surgical wounds._
_Abdomen is **sort**, nontender, and **nonintended**._
_Patient not showing pain or any **wealth** problems._
_No **cute** distress._
Note that some of the errors are themselves valid English words; only by considering the context can the right choice be made.
###Code
example = ["Witth the hell of phisical terapy the patient was imbulated and on posoperative, the impatient tolerating a post curgical soft diet.",
"With paint wel controlled on orall pain medications, she was discharged too reihabilitation facilitay.",
"She is to also call the ofice if she has any ever greater than 101, or leeding form the surgical wounds.",
"Abdomen is sort, nontender, and nonintended.",
"Patient not showing pain or any wealth problems.",
"No cute distress"
]
for pairs in lp.annotate(example):
print (list(zip(pairs['token'],pairs['checked'])))
###Output
[('Witth', 'With'), ('the', 'the'), ('hell', 'cell'), ('of', 'of'), ('phisical', 'physical'), ('terapy', 'therapy'), ('the', 'the'), ('patient', 'patient'), ('was', 'was'), ('imbulated', 'ambulated'), ('and', 'and'), ('on', 'on'), ('posoperative', 'postoperative'), (',', ','), ('the', 'the'), ('impatient', 'patient'), ('tolerating', 'tolerating'), ('a', 'a'), ('post', 'post'), ('curgical', 'surgical'), ('soft', 'soft'), ('diet', 'diet'), ('.', '.')]
[('With', 'With'), ('paint', 'pain'), ('wel', 'well'), ('controlled', 'controlled'), ('on', 'on'), ('orall', 'oral'), ('pain', 'pain'), ('medications', 'medications'), (',', ','), ('she', 'she'), ('was', 'was'), ('discharged', 'discharged'), ('too', 'to'), ('reihabilitation', 'rehabilitation'), ('facilitay', 'facility'), ('.', '.')]
[('She', 'She'), ('is', 'is'), ('to', 'to'), ('also', 'also'), ('call', 'call'), ('the', 'the'), ('ofice', 'once'), ('if', 'if'), ('she', 'she'), ('has', 'has'), ('any', 'any'), ('ever', 'fever'), ('greater', 'greater'), ('than', 'than'), ('101', '101'), (',', ','), ('or', 'or'), ('leeding', 'leading'), ('form', 'from'), ('the', 'the'), ('surgical', 'surgical'), ('wounds', 'wounds'), ('.', '.')]
[('Abdomen', 'Abdomen'), ('is', 'is'), ('sort', 'sort'), (',', ','), ('nontender', 'nontender'), (',', ','), ('and', 'and'), ('nonintended', 'unintended'), ('.', '.')]
[('Patient', 'Patient'), ('not', 'not'), ('showing', 'showing'), ('pain', 'pain'), ('or', 'or'), ('any', 'any'), ('wealth', 'health'), ('problems', 'problems'), ('.', '.')]
[('No', 'No'), ('cute', 'acute'), ('distress', 'distress')]
|
DEEP LEARNING/Kaggle: Avito Demand Prediction Challenge (bronze solution)/ridge regression XGBOOST.ipynb | ###Markdown
from sklearn.model_selection import KFold
import datetime

print(datetime.datetime.now())
folds = KFold(n_splits=5, shuffle=True, random_state=50001)
oof_preds = np.zeros(X.shape[0])
sub_preds = np.zeros(testing.shape[0])
for n_fold, (trn_idx, val_idx) in enumerate(folds.split(X)):
    dtrain = lgb.Dataset(X.tocsr()[trn_idx], y.iloc[trn_idx])
    dval = lgb.Dataset(X.tocsr()[val_idx], y.iloc[val_idx])
    m_gbm = lgb.train(params=lgbm_params, train_set=dtrain, num_boost_round=1300, verbose_eval=400,
                      valid_sets=[dtrain, dval], valid_names=['train', 'valid'])
    oof_preds[val_idx] = m_gbm.predict(X.tocsr()[val_idx])
    sub_preds += m_gbm.predict(testing) / folds.n_splits
    print('Fold %2d rmse : %.6f' % (n_fold + 1, rmse(y.iloc[val_idx], oof_preds[val_idx])))
    del dtrain, dval
    gc.collect()
print('Full RMSE score %.6f' % rmse(y, oof_preds))
del X; gc.collect()
sub_preds[sub_preds < 0] = 0
sub_preds[sub_preds > 1] = 1

# Mixing lightgbm with ridge. I haven't really tested if this improves the score or not
# blend = 0.95*lgpred + 0.05*ridge_oof_test[:,0]
Submission = pd.read_csv("sample_submission.csv")
Submission['deal_probability'] = sub_preds
Submission.to_csv("split5.csv", index=False)
print(datetime.datetime.now())

import xgboost as xgb
clf = xgb.XGBRegressor(n_estimators=999, learning_rate=0.02, gamma=0.3, min_child_weight=3, nthread=15,
                       max_depth=30, subsample=0.9, colsample_bytree=0.8, seed=2100, eval_metric="rmse")
###Code
params = {
#'objective' : 'gpu:reg:linear',
#'tree_method':'gpu_hist',
'learning_rate': 0.016,
'gamma' : 0.3,
'min_child_weight' : 3,
'nthread' : 15,
'max_depth' : 12,
'subsample' : 0.9,
'colsample_bytree' : 0.75,
'seed':2100,
'eval_metric' : "rmse",
'num_boost_round' : 500,
'n_estimators':999,
'max_leaves': 90
}
import xgboost as xgb
VALID = True
if VALID == True:
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, test_size = 0.06, random_state=23)
tr_data = xgb.DMatrix(X_train, y_train)
va_data = xgb.DMatrix(X_valid, y_valid)
#del X_train, X_valid, y_train, y_valid ; gc.collect()
watchlist = [(tr_data, 'train'), (va_data, 'valid')]
model = xgb.train(params, tr_data, 500, watchlist, maximize=False, early_stopping_rounds = 30, verbose_eval=50)
print("Model Evaluation Stage")
print('RMSE valid:', np.sqrt(metrics.mean_squared_error(y_valid, model.predict(xgb.DMatrix(X_valid)))))
print('RMSE train:', np.sqrt(metrics.mean_squared_error(y_train, model.predict(xgb.DMatrix(X_train)))))
else:
# Go Go Go
del tr_data, va_data, X_train, X_valid, y_train, y_valid; gc.collect()
tr_data = xgb.DMatrix(X, y)
model = xgb.train(params,tr_data, 1000, verbose_eval=100)
print("Model Evaluation Stage")
lgpred = model.predict(xgb.DMatrix(testing))
print('RMSE train:', np.sqrt(metrics.mean_squared_error(y_train, model.predict(xgb.DMatrix(X_train)))))
#Mixing lightgbm with ridge. I haven't really tested if this improves the score or not
#blend = 0.95*lgpred + 0.05*ridge_oof_test[:,0]
lgsub = pd.DataFrame(lgpred,columns=["deal_probability"],index=testdex)
lgsub['deal_probability'].clip(0.0, 1.0, inplace=True) # Between 0 and 1
lgsub.to_csv("xgsub_tf.csv",index=True,header=True)
#print("Model Runtime: %0.2f Minutes"%((time.time() - modelstart)/60))
import matplotlib.pyplot as plt
f, ax = plt.subplots(figsize=[10,15])
xgb.plot_importance(model, ax=ax)
import matplotlib.pyplot as plt
f, ax = plt.subplots(figsize=[10,15])
xgb.plot_importance(model, ax=ax )
###Output
_____no_output_____
###Markdown
RMSE valid: 0.22568683979923207
RMSE train: 0.21163101254549618

Model Evaluation Stage
params = {'objective': 'gpu:reg:linear', 'tree_method': 'gpu_hist', 'learning_rate': 0.015, 'gamma': 0.3,
          'min_child_weight': 3, 'nthread': 15, 'max_depth': 12, 'subsample': 0.9, 'colsample_bytree': 0.75,
          'seed': 2100, 'eval_metric': "rmse", 'num_boost_round': 300, 'n_estimators': 999, 'max_leaves': 120}
Model Evaluation Stage
RMSE valid: 0.22405721827050518

params = {'objective': 'gpu:reg:linear', 'tree_method': 'gpu_hist', 'learning_rate': 0.015, 'gamma': 0.3,
          'min_child_weight': 3, 'nthread': 15, 'max_depth': 15, 'subsample': 0.9, 'colsample_bytree': 0.75,
          'seed': 2100, 'eval_metric': "rmse", 'num_boost_round': 300, 'n_estimators': 999, 'max_leaves': 100}
RMSE valid: 0.23508655812191442
RMSE train: 0.234600064596860

params = {'objective': 'gpu:reg:linear', 'tree_method': 'gpu_hist', 'learning_rate': 0.015, 'gamma': 0.3,
          'min_child_weight': 3, 'nthread': 15, 'max_depth': 15, 'subsample': 0.9, 'colsample_bytree': 0.75,
          'seed': 2100, 'eval_metric': "rmse", 'num_boost_round': 300, 'n_estimators': 999, 'max_leaves': 100}
Will train until valid-rmse hasn't improved in 30 rounds.
[50]  train-rmse:0.269386  valid-rmse:0.270039
[100] train-rmse:0.234934  valid-rmse:0.236262
[150] train-rmse:0.228136  valid-rmse:0.230054
[200] train-rmse:0.225995  valid-rmse:0.228401
[250] train-rmse:0.224621  valid-rmse:0.227503
[299] train-rmse:0.223569  valid-rmse:0.226906
with 'learning_rate': 0.015, 'gamma': 0.3, 'min_child_weight': 3, 'nthread': 15, 'max_depth': 8, 'subsample': 0.9,
'colsample_bytree': 0.75, 'seed': 2100, 'eval_metric': "rmse", 'num_boost_round': 300, 'n_estimators': 999, 'max_leaves': 100
[299] train-rmse:0.223569  valid-rmse:0.226906

clf = xgb.XGBRegressor(n_estimators=999, learning_rate=0.015, gamma=0.3, min_child_weight=3, nthread=15,
                       max_depth=150, subsample=0.9, colsample_bytree=0.8, seed=2100, eval_metric="rmse")
[0]   validation_0-rmse:0.440045
Will train until validation_0-rmse hasn't improved in 50 rounds.
[50]  validation_0-rmse:0.288205
[100] validation_0-rmse:0.239962
[150] validation_0-rmse:0.22704
[200] validation_0-rmse:0.223603
[250] validation_0-rmse:0.222562
[300] validation_0-rmse:0.222139
###Code
import datetime
datetime.datetime.now()
clf.feature_importances_
###Output
_____no_output_____
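###Markdown
Side note (an added sketch only, not run here): the notes above mention blending the tree model's predictions with a ridge model's out-of-fold predictions but leave it commented out. Assuming an array `ridge_oof_test` of shape (n_test, 1) were available, the blend would look roughly like this.
###Code
# Hypothetical blend sketch: `lgpred` and `testdex` come from the cells above, `ridge_oof_test` is assumed
# to exist, and numpy/pandas are assumed to be imported as np/pd earlier in the notebook.
blend = 0.95 * lgpred + 0.05 * ridge_oof_test[:, 0]
blend = np.clip(blend, 0.0, 1.0)  # deal_probability must stay within [0, 1]
blendsub = pd.DataFrame(blend, columns=["deal_probability"], index=testdex)
blendsub.to_csv("blend_sub.csv", index=True, header=True)
###Output
_____no_output_____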
Clase 9 - Ejercicios score y NBA spark.ipynb | ###Markdown
2016 2C, 1st Midterm Exam
In this exercise we want to build a system that recommends texts to users based on their preferences for certain terms (words). We have an RDD of texts of the form (docId, text), where text is a string of variable length. We also have an RDD that indicates which terms each user likes or dislikes, of the form (userId, term, score), for example (23, "calesita", -2). Write a Spark program that computes the total score of each document for each user, producing an RDD of the form (userId, docId, score), where the score is simply the sum of the user's scores for the terms that appear in the document. A document may contain terms for which some users have no score; in those cases we simply treat them as neutral (score = 0).
###Code
documents_raw = [
(1, 'pablo honey'),
(2, 'the bends'),
(3, 'ok computer'),
(4, 'kid a'),
(5, 'amnesiac'),
(6, 'hail to the thief'),
(7, 'in rainbows'),
(8, 'the king of limbs'),
(9, 'a moon shaped pool')
]
scores_raw = [
('thom', 'pablo', 1),
('thom', 'honey', 1),
('martin', 'pablo', -1),
('martin', 'honey', -1),
('martin', 'ok', 30),
('martin', 'computer', 30),
]
documents = sc.parallelize(documents_raw)
scores = sc.parallelize(scores_raw)
terms = documents.flatMap(lambda x: [(word, x[0]) for word in x[1].split()])
scores_by_word = scores.map(lambda x: (x[1], (x[0], x[2])))
total = terms.join(scores_by_word)
total.collect()
by_user = total.map(lambda x: ((x[1][1][0], x[1][0]), x[1][1][1])).reduceByKey(lambda x, y: x + y).cache()
by_user.first()
users = scores.map(lambda x: x[0]).distinct() # Usuarios unicos
docs = documents.map(lambda x: x[0])
users_docs = users.cartesian(docs).map(lambda x: ((x[0],x[1]), 0)).cache()
users_docs.collect()
by_user.rightOuterJoin(users_docs).map(lambda x: (x[0][0], x[0][1], 0 if x[1][0] is None else x[1][0])).collect()
###Output
_____no_output_____
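###Markdown
As a sanity check (an added, hand-worked example): thom scores "pablo honey" as 1 + 1 = 2, martin scores it as -1 + -1 = -2, martin scores "ok computer" as 30 + 30 = 60, and every other (user, doc) pair stays neutral at 0.
###Code
# Hand-worked verification of the toy example (assumes the RDDs defined in the cells above).
result = by_user.rightOuterJoin(users_docs).map(lambda x: (x[0][0], x[0][1], 0 if x[1][0] is None else x[1][0]))
assert set(result.filter(lambda x: x[2] != 0).collect()) == {('thom', 1, 2), ('martin', 1, -2), ('martin', 3, 60)}
###Output
_____no_output_____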
###Markdown
2017 1C, 1st Midterm Exam
We have regular-season statistics for every NBA player in an RDD of tuples with the format (player_id, name, avg_points, avg_assists, avg_steals, avg_blocks, avg_rebounds, avg_fouls). An ESPN analyst is working with an RDD for the first round of the playoffs with the format (player_id, game_id, timestamp, points, rebounds, blocks, steals, assists, fouls). Using these RDDs, write a PySpark program that produces an RDD with the names (without duplicates) of the players who, in some playoff game, recorded more assists than their historical average.
###Code
# (id_jugador, nombre, promedio_asistencias)
players_all_time_stats = [
(1, 'Manu Ginobili', 800),
(2, 'Kobe Bryant', 100),
(3, 'Marc Gasol', 25),
(4, 'James Harden', 1000)]
# (id_jugador, id_partido, timestamp, cantidad_asistencias)
scores = [
(1, 1, 1, 100),
(1, 1, 3, 100),
(2, 1, 1, 150),
(2, 1, 3, 150),
(3, 2, 2, 50),
(3, 2, 3, 50),
(1, 2, 1, 150),
(1, 2, 3, 150),
]
stats = sc.parallelize(players_all_time_stats)
scores = sc.parallelize(scores)
stats = stats.map(lambda x: (x[0], (x[1],x[2])))
stats.first()
scores_by_match = scores.map(lambda x: ((x[0], x[1]), x[3])).reduceByKey(lambda x, y: x + y)\
.map(lambda x: (x[0][0], x[1]))
scores_by_match.first()
resul = scores_by_match.join(stats).filter(lambda x: x[1][0] > x[1][1][1]).map(lambda x: (x[1][1][0])).distinct()
resul.collect()
###Output
_____no_output_____
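###Markdown
A quick hand check of the toy data (an added example): Manu Ginobili totals 200 and 300 assists in his two games, both below his average of 800, while Kobe Bryant (300 > 100) and Marc Gasol (100 > 25) both beat their averages, so only those two names should come back.
###Code
# Hand-worked verification of the toy example (assumes `resul` from the previous cell).
assert set(resul.collect()) == {'Kobe Bryant', 'Marc Gasol'}
###Output
_____no_output_____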
nice_figure.ipynb | ###Markdown
Then we define an array of angles and their sines and cosines using numpy. This time we will use linspace.
###Code
x = np.linspace(0, 2*np.pi, 100)
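# note: unlike np.arange, linspace includes the endpoint, so x[-1] is exactly 2*pi (the print below confirms it)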
print (x[-1], 2*np.pi)
y = np.sin(x)
z = np.cos(x)
w = np.sin(4*x)
v = np.cos(4*x)
###Output
_____no_output_____
###Markdown
Now, let's make a two panel plot side-by-side.
###Code
# call subplots to generate a multipanel figure. This means 1 row, 2 columns of figures
f, axarr = plt.subplots(1, 2)
# treat axarr as an array, from left to right
# first panel
axarr[0].plot(x, y)
axarr[0].set_xlabel('x')
axarr[0].set_ylabel('sin(x)')
axarr[0].set_title(r'$\sin(x)$') # LaTeX math mode: the dollar signs render a nicely formatted sin(x) title
# second panel
axarr[1].plot(x, z)
axarr[1].set_xlabel('x')
axarr[1].set_ylabel('cos(x)')
axarr[1].set_title(r'$\cos(x)$')
###Output
_____no_output_____
###Markdown
Here we can see that matplotlib has placed the panels too close together. We can adjust that using the subplots_adjust() function, which lets us move the panels further apart or closer together.
###Code
# call subplots to generate a multipanel figure. This means 1 row, 2 columns of figures
f, axarr = plt.subplots(1, 2)
# treat axarr as an array, from left to right
# first panel
axarr[0].plot(x, y)
axarr[0].set_xlabel('x')
axarr[0].set_ylabel('sin(x)')
axarr[0].set_title(r'$\sin(x)$') # LaTeX math mode: the dollar signs render a nicely formatted sin(x) title
# second panel
axarr[1].plot(x, z)
axarr[1].set_xlabel('x')
axarr[1].set_ylabel('cos(x)')
axarr[1].set_title(r'$\cos(x)$')
#add more space between the figures
f.subplots_adjust(wspace = 0.4) #using hspace will put space between them if they were stacked vertically
###Output
_____no_output_____
###Markdown
set_aspect() can be used to change the axis ratio. The axis ratios are squished, so we can fix that.
###Code
# call subplots to generate a multipanel figure. This means 1 row, 2 columns of figures
f, axarr = plt.subplots(1, 2)
# treat axarr as an array, from left to right
# first panel
axarr[0].plot(x, y)
axarr[0].set_xlabel('x')
axarr[0].set_ylabel('sin(x)')
axarr[0].set_title(r'$\sin(x)$') # LaTeX math mode: the dollar signs render a nicely formatted sin(x) title
# second panel
axarr[1].plot(x, z)
axarr[1].set_xlabel('x')
axarr[1].set_ylabel('cos(x)')
axarr[1].set_title(r'$\cos(x)$')
#add more space between the figures
f.subplots_adjust(wspace = 0.4) #using hspace will put space between them if they were stacked vertically
#fix the axis ratio
#here are two possible options
axarr[0].set_aspect('equal') #make the ratio of the tick units equal, a bit counter intuitive
axarr[1].set_aspect(np.pi) #make a square by setting the aspect to be the ratio of the tick unit range
###Output
_____no_output_____
###Markdown
Legends are an easy way to annotate a complicated figure. Let's keep the square aspect ratio, merge the panels into a single figure, remove the titles, and add legends.
###Code
#adjust the size of the figure
fig = plt.figure(figsize=(6,6))
plt.plot(x, y, label=r'$y =\sin(x)$') #add a label to the line
plt.plot(x, z, label=r'$y =\cos(x)$') #add a label to the second line
plt.plot(x, w, label=r'$y =\sin(4x)$') #add a label to the third line
plt.plot(x, v, label=r'$y =\cos(4x)$') #add a label to the fourth line
plt.xlabel(r'$x$') #note set_xlabel vs. xlabel
plt.ylabel(r'$y(x)$') #note set_ylabel vs. ylabel
plt.xlim ([0, 2*np.pi]) #note set_xlim vs. xlim
plt.ylim ([-1.2,1.2]) #note set_ylim vs. ylim
plt.legend (loc=1, framealpha=0.95) #add a legend with a semi-transparent frame in the upper RH corner
#fix the axis ratio
plt.gca().set_aspect(np.pi/1.2) #use "gca" to get current axis()
###Output
_____no_output_____
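###Markdown
One optional last step (an added sketch, not part of the original notebook): once the figure looks right, the figure handle can be written to disk.
###Code
# Assumption: `fig` still refers to the figure created in the previous cell; the filename is arbitrary.
fig.savefig('sines_and_cosines.png', dpi=200, bbox_inches='tight')  # bbox_inches='tight' trims extra whitespace
###Output
_____no_output_____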
files/Review Generator.ipynb | ###Markdown
Boy Names:
###Code
boy_names = """""""""Liam
Noah
Oliver
Elijah
William
James
Benjamin
Lucas
Henry
Alexander
Mason
Michael
Ethan
Daniel
Jacob
Logan
Jackson
Levi
Sebastian
Mateo
Jack
Owen
Theodore
Aiden
Samuel
Joseph
John
David
Wyatt
Matthew
Luke
Asher
Carter
Julian
Grayson
Leo
Jayden
Gabriel
Isaac
Lincoln
Anthony
Hudson
Dylan
Ezra
Thomas
Charles
Christopher
Jaxon
Maverick
Josiah
Isaiah
Andrew
Elias
Joshua
Nathan
Caleb
Ryan
Adrian
Miles
Eli
Nolan
Christian
Aaron
Cameron
Ezekiel
Colton
Luca
Landon
Hunter
Jonathan
Santiago
Axel
Easton
Cooper
Jeremiah
Angel
Roman
Connor
Jameson
Robert
Greyson
Jordan
Ian
Carson
Jaxson
Leonardo
Nicholas
Dominic
Austin
Everett
Brooks
Xavier
Kai
Jose
Parker
Adam
Jace
Wesley
Kayden
Silas
Bennett
Declan
Waylon
Weston
Evan
Emmett
Micah
Ryder
Beau
Damian
Brayden
Gael
Rowan
Harrison
Bryson
Sawyer
Amir
Kingston
Jason
Giovanni
Vincent
Ayden
Chase
Myles
Diego
Nathaniel
Legend
Jonah
River
Tyler
Cole
Braxton
George
Milo
Zachary
Ashton
Luis
Jasper
Kaiden
Adriel
Gavin
Bentley
Calvin
Zion
Juan
Maxwell
Max
Ryker
Carlos
Emmanuel
Jayce
Lorenzo
Ivan
Jude
August
Kevin
Malachi
Elliott
Rhett
Archer
Karter
Arthur
Luka
Elliot
Thiago
Brandon
Camden
Justin
Jesus
Maddox
King
Theo
Enzo
Matteo
Emiliano
Dean
Hayden
Finn
Brody
Antonio
Abel
Alex
Tristan
Graham
Zayden
Judah
Xander
Miguel
Atlas
Messiah
Barrett
Tucker
Timothy
Alan
Edward
Leon
Dawson
Eric
Ace
Victor
Abraham
Nicolas
Jesse
Charlie
Patrick
Walker
Joel
Richard
Beckett
Blake
Alejandro
Avery
Grant
Peter
Oscar
Matias
Amari
Lukas
Andres
Arlo
Colt
Adonis
Kyrie
Steven
Felix
Preston
Marcus
Holden
Emilio
Remington
Jeremy
Kaleb
Brantley
Bryce
Mark
Knox
Israel
Phoenix
Kobe
Nash
Griffin
Caden
Kenneth
Kyler
Hayes
Jax
Rafael
Beckham
Javier
Maximus
Simon
Paul
Omar
Kaden
Kash
Lane
Bryan
Riley
Zane
Louis
Aidan
Paxton
Maximiliano
Karson
Cash
Cayden
Emerson
Tobias
Ronan
Brian
Dallas
Bradley
Jorge
Walter
Josue
Khalil
Damien
Jett
Kairo
Zander
Andre
Cohen
Crew
Hendrix
Colin
Chance
Malakai
Clayton
Daxton
Malcolm
Lennox
Martin
Jaden
Kayson
Bodhi
Francisco
Cody
Erick
Kameron
Atticus
Dante
Jensen
Cruz
Finley
Brady
Joaquin
Anderson
Gunner
Muhammad
Zayn
Derek
Raymond
Kyle
Angelo
Reid
Spencer
Nico
Jaylen
Jake
Prince
Manuel
Ali
Gideon
Stephen
Ellis
Orion
Rylan
Eduardo
Mario
Rory
Cristian
Odin
Tanner
Julius
Callum
Sean
Kane
Ricardo
Travis
Wade
Warren
Fernando
Titus
Leonel
Edwin
Cairo
Corbin
Dakota
Ismael
Colson
Killian
Major
Tate
Gianni
Elian
Remy
Lawson
Niko
Nasir
Kade
Armani
Ezequiel
Marshall
Hector
Desmond
Kason
Garrett
Jared
Cyrus
Russell
Cesar
Tyson
Malik
Donovan
Jaxton
Cade
Romeo
Nehemiah
Sergio
Iker
Caiden
Jay
Pablo
Devin
Jeffrey
Otto
Kamari
Ronin
Johnny
Clark
Ari
Marco
Edgar
Bowen
Jaiden
Grady
Zayne
Sullivan
Jayceon
Sterling
Andy
Conor
Raiden
Royal
Royce
Solomon
Trevor
Winston
Emanuel
Finnegan
Pedro
Luciano
Harvey
Franklin
Noel
Troy
Princeton
Johnathan
Erik
Fabian
Oakley
Rhys
Porter
Hugo
Frank
Damon
Kendrick
Mathias
Milan
Peyton
Wilder
Callan
Gregory
Seth
Matthias
Briggs
Ibrahim
Roberto
Conner
Quinn
Kashton
Sage
Santino
Kolton
Alijah
Dominick
Zyaire
Apollo
Kylo
Reed
Philip
Kian
Shawn
Kaison
Leonidas
Ayaan
Lucca
Memphis
Ford
Baylor
Kyson
Uriel
Allen
Collin
Ruben
Archie
Dalton
Esteban
Adan
Forrest
Alonzo
Isaias
Leland
Jase
Dax
Kasen
Gage
Kamden
Marcos
Jamison
Francis
Hank
Alexis
Tripp
Frederick
Jonas
Stetson
Cassius
Izaiah
Eden
Maximilian
Rocco
Tatum
Keegan
Aziel
Moses
Bruce
Lewis
Braylen
Omari"""
boy_names = boy_names.replace(" ","")
boy_names = boy_names.split("\n")
#print(boy_names)
###Output
_____no_output_____
###Markdown
Girl Names:
###Code
girl_names = """""""""Olivia
Emma
Ava
Charlotte
Sophia
Amelia
Isabella
Mia
Evelyn
Harper
Camila
Gianna
Abigail
Luna
Ella
Elizabeth
Sofia
Emily
Avery
Mila
Scarlett
Eleanor
Madison
Layla
Penelope
Aria
Chloe
Grace
Ellie
Nora
Hazel
Zoey
Riley
Victoria
Lily
Aurora
Violet
Nova
Hannah
Emilia
Zoe
Stella
Everly
Isla
Leah
Lillian
Addison
Willow
Lucy
Paisley
Natalie
Naomi
Eliana
Brooklyn
Elena
Aubrey
Claire
Ivy
Kinsley
Audrey
Maya
Genesis
Skylar
Bella
Aaliyah
Madelyn
Savannah
Anna
Delilah
Serenity
Caroline
Kennedy
Valentina
Ruby
Sophie
Alice
Gabriella
Sadie
Ariana
Allison
Hailey
Autumn
Nevaeh
Natalia
Quinn
Josephine
Sarah
Cora
Emery
Samantha
Piper
Leilani
Eva
Everleigh
Madeline
Lydia
Jade
Peyton
Brielle
Adeline
Vivian
Rylee
Clara
Raelynn
Melanie
Melody
Julia
Athena
Maria
Liliana
Hadley
Arya
Rose
Reagan
Eliza
Adalynn
Kaylee
Lyla
Mackenzie
Alaia
Isabelle
Charlie
Arianna
Mary
Remi
Margaret
Iris
Parker
Ximena
Eden
Ayla
Kylie
Elliana
Josie
Katherine
Faith
Alexandra
Eloise
Adalyn
Amaya
Jasmine
Amara
Daisy
Reese
Valerie
Brianna
Cecilia
Andrea
Summer
Valeria
Norah
Ariella
Esther
Ashley
Emerson
Aubree
Isabel
Anastasia
Ryleigh
Khloe
Taylor
Londyn
Lucia
Emersyn
Callie
Sienna
Blakely
Kehlani
Genevieve
Alina
Bailey
Juniper
Maeve
Molly
Harmony
Georgia
Magnolia
Catalina
Freya
Juliette
Sloane
June
Sara
Ada
Kimberly
River
Ember
Juliana
Aliyah
Millie
Brynlee
Teagan
Morgan
Jordyn
London
Alaina
Olive
Rosalie
Alyssa
Ariel
Finley
Arabella
Journee
Hope
Leila
Alana
Gemma
Vanessa
Gracie
Noelle
Marley
Elise
Presley
Kamila
Zara
Amy
Kayla
Payton
Blake
Ruth
Alani
Annabelle
Sage
Aspen
Laila
Lila
Rachel
Trinity
Daniela
Alexa
Lilly
Lauren
Elsie
Margot
Adelyn
Zuri
Brooke
Sawyer
Lilah
Lola
Selena
Mya
Sydney
Diana
Ana
Vera
Alayna
Nyla
Elaina
Rebecca
Angela
Kali
Alivia
Raegan
Rowan
Phoebe
Camilla
Joanna
Malia
Vivienne
Dakota
Brooklynn
Evangeline
Camille
Jane
Nicole
Catherine
Jocelyn
Julianna
Lena
Lucille
Mckenna
Paige
Adelaide
Charlee
Mariana
Myla
Mckenzie
Tessa
Miriam
Oakley
Kailani
Alayah
Amira
Adaline
Phoenix
Milani
Annie
Lia
Angelina
Harley
Cali
Maggie
Hayden
Leia
Fiona
Briella
Journey
Lennon
Saylor
Jayla
Kaia
Thea
Adriana
Mariah
Juliet
Oaklynn
Kiara
Alexis
Haven
Aniyah
Delaney
Gracelynn
Kendall
Winter
Lilith
Logan
Amiyah
Evie
Alexandria
Gracelyn
Gabriela
Sutton
Harlow
Madilyn
Makayla
Evelynn
Gia
Nina
Amina
Giselle
Brynn
Blair
Amari
Octavia
Michelle
Talia
Demi
Alaya
Kaylani
Izabella
Fatima
Tatum
Makenzie
Lilliana
Arielle
Palmer
Melissa
Willa
Samara
Destiny
Dahlia
Celeste
Ainsley
Rylie
Reign
Laura
Adelynn
Gabrielle
Remington
Wren
Brinley
Amora
Lainey
Collins
Lexi
Aitana
Alessandra
Kenzie
Raelyn
Elle
Everlee
Haisley
Hallie
Wynter
Daleyza
Gwendolyn
Paislee
Ariyah
Veronica
Heidi
Anaya
Cataleya
Kira
Avianna
Felicity
Aylin
Miracle
Sabrina
Lana
Ophelia
Elianna
Royalty
Madeleine
Esmeralda
Joy
Kalani
Esme
Jessica
Leighton
Ariah
Makenna
Nylah
Viviana
Camryn
Cassidy
Dream
Luciana
Maisie
Stevie
Kate
Lyric
Daniella
Alicia
Daphne
Frances
Charli
Raven
Paris
Nayeli
Serena
Heaven
Bianca
Helen
Hattie
Averie
Mabel
Selah
Allie
Marlee
Kinley
Regina
Carmen
Jennifer
Jordan
Alison
Stephanie
Maren
Kayleigh
Angel
Annalise
Jacqueline
Braelynn
Emory
Rosemary
Scarlet
Amanda
Danielle
Emelia
Ryan
Carolina
Astrid
Kensley
Shiloh
Maci
Francesca
Rory
Celine
Kamryn
Zariah
Liana
Poppy
Maliyah
Keira
Skyler
Noa
Skye
Nadia
Addilyn
Rosie
Eve
Sarai
Edith
Jolene
Maddison
Meadow
Charleigh
Matilda
Elliott
Madelynn
Bergen
Leona
Azalea
Katie
Mira
Ari
Kaitlyn
Danna
Cameron
Kyla
Bristol
Kora
Armani
Nia
Malani
Dylan
Remy
Maia
Dior
Legacy"""
girl_names = girl_names.replace(" ","")
girl_names = girl_names.split("\n")
#print(girl_names)
###Output
_____no_output_____
###Markdown
Last Names:
###Code
last_names = """""""""
Smith
Johnson
Williams
Brown
Jones
Garcia
Miller
Davis
Rodriguez
Martinez
Hernandez
Lopez
Gonzalez
Wilson
Anderson
Thomas
Taylor
Moore
Jackson
Martin
Lee
Perez
Thompson
White
Harris
Sanchez
Clark
Ramirez
Lewis
Robinson
Walker
Young
Allen
King
Wright
Scott
Torres
Nguyen
Hill
Flores
Green
Adams
Nelson
Baker
Hall
Rivera
Campbell
Mitchell
Carter
Roberts
Gomez
Phillips
Evans
Turner
Diaz
Parker
Cruz
Edwards
Collins
Reyes
Stewart
Morris
Morales
Murphy
Cook
Rogers
Gutierrez
Ortiz
Morgan
Cooper
Peterson
Bailey
Reed
Kelly
Howard
Ramos
Kim
Cox
Ward
Richardson
Watson
Brooks
Chavez
Wood
James
Bennett
Gray
Mendoza
Ruiz
Hughes
Price
Alvarez
Castillo
Sanders
Patel
Myers
Long
Ross
Foster
Jimenez
Powell
Jenkins
Perry
Russell
Sullivan
Bell
Coleman
Butler
Henderson
Barnes
Gonzales
Fisher
Vasquez
Simmons
Romero
Jordan
Patterson
Alexander
Hamilton
Graham
Reynolds
Griffin
Wallace
Moreno
West
Cole
Hayes
Bryant
Herrera
Gibson
Ellis
Tran
Medina
Aguilar
Stevens
Murray
Ford
Castro
Marshall
Owens
Harrison
Fernandez
Mcdonald
Woods
Washington
Kennedy
Wells
Vargas
Henry
Chen
Freeman
Webb
Tucker
Guzman
Burns
Crawford
Olson
Simpson
Porter
Hunter
Gordon
Mendez
Silva
Shaw
Snyder
Mason
Dixon
Munoz
Hunt
Hicks
Holmes
Palmer
Wagner
Black
Robertson
Boyd
Rose
Stone
Salazar
Fox
Warren
Mills
Meyer
Rice
Schmidt
Garza
Daniels
Ferguson
Nichols
Stephens
Soto
Weaver
Ryan
Gardner
Payne
Grant
Dunn
Kelley
Spencer
Hawkins
Arnold
Pierce
Vazquez
Hansen
Peters
Santos
Hart
Bradley
Knight
Elliott
Cunningham
Duncan
Armstrong
Hudson
Carroll
Lane
Riley
Andrews
Alvarado
Ray
Delgado
Berry
Perkins
Hoffman
Johnston
Matthews
Pena
Richards
Contreras
Willis
Carpenter
Lawrence
Sandoval
Guerrero
George
Chapman
Rios
Estrada
Ortega
Watkins
Greene
Nunez
Wheeler
Valdez
Harper
Burke
Larson
Santiago
Maldonado
Morrison
Franklin
Carlson
Austin
Dominguez
Carr
Lawson
Jacobs
Obrien
Lynch
Singh
Vega
Bishop
Montgomery
Oliver
Jensen
Harvey
Williamson
Gilbert
Dean
Sims
Espinoza
Howell
Li
Wong
Reid
Hanson
Le
Mccoy
Garrett
Burton
Fuller
Wang
Weber
Welch
Rojas
Lucas
Marquez
Fields
Park
Yang
Little
Banks
Padilla
Day
Walsh
Bowman
Schultz
Luna
Fowler
Mejia
Davidson
Acosta
Brewer
Holland
Juarez
Newman
Pearson
Curtis
Cortez
Douglas
Schneider
Joseph
Barrett
Navarro
Figueroa
Keller
Avila
Wade
Molina
Stanley
Hopkins
Campos
Barnett
Bates
Chambers
Caldwell
Beck
Lambert
Miranda
Byrd
Craig
Ayala
Lowe
Frazier
Powers
Neal
Leonard
Gregory
Carrillo
Sutton
Fleming
Rhodes
Shelton
Schwartz
Norris
Jennings
Watts
Duran
Walters
Cohen
Mcdaniel
Moran
Parks
Steele
Vaughn
Becker
Holt
Deleon
Barker
Terry
Hale
Leon
Hail
Benson
Haynes
Horton
Miles
Lyons
Pham
Graves
Bush
Thornton
Wolfe
Warner
Cabrera
Mckinney
Mann
Zimmerman
Dawson
Lara
Fletcher
Page
Mccarthy
Love
Robles
Cervantes
Solis
Erickson
Reeves
Chang
Klein
Salinas
Fuentes
Baldwin
Daniel
Simon
Velasquez
Hardy
Higgins
Aguirre
Lin
Cummings
Chandler
Sharp
Barber
Bowen
Ochoa
Dennis
Robbins
Liu
Ramsey
Francis
Griffith
Paul
Blair
Oconnor
Cardenas
Pacheco
Cross
Calderon
Quinn
Moss
Swanson
Chan
Rivas
Khan
Rodgers
Serrano
Fitzgerald
Rosales
Stevenson
Christensen
Manning
Gill
Curry
Mclaughlin
Harmon
Mcgee
Gross
Doyle
Garner
Newton
Burgess
Reese
Walton
Blake
Trujillo
Adkins
Brady
Goodman
Roman
Webster
Goodwin
Fischer
Huang
Potter
Delacruz
Montoya
Todd
Wu
Hines
Mullins
Castaneda
Malone
Cannon
Tate
Mack
Sherman
Hubbard
Hodges
Zhang
Guerra
Wolf
Valencia
Franco
Saunders
Rowe
Gallagher
Farmer
Hammond
Hampton
Townsend
Ingram
Wise
Gallegos
Clarke
Barton
Schroeder
Maxwell
Waters
Logan
Camacho
Strickland
Norman
Person
Colon
Parsons
Frank
Harrington
Glover
Osborne
Buchanan
Casey
Floyd
Patton
Ibarra
Ball
Tyler
Suarez
Bowers
Orozco
Salas
Cobb
Gibbs
Andrade
Bauer
Conner
Moody
Escobar
Mcguire
Lloyd
Mueller
Hartman
French
Kramer
Mcbride
Pope
Lindsey
Velazquez
Norton
Mccormick
Sparks
Flynn
Yates
Hogan
Marsh
Macias
Villanueva
Zamora
Pratt
Stokes
Owen
Ballard
Lang
Brock
Villarreal
Charles
Drake
Barrera
Cain
Patrick
Pineda
Burnett
Mercado
Santana
Shepherd
Bautista
Ali
Shaffer
Lamb
Trevino
Mckenzie
Hess
Beil
Olsen
Cochran
Morton
Nash
Wilkins
Petersen
Briggs
Shah
Roth
Nicholson
Holloway
Lozano
Flowers
Rangel
Hoover
Arias
Short
Mora
Valenzuela
Bryan
Meyers
Weiss
Underwood
Bass
Greer
Summers
Houston
Carson
Morrow
Clayton
Whitaker
Decker
Yoder
Collier
Zuniga
Carey
Wilcox
Melendez
Poole
Roberson
Larsen
Conley
Davenport
Copeland
Massey
Lam
Huff
Rocha
Cameron
Jefferson
Hood
Monroe
Anthony
Pittman
Huynh
Randall
Singleton
Kirk
Combs
Mathis
Christian
Skinner
Bradford
Richard
Galvan
Wall
Boone
Kirby
Wilkinson
Bridges
Bruce
Atkinson
Velez
Meza
Roy
Vincent
York
Hodge
Villa
Abbott
Allison
Tapia
Gates
Chase
Sosa
Sweeney
Farrell
Wyatt
Dalton
Horn
Barron
Phelps
Yu
Dickerson
Heath
Foley
Atkins
Mathews
Bonilla
Acevedo
Benitez
Zavala
Hensley
Glenn
Cisneros
Harrell
Shields
Rubio
Choi
Huffman
Boyer
Garrison
Arroyo
Bond
Kane
Hancock
Callahan
Dillon
Cline
Wiggins
Grimes
Arellano
Melton
Oneill
Savage
Ho
Beltran
Pitts
Parrish
Ponce
Rich
Booth
Koch
Golden
Ware
Brennan
Mcdowell
Marks
Cantu
Humphrey
Baxter
Sawyer
Clay
Tanner
Hutchinson
Kaur
Berg
Wiley
Gilmore
Russo
Villegas
Hobbs
Keith
Wilkerson
Ahmed
Beard
Mcclain
Montes
Mata
Rosario
Vang
Walter
Henson
Oneal
Mosley
Mcclure
Beasley
Stephenson
Snow
Huerta
Preston
Vance
Barry
Johns
Eaton
Blackwell
Dyer
Prince
Macdonald
Solomon
Guevara
Stafford
English
Hurst
Woodard
Cortes
Shannon
Kemp
Nolan
Mccullough
Merritt
Murillo
Moon
Salgado
Strong
Kline
Cordova
Barajas
Roach
Rosas
Winters
Jacobson
Lester
Knox
Bullock
Kerr
Leach
Meadows
Davila
Orr
Whitehead
Pruitt
Kent
Conway
Mckee
Barr
David
Dejesus
Marin
Berger
Mcintyre
Blankenship
Gaines
Palacios
Cuevas
Bartlett
Durham
Dorsey
Mccall
Odonnell
Stein
Browning
Stout
Lowery
Sloan
Mclean
Hendricks
Calhoun
Sexton
Chung
Gentry
Hull
Duarte
Ellison
Nielsen
Gillespie
Buck
Middleton
Sellers
Leblanc
Esparza
Hardin
Bradshaw
Mcintosh
Howe
Livingston
Frost
Glass
Morse
Knapp
Herman
Stark
Bravo
Noble
Spears
Weeks
Corona
Frederick
Buckley
Mcfarland
Hebert
Enriquez
Hickman
Quintero
Randolph
Schaefer
Walls
Trejo
House
Reilly
Pennington
Michael
Conrad
Giles
Benjamin
Crosby
Fitzpatrick
Donovan
Mays
Mahoney
Valentine
Raymond
Medrano
Hahn
Mcmillan
Small
Bentley
Felix
Peck
Lucero
Boyle
Hanna
Pace
Rush
Hurley
Harding
Mcconnell
Bernal
Nava
Ayers
Everett
Ventura
Avery
Pugh
Mayer
Bender
Shepard
Mcmahon
Landry
Case
Sampson
Moses
Magana
Blackburn
Dunlap
Gould
Duffy
Vaughan
Herring
Mckay
Espinosa
Rivers
Farley
Bernard
Ashley
Friedman
Potts
Truong
Costa
Correa
Blevins
Nixon
Clements
Fry
Delarosa
Best
Benton
Lugo
Portillo
Dougherty
Crane
Haley
Phan
Villalobos
Blanchard
Horne
Finley
Quintana
Lynn
Esquivel
Bean
Dodson
Mullen
Xiong
Hayden
Cano
Levy
Huber
Richmond
Moyer
Lim
Frye
Sheppard
Mccarty
Avalos
Booker
Waller
Parra
Woodward
Jaramillo
Krueger
Rasmussen
Brandt
Peralta
Donaldson
Stuart
Faulkner
Maynard
Galindo
Coffey
Estes
Sanford
Burch
Maddox
Vo
Oconnell
Vu
Andersen
Spence
Mcpherson
Church
Schmitt
Stanton
Leal
Cherry
Compton
Dudley
Sierra
Pollard
Alfaro
Hester
Proctor
Lu
Hinton
Novak
Good
Madden
Mccann
Terrell
Jarvis
Dickson
Reyna
Cantrell
Mayo
Branch
Hendrix
Rollins
Rowland
Whitney
Duke
Odom
Daugherty
Travis
Tang
Archer
"""
last_names = last_names.replace(" ","")
last_names = last_names.split("\n")
#print(last_names)
###Output
_____no_output_____
###Markdown
Sentence Generator
###Code
import random
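# Each of the 40 generated reviews stitches together 1-3 of three sentence templates
# (1: "<person> <verb> the product", 2: "<product> is <adjectives>", 3: a short exclamation),
# occasionally appends a rating, and signs off with a randomly built name.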
for i in range(40):
sent_count_index = random.randint(0,10)
sent_count = [1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3]
sent_count = sent_count[sent_count_index]
#print(sent_count)
sent_struct_index = [1, 2, 3] # we loved the product; it's so affordable and effective; works great
sent_struct = []
for j in range(sent_count):
index = random.randint(0,len(sent_struct_index)-1)
sent_struct.append(sent_struct_index[index])
sent_struct_index.pop(index)
#print(sent_struct)
sentence = ""
for j in range(len(sent_struct)):
product = [" the product", " insert_product_name", " it"]
if sent_struct[j] == 1: # we loved the product
person = ["we", "we", "we", "we", "we", "we", "we", "we", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "i", "my friend referred me and I", "my friend told me about this brand and I", "my best friend", "my husband", "my mom", "my dad", "my brother", "my sister", "my wife"]
person = person[random.randint(0,len(person)-1)]
verb_num = random.randint(1,3)
verb1 = [" loved", " enjoyed", " recommended", " liked", " appreciated"]
verb2 = [" thought"]
if verb_num != 2:
verb = verb1[random.randint(0,len(verb1)-1)]
if verb_num == 2:
verb = verb2[random.randint(0,len(verb2)-1)]
product = product[random.randint(0, len(product)-1)]
sentence1 = person + verb + product
if verb_num == 2:
verb_end1 = [" was", " is"]
verb_end2 = [" awesome", " great"]
verb_end = verb_end1[random.randint(0, len(verb_end1)-1)] + verb_end2[random.randint(0, len(verb_end2)-1)]
sentence1 += verb_end
elif random.randint(0,3) < 3:
end = [" so much", " a lot"]
end = end[random.randint(0, len(end)-1)]
sentence1 += end
sentence1 = sentence1[0].upper() + sentence1[1:]
punctuation = [".", "!"]
punctuation = punctuation[random.randint(0, len(punctuation)-1)]
sentence1 += punctuation
#print(sentence1)
sentence += " "
sentence += sentence1
if sent_struct[j] == 2: # it's so affordable and effective
product = product[random.randint(0, len(product)-1)]
if random.randint(0,2) < 2:
adverb = [" so", " very"]
adverb = adverb[random.randint(0, len(adverb)-1)]
adj_num = random.randint(1,3)
adjs = [" affordable", " effective", " easy to use", " helpful", " fantastic", " satisfying", " efficient"]
adj = adjs[random.randint(0, len(adjs)-1)]
if adj_num == 2:
adjs.remove(adj)
adj += " and"
adj += adjs[random.randint(0, len(adjs)-1)]
if adj_num == 3:
adjs.remove(adj)
adj += ","
temp = adjs[random.randint(0, len(adjs)-1)]
adj += temp
adjs.remove(temp)
just = ""
if random.randint(0,10) == 10:
just += " just"
adj += ", and" + just + adjs[random.randint(0, len(adjs)-1)]
verb = ["'s", " is", " was"]
verb = verb[random.randint(0, len(verb)-1)]
sentence2 = product + verb + adj
sentence2 = sentence2[1:]
sentence2 = sentence2[0].upper() + sentence2[1:]
punctuation = [".", "!"]
punctuation = punctuation[random.randint(0, len(punctuation)-1)]
sentence2 += punctuation
#print(sentence2)
sentence += " "
sentence += sentence2
if sent_struct[j] == 3: # works great
sentence3 = ["works great", "works super quick", "works fast", "works so fast", "good quality", "great quality", "love it", "loved it", "can't believe how good it is", "remarkable", "super effective"]
sentence3 = sentence3[random.randint(0, len(sentence3)-1)]
sentence3 = sentence3[0].upper() + sentence3[1:]
punctuation = [".", "!"]
punctuation = punctuation[random.randint(0, len(punctuation)-1)]
sentence3 += punctuation
sentence += " "
sentence += sentence3
if random.randint(0,20) == 20:
rating = [" 9/10", " 10/10", " 5 stars", " 5 stars", " You need to try this brand", " You gotta try them", " These are a must buy", " 11/10", " Amazing", " Awesome", " I'm telling all my friends"]  # added the missing comma so "must buy" and "11/10" stay separate ratings
rating = rating[random.randint(0, len(rating)-1)]
if random.randint(0,3) == 0:
rating += "!"
sentence += rating
sentence = sentence[1:]
girl_boy_index = random.randint(1,2)
if girl_boy_index == 1: # boy
boy_index = random.randint(0,len(boy_names)-1)
name = boy_names[boy_index]
if girl_boy_index == 2: # girl
girl_index = random.randint(0,len(girl_names)-1)
name = girl_names[girl_index]
if random.randint(1,10) < 7:
last_index = random.randint(0,len(last_names)-1)
name += " " + last_names[last_index]
sentence += "\n -" + name + "\n"
print(sentence)
###Output
It was fantastic, helpful, and effective.
-Rosie
Can't believe how good it is.
-Vanessa
I thought the product is great.
-Austin Aguirre
My sister thought the product is great!
-Paisley Stafford
Insert_product_name was efficient.
-Tate Byrd
We enjoyed insert_product_name a lot! The product was easy to use.
-Karson Zamora
Insert_product_name's effective, satisfying, and fantastic!
-Sullivan Rowland
The product is fantastic, efficient, and effective. Works fast!
-Lilly
My friend told me about this brand and I thought insert_product_name is great! It's effective.
-Elizabeth
It's easy to use, helpful, and effective! My sister thought it is great!
-Parker
It was effective. We thought it was awesome!
-Josephine
We appreciated it a lot.
-Chance
Works super quick! I appreciated insert_product_name so much.
-Mabel
Great quality! I thought it is great! The product is helpful, efficient, and easy to use!
-Joaquin Sosa
The product is effective and affordable. I appreciated insert_product_name.
-Jeremiah
We thought the product was awesome!
-Riley Reeves
Loved it. It is affordable and efficient!
-Juliette
Insert_product_name is effective and affordable!
-Nia
Works fast. Insert_product_name is efficient.
-Theo
Can't believe how good it is!
-Karter
Loved it! Insert_product_name's effective and helpful!
-Mathias Walter
The product is effective, helpful, and efficient. Super effective! I recommended the product a lot!
-Carolina
Love it.
-Lillian
We recommended it a lot. Super effective! Insert_product_name was affordable and fantastic.
-Collins Clayton
Loved it!
-Gael Zavala
We appreciated the product so much! Good quality.
-Ronan
Works super quick. I loved it. It is effective!
-Atlas Collins
Super effective!
-Jorge Davis
Works great! I recommended the product a lot.
-Eric
My sister thought the product was great. Can't believe how good it is. The product is satisfying!
-Ivy Sheppard
We thought it was great!
-Makenna
Insert_product_name's affordable, helpful, and effective!
-Savannah
I thought insert_product_name is awesome. Insert_product_name is efficient.
-Lyla Dalton
My husband appreciated insert_product_name. The product is satisfying and fantastic! Loved it.
-Felix
It is fantastic, effective, and satisfying.
-Cassius
Works super quick! The product is easy to use and satisfying.
-Felicity Woodward
It is easy to use. Super effective!
-Everly Becker
Insert_product_name was easy to use! I appreciated insert_product_name a lot. Great quality!
-Kyrie Steele
It is effective! I thought it is great. Works super quick!
-Lena Herring
Insert_product_name was efficient. I thought the product was great.
-Prince
|
Chapter7/online_driftdetc_PWPAE/globecom2021_PWPAE_IoTID20.ipynb | ###Markdown
PWPAE: An Ensemble Framework for Concept Drift Adaptation in IoT Data StreamsThis is the code for the paper entitled "**PWPAE: An Ensemble Framework for Concept Drift Adaptation in IoT Data Streams**" accepted in 2021 IEEE Global Communications Conference (GLOBECOM). Authors: Li Yang ([email protected]), Dimitrios Michael Manias ([email protected]), and Abdallah Shami ([email protected]) Organization: The Optimized Computing and Communications (OC2) Lab, ECE Department, Western UniversityL. Yang, D. M. Manias, and A. Shami, “PWPAE: An Ensemble Framework for Concept Drift Adaptation in IoT Data Streams,” in 2021 IEEE Glob. Commun. Conf. (GLOBECOM), Madrid, Spain, Dec. 2021. Import libraries
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score, precision_score, recall_score, f1_score
import lightgbm as lgb
import time
###Output
_____no_output_____
###Markdown
Read the sampled IoTIDS20 dataset
###Code
df = pd.read_csv("./data/IoT_2020_b_0.01_fs.csv")
# df = df.sample(n=None, frac=0.1, replace=False, weights=None, random_state=None, axis=0)
# df = df.sort_index()
###Output
_____no_output_____
###Markdown
Train-test split: 10% training set and 90% test set
###Code
X = df.drop(['Label'],axis=1)
y = df['Label']
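# shuffle=False keeps the samples in their original order, which matters when evaluating concept drift on a data stream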
X_train, X_test, y_train, y_test = train_test_split(X,y, train_size = 0.1, test_size = 0.9, shuffle=False,random_state = 0)
###Output
_____no_output_____
###Markdown
Online Learning
Four base online learners for the ensemble:
* Adaptive Random Forest (ARF) model with ADWIN drift detector (ARF-ADWIN)
* Adaptive Random Forest (ARF) model with DDM drift detector (ARF-DDM)
* Streaming Random Patches (SRP) model with ADWIN drift detector (SRP-ADWIN)
* Streaming Random Patches (SRP) model with DDM drift detector (SRP-DDM)
Three other online learners for comparison:
* Extremely Fast Decision Tree (EFDT)
* Hoeffding Tree (HT)
* Leveraging Bagging (LB)
An ensemble online learner proposed in the paper:
* Performance Weighted Probability Averaging Ensemble (PWPAE): it combines the 4 base online learners by weighting them based on their accuracy and classification probabilities
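As a quick, hand-worked illustration of that weighting rule (an added sketch, not part of the original notebook), suppose the four base learners currently have these error rates:
###Code
errors = [0.02, 0.05, 0.03, 0.04]        # hypothetical real-time error rates of the 4 base learners
eps = 0.001                              # same epsilon the PWPAE code below uses to avoid division by zero
inv = [1.0 / (e + eps) for e in errors]  # weight each learner by the reciprocal of its error rate
weights = [v / sum(inv) for v in inv]    # normalize so the weights sum to 1
print(weights)                           # lower-error learners get proportionally larger weights
###Output
_____no_output_____
###Markdown
The generic helper below trains a given river model on the training stream and then evaluates it prequentially on the test stream (predict first, then learn), recording the real-time accuracy.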
###Code
# Import the online learning metrics and algorithms from the River library
from river import metrics
from river import stream
from river import tree,neighbors,naive_bayes,ensemble,linear_model
from river.drift import DDM, ADWIN
# Define a generic adaptive learning function
# The argument "model" means an online adaptive learning algorithm
def adaptive_learning(model, X_train, y_train, X_test, y_test):
metric = metrics.Accuracy() # Use accuracy as the metric
i = 0 # count the number of evaluated data points
t = [] # record the number of evaluated data points
m = [] # record the real-time accuracy
yt = [] # record all the true labels of the test set
yp = [] # record all the predicted labels of the test set
# Learn the training set
for xi1, yi1 in stream.iter_pandas(X_train, y_train):
model.learn_one(xi1,yi1)
# Predict the test set
for xi, yi in stream.iter_pandas(X_test, y_test):
y_pred= model.predict_one(xi) # Predict the test sample
model.learn_one(xi,yi) # Learn the test sample
metric = metric.update(yi, y_pred) # Update the real-time accuracy
t.append(i)
m.append(metric.get()*100)
yt.append(yi)
yp.append(y_pred)
i = i+1
print("Accuracy: "+str(round(accuracy_score(yt,yp),4)*100)+"%")
print("Precision: "+str(round(precision_score(yt,yp),4)*100)+"%")
print("Recall: "+str(round(recall_score(yt,yp),4)*100)+"%")
print("F1-score: "+str(round(f1_score(yt,yp),4)*100)+"%")
return t, m
###Output
_____no_output_____
###Markdown
Base model learning
###Code
# Define a figure function that shows the real-time accuracy changes
def acc_fig(t, m, name):
plt.rcParams.update({'font.size': 15})
plt.figure(1,figsize=(10,6))
sns.set_style("darkgrid")
plt.clf()
plt.plot(t,m,'-b',label='Avg Accuracy: %.2f%%'%(m[-1]))
plt.legend(loc='best')
plt.title(name+' on IoTID20 dataset', fontsize=15)
plt.xlabel('Number of samples')
plt.ylabel('Accuracy (%)')
plt.draw()
%%time
# Use the Adaptive Random Forest (ARF) model with ADWIN drift detector
name1 = "ARF-ADWIN model"
model1 = ensemble.AdaptiveRandomForestClassifier(n_models = 3, drift_detector = ADWIN()) # Define the model
t, m1 = adaptive_learning(model1, X_train, y_train, X_test, y_test) # Learn the model on the dataset
acc_fig(t, m1, name1) # Draw the figure of how the real-time accuracy changes with the number of samples
%%time
# Use the Adaptive Random Forest (ARF) model with DDM drift detector
name2 = "ARF-DDM model"
model2 = ensemble.AdaptiveRandomForestClassifier(n_models = 3, drift_detector = DDM()) # Define the model
t, m2 = adaptive_learning(model2, X_train, y_train, X_test, y_test) # Learn the model on the dataset
acc_fig(t, m2, name2) # Draw the figure of how the real-time accuracy changes with the number of samples
# %%time
# # Use the Streaming Random Patches (SRP) model with ADWIN drift detector
# name3 = "SRP-ADWIN model"
# model3 = ensemble.SRPClassifier(n_models = 3, drift_detector = ADWIN()) # Define the model
# t, m3 = adaptive_learning(model3, X_train, y_train, X_test, y_test) # Learn the model on the dataset
# acc_fig(t, m3, name3) # Draw the figure of how the real-time accuracy changes with the number of samples
%%time
# Use the Streaming Random Patches (SRP) model with DDM drift detector
name4 = "SRP-DDM model"
model4 = ensemble.SRPClassifier(n_models = 3, drift_detector = DDM()) # Define the model
t, m4 = adaptive_learning(model4, X_train, y_train, X_test, y_test) # Learn the model on the dataset
acc_fig(t, m4, name4) # Draw the figure of how the real-time accuracy changes with the number of samples
###Output
Accuracy: 98.54%
Precision: 98.9%
Recall: 99.57000000000001%
F1-score: 99.22999999999999%
CPU times: user 59.6 s, sys: 1.36 s, total: 1min
Wall time: 1min 53s
###Markdown
Comparison model learning
###Code
# %%time
# # Use the Extremely Fast Decision Tree (EFDT) model
# name5 = "EFDT model"
# model5 = tree.ExtremelyFastDecisionTreeClassifier() # Define the model
# t, m5 = adaptive_learning(model5, X_train, y_train, X_test, y_test) # Learn the model on the dataset
# acc_fig(t, m5, name5) # Draw the figure of how the real-time accuracy changes with the number of samples
%%time
# Use the Hoeffding Tree (HT) model
name6 = "HT model"
model6 = tree.HoeffdingTreeClassifier() # Define the model
t, m6 = adaptive_learning(model6, X_train, y_train, X_test, y_test) # Learn the model on the dataset
acc_fig(t, m6, name6) # Draw the figure of how the real-time accuracy changes with the number of samples
%%time
# Use the Leveraging Bagging (LB) model
name7 = "LB model"
model7 = ensemble.LeveragingBaggingClassifier(model=tree.HoeffdingTreeClassifier(),n_models=3) # Define the model
t, m7 = adaptive_learning(model7, X_train, y_train, X_test, y_test) # Learn the model on the dataset
acc_fig(t, m7, name7) # Draw the figure of how the real-time accuracy changes with the number of samples
###Output
Accuracy: 97.65%
Precision: 98.1%
Recall: 99.42999999999999%
F1-score: 98.76%
CPU times: user 1min 3s, sys: 1.86 s, total: 1min 5s
Wall time: 2min 37s
###Markdown
PWPAE ensemble model learning
###Code
# Define the Performance Weighted Probability Averaging Ensemble (PWPAE) model
def PWPAE(X_train, y_train, X_test, y_test):
# Record the real-time accuracy of PWPAE and 4 base learners
metric = metrics.Accuracy()
metric1 = metrics.Accuracy()
metric2 = metrics.Accuracy()
metric3 = metrics.Accuracy()
metric4 = metrics.Accuracy()
i=0
t = []
m = []
m1 = []
m2 = []
m3 = []
m4 = []
yt = []
yp = []
hat1 = ensemble.AdaptiveRandomForestClassifier(n_models=3) # ARF-ADWIN
hat2 = ensemble.SRPClassifier(n_models=3) # SRP-ADWIN
hat3 = ensemble.AdaptiveRandomForestClassifier(n_models=3,drift_detector=DDM(),warning_detector=DDM()) # ARF-DDM
hat4 = ensemble.SRPClassifier(n_models=3,drift_detector=DDM(),warning_detector=DDM()) # SRP-DDM
# The four base learners learn the training set
for xi1, yi1 in stream.iter_pandas(X_train, y_train):
hat1.learn_one(xi1,yi1)
hat2.learn_one(xi1,yi1)
hat3.learn_one(xi1,yi1)
hat4.learn_one(xi1,yi1)
# Predict the test set
for xi, yi in stream.iter_pandas(X_test, y_test):
# The four base learner predict the labels
y_pred1= hat1.predict_one(xi)
y_prob1= hat1.predict_proba_one(xi)
hat1.learn_one(xi,yi)
y_pred2= hat2.predict_one(xi)
y_prob2= hat2.predict_proba_one(xi)
hat2.learn_one(xi,yi)
y_pred3= hat3.predict_one(xi)
y_prob3= hat3.predict_proba_one(xi)
hat3.learn_one(xi,yi)
y_pred4= hat4.predict_one(xi)
y_prob4= hat4.predict_proba_one(xi)
hat4.learn_one(xi,yi)
# Record their real-time accuracy
metric1 = metric1.update(yi, y_pred1)
metric2 = metric2.update(yi, y_pred2)
metric3 = metric3.update(yi, y_pred3)
metric4 = metric4.update(yi, y_pred4)
# Calculate the real-time error rates of four base learners
e1 = 1-metric1.get()
e2 = 1-metric2.get()
e3 = 1-metric3.get()
e4 = 1-metric4.get()
ep = 0.001 # The epsilon used to avoid dividing by 0
# Calculate the weight of each base learner by the reciprocal of its real-time error rate
ea = 1/(e1+ep)+1/(e2+ep)+1/(e3+ep)+1/(e4+ep)
w1 = 1/(e1+ep)/ea
w2 = 1/(e2+ep)/ea
w3 = 1/(e3+ep)/ea
w4 = 1/(e4+ep)/ea
# Make ensemble predictions by the classification probabilities
if y_pred1 == 1:
ypro10=1-y_prob1[1]
ypro11=y_prob1[1]
else:
ypro10=y_prob1[0]
ypro11=1-y_prob1[0]
if y_pred2 == 1:
ypro20=1-y_prob2[1]
ypro21=y_prob2[1]
else:
ypro20=y_prob2[0]
ypro21=1-y_prob2[0]
if y_pred3 == 1:
ypro30=1-y_prob3[1]
ypro31=y_prob3[1]
else:
ypro30=y_prob3[0]
ypro31=1-y_prob3[0]
if y_pred4 == 1:
ypro40=1-y_prob4[1]
ypro41=y_prob4[1]
else:
ypro40=y_prob4[0]
ypro41=1-y_prob4[0]
# Calculate the final probabilities of classes 0 & 1 to make predictions
y_prob_0 = w1*ypro10+w2*ypro20+w3*ypro30+w4*ypro40
y_prob_1 = w1*ypro11+w2*ypro21+w3*ypro31+w4*ypro41
if (y_prob_0>y_prob_1):
y_pred = 0
y_prob = y_prob_0
else:
y_pred = 1
y_prob = y_prob_1
# Update the real-time accuracy of the ensemble model
metric = metric.update(yi, y_pred)
t.append(i)
m.append(metric.get()*100)
yt.append(yi)
yp.append(y_pred)
i=i+1
print("Accuracy: "+str(round(accuracy_score(yt,yp),4)*100)+"%")
print("Precision: "+str(round(precision_score(yt,yp),4)*100)+"%")
print("Recall: "+str(round(recall_score(yt,yp),4)*100)+"%")
print("F1-score: "+str(round(f1_score(yt,yp),4)*100)+"%")
return t, m
%%time
# Use the Performance Weighted Probability Averaging Ensemble (PWPAE) model
name = "Proposed PWPAE model"
t, m = PWPAE(X_train, y_train, X_test, y_test) # Learn the model on the dataset
acc_fig(t, m, name) # Draw the figure of how the real-time accuracy changes with the number of samples
###Output
Accuracy: 99.06%
Precision: 99.1%
Recall: 99.91%
F1-score: 99.5%
CPU times: user 2min 6s, sys: 2.47 s, total: 2min 8s
Wall time: 4min 10s
###Markdown
Model comparison
###Code
# Draw a comprehensive figure to compare the performance of all models
plt.rcParams.update({'font.size': 30})
plt.figure(1,figsize=(24,15))
sns.set_style("darkgrid")
plt.clf()
# Plot the accuracy change of each learner
plt.plot(t,m,'-r',label=name+', Avg Accuracy: %.2f%%'%(m[-1]))
plt.plot(t,m1,'-b',label=name1+', Avg Accuracy: %.2f%%'%(m1[-1]))
plt.plot(t,m2,'-g',label=name2+', Avg Accuracy: %.2f%%'%(m2[-1]))
#plt.plot(t,m3,'orange',label=name3+', Avg Accuracy: %.2f%%'%(m3[-1]))
plt.plot(t,m4,'black',label=name4+', Avg Accuracy: %.2f%%'%(m4[-1]))
#plt.plot(t,m5,'magenta',label=name5+', Avg Accuracy: %.2f%%'%(m5[-1]))
plt.plot(t,m6,'grey',label=name6+', Avg Accuracy: %.2f%%'%(m6[-1]))
plt.plot(t,m7,'brown',label=name7+', Avg Accuracy: %.2f%%'%(m7[-1]))
# Draw the drift points/time
dr = [0,270,600]
for i in range(len(dr)):
if i!=0:
plt.text(dr[i]-500, 100.8, 'Drift '+str(i), c = "red", fontsize = 25)
plt.vlines(dr[i], 0, 100, colors = "red", linewidth=4, linestyles = "dashed")
plt.legend(loc='lower right')
plt.ylim(85, 102)
plt.title('IoTID20', fontsize=40)
plt.xlabel('Number of samples')
plt.ylabel('Accuracy (%)')
plt.draw()
###Output
_____no_output_____
harness.ipynb
###Markdown
Using 2a method with head size ratio
###Code
run(videosArray,videosHeadSizeRatio,'2a')
###Output
plotting for Videos\high-score-cat.mp4 with ratio 0.044 with threshold 14
###Markdown
Using 2b method with head size ratio
###Code
#run(videosArray,videosHeadSizeRatio,'2b') - this method is not working: the mag values become really small (less than 2) and
# when we count the percentage of movement (for instance, cells above 10), we get 0
###Output
_____no_output_____
###Markdown
Using 2c method with head size ratio
###Code
run(videosArray,videosHeadSizeRatio,'2c')
###Output
plotting for Videos\high-score-cat.mp4 with ratio 0.044 with threshold 10
###Markdown
Using 2a method with relative head size
###Code
run(videosArray,videosHeadSizeRelativeRatio,'2a')
###Output
plotting for Videos\high-score-cat.mp4 with ratio 2 with threshold 18.0
###Markdown
Using 2b method with relative head size
###Code
# run(videosArray,videosHeadSizeRelativeRatio,'2b')
###Output
_____no_output_____
###Markdown
Using 2c method with relative head size
###Code
run(videosArray,videosHeadSizeRelativeRatio,'2c')
###Output
plotting for Videos\high-score-cat.mp4 with ratio 2 with threshold 10
###Markdown
Using 2a method with head size ratio with same scale
###Code
run(videosArray,videosHeadSizeRatio,'2a',1)
###Output
plotting for Videos\high-score-cat.mp4 with ratio 0.044 with threshold 14
###Markdown
Using 2c method with head size ratio with same scale
###Code
run(videosArray,videosHeadSizeRatio,'2c',1)
#Sort results
# videosAndScore = dict(zip(videosArray,videosScore))
# {k: v for k, v in sorted(videosAndScore.items(), key=lambda item: item[1],reverse=True)}
###Output
_____no_output_____
week 4/Building+your+Deep+Neural+Network+-+Step+by+Step+v5.ipynb
###Markdown
Building your Deep Neural Network: Step by StepWelcome to your week 4 assignment (part 1 of 2)! You have previously trained a 2-layer Neural Network (with a single hidden layer). This week, you will build a deep neural network, with as many layers as you want!- In this notebook, you will implement all the functions required to build a deep neural network.- In the next assignment, you will use these functions to build a deep neural network for image classification.**After this assignment you will be able to:**- Use non-linear units like ReLU to improve your model- Build a deeper neural network (with more than 1 hidden layer)- Implement an easy-to-use neural network class**Notation**:- Superscript $[l]$ denotes a quantity associated with the $l^{th}$ layer. - Example: $a^{[L]}$ is the $L^{th}$ layer activation. $W^{[L]}$ and $b^{[L]}$ are the $L^{th}$ layer parameters.- Superscript $(i)$ denotes a quantity associated with the $i^{th}$ example. - Example: $x^{(i)}$ is the $i^{th}$ training example.- Lowerscript $i$ denotes the $i^{th}$ entry of a vector. - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the $l^{th}$ layer's activations).Let's get started! 1 - PackagesLet's first import all the packages that you will need during this assignment. - [numpy](www.numpy.org) is the main package for scientific computing with Python.- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.- dnn_utils provides some necessary functions for this notebook.- testCases provides some test cases to assess the correctness of your functions- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work. Please don't change the seed.
###Code
import numpy as np
import h5py
import matplotlib.pyplot as plt
from testCases_v3 import *
from dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
np.random.seed(1)
###Output
/opt/conda/lib/python3.5/site-packages/matplotlib/font_manager.py:273: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment.
warnings.warn('Matplotlib is building the font cache using fc-list. This may take a moment.')
/opt/conda/lib/python3.5/site-packages/matplotlib/font_manager.py:273: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment.
warnings.warn('Matplotlib is building the font cache using fc-list. This may take a moment.')
###Markdown
2 - Outline of the AssignmentTo build your neural network, you will be implementing several "helper functions". These helper functions will be used in the next assignment to build a two-layer neural network and an L-layer neural network. Each small helper function you will implement will have detailed instructions that will walk you through the necessary steps. Here is an outline of this assignment, you will:- Initialize the parameters for a two-layer network and for an $L$-layer neural network.- Implement the forward propagation module (shown in purple in the figure below). - Complete the LINEAR part of a layer's forward propagation step (resulting in $Z^{[l]}$). - We give you the ACTIVATION function (relu/sigmoid). - Combine the previous two steps into a new [LINEAR->ACTIVATION] forward function. - Stack the [LINEAR->RELU] forward function L-1 time (for layers 1 through L-1) and add a [LINEAR->SIGMOID] at the end (for the final layer $L$). This gives you a new L_model_forward function.- Compute the loss.- Implement the backward propagation module (denoted in red in the figure below). - Complete the LINEAR part of a layer's backward propagation step. - We give you the gradient of the ACTIVATE function (relu_backward/sigmoid_backward) - Combine the previous two steps into a new [LINEAR->ACTIVATION] backward function. - Stack [LINEAR->RELU] backward L-1 times and add [LINEAR->SIGMOID] backward in a new L_model_backward function- Finally update the parameters. **Figure 1****Note** that for every forward function, there is a corresponding backward function. That is why at every step of your forward module you will be storing some values in a cache. The cached values are useful for computing gradients. In the backpropagation module you will then use the cache to calculate the gradients. This assignment will show you exactly how to carry out each of these steps. 3 - InitializationYou will write two helper functions that will initialize the parameters for your model. The first function will be used to initialize parameters for a two layer model. The second one will generalize this initialization process to $L$ layers. 3.1 - 2-layer Neural Network**Exercise**: Create and initialize the parameters of the 2-layer neural network.**Instructions**:- The model's structure is: *LINEAR -> RELU -> LINEAR -> SIGMOID*. - Use random initialization for the weight matrices. Use `np.random.randn(shape)*0.01` with the correct shape.- Use zero initialization for the biases. Use `np.zeros(shape)`.
###Code
# GRADED FUNCTION: initialize_parameters
def initialize_parameters(n_x, n_h, n_y):
"""
Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
parameters -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
b2 -- bias vector of shape (n_y, 1)
"""
np.random.seed(1)
### START CODE HERE ### (≈ 4 lines of code)
W1 = np.random.randn(n_h, n_x) * 0.01
b1 = np.zeros((n_h, 1)) * 0.01
W2 = np.random.randn(n_y, n_h) * 0.01
b2 = np.zeros((n_y, 1)) * 0.01
### END CODE HERE ###
assert(W1.shape == (n_h, n_x))
assert(b1.shape == (n_h, 1))
assert(W2.shape == (n_y, n_h))
assert(b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
parameters = initialize_parameters(3,2,1)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
###Output
W1 = [[ 0.01624345 -0.00611756 -0.00528172]
[-0.01072969 0.00865408 -0.02301539]]
b1 = [[ 0.]
[ 0.]]
W2 = [[ 0.01744812 -0.00761207]]
b2 = [[ 0.]]
###Markdown
**Expected output**: **W1** [[ 0.01624345 -0.00611756 -0.00528172] [-0.01072969 0.00865408 -0.02301539]] **b1** [[ 0.] [ 0.]] **W2** [[ 0.01744812 -0.00761207]] **b2** [[ 0.]] 3.2 - L-layer Neural NetworkThe initialization for a deeper L-layer neural network is more complicated because there are many more weight matrices and bias vectors. When completing the `initialize_parameters_deep`, you should make sure that your dimensions match between each layer. Recall that $n^{[l]}$ is the number of units in layer $l$. Thus for example if the size of our input $X$ is $(12288, 209)$ (with $m=209$ examples) then: **Shape of W** **Shape of b** **Activation** **Shape of Activation** **Layer 1** $(n^{[1]},12288)$ $(n^{[1]},1)$ $Z^{[1]} = W^{[1]} X + b^{[1]} $ $(n^{[1]},209)$ **Layer 2** $(n^{[2]}, n^{[1]})$ $(n^{[2]},1)$ $Z^{[2]} = W^{[2]} A^{[1]} + b^{[2]}$ $(n^{[2]}, 209)$ $\vdots$ $\vdots$ $\vdots$ $\vdots$ $\vdots$ **Layer L-1** $(n^{[L-1]}, n^{[L-2]})$ $(n^{[L-1]}, 1)$ $Z^{[L-1]} = W^{[L-1]} A^{[L-2]} + b^{[L-1]}$ $(n^{[L-1]}, 209)$ **Layer L** $(n^{[L]}, n^{[L-1]})$ $(n^{[L]}, 1)$ $Z^{[L]} = W^{[L]} A^{[L-1]} + b^{[L]}$ $(n^{[L]}, 209)$ Remember that when we compute $W X + b$ in python, it carries out broadcasting. For example, if: $$ W = \begin{bmatrix} j & k & l\\ m & n & o \\ p & q & r \end{bmatrix}\;\;\; X = \begin{bmatrix} a & b & c\\ d & e & f \\ g & h & i \end{bmatrix} \;\;\; b =\begin{bmatrix} s \\ t \\ u\end{bmatrix}\tag{2}$$Then $WX + b$ will be:$$ WX + b = \begin{bmatrix} (ja + kd + lg) + s & (jb + ke + lh) + s & (jc + kf + li)+ s\\ (ma + nd + og) + t & (mb + ne + oh) + t & (mc + nf + oi) + t\\ (pa + qd + rg) + u & (pb + qe + rh) + u & (pc + qf + ri)+ u\end{bmatrix}\tag{3} $$ **Exercise**: Implement initialization for an L-layer Neural Network. **Instructions**:- The model's structure is *[LINEAR -> RELU] $ \times$ (L-1) -> LINEAR -> SIGMOID*. I.e., it has $L-1$ layers using a ReLU activation function followed by an output layer with a sigmoid activation function.- Use random initialization for the weight matrices. Use `np.random.rand(shape) * 0.01`.- Use zeros initialization for the biases. Use `np.zeros(shape)`.- We will store $n^{[l]}$, the number of units in different layers, in a variable `layer_dims`. For example, the `layer_dims` for the "Planar Data classification model" from last week would have been [2,4,1]: There were two inputs, one hidden layer with 4 hidden units, and an output layer with 1 output unit. Thus means `W1`'s shape was (4,2), `b1` was (4,1), `W2` was (1,4) and `b2` was (1,1). Now you will generalize this to $L$ layers! - Here is the implementation for $L=1$ (one layer neural network). It should inspire you to implement the general case (L-layer neural network).```python if L == 1: parameters["W" + str(L)] = np.random.randn(layer_dims[1], layer_dims[0]) * 0.01 parameters["b" + str(L)] = np.zeros((layer_dims[1], 1))```
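As a quick numeric illustration of the broadcasting described above (a sketch for intuition only, not part of the graded function), the column vector $b$ is added to every column of $WX$:
```python
import numpy as np

W = np.ones((3, 3))                 # shape (n_l, n_{l-1})
X = np.arange(9).reshape(3, 3)      # shape (n_{l-1}, m), with m = 3 examples
b = np.array([[1.], [2.], [3.]])    # shape (n_l, 1) bias column vector

Z = np.dot(W, X) + b                # b is broadcast across the m columns
print(Z.shape)                      # (3, 3)
```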
###Code
# GRADED FUNCTION: initialize_parameters_deep
def initialize_parameters_deep(layer_dims):
"""
Arguments:
layer_dims -- python array (list) containing the dimensions of each layer in our network
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
bl -- bias vector of shape (layer_dims[l], 1)
"""
np.random.seed(3)
parameters = {}
L = len(layer_dims) # number of layers in the network
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) * 0.01
parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
### END CODE HERE ###
assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))
assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))
return parameters
parameters = initialize_parameters_deep([5,4,3])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
###Output
W1 = [[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388]
[-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218]
[-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034]
[-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]]
b1 = [[ 0.]
[ 0.]
[ 0.]
[ 0.]]
W2 = [[-0.01185047 -0.0020565 0.01486148 0.00236716]
[-0.01023785 -0.00712993 0.00625245 -0.00160513]
[-0.00768836 -0.00230031 0.00745056 0.01976111]]
b2 = [[ 0.]
[ 0.]
[ 0.]]
###Markdown
**Expected output**: **W1** [[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388] [-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218] [-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034] [-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]] **b1** [[ 0.] [ 0.] [ 0.] [ 0.]] **W2** [[-0.01185047 -0.0020565 0.01486148 0.00236716] [-0.01023785 -0.00712993 0.00625245 -0.00160513] [-0.00768836 -0.00230031 0.00745056 0.01976111]] **b2** [[ 0.] [ 0.] [ 0.]] 4 - Forward propagation module 4.1 - Linear Forward Now that you have initialized your parameters, you will do the forward propagation module. You will start by implementing some basic functions that you will use later when implementing the model. You will complete three functions in this order:- LINEAR- LINEAR -> ACTIVATION where ACTIVATION will be either ReLU or Sigmoid. - [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID (whole model)The linear forward module (vectorized over all the examples) computes the following equations:$$Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}\tag{4}$$where $A^{[0]} = X$. **Exercise**: Build the linear part of forward propagation.**Reminder**:The mathematical representation of this unit is $Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}$. You may also find `np.dot()` useful. If your dimensions don't match, printing `W.shape` may help.
###Code
# GRADED FUNCTION: linear_forward
def linear_forward(A, W, b):
"""
Implement the linear part of a layer's forward propagation.
Arguments:
A -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
Returns:
Z -- the input of the activation function, also called pre-activation parameter
cache -- a python dictionary containing "A", "W" and "b" ; stored for computing the backward pass efficiently
"""
### START CODE HERE ### (≈ 1 line of code)
Z = np.dot(W, A) + b
### END CODE HERE ###
assert(Z.shape == (W.shape[0], A.shape[1]))
cache = (A, W, b)
return Z, cache
A, W, b = linear_forward_test_case()
Z, linear_cache = linear_forward(A, W, b)
print("Z = " + str(Z))
###Output
Z = [[ 3.26295337 -1.23429987]]
###Markdown
**Expected output**: **Z** [[ 3.26295337 -1.23429987]] 4.2 - Linear-Activation ForwardIn this notebook, you will use two activation functions:- **Sigmoid**: $\sigma(Z) = \sigma(W A + b) = \frac{1}{ 1 + e^{-(W A + b)}}$. We have provided you with the `sigmoid` function. This function returns **two** items: the activation value "`a`" and a "`cache`" that contains "`Z`" (it's what we will feed in to the corresponding backward function). To use it you could just call: ``` pythonA, activation_cache = sigmoid(Z)```- **ReLU**: The mathematical formula for ReLu is $A = RELU(Z) = max(0, Z)$. We have provided you with the `relu` function. This function returns **two** items: the activation value "`A`" and a "`cache`" that contains "`Z`" (it's what we will feed in to the corresponding backward function). To use it you could just call:``` pythonA, activation_cache = relu(Z)``` For more convenience, you are going to group two functions (Linear and Activation) into one function (LINEAR->ACTIVATION). Hence, you will implement a function that does the LINEAR forward step followed by an ACTIVATION forward step.**Exercise**: Implement the forward propagation of the *LINEAR->ACTIVATION* layer. Mathematical relation is: $A^{[l]} = g(Z^{[l]}) = g(W^{[l]}A^{[l-1]} +b^{[l]})$ where the activation "g" can be sigmoid() or relu(). Use linear_forward() and the correct activation function.
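For reference, the two helpers provided in `dnn_utils_v2` can be assumed to look roughly like this (a sketch only — the shipped implementation may differ in details):
```python
import numpy as np

def sigmoid(Z):
    A = 1 / (1 + np.exp(-Z))
    cache = Z                # Z is cached for the backward pass
    return A, cache

def relu(Z):
    A = np.maximum(0, Z)
    cache = Z
    return A, cache
```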
###Code
# GRADED FUNCTION: linear_activation_forward
def linear_activation_forward(A_prev, W, b, activation):
"""
Implement the forward propagation for the LINEAR->ACTIVATION layer
Arguments:
A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
A -- the output of the activation function, also called the post-activation value
cache -- a python dictionary containing "linear_cache" and "activation_cache";
stored for computing the backward pass efficiently
"""
if activation == "sigmoid":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
### START CODE HERE ### (≈ 2 lines of code)
Z, linear_cache = linear_forward(A_prev, W, b);
A, activation_cache = sigmoid(Z);
### END CODE HERE ###
elif activation == "relu":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
### START CODE HERE ### (≈ 2 lines of code)
Z, linear_cache = linear_forward(A_prev, W, b);
A, activation_cache = relu(Z);
### END CODE HERE ###
assert (A.shape == (W.shape[0], A_prev.shape[1]))
cache = (linear_cache, activation_cache)
return A, cache
A_prev, W, b = linear_activation_forward_test_case()
A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "sigmoid")
print("With sigmoid: A = " + str(A))
A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "relu")
print("With ReLU: A = " + str(A))
###Output
With sigmoid: A = [[ 0.96890023 0.11013289]]
With ReLU: A = [[ 3.43896131 0. ]]
###Markdown
**Expected output**: **With sigmoid: A ** [[ 0.96890023 0.11013289]] **With ReLU: A ** [[ 3.43896131 0. ]] **Note**: In deep learning, the "[LINEAR->ACTIVATION]" computation is counted as a single layer in the neural network, not two layers. d) L-Layer Model For even more convenience when implementing the $L$-layer Neural Net, you will need a function that replicates the previous one (`linear_activation_forward` with RELU) $L-1$ times, then follows that with one `linear_activation_forward` with SIGMOID. **Figure 2** : *[LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model**Exercise**: Implement the forward propagation of the above model.**Instruction**: In the code below, the variable `AL` will denote $A^{[L]} = \sigma(Z^{[L]}) = \sigma(W^{[L]} A^{[L-1]} + b^{[L]})$. (This is sometimes also called `Yhat`, i.e., this is $\hat{Y}$.) **Tips**:- Use the functions you had previously written - Use a for loop to replicate [LINEAR->RELU] (L-1) times- Don't forget to keep track of the caches in the "caches" list. To add a new value `c` to a `list`, you can use `list.append(c)`.
###Code
# GRADED FUNCTION: L_model_forward
def L_model_forward(X, parameters):
"""
Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation
Arguments:
X -- data, numpy array of shape (input size, number of examples)
parameters -- output of initialize_parameters_deep()
Returns:
AL -- last post-activation value
caches -- list of caches containing:
every cache of linear_relu_forward() (there are L-1 of them, indexed from 0 to L-2)
the cache of linear_sigmoid_forward() (there is one, indexed L-1)
"""
caches = []
A = X
L = len(parameters) // 2 # number of layers in the neural network
# Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list.
for l in range(1, L):
A_prev = A
### START CODE HERE ### (≈ 2 lines of code)
A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)], "relu" )
caches.append(cache);
### END CODE HERE ###
# Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list.
### START CODE HERE ### (≈ 2 lines of code)
AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], "sigmoid" )
caches.append(cache);
### END CODE HERE ###
assert(AL.shape == (1,X.shape[1]))
return AL, caches
X, parameters = L_model_forward_test_case_2hidden()
AL, caches = L_model_forward(X, parameters)
print("AL = " + str(AL))
print("Length of caches list = " + str(len(caches)))
###Output
AL = [[ 0.03921668 0.70498921 0.19734387 0.04728177]]
Length of caches list = 3
###Markdown
**AL** [[ 0.03921668 0.70498921 0.19734387 0.04728177]] **Length of caches list ** 3 Great! Now you have a full forward propagation that takes the input X and outputs a row vector $A^{[L]}$ containing your predictions. It also records all intermediate values in "caches". Using $A^{[L]}$, you can compute the cost of your predictions. 5 - Cost functionNow you will implement forward and backward propagation. You need to compute the cost, because you want to check if your model is actually learning.**Exercise**: Compute the cross-entropy cost $J$, using the following formula: $$-\frac{1}{m} \sum\limits_{i = 1}^{m} (y^{(i)}\log\left(a^{[L] (i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right)) \tag{7}$$
###Code
# GRADED FUNCTION: compute_cost
def compute_cost(AL, Y):
"""
Implement the cost function defined by equation (7).
Arguments:
AL -- probability vector corresponding to your label predictions, shape (1, number of examples)
Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)
Returns:
cost -- cross-entropy cost
"""
m = Y.shape[1]
# Compute loss from aL and y.
### START CODE HERE ### (≈ 1 lines of code)
cost = Y * np.log(AL) + (1-Y) * np.log(1-AL);
cost = - np.sum(cost, keepdims= True)/m;
### END CODE HERE ###
cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).
assert(cost.shape == ())
return cost
Y, AL = compute_cost_test_case()
print("cost = " + str(compute_cost(AL, Y)))
###Output
cost = 0.414931599615397
###Markdown
**Expected Output**: **cost** 0.41493159961539694 6 - Backward propagation moduleJust like with forward propagation, you will implement helper functions for backpropagation. Remember that back propagation is used to calculate the gradient of the loss function with respect to the parameters. **Reminder**: **Figure 3** : Forward and Backward propagation for *LINEAR->RELU->LINEAR->SIGMOID* *The purple blocks represent the forward propagation, and the red blocks represent the backward propagation.* <!-- For those of you who are expert in calculus (you don't need to be to do this assignment), the chain rule of calculus can be used to derive the derivative of the loss $\mathcal{L}$ with respect to $z^{[1]}$ in a 2-layer network as follows:$$\frac{d \mathcal{L}(a^{[2]},y)}{{dz^{[1]}}} = \frac{d\mathcal{L}(a^{[2]},y)}{{da^{[2]}}}\frac{{da^{[2]}}}{{dz^{[2]}}}\frac{{dz^{[2]}}}{{da^{[1]}}}\frac{{da^{[1]}}}{{dz^{[1]}}} \tag{8} $$In order to calculate the gradient $dW^{[1]} = \frac{\partial L}{\partial W^{[1]}}$, you use the previous chain rule and you do $dW^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial W^{[1]}}$. During the backpropagation, at each step you multiply your current gradient by the gradient corresponding to the specific layer to get the gradient you wanted.Equivalently, in order to calculate the gradient $db^{[1]} = \frac{\partial L}{\partial b^{[1]}}$, you use the previous chain rule and you do $db^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial b^{[1]}}$.This is why we talk about **backpropagation**.!-->Now, similar to forward propagation, you are going to build the backward propagation in three steps:- LINEAR backward- LINEAR -> ACTIVATION backward where ACTIVATION computes the derivative of either the ReLU or sigmoid activation- [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID backward (whole model) 6.1 - Linear backwardFor layer $l$, the linear part is: $Z^{[l]} = W^{[l]} A^{[l-1]} + b^{[l]}$ (followed by an activation).Suppose you have already calculated the derivative $dZ^{[l]} = \frac{\partial \mathcal{L} }{\partial Z^{[l]}}$. You want to get $(dW^{[l]}, db^{[l]} dA^{[l-1]})$. **Figure 4** The three outputs $(dW^{[l]}, db^{[l]}, dA^{[l]})$ are computed using the input $dZ^{[l]}$.Here are the formulas you need:$$ dW^{[l]} = \frac{\partial \mathcal{L} }{\partial W^{[l]}} = \frac{1}{m} dZ^{[l]} A^{[l-1] T} \tag{8}$$$$ db^{[l]} = \frac{\partial \mathcal{L} }{\partial b^{[l]}} = \frac{1}{m} \sum_{i = 1}^{m} dZ^{[l](i)}\tag{9}$$$$ dA^{[l-1]} = \frac{\partial \mathcal{L} }{\partial A^{[l-1]}} = W^{[l] T} dZ^{[l]} \tag{10}$$ **Exercise**: Use the 3 formulas above to implement linear_backward().
###Code
# GRADED FUNCTION: linear_backward
def linear_backward(dZ, cache):
"""
Implement the linear portion of backward propagation for a single layer (layer l)
Arguments:
dZ -- Gradient of the cost with respect to the linear output (of current layer l)
cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
db -- Gradient of the cost with respect to b (current layer l), same shape as b
"""
A_prev, W, b = cache
m = A_prev.shape[1]
### START CODE HERE ### (≈ 3 lines of code)
dW = np.dot(dZ, A_prev.T) / m;
    db = np.sum(dZ, axis = 1, keepdims= True) / m   # sum dZ over the examples (axis=1); keepdims keeps the (n_l, 1) column shape
dA_prev = np.dot(W.T, dZ);
### END CODE HERE ###
assert (dA_prev.shape == A_prev.shape)
assert (dW.shape == W.shape)
#print("current db, b shape", db.shape, b.shape);
assert (db.shape == b.shape)
return dA_prev, dW, db
# Set up some test inputs
dZ, linear_cache = linear_backward_test_case()
dA_prev, dW, db = linear_backward(dZ, linear_cache)
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
###Output
dA_prev = [[ 0.51822968 -0.19517421]
[-0.40506361 0.15255393]
[ 2.37496825 -0.89445391]]
dW = [[-0.10076895 1.40685096 1.64992505]]
db = [[ 0.50629448]]
###Markdown
**Expected Output**: **dA_prev** [[ 0.51822968 -0.19517421] [-0.40506361 0.15255393] [ 2.37496825 -0.89445391]] **dW** [[-0.10076895 1.40685096 1.64992505]] **db** [[ 0.50629448]] 6.2 - Linear-Activation backwardNext, you will create a function that merges the two helper functions: **`linear_backward`** and the backward step for the activation **`linear_activation_backward`**. To help you implement `linear_activation_backward`, we provided two backward functions:- **`sigmoid_backward`**: Implements the backward propagation for SIGMOID unit. You can call it as follows:```pythondZ = sigmoid_backward(dA, activation_cache)```- **`relu_backward`**: Implements the backward propagation for RELU unit. You can call it as follows:```pythondZ = relu_backward(dA, activation_cache)```If $g(.)$ is the activation function, `sigmoid_backward` and `relu_backward` compute $$dZ^{[l]} = dA^{[l]} * g'(Z^{[l]}) \tag{11}$$. **Exercise**: Implement the backpropagation for the *LINEAR->ACTIVATION* layer.
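For intuition, the provided backward helpers essentially implement $dZ^{[l]} = dA^{[l]} * g'(Z^{[l]})$ above (a sketch, assuming the activation cache stores $Z$; the shipped `dnn_utils_v2` versions may differ in details):
```python
import numpy as np

def sigmoid_backward(dA, cache):
    Z = cache
    s = 1 / (1 + np.exp(-Z))
    dZ = dA * s * (1 - s)        # dA * sigma'(Z)
    return dZ

def relu_backward(dA, cache):
    Z = cache
    dZ = np.array(dA, copy=True)
    dZ[Z <= 0] = 0               # ReLU gradient is 0 where Z <= 0, 1 elsewhere
    return dZ
```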
###Code
# GRADED FUNCTION: linear_activation_backward
def linear_activation_backward(dA, cache, activation):
"""
Implement the backward propagation for the LINEAR->ACTIVATION layer.
Arguments:
dA -- post-activation gradient for current layer l
cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
db -- Gradient of the cost with respect to b (current layer l), same shape as b
"""
linear_cache, activation_cache = cache
if activation == "relu":
### START CODE HERE ### (≈ 2 lines of code)
dZ = relu_backward(dA, activation_cache);
dA_prev, dW, db = linear_backward(dZ, linear_cache);
### END CODE HERE ###
elif activation == "sigmoid":
### START CODE HERE ### (≈ 2 lines of code)
dZ = sigmoid_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache);
### END CODE HERE ###
return dA_prev, dW, db
AL, linear_activation_cache = linear_activation_backward_test_case()
dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "sigmoid")
print ("sigmoid:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db) + "\n")
dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "relu")
print ("relu:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
###Output
sigmoid:
dA_prev = [[ 0.11017994 0.01105339]
[ 0.09466817 0.00949723]
[-0.05743092 -0.00576154]]
dW = [[ 0.10266786 0.09778551 -0.01968084]]
db = [[-0.05729622]]
relu:
dA_prev = [[ 0.44090989 0. ]
[ 0.37883606 0. ]
[-0.2298228 0. ]]
dW = [[ 0.44513824 0.37371418 -0.10478989]]
db = [[-0.20837892]]
###Markdown
**Expected output with sigmoid:** dA_prev [[ 0.11017994 0.01105339] [ 0.09466817 0.00949723] [-0.05743092 -0.00576154]] dW [[ 0.10266786 0.09778551 -0.01968084]] db [[-0.05729622]] **Expected output with relu:** dA_prev [[ 0.44090989 0. ] [ 0.37883606 0. ] [-0.2298228 0. ]] dW [[ 0.44513824 0.37371418 -0.10478989]] db [[-0.20837892]] 6.3 - L-Model Backward Now you will implement the backward function for the whole network. Recall that when you implemented the `L_model_forward` function, at each iteration, you stored a cache which contains (X,W,b, and z). In the back propagation module, you will use those variables to compute the gradients. Therefore, in the `L_model_backward` function, you will iterate through all the hidden layers backward, starting from layer $L$. On each step, you will use the cached values for layer $l$ to backpropagate through layer $l$. Figure 5 below shows the backward pass. **Figure 5** : Backward pass ** Initializing backpropagation**:To backpropagate through this network, we know that the output is, $A^{[L]} = \sigma(Z^{[L]})$. Your code thus needs to compute `dAL` $= \frac{\partial \mathcal{L}}{\partial A^{[L]}}$.To do so, use this formula (derived using calculus which you don't need in-depth knowledge of):```pythondAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) derivative of cost with respect to AL```You can then use this post-activation gradient `dAL` to keep going backward. As seen in Figure 5, you can now feed in `dAL` into the LINEAR->SIGMOID backward function you implemented (which will use the cached values stored by the L_model_forward function). After that, you will have to use a `for` loop to iterate through all the other layers using the LINEAR->RELU backward function. You should store each dA, dW, and db in the grads dictionary. To do so, use this formula : $$grads["dW" + str(l)] = dW^{[l]}\tag{15} $$For example, for $l=3$ this would store $dW^{[l]}$ in `grads["dW3"]`.**Exercise**: Implement backpropagation for the *[LINEAR->RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model.
###Code
# GRADED FUNCTION: L_model_backward
def L_model_backward(AL, Y, caches):
"""
Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group
Arguments:
AL -- probability vector, output of the forward propagation (L_model_forward())
Y -- true "label" vector (containing 0 if non-cat, 1 if cat)
caches -- list of caches containing:
every cache of linear_activation_forward() with "relu" (it's caches[l], for l in range(L-1) i.e l = 0...L-2)
the cache of linear_activation_forward() with "sigmoid" (it's caches[L-1])
Returns:
grads -- A dictionary with the gradients
grads["dA" + str(l)] = ...
grads["dW" + str(l)] = ...
grads["db" + str(l)] = ...
"""
grads = {}
L = len(caches) # the number of layers
m = AL.shape[1]
Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL
# Initializing the backpropagation
### START CODE HERE ### (1 line of code)
dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
assert (dAL.shape == AL.shape)
### END CODE HERE ###
# Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "AL, Y, caches". Outputs: "grads["dAL"], grads["dWL"], grads["dbL"]
### START CODE HERE ### (approx. 2 lines)
current_cache = caches[L-1];
grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, "sigmoid");
### END CODE HERE ###
for l in reversed(range(L-1)):
# lth layer: (RELU -> LINEAR) gradients.
# Inputs: "grads["dA" + str(l + 2)], caches". Outputs: "grads["dA" + str(l + 1)] , grads["dW" + str(l + 1)] , grads["db" + str(l + 1)]
### START CODE HERE ### (approx. 5 lines)
current_cache = caches[l];
dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA" + str(l+2)], current_cache, "relu");
grads["dA" + str(l + 1)] = dA_prev_temp;
grads["dW" + str(l + 1)] = dW_temp
grads["db" + str(l + 1)] = db_temp
### END CODE HERE ###
return grads
AL, Y_assess, caches = L_model_backward_test_case()
grads = L_model_backward(AL, Y_assess, caches)
print_grads(grads)
###Output
dW1 = [[ 0.41010002 0.07807203 0.13798444 0.10502167]
[ 0. 0. 0. 0. ]
[ 0.05283652 0.01005865 0.01777766 0.0135308 ]]
db1 = [[-0.22007063]
[ 0. ]
[-0.02835349]]
dA1 = [[ 0.12913162 -0.44014127]
[-0.14175655 0.48317296]
[ 0.01663708 -0.05670698]]
###Markdown
**Expected Output** dW1 [[ 0.41010002 0.07807203 0.13798444 0.10502167] [ 0. 0. 0. 0. ] [ 0.05283652 0.01005865 0.01777766 0.0135308 ]] db1 [[-0.22007063] [ 0. ] [-0.02835349]] dA1 [[ 0.12913162 -0.44014127] [-0.14175655 0.48317296] [ 0.01663708 -0.05670698]] 6.4 - Update ParametersIn this section you will update the parameters of the model, using gradient descent: $$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{16}$$$$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{17}$$where $\alpha$ is the learning rate. After computing the updated parameters, store them in the parameters dictionary. **Exercise**: Implement `update_parameters()` to update your parameters using gradient descent.**Instructions**:Update parameters using gradient descent on every $W^{[l]}$ and $b^{[l]}$ for $l = 1, 2, ..., L$.
###Code
# GRADED FUNCTION: update_parameters
def update_parameters(parameters, grads, learning_rate):
"""
Update parameters using gradient descent
Arguments:
parameters -- python dictionary containing your parameters
grads -- python dictionary containing your gradients, output of L_model_backward
Returns:
parameters -- python dictionary containing your updated parameters
parameters["W" + str(l)] = ...
parameters["b" + str(l)] = ...
"""
L = len(parameters) // 2 # number of layers in the neural network
# Update rule for each parameter. Use a for loop.
### START CODE HERE ### (≈ 3 lines of code)
for l in range(L):
parameters["W" + str(l+1)] -= learning_rate * grads["dW" + str(l + 1)];
parameters["b" + str(l+1)] -= learning_rate * grads["db" + str(l + 1)];
### END CODE HERE ###
return parameters
parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads, 0.1)
print ("W1 = "+ str(parameters["W1"]))
print ("b1 = "+ str(parameters["b1"]))
print ("W2 = "+ str(parameters["W2"]))
print ("b2 = "+ str(parameters["b2"]))
###Output
W1 = [[-0.59562069 -0.09991781 -2.14584584 1.82662008]
[-1.76569676 -0.80627147 0.51115557 -1.18258802]
[-1.0535704 -0.86128581 0.68284052 2.20374577]]
b1 = [[-0.04659241]
[-1.28888275]
[ 0.53405496]]
W2 = [[-0.55569196 0.0354055 1.32964895]]
b2 = [[-0.84610769]]
notebooks/hotel_bookings.ipynb
###Markdown
*BUSINESS PROBLEM: explore a hotel booking dataset and predict cancellation* STAGE 1: preprocessing and EDA Load Data and Explore
###Code
import pandas as pd
df_hotel = pd.read_csv('~/Downloads/hotel_bookings.csv')
df_hotel.head(1)
# Check Columns
df_hotel.columns
###Output
_____no_output_____
###Markdown
CONSIDERATIONS: Our target feature to predict is 'is_canceled'. We should treat and select all the other features; first we need to explore them.
###Code
# Check missing values
import numpy as np
import pandas as pd
def missing_zero_values_table(df):
zero_val = (df == 0.00).astype(int).sum(axis=0)
mis_val = df.isnull().sum()
mis_val_percent = 100 * df.isnull().sum() / len(df)
mz_table = pd.concat([zero_val, mis_val, mis_val_percent], axis=1)
mz_table = mz_table.rename(
columns = {0 : 'Zero Values', 1 : 'Missing Values', 2 : '% of Total Values'})
mz_table['Total Zero Missing Values'] = mz_table['Zero Values'] + mz_table['Missing Values']
mz_table['% Total Zero Missing Values'] = 100 * mz_table['Total Zero Missing Values'] / len(df)
mz_table['Data Type'] = df.dtypes
mz_table = mz_table[
mz_table.iloc[:,1] != 0].sort_values(
'% of Total Values', ascending=False).round(1)
print ("Your selected dataframe has " + str(df.shape[1]) + " columns and " + str(df.shape[0]) + " Rows.\n"
"There are " + str(mz_table.shape[0]) +
" columns that have missing values.")
# mz_table.to_excel('D:/sampledata/missing_and_zero_values.xlsx', freeze_panes=(1,0), index = False)
return mz_table
missing_zero_values_table(df_hotel)
###Output
Your selected dataframe has 32 columns and 119390 Rows.
There are 4 columns that have missing values.
###Markdown
CONSIDERATIONS: country and children don't have many missing values ('0' in children is not a missing value); our problem is with company and agent, which we should explore and treat.
###Code
# Count of distinct company ids that made reservations
len(df_hotel['company'].unique())
# Aggregate and check distribution from the variable
df_company_agg = df_hotel.groupby('company')['company'].count().sort_values(ascending = False)
df_company_agg.head(5)
###Output
_____no_output_____
###Markdown
CONSIDERATIONS: we've too many missing values for company; those missing values probably mean that the reservation wasn't made through any company; it would be better to create a boolean feature flagging whether it's a company reservation or not.
###Code
import numpy as np
# Create a new boolean feature considering if it's a company reservation or not
df_hotel['is_company'] = np.where(df_hotel['company'].isna(), False, True)
# Drop company column as we don't need it anymore and we can get rid from nan values
df_hotel.drop(['company'], axis=1)
# check the number of reservations by companies
df_hotel['is_company'].sum()
# Drop old column to clean our data (it will help plotting correlation matrix and fitting the model)
df_hotel = df_hotel.drop(['company'], axis=1)
###Output
_____no_output_____
###Markdown
CONSIDERATIONS: it's rare for companies to book!
###Code
# Create a new boolean feature considering if it's a reservation made by an agent or not
df_hotel['is_agent'] = np.where(df_hotel['agent'].isna(), False, True)
df_hotel['is_agent'].sum()
###Output
_____no_output_____
###Markdown
CONSIDERATIONS: bookings by agents are much more common than bookings by companies, but direct reservations are probably also important.
###Code
# Aggregate and check which market segments cancel more by type of hotel
df_hotel.groupby(['market_segment', 'is_canceled'])['hotel'].count()
###Output
_____no_output_____
###Markdown
CONSIDERATIONS: Most cancellations come from ONLINE bookings and GROUPS.
###Code
# Check types of reservation status by cancelation type
df_hotel.groupby(['reservation_status', 'is_canceled'])['hotel'].count()
###Output
_____no_output_____
###Markdown
Transform Data
###Code
# Correct datetime
df_hotel[['reservation_status_date']] = df_hotel[['reservation_status_date']].astype('datetime64[ns]')
# Correct all categorical (except our target 'is_canceled') features including booleans (we can't use bool type or target variable as categorical to fit the model)
df_hotel[["hotel", "meal", "country",
"market_segment", "distribution_channel",
"reserved_room_type", "assigned_room_type",
"deposit_type", "customer_type",
"reservation_status"]] = df_hotel[["hotel", "meal", "country", "market_segment",
"distribution_channel", "reserved_room_type", "assigned_room_type",
"deposit_type", "customer_type",
"reservation_status",]].astype('category')
df_hotel['is_repeated_guest'] = df_hotel['is_repeated_guest'].astype(bool)
# Fill missing values in the agent column with the most frequent agent id and cast it to int64
from statistics import mode
df_hotel[["agent"]] = df_hotel[["agent"]].astype(pd.Int32Dtype())
df_hotel_list = df_hotel[["agent"]].values.tolist()
def flatten(t):
return [item for sublist in t for item in sublist]
flat_hotel = flatten(df_hotel_list)
flat_mode = mode(flat_hotel)
df_hotel[["agent"]] = df_hotel[["agent"]].fillna(9)
df_hotel[["agent"]] = df_hotel[["agent"]].astype('int64')
# Fill na's from float column with the median
df_hotel['children'] = df_hotel['children'].fillna(df_hotel['children'].median()).astype('int64')
# Drop rows with NaNs in columns where the NaN count is negligible
df_hotel = df_hotel.dropna(subset = ['country', 'children'], axis = 0)
df_hotel.dtypes
###Output
_____no_output_____
###Markdown
CONSIDERATION 1: We could have explored our columns further to decide which ones could be ordinal encoded (such as months or years), but the time it would consume could be too high and it's not guaranteed it would help us; \CONSIDERATION 2: We could also have one-hot encoded our categorical columns, but that would expand our dataframe and make the model more expensive to train or to scale if we deployed on big data;\CONSIDERATION 3: We could have predicted our missing values with a regression model, but in our case it wasn't necessary; \CONSIDERATION 4: We just need to treat datetimes and we are ready to fit the model! Let's create new features!
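(For completeness, the encodings mentioned in CONSIDERATIONS 1 and 2 could look roughly like the sketch below; it is hypothetical and not applied in this notebook.)
```python
# Hypothetical ordinal encoding of the arrival month (not applied here)
month_order = ['January', 'February', 'March', 'April', 'May', 'June',
               'July', 'August', 'September', 'October', 'November', 'December']
month_map = {m: i + 1 for i, m in enumerate(month_order)}
# df_hotel['arrival_month_num'] = df_hotel['arrival_date_month'].map(month_map)

# Hypothetical one-hot encoding of one categorical column (would widen the dataframe)
# df_hotel = pd.get_dummies(df_hotel, columns=['market_segment'], prefix='segment')
```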
###Code
# Create a new feature which corrects datetime from reservation_status_date
df_hotel[['reservation_status_date_dt']] = df_hotel[['reservation_status_date']].astype('datetime64[ns]')
# Create a new feature which contains the full date of arrival and convert it to datetime
df_hotel['arrivel_date'] = df_hotel['arrival_date_year'].astype(str) + '-' + df_hotel['arrival_date_month'] + '-' + df_hotel['arrival_date_day_of_month'].astype(str)
df_hotel[['arrivel_date_dt']] = df_hotel[['arrivel_date']].astype('datetime64[ns]')
#Check transformation
df_hotel[['arrivel_date','arrivel_date_dt', 'reservation_status_date', 'reservation_status_date_dt']].tail(5)
from datetime import timedelta
# Create a datetime feature which tells how many days before the reservations was cancelled and check if it matches
df_hotel['lead_time_dt'] = df_hotel['lead_time'].apply(lambda x: timedelta(days = x))
df_hotel['booking_date'] = df_hotel['arrivel_date_dt'] - df_hotel['lead_time_dt']
df_hotel[['booking_date', 'arrivel_date', 'lead_time', 'lead_time_dt']].head(5)
# Drop old columns after check
# df_hotel = df_hotel.drop(['arrivel_date', 'lead_time', 'reservation_status_date'], axis=1)
# Create a new feature to identify HIGH SEASON BY COUNTRY and if the reservation was made in high or low season
df_hotel['n_rows_country'] = df_hotel.groupby(['country'])['country'].transform('count')
# Filter by most relevant countries
df_hotel['grouped_country'] = np.where(df_hotel['n_rows_country'] < 5000, 'other', df_hotel['country'])
table = df_hotel.groupby(['grouped_country', 'arrival_date_year', 'arrival_date_month']).agg(n_canceled = pd.NamedAgg('is_canceled', 'sum'), n_total = pd.NamedAgg('is_canceled', 'count')).reset_index()
table['country_year_total'] = (table.groupby(['grouped_country', 'arrival_date_year'])['n_total'].transform('sum')) / 12
# Set high season by threshold
table['season_index'] = table['n_total'] / table['country_year_total']
table['high_season'] = np.where(table['season_index'] < 1, 0, 1)
table_agg = table.sort_values(['grouped_country', 'arrival_date_year']).groupby(['grouped_country', 'arrival_date_month']).agg(n_high = pd.NamedAgg('high_season', 'sum'), n_total = pd.NamedAgg('high_season', 'count')).reset_index()
table_agg['high_season'] = np.where(table_agg['n_high'] >= table_agg['n_total'], 1, 0)
table_agg = table_agg[['grouped_country', 'arrival_date_month', 'high_season']]
df_hotel['grouped_country'] = df_hotel['grouped_country'].astype('category')
# Merge table created to define high season with original table
df_hotel = df_hotel.merge(table_agg, how = 'left', on = ['grouped_country', 'arrival_date_month'])
df_hotel
df_hotel['grouped_country'] = df_hotel['grouped_country'].astype('category')
df_hotel['grouped_country'].dtypes
# BONUS: create a new boolean feature which tells whether a deposit was made, since deposit_type looks redundant
df_hotel['is_deposit'] = np.where(df_hotel['deposit_type'] == 'No Deposit', 0, 1)
df_hotel['deposit_type'].unique()
df_hotel['deposit_type'].value_counts()
# Drop helper columns to keep the correlation plot clean and help fitting the model
df_hotel = df_hotel.drop(['is_deposit', 'n_rows_country'], axis=1)
###Output
_____no_output_____
###Markdown
Plot, explore, transform and interpret
###Code
import seaborn as sns
sns.displot(df_hotel, x="is_canceled")
###Output
_____no_output_____
###Markdown
Imbalanced distribution of our target variable. We could have even better models if we took a more data-centric approach and did more feature engineering.
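To quantify the imbalance mentioned above, a quick check could be run (a minimal sketch):
```python
# Share of canceled (1) vs. not-canceled (0) bookings
print(df_hotel['is_canceled'].value_counts(normalize=True))
```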
###Code
sns.clustermap(df_hotel.corr())
###Output
_____no_output_____
###Markdown
Correlation and dependencies between our features
###Code
import seaborn as sns
sns.boxplot(x = df_hotel['lead_time'] + 1)
###Output
_____no_output_____
###Markdown
CONSIDERATION 1: boxplotting, we can see that most bookings (our interquartile interval) fall somewhere in between 1 and 180 days; above 400 are our outliers; \CONSIDERATION 2: we can see that most reservations are made somewhere in between 80 and 180 days before arrival; \CONSIDERATION 3: we can see that we still have a considerable amount of bookings somewhere in between 1 and 10;
###Code
df_hotel['log_lead_time'] = np.log(df_hotel['lead_time'] + 1)
sns.histplot(df_hotel['lead_time'] + 1, log_scale = True)
###Output
_____no_output_____
###Markdown
CONSIDERATION 1: On our histogram we've observed the same as before: there's a considerable amount of bookings between day 1 and 10; \CONSIDERATION 2: On our histogram we've observed an odd behaviour ('non-linearity'): a lot of bookings are on day 1; \CONSIDERATION 3: That means we have two populations and distributions, which we can use to create a new feature: who books at the last minute and who schedules in advance;
###Code
# Creating new feature: reservation_type
df_hotel['reservation_type'] = np.where((df_hotel['lead_time'] + 1) < 10, 'last_minute', 'scheduled')
df_hotel.groupby(['reservation_type', 'is_canceled'])['reservation_type'].count()
enc_minute = {'last_minute': True, 'scheduled': False}
df_hotel['is_last_minute'] = df_hotel['reservation_type'].map(enc_minute).astype(bool)
df_hotel.dtypes
import seaborn as sns
import matplotlib.pyplot as plt
#Using Pearson Correlation
plt.figure(figsize=(22,20))
cor = df_hotel.corr()
sns.heatmap(cor, annot=True, cmap=plt.cm.Reds)
plt.show()
###Output
_____no_output_____
###Markdown
LIST OF RELEVANT CORRELATIONS FOR A BUSINESS SPECIALIST TO INTERPRET: \ 1) Is canceled with lead time, special requests, parking spaces and previous cancellations; some with booking changes; \ 2) Lead time with agent, days in waiting list, high season, adults and stays in week nights; \ 3) Arrival date year with average daily rate; \ 4) Adults and number of children with average daily rate; \ 5) Is repeated guest with previous bookings not canceled, is company, average daily rate; \ 6) Previous bookings not canceled with is company; \ 7) Average daily rate with number of children, adults, special requests; \ 8) Is company with previous bookings not canceled, is repeated guest, adults; \ 9) Is last minute booking with stays in week nights; STAGE 2: Comparing two models Linear model: logistic regression
###Code
import statsmodels.formula.api as fsm
model = fsm.logit(formula = 'is_canceled ~ log_lead_time' , data = df_hotel)
fit = model.fit()
fit.summary()
df_hotel['pred_baseline'] = fit.predict()
sns.scatterplot(data = df_hotel, x = 'lead_time', y = 'pred_baseline')
import matplotlib.pyplot as plt
import statsmodels.formula.api as fsm
model = fsm.logit(formula = 'is_canceled ~ log_lead_time : reservation_type * is_company * high_season', data = df_hotel)
fit = model.fit()
fit.summary()
df_hotel['pred_m1'] = fit.predict()
fig,ax = plt.subplots(1,2, figsize = (16,8))
sns.scatterplot(data = df_hotel, x = 'lead_time', y = 'pred_m1', hue = 'is_company', size = 'high_season', ax = ax[0])
sns.scatterplot(data = df_hotel, x = 'log_lead_time', y = 'pred_m1', hue = 'is_company', size = 'high_season', ax = ax[1])
###Output
Optimization terminated successfully.
Current function value: 0.600648
Iterations 6
###Markdown
CONSIDERATION 1: We can see our break between the two populations (last minute and scheduled) between day 0 and 10; \CONSIDERATION 2: We can assume that, when it is a company, we have MUCH fewer cancellations; \CONSIDERATION 3: During high season, companies cancel more; \CONSIDERATION 4: During high season, cancellations by non-companies slightly decrease; \CONSIDERATION 5: The gap between high season and low season cancellations gets bigger as the lead time increases; \CONSIDERATION 6: This gap is stronger when 'is company';
###Code
import matplotlib.pyplot as plt
import statsmodels.formula.api as fsm
model = fsm.logit(formula = 'is_canceled ~ log_lead_time : reservation_type * is_agent * high_season', data = df_hotel)
fit = model.fit()
fit.summary()
df_hotel['pred_cancel'] = fit.predict()
fig,ax = plt.subplots(1,2, figsize = (16,8))
sns.scatterplot(data = df_hotel, x = 'lead_time', y = 'pred_cancel', hue = 'is_agent', size = 'high_season', ax = ax[0])
sns.scatterplot(data = df_hotel, x = 'log_lead_time', y = 'pred_cancel', hue = 'is_agent', size = 'high_season', ax = ax[1])
###Output
Optimization terminated successfully.
Current function value: 0.601806
Iterations 7
###Markdown
CONSIDERATION 1: After 10 days, when the booking is made by an agent, a cancellation is more likely than when it's not booked by an agent. We can plot a ROC curve to check the performance of a classification model at all classification thresholds, \considering the trade-off between sensitivity (True Positive Rate) and specificity (True Negative Rate).
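A single threshold-independent summary of that trade-off is the area under the ROC curve; a minimal sketch, assuming `pred_cancel` holds the predicted cancellation probabilities from the model above:
```python
from sklearn.metrics import roc_auc_score

auc = roc_auc_score(df_hotel['is_canceled'], df_hotel['pred_cancel'])
print('ROC AUC:', round(auc, 3))
```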
###Code
from sklearn.metrics import f1_score
threshold_list = np.linspace(0.05, 0.95, 200)
f1_list = []
for threshold in threshold_list:
pred_label = np.where(df_hotel['pred_cancel'] < threshold, 0, 1)
f1 = f1_score(df_hotel['is_canceled'], pred_label)
f1_list.append(f1)
df_f1 = pd.DataFrame({'threshold':threshold_list, 'f1_score': f1_list})
df_f1[df_f1['f1_score'] == max(df_f1['f1_score'])]
bt = df_f1[df_f1['f1_score'] == max(df_f1['f1_score'])]['threshold'].values[0]
f1 = df_f1[df_f1['f1_score'] == max(df_f1['f1_score'])]['f1_score'].values[0]
title = "Best Threshold: " + str(round(bt, 2)) + " w/ F-1: " + str(round(f1, 2))
sns.lineplot(data=df_f1, x='threshold', y='f1_score').set_title(title)
###Output
_____no_output_____
###Markdown
F-1 score finds the best spot between Precision and Recall and it's reasonable to use with binary classification or with balanced multiclass data. \Most of the time our business problem determines whether we should privilege Precision or Recall, but the F-1 score is a good generalization to compare the performance of the models. \We can check and set the best threshold for our model based on the score.
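For reference, the F-1 score is the harmonic mean of precision and recall: $$F_1 = 2\cdot\frac{\text{precision}\cdot\text{recall}}{\text{precision}+\text{recall}}$$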
###Code
from sklearn.metrics import cohen_kappa_score, precision_score, roc_curve
from sklearn.metrics import matthews_corrcoef, mean_squared_error, log_loss
from sklearn.metrics import f1_score, recall_score, roc_auc_score
threshold_list = np.linspace(0.05, 0.95, 200)
score_list = []
for threshold in threshold_list:
pred_label = np.where(df_hotel['pred_cancel'] < threshold, 0, 1)
score = cohen_kappa_score(df_hotel['is_canceled'], pred_label)
score_list.append(score)
df_score = pd.DataFrame({'threshold':threshold_list, 'score_score': score_list})
df_score[df_score['score_score'] == max(df_score['score_score'])]
bt = df_score[df_score['score_score'] == max(df_score['score_score'])]['threshold'].values[0]
score = df_score[df_score['score_score'] == max(df_score['score_score'])]['score_score'].values[0]
title = "Best Threshold: " + str(round(bt, 2)) + " w/ Kappa: " + str(round(score, 2))
sns.lineplot(data=df_score, x='threshold', y='score_score').set_title(title)
###Output
_____no_output_____
###Markdown
Cohen's Kappa score is a good measure for a classification model since it considers imbalanced data.
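For reference, Cohen's Kappa compares the observed agreement $p_o$ (here, the accuracy) with the agreement expected by chance $p_e$ given the class distribution: $$\kappa = \frac{p_o - p_e}{1 - p_e}$$ so a model that only predicts the majority class scores close to 0.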
###Code
from sklearn.metrics import roc_curve
#Plot ROC_Curve
fpr, tpr, thresholds = roc_curve(df_hotel['is_canceled'], df_hotel['pred_cancel'])
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111, aspect=1)
sns.lineplot(x = fpr, y = fpr, ax = ax)
sns.lineplot(x = fpr, y = tpr, ax = ax)
###Output
_____no_output_____
###Markdown
An ROC curve shows the performance of one classification model at all classification thresholds. \It can be used to evaluate the strength of a model. ROC curves can also be used to compare two models, and we have this in mind. CONCLUSIONS **We did hard work to get here. We didn't achieve a score (0.6) that is reasonable by today's standards, though. But not everything is about score.*** **Advantages** * Logistic regression is a simple linear model which is easy and fast to set up; * It's not computationally expensive, you can run it on ordinary hardware; * It's highly scalable; * It's not a complex model, no black box, it's highly explainable and interpretable; you can even use it to explore your data further; * Excellent to plot, visualize and get insights that wouldn't be possible with complex models; * Excellent for risk analysis such as bookings, so we could adjust the daily room rate considering the risk of cancellation;* **Disadvantages** * It's more handmade, demands more time, work and analytical capabilities from the data scientist, who works as a craftsman; * It's sensitive to outliers and you should be minimalist, choosing fewer and more important features and treating outliers without losing data; * More dependent on good feature engineering to get a better score; * You can also stratify your modelling and build regressions on top of regressions, but most of the time it's not worth the hard work; * You will never get a result as good as state-of-the-art ensembles of hierarchical models and neural networks; Ensemble of hierarchical models: XGBoost Feature Selection: GridSearchCV
###Code
%%time
import numpy as np
import xgboost as xgb
from sklearn.experimental import enable_halving_search_cv
from sklearn.model_selection import HalvingGridSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from datetime import datetime
# Automate best parameters search for XGBoost using GridSearch
folds = 3
param_comb = 20
# Choose parameters to compare
param_grid = {
"max_depth": [6, 10, 20, 30],
"min_child_weight": [1, 3, 10],
"gamma" : [0.2 , 0.7, 1, 2],
"alpha" : [1],
"learning_rate": [0.10, 1, 3],
"missing": [np.nan],
"num_parallel_tree": [1, 2],
# "use_rmm": True,
"colsample_bytree" : [ 0.3, 0.4, 0.5 , 0.7],
"scale_pos_weight": [1, 3],
"reg_lambda": [1, 5, 10, 50],
# "gradient_based": True,
"subsample": [0.1, 0.5],
}
# skf = StratifiedKFold(n_splits=folds, shuffle = True, random_state = 1001)
# Halving Grid Search CV implementation
xgb_cl = xgb.XGBClassifier(n_estimators=1000,
objective='binary:logistic',
silent=True,
nthread=40,
tree_method='gpu_hist',
eval_metric='auc')
halving_cv = HalvingGridSearchCV(xgb_cl,
param_grid,
scoring="roc_auc",
#n_jobs=4,
min_resources="exhaust",
factor=3,
#scoring="neg_log_loss",
#cv=skf.split(X_train,Y_train),
verbose=1,
random_state=1001,
)
# # Here we go
# start_time = timer(None) # timing starts from this point for "start_time" variable
# halving_cv.fit(X_train, Y_train)
# # Return set of parameters with the best performance
# halving_cv.best_params_
# # Return the performance metric score
# halving_cv.best_score_
# model.dump_model('dump.raw.txt')
# timer(start_time) # timing ends here for "start_time" variable
###Output
CPU times: user 80.7 ms, sys: 12.2 ms, total: 92.9 ms
Wall time: 95.6 ms
###Markdown
**CONSIDERATION: We would like to automate the search for the best parameters, but it takes more time than we had. \ We decided to set the parameters conservatively, as suggested by the community, after empirical testing.** BEST MODEL TO COMPARE: XGBoost using all features with binary logistic and avoiding LEAKAGE
###Code
%%time
import os
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from xgboost import XGBClassifier
from xgboost import XGBRegressor
from sklearn.metrics import cohen_kappa_score, precision_score
from sklearn.metrics import matthews_corrcoef, mean_squared_error, log_loss
from sklearn.metrics import f1_score, recall_score, roc_auc_score
# Define your features and your target to predict (selecting ALL columns WITHOUT LEAKAGE!)
X = df_hotel[['hotel', 'lead_time', 'arrival_date_year', 'arrival_date_month',
'stays_in_weekend_nights', 'stays_in_week_nights', 'adults', 'children',
'babies', 'meal','country', 'market_segment', 'distribution_channel',
'is_repeated_guest', 'previous_cancellations',
'previous_bookings_not_canceled', 'reserved_room_type',
'assigned_room_type', 'booking_changes', 'deposit_type',
'days_in_waiting_list', 'customer_type', 'adr',
'required_car_parking_spaces', 'total_of_special_requests',
'is_company','is_agent']]
Y = df_hotel['is_canceled']
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=7)
Y_train = Y_train
Y_test = Y_test
full_pipeline = ColumnTransformer([('cat', OneHotEncoder(handle_unknown='ignore'), X_train.columns)], remainder='passthrough')
encoder = full_pipeline.fit(X_train)
X_train_enc = encoder.transform(X_train)
X_test_enc = encoder.transform(X_test)
# train the model
model = XGBRegressor(n_estimators= 200,
max_depth= 30, # Lower ratios avoid over-fitting. Default is 6.
objective = 'binary:logistic', # Default is reg:squarederror. 'multi:softprob' for multiclass and get proba.
#num_class = 2, # Use if softprob is set.
reg_lambda = 10, # Larger ratios avoid over-fitting. Default is 1.
gamma = 0.3, # Larger values avoid over-fitting. Default is 0. # Values from 0.3 to 0.8 if you have many columns (especially if you did one-hot encoding), or 0.8 to 1 if you only have a few columns.
alpha = 1, # Larger ratios avoid over-fitting. Default is 0.
learning_rate= 0.10, # Lower ratios avoid over-fitting. Default is 0.3.
colsample_bytree= 0.7, # Lower ratios avoid over-fitting.
scale_pos_weight = 1, # Default is 1. Control balance of positive and negative weights, for unbalanced classes.
subsample = 0.1, # Lower ratios avoid over-fitting. Default 1. 0.5 recommended. # 0.1 if using GPU.
min_child_weight = 3, # Larger ratios avoid over-fitting. Default is 1.
missing = np.nan, # Deal with missing values
num_parallel_tree = 2, # Parallel trees constructed during each iteration. Default is 1.
importance_type = 'weight',
eval_metric = 'auc',
#use_label_encoder = True,
#enable_categorical = True,
verbosity = 1,
nthread = -1, # Set -1 to use all threads.
#use_rmm = True, # Use GPU if available
tree_method = 'auto', # auto # 'gpu_hist'. Default is auto: analyze the data and chooses the fastest.
#gradient_based = True, # If True you can set subsample as low as 0.1. Only use with gpu_hist
)
# fit model
model.fit(X_train_enc, Y_train.values.ravel(),
# early_stopping_rounds=20
)
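# (Added note, illustrative) with a separate validation split passed as eval_set, xgboost can
# pick the number of boosting rounds automatically via early stopping, e.g. (hypothetical
# variable names; early_stopping_rounds in the constructor assumes xgboost >= 1.6):
# model.set_params(early_stopping_rounds=50)
# model.fit(X_tr_enc, y_tr, eval_set=[(X_val_enc, y_val)], verbose=False)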
# check best ntree limit (note: this attribute is only meaningful/available when early stopping is used)
display(model.best_ntree_limit)
# extract the training set predictions
preds_train = model.predict(X_train_enc,
ntree_limit=model.best_ntree_limit
)
# extract the test set predictions
preds_test = model.predict(X_test_enc,
ntree_limit=model.best_ntree_limit
)
# save model
output_dir = "models"
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# save in JSON format
model.save_model(f'{output_dir}/hotel_xgboost.json')
# save in text format
model.save_model(f'{output_dir}/hotel_xgboost.txt')
print('FINISHED!')
###Output
_____no_output_____
###Markdown
Setting our parameters conservatively, with 200 estimation steps, training took about 4 minutes. \We'll use this 'step' pattern to compare other settings. \If we were to put this model in production, we should probably train it for longer, tuning the learning rate and the number of steps. **Plot & Score**
###Code
%%time
# Plot F1-Score and Threshold
from sklearn.metrics import f1_score
threshold_list = np.linspace(0.05, 0.95, 200)
f1_list = []
for threshold in threshold_list:
pred_label = np.where(preds_test < threshold, 0, 1)
f1 = f1_score(Y_test, pred_label)
f1_list.append(f1)
df_f1 = pd.DataFrame({'threshold':threshold_list, 'f1_score': f1_list})
df_f1[df_f1['f1_score'] == max(df_f1['f1_score'])]
bt = df_f1[df_f1['f1_score'] == max(df_f1['f1_score'])]['threshold'].values[0]
f1 = df_f1[df_f1['f1_score'] == max(df_f1['f1_score'])]['f1_score'].values[0]
title = "Best Threshold: " + str(round(bt, 2)) + " w/ F-1: " + str(round(f1, 2))
sns.lineplot(data=df_f1, x='threshold', y='f1_score').set_title(title)
plt.show()
# Plot Kappa Score and threshold
threshold_list = np.linspace(0.05, 0.95, 200)
score_list = []
for threshold in threshold_list:
pred_label = np.where(preds_test < threshold, 0, 1)
score = cohen_kappa_score(Y_test, pred_label)
score_list.append(score)
df_score = pd.DataFrame({'threshold':threshold_list, 'score_score': score_list})
df_score[df_score['score_score'] == max(df_score['score_score'])]
bt = df_score[df_score['score_score'] == max(df_score['score_score'])]['threshold'].values[0]
score = df_score[df_score['score_score'] == max(df_score['score_score'])]['score_score'].values[0]
title = "Best Threshold: " + str(round(bt, 2)) + " w/ Kappa: " + str(round(score, 2))
sns.lineplot(data=df_score, x='threshold', y='score_score').set_title(title)
plt.show()
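# (Added sketch, not in the original notebook) the two threshold sweeps above can be folded
# into one reusable helper that returns the threshold maximizing any metric.
def best_threshold_for(y_true, y_score, metric, grid=np.linspace(0.05, 0.95, 200)):
    scores = [metric(y_true, np.where(y_score < t, 0, 1)) for t in grid]
    i = int(np.argmax(scores))
    return grid[i], scores[i]
print("Best Kappa threshold:", best_threshold_for(Y_test, preds_test, cohen_kappa_score))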
from sklearn.metrics import roc_curve
#Plot ROC_Curve
fpr, tpr, thresholds = roc_curve(Y_test, preds_test)
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111, aspect=1)
sns.lineplot(x = fpr, y = fpr, ax = ax)
sns.lineplot(x = fpr, y = tpr, ax = ax)
plt.show()
###Output
_____no_output_____
###Markdown
***Our ROC curve is MUCH better at all thresholds compared with our logistic regression! We have a nice, even curve which doesn't appear to be overfitting!***
###Code
from sklearn.metrics import roc_auc_score
best_preds = np.where(preds_test < bt, 0, 1)
print("Roc_auc = {}".format(roc_auc_score(Y_test, best_preds)))
print("Precision = {}".format(precision_score(Y_test, best_preds)))
print("Recall = {}".format(recall_score(Y_test, best_preds)))
print("F1 = {}".format(f1_score(Y_test, best_preds)))
print("Kappa_score = {}".format(cohen_kappa_score(Y_test, best_preds)))
print("Matthews_corrcoef = {}".format(matthews_corrcoef(Y_test, best_preds)))
print("Mean_squared_error_test = {}".format(mean_squared_error(Y_test, best_preds)))
print("Logloss_test = {}".format(log_loss(Y_test, best_preds)))
###Output
Roc_auc = 0.8492998797960843
Precision = 0.8289085545722714
Recall = 0.7955832389580973
F1 = 0.8119040739670615
Kappa_score = 0.7043968112863238
Matthews_corrcoef = 0.704763163688087
Mean_squared_error_test = 0.13687397502207646
Logloss_test = 4.727508371460749
###Markdown
**Now we have a reasonable predictive model!** It could be improved even further by finding the best parameters. \Using the best threshold for the Kappa score improved our overall scores compared with the best F-1 score threshold, probably due to the imbalanced classes. XGBoost with manually selected features using Patsy and Softprob
###Code
%%time
import xgboost as xgb
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
import patsy
# Selecting features I manually found interesting by analyzing the Matthews correlation, and using patsy to automatically build interactions between features.
y, X = patsy.dmatrices('is_canceled ~ hotel + lead_time + arrival_date_year + arrival_date_month + stays_in_weekend_nights + \
stays_in_week_nights + adults + children + babies + meal + country + market_segment + distribution_channel + \
is_repeated_guest + previous_cancellations + previous_bookings_not_canceled + reserved_room_type + \
assigned_room_type + booking_changes + deposit_type + days_in_waiting_list + customer_type + adr + \
required_car_parking_spaces + total_of_special_requests + is_company + is_agent', data = df_hotel)
# Display patsy features
#display(X)
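# (Added, illustrative) patsy expands categorical columns such as 'hotel' or 'country' into
# one-hot/treatment-coded columns automatically; the first few design-matrix column names show this:
print(X.design_info.column_names[:10])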
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.2)
D_train = xgb.DMatrix(X_train, label=Y_train)#, enable_categorical=True)
D_test = xgb.DMatrix(X_test, label=Y_test)#, enable_categorical=True)
param = {
'eta': 0.10, # Lower ratios avoid over-fitting. Default is 0.3.
'max_depth': 30, # Lower ratios avoid over-fitting. Default is 6.
"min_child_weight": 3, # Larger ratios avoid over-fitting. Default is 1.
"gamma": 0.3, # Larger values avoid over-fitting. Default is 0.
"colsample_bytree" : 0.7, # Lower ratios avoid over-fitting. Values from 0.3 to 0.8 if you have many columns (especially if you did one-hot encoding), or 0.8 to 1 if you only have a few columns.
"scale_pos_weight": 1, # Default is 1. Control balance of positive and negative weights, for unbalanced classes.
"reg_lambda": 10, # Larger ratios avoid over-fitting. Default is 1.
"alpha": 1, # Larger ratios avoid over-fitting. Default is 0.
'subsample':0.5, # Lower ratios avoid over-fitting. Default 1. 0.5 recommended.
'num_parallel_tree': 2, # Parallel trees constructed during each iteration. Default is 1.
'objective': 'multi:softprob', # Default is reg:squarederror. 'multi:softprob' for multiclass.
'num_class': 2, # Use if softprob is set.
'verbosity':1,
'eval_metric': 'auc',
'use_rmm':False, # Use GPU if available
'nthread':-1, # Set -1 to use all threads.
'tree_method': 'auto', # 'gpu_hist'. Default is auto: analyze the data and chooses the fastest.
'gradient_based': False, # If True you can set subsample as low as 0.1. Only use with gpu_hist
}
steps = 200 # The number of training iterations
model = xgb.train(param, D_train, steps)
import numpy as np
from sklearn.metrics import precision_score, recall_score, accuracy_score
preds = model.predict(D_test)
best_preds = np.asarray([np.argmax(line) for line in preds])
print("Precision = {}".format(precision_score(Y_test, best_preds)))
print("Recall = {}".format(recall_score(Y_test, best_preds)))
print("f1 = {}".format(f1_score(Y_test, best_preds)))
print("kappa_score = {}".format(cohen_kappa_score(Y_test, best_preds)))
print("matthews_corrcoef = {}".format(matthews_corrcoef(Y_test, best_preds)))
#print("mean_squared_error_train = {}".format(mean_squared_error(Y_train, best_preds)))
# print("mean_squared_error_test = {}".format(mean_squared_error(Y_test, best_preds)))
print("logloss_test = {}".format(log_loss(Y_test, best_preds)))
#print("logloss_train = {}".format(log_loss(Y_train, best_preds)))
# from xgboost import plot_importance
# import matplotlib.pyplot as pyplot
# plot_importance(model)
# pyplot.show()
###Output
[20:49:33] WARNING: ../src/learner.cc:576:
Parameters: { "gradient_based", "scale_pos_weight" } might not be used.
This could be a false alarm, with some parameters getting used by language bindings but
then being mistakenly passed down to XGBoost core, or some parameter actually being used
but getting flagged wrongly here. Please open an issue if you find any such cases.
Precision = 0.8658173592094297
Recall = 0.8151552516534021
f1 = 0.8397228637413394
kappa_score = 0.7480610552387499
matthews_corrcoef = 0.7489019304166113
logloss_test = 4.031812977527266
CPU times: user 1h 16min 6s, sys: 6.13 s, total: 1h 16min 12s
Wall time: 10min 13s
###Markdown
**Using patsy and softprob (which returns the predicted probability of each data point belonging to each class) improved our overall scores! And we didn't even explore our thresholds!** BONUS: Check for last_minute
###Code
%%time
from sklearn import datasets
import os
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from xgboost import XGBClassifier
from xgboost import XGBRegressor
from sklearn.metrics import cohen_kappa_score, precision_score
from sklearn.metrics import matthews_corrcoef, mean_squared_error, log_loss
from sklearn.metrics import f1_score, recall_score, roc_auc_score
import patsy
df_filter = df_hotel.loc[df_hotel['reservation_type'] == 'last_minute']
# Use patsy to automatic interact between features
y, X = patsy.dmatrices('is_canceled ~ hotel + lead_time + arrival_date_year + arrival_date_month + stays_in_weekend_nights + \
stays_in_week_nights + adults + children + babies + meal + country + market_segment + distribution_channel + \
is_repeated_guest + previous_cancellations + previous_bookings_not_canceled + reserved_room_type + \
assigned_room_type + booking_changes + deposit_type + days_in_waiting_list + customer_type + adr + \
required_car_parking_spaces + total_of_special_requests + is_company + is_agent', data = df_filter)
# Display patsy features
#display(X)
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.2)
D_train = xgb.DMatrix(X_train, label=Y_train)#, enable_categorical=True)
D_test = xgb.DMatrix(X_test, label=Y_test)#, enable_categorical=True)
param = {
'eta': 0.10, # Lower ratios avoid over-fitting. Default is 0.3.
'max_depth': 30, # Lower ratios avoid over-fitting. Default is 6.
"min_child_weight": 3, # Larger ratios avoid over-fitting. Default is 1.
"gamma": 0.3, # Larger values avoid over-fitting. Default is 0.
"colsample_bytree" : 0.7, # Lower ratios avoid over-fitting. Values from 0.3 to 0.8 if you have many columns (especially if you did one-hot encoding), or 0.8 to 1 if you only have a few columns.
"scale_pos_weight": 1, # Default is 1. Control balance of positive and negative weights, for unbalanced classes.
"reg_lambda": 10, # Larger ratios avoid over-fitting. Default is 1.
"alpha": 1, # Larger ratios avoid over-fitting. Default is 0.
'subsample':0.5, # Lower ratios avoid over-fitting. Default 1. 0.5 recommended.
'num_parallel_tree': 2, # Parallel trees constructed during each iteration. Default is 1.
'objective': 'multi:softprob', # Default is reg:squarederror. 'multi:softprob' for multiclass.
'num_class': 2, # Use if softprob is set.
'verbosity':1,
'eval_metric': 'auc',
'use_rmm':False, # Use GPU if available
'nthread':-1, # Set -1 to use all threads.
'tree_method': 'auto', # 'gpu_hist'. Default is auto: analyze the data and chooses the fastest.
'gradient_based': False, # If True you can set subsample as low as 0.1. Only use with gpu_hist
}
steps = 200 # The number of training iterations
model = xgb.train(param, D_train, steps)
import numpy as np
from sklearn.metrics import precision_score, recall_score, accuracy_score
preds = model.predict(D_test)
best_preds = np.asarray([np.argmax(line) for line in preds])
print("Precision = {}".format(precision_score(Y_test, best_preds)))
print("Recall = {}".format(recall_score(Y_test, best_preds)))
print("f1 = {}".format(f1_score(Y_test, best_preds)))
print("kappa_score = {}".format(cohen_kappa_score(Y_test, best_preds)))
print("matthews_corrcoef = {}".format(matthews_corrcoef(Y_test, best_preds)))
# print("mean_squared_error_train = {}".format(mean_squared_error(Y_train, best_preds)))
print("mean_squared_error_test = {}".format(mean_squared_error(Y_test, best_preds)))
print("logloss_test = {}".format(log_loss(Y_test, best_preds)))
#print("logloss_train = {}".format(log_loss(Y_train, best_preds)))
# from xgboost import plot_importance
# import matplotlib.pyplot as pyplot
# plot_importance(model)
%%time
from sklearn import datasets
import os
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from xgboost import XGBClassifier
from xgboost import XGBRegressor
from sklearn.metrics import cohen_kappa_score, precision_score
from sklearn.metrics import matthews_corrcoef, mean_squared_error, log_loss
from sklearn.metrics import f1_score, recall_score, roc_auc_score
import patsy
df_filter = df_hotel.loc[df_hotel['reservation_type'] == 'scheduled']
# Use patsy to automatic interact between features
y, X = patsy.dmatrices('is_canceled ~ hotel + lead_time + arrival_date_year + arrival_date_month + stays_in_weekend_nights + \
stays_in_week_nights + adults + children + babies + meal + country + market_segment + distribution_channel + \
is_repeated_guest + previous_cancellations + previous_bookings_not_canceled + reserved_room_type + \
assigned_room_type + booking_changes + deposit_type + days_in_waiting_list + customer_type + adr + \
required_car_parking_spaces + total_of_special_requests + is_company + is_agent', data = df_filter)
# Display patsy features
#display(X)
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.2)
D_train = xgb.DMatrix(X_train, label=Y_train)#, enable_categorical=True)
D_test = xgb.DMatrix(X_test, label=Y_test)#, enable_categorical=True)
param = {
'eta': 0.10, # Lower ratios avoid over-fitting. Default is 0.3.
'max_depth': 30, # Lower ratios avoid over-fitting. Default is 6.
"min_child_weight": 3, # Larger ratios avoid over-fitting. Default is 1.
"gamma": 0.3, # Larger values avoid over-fitting. Default is 0.
"colsample_bytree" : 0.7, # Lower ratios avoid over-fitting. Values from 0.3 to 0.8 if you have many columns (especially if you did one-hot encoding), or 0.8 to 1 if you only have a few columns.
"scale_pos_weight": 1, # Default is 1. Control balance of positive and negative weights, for unbalanced classes.
"reg_lambda": 10, # Larger ratios avoid over-fitting. Default is 1.
"alpha": 1, # Larger ratios avoid over-fitting. Default is 0.
'subsample':0.5, # Lower ratios avoid over-fitting. Default 1. 0.5 recommended.
'num_parallel_tree': 2, # Parallel trees constructed during each iteration. Default is 1.
'objective': 'multi:softprob', # Default is reg:squarederror. 'multi:softprob' for multiclass.
'num_class': 2, # Use if softprob is set.
'verbosity':1,
'eval_metric': 'auc',
'use_rmm':False, # Use GPU if available
'nthread':-1, # Set -1 to use all threads.
'tree_method': 'auto', # 'gpu_hist'. Default is auto: analyze the data and chooses the fastest.
'gradient_based': False, # If True you can set subsample as low as 0.1. Only use with gpu_hist
}
steps = 200 # The number of training iterations
model = xgb.train(param, D_train, steps)
import numpy as np
from sklearn.metrics import precision_score, recall_score, accuracy_score
preds = model.predict(D_test)
best_preds = np.asarray([np.argmax(line) for line in preds])
print("Precision = {}".format(precision_score(Y_test, best_preds)))
print("Recall = {}".format(recall_score(Y_test, best_preds)))
print("f1 = {}".format(f1_score(Y_test, best_preds)))
print("kappa_score = {}".format(cohen_kappa_score(Y_test, best_preds)))
print("matthews_corrcoef = {}".format(matthews_corrcoef(Y_test, best_preds)))
# print("mean_squared_error_train = {}".format(mean_squared_error(Y_train, best_preds)))
print("mean_squared_error_test = {}".format(mean_squared_error(Y_test, best_preds)))
print("logloss_test = {}".format(log_loss(Y_test, best_preds)))
# print("logloss_train = {}".format(log_loss(Y_train, best_preds)))
# from xgboost import plot_importance
# import matplotlib.pyplot as pyplot
# plot_importance(model)
###Output
[21:00:56] WARNING: ../src/learner.cc:576:
Parameters: { "gradient_based", "scale_pos_weight" } might not be used.
This could be a false alarm, with some parameters getting used by language bindings but
then being mistakenly passed down to XGBoost core, or some parameter actually being used
but getting flagged wrongly here. Please open an issue if you find any such cases.
Precision = 0.87559926244622
Recall = 0.8473709255293838
f1 = 0.8612538540596095
kappa_score = 0.7606381582627613
matthews_corrcoef = 0.7609421707752342
mean_squared_error_test = 0.1166751398068124
logloss_test = 4.029857703046233
CPU times: user 1h 38s, sys: 3.5 s, total: 1h 42s
Wall time: 8min 3s
|
TrashCode/Friendster_New_prepadata_ (1).ipynb | ###Markdown
Data source: http://socialcomputing.asu.edu/datasets/Friendster| Number of Nodes |Number of Edges |Missing Values? ||------|------|------||100199 |14067887|no|**Source:** N/A **Data Set Information:** 2 files are included: 1. nodes.csv -- the file of all the users. This file works as a dictionary of all the users in this data set. It's useful for fast reference. It contains all the node ids used in the dataset. 2. edges.csv -- this is the friendship network among the users. The users' friendships are represented using edges. Here is an example: 1,2 means the user with id "1" is a friend of the user with id "2". **Attribute Information:** Friendster is a social networking website. The service allows users to contact other members, maintain those contacts, and share online content and media with those contacts. This data set was crawled by Stephen Booher ([email protected]) in Nov 2010 from Friendster and contains the crawled friendship network. For easier understanding, all the contents are organized in CSV file format. __Basic statistics__ Number of users: 100,199 Number of friendship pairs: 14,067,887 Exploring the data - Reading the downloaded files
###Code
#Lecture du fichier des liens
df_edges = pd.read_csv("/home/pb19121/datagraphx/edges_sans_boucle.csv",sep =',',header = None)
df_edges.columns = ['FROM', 'TO']
df_edges.to_csv('/home/pb19121/datagraphx/friendsterallfollowers.txt', sep = ' ',index = False, header= False)
df_edges.info()
df_edges.head(5)
# Read the nodes file
df_nodes = pd.read_csv("/home/pb19121/mydata/nodes.csv",header = None)
df_nodes.columns = ['NODE']
# Read the nodes-with-attributes file
df_nodes_att = pd.read_csv("/home/pb19121/datagraphx/WITH_ATT_NODES.csv",header = None)
df_nodes_att.columns = ['Node','name','Present']
df_nodes_att.info()
df_nodes_att.head(5)
###Output
_____no_output_____
###Markdown
- Checking the completeness and uniqueness of the nodes
###Code
pd.DataFrame(df_nodes.NODE.unique()).count()
df_TO =pd.DataFrame(df_edges.TO.unique())
df_TO.columns = ['NODE']
df_FROM = pd.DataFrame(df_edges.FROM.unique())
df_FROM.columns = ['NODE']
print("Sommets source :\n")
print(df_FROM.info())
print("Sommets cible : \n")
print(df_TO.info())
print(df_FROM.head(5) ,"\n", df_TO.head(5))
frames = [df_FROM, df_TO, df_nodes]
df_union_nodes =pd.concat(frames)
df_union_nodes.count()
pd.DataFrame(df_union_nodes.NODE.unique()).count()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5689497 entries, 0 to 5689496
Data columns (total 1 columns):
NODE int64
dtypes: int64(1)
memory usage: 43.4 MB
###Markdown
> **Note 1** Not all nodes appear in the _"nodes"_ dictionary
###Code
liste = df_nodes.NODE
###Output
_____no_output_____
###Markdown
- Removing edges whose nodes are missing from nodes.csv
###Code
df_edges_isNOTnode = pd.DataFrame(columns = ['FROM', 'TO'])
df_edges_isNOTnode = df_edges[np.logical_not(df_edges['FROM'].isin(liste) )&(df_edges['TO'].isin(liste))]
df_edges_isNOTnode.info()
df_edges_isnode = pd.DataFrame(columns = ['FROM', 'TO'])
df_edges_isnode = df_edges[(df_edges['FROM'].isin(liste) )& (df_edges['TO'].isin(liste))]
df_edges_isnode.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 981920 entries, 90 to 14067886
Data columns (total 2 columns):
FROM 981920 non-null int64
TO 981920 non-null int64
dtypes: int64(2)
memory usage: 22.5 MB
###Markdown
Adding computed attributes to the nodes
###Code
import pickle
import seaborn as sns
import networkx as nx
import collections
import community
import matplotlib.pyplot as plt
%matplotlib inline
import pandas as pd
import numpy
G_full = nx.Graph()
G_isinDict = nx.Graph()
G_isNOTinDict = nx.Graph()
for item, row in df_edges_isnode.iterrows():
G_isinDict.add_edge(row['FROM'],row['TO'], weight=1)
for item, row in df_edges_isNOTnode.iterrows():
G_isNOTinDict.add_edge(row['FROM'],row['TO'], weight=1)
for item, row in df_edges.iterrows():
G_full.add_edge(row['FROM'],row['TO'], weight=1)
# Save the graph as an edge list
nx.write_edgelist(G_isinDict, "/home/pb19121/mydata/G_isinDict.edgelist")
# Save the graph as an edge list
nx.write_edgelist(G_full, "/home/pb19121/mydata/G_full.edgelist")
# Save the graph as an edge list
nx.write_edgelist(G_isNOTinDict, "/home/pb19121/mydata/G_isNOTinDict.edgelist")
# Save the graph as an adjacency list
nx.write_adjlist(G_isinDict, "/home/pb19121/mydata/G_isinDict.adjlist")
# Save the graph as an adjacency list
nx.write_adjlist(G_isNOTinDict, "/home/pb19121/mydata/G_isNOTinDict.adjlist")
# Save the graph as an adjacency list
nx.write_adjlist(G_full, "/home/pb19121/mydata/G_full.adjlist")
print(nx.info(G_isinDict))
print(nx.info(G_full))
print(nx.info(G_isNOTinDict))
###Output
Name:
Type: Graph
Number of nodes: 139584
Number of edges: 1379904
Average degree: 19.7717
###Markdown
Adding attributes
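Note (added, and assuming networkx ≥ 2.0): in recent networkx versions the signature is `set_node_attributes(G, values, name)` and `degree()` returns a view rather than a dict, so the calls below would be written as in this small sketch:

```python
import networkx as nx

G_demo = nx.Graph([(1, 2), (2, 3)])
deg = dict(G_demo.degree())              # degree() is a view in networkx >= 2.0
nx.set_node_attributes(G_demo, deg, "degres")
print(G_demo.nodes(data=True))
```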
###Code
# Compute node degrees (undirected graph)
att_deg = G_isinDict.degree()
nx.set_node_attributes(G_isinDict, 'degres', att_deg)
# Compute node degrees (undirected graph)
att_deg = G_isNOTinDict.degree()
nx.set_node_attributes(G_isNOTinDict, 'degres', att_deg)
att_cluster = nx.clustering(G_isNOTinDict)
type(att_deg)
nodeliste = list(G_isinDict.nodes())  # assumption: the original line was truncated; 'G' is taken to mean G_isinDict here
###Output
_____no_output_____
###Markdown
Preparing the data for GraphX
###Code
df_nodes_users = pd.DataFrame(columns = ['USERS'])
df_nodes_users["USERS"] = df_nodes.apply( lambda row :( str(row["NODE"])+",N"+str(row["NODE"])+",n"+str(row["NODE"])) , axis = 1)
df_nodes_users.head(10)
df_nodes_users.drop(["NODE"], axis = 1, inplace = True)
df_nodes_users.to_csv('/home/pb19121/datagraphx/friendsterusers.txt', sep = ' ',index = False, header= False)
df_union_nodes.to_csv('/home/pb19121/datagraphx/friendsterallusers.txt', sep = ' ',index = False, header= False)
df_edges_isnode.reindex(['TO', 'FROM'], axis=1).to_csv('/home/pb19121/datagraphx/friendsterfollowersReverse.txt', sep = ' ',index = False, header= False)
df_edges_isnode.to_csv('/home/pb19121/datagraphx/friendsterfollowers.txt', sep = ' ',index = False, header= False)
df_edges.reindex(['TO', 'FROM'], axis=1).to_csv('/home/pb19121/datagraphx/friendsterallfollowers.txt', sep = ' ',index = False, header= False)
###Output
_____no_output_____ |
book/3-linear-scikitlearn.ipynb | ###Markdown
Linear Fitting with SciKit Learn========================= Overview Questions How can I fit a linear equation using scikitlearn? How can I fit a linear equation with multiple variables using scikitlearn? Objectives: Slice a pandas dataframe to get `X` and `Y` values and convert them to NumPy Arrays. Use the `LinearRegression` model in scikitlearn to perform a linear fit. Keypoints: You must import and create the model you want to use from scikitlearn. SciKitLearn models require `X` and `Y` values that are at least two dimensional. Use `.reshape` on your NumPy arrays to make sure they are the correct dimension. Fit SciKitLearn models by giving them data and using the `fit` method. Use the `predict` method after fitting to make predictions. In this lesson, we are going to use the [scikitlearn](https://scikit-learn.org/) library to do a linear fit. When it comes to fitting equations in Python, you will encounter a lot of options. In this workshop, we will work with the library scikitlearn, and later the library statsmodels. Another library you might encounter when doing fitting is [scipy](https://www.scipy.org/). While the functionalities available from these libraries can be similar in some cases, each has different strengths. Scipy is used for scientific applications. Statsmodels provides rigorous statistics, while scipy has a lot of functionality around science and engineering applications. SciKitLearn, which we use in this section, is geared toward machine learning. For this lesson, we will just be doing linear fits. However, scikitlearn has many different models built in, some of which we will see in the next session. SciKitLearn might not be the easiest library to use for analysis depending on your use case. However, we start with it here so we can better understand how to use the library for more complicated examples and models. [API of scikitlearn](https://scikit-learn.org/stable/developers/develop.html)
###Code
ls data
###Output
PubChemElements_all.csv potts_table1.csv potts_table2.csv
delaney-processed.csv potts_table1_clean.csv [1m[36mrxnpredict[m[m/
###Markdown
We start by reading in the data we worked to clean during the last session. We named this file `potts_table1_clean.csv`. Now that the data is clean, we should expect that it will load correctly with correct data types, and that we shouldn't have too many problems working with it.
###Code
import os
import pandas as pd
path = os.path.join("data", "potts_table1_clean.csv")
df = pd.read_csv(path)
df.head()
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 37 entries, 0 to 36
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Compound 37 non-null object
1 log P 34 non-null float64
2 pi 37 non-null float64
3 Hd 37 non-null float64
4 Ha 37 non-null float64
5 MV 36 non-null float64
6 R_2 37 non-null float64
7 log K_oct 36 non-null float64
8 log K_hex 30 non-null float64
9 log K_hep 24 non-null float64
dtypes: float64(9), object(1)
memory usage: 3.0+ KB
###Markdown
Linear Regression using SciKitLearnSciKitLearn has a number of [linear models](https://scikit-learn.org/stable/modules/classes.html?highlight=linear_modelmodule-sklearn.linear_model) available. The purpose of this workshop is not to cover what linear models exist or in what context to use them. Rather, we are covering how to use these models in Python. We will do a simple linear fit using the ordinary least squares method. Preparing Data for Fitting with SciKitLearnBefore we do the fit, we will need to make sure our data is ready. Although this data is clean, we still have some missing `NaN` values. In the paper, the authors perform a multiple linear regression, meaning that they fit a line based on many variables. We will do this, but we will first fit a simple linear model with one variable. From our initial exploration with seaborn, we are going to decide to fit `log P` as a function of `MV`. SciKitLearn models are going to require us to pass NumPy arrays to them as data. When we do this fit, it is also necessary to drop any values which are `NaN`. Check your understanding Prepare a dataframe which can be used for fitting. One approach to this would be to first slice the dataframe to have only the columns of interest, then to use `dropna` on the dataframe to drop the unneeded rows. Save your prepared data in a variable called `fit_data`. ```{admonition} Solution:class: dropdown```pythonfit_data = df[["log P", "MV"]]fit_data = fit_data.dropna(axis=0, how="any")```
###Code
fit_data = df[["log P", "MV"]]
fit_data = fit_data.dropna(axis=0, how="any")
fit_data.head()
###Output
_____no_output_____
###Markdown
```{note}We could have alternatively used the `dropna` function on the original dataframe with an additional argument of `subset` which says to only consider our two columns of interest. Then, we would have had a dataframe called `fit_data` which retained all of the columns, but had a value for every cell in both the `log P` and `MV` columns.```pythonfit_data = df.dropna(subset=["log P", "MV"], how="any")``````{admonition} When to use dropna:class: tipIt is important that when you use `dropna` on data you intend to fit, you do it in a single call with the argument `how="any"`. This is because it is imperative that the values of X and Y match with one another and are of the same length.```
###Code
X = fit_data["MV"].to_numpy()
Y = fit_data["log P"].to_numpy()
###Output
_____no_output_____
###Markdown
SciKitLearn ModelsNow that we have prepared our X and Y variables, let's see how we would do a fit using scikitlearn.Typically when doing fitting with scikitlearn, the first thing you will do is to import the type of model you want to use. In our case, we are importing a `LinearRegression` model. This type of model performs ordinary least squares fitting. You will first import the model, then you will create a model object. After creation, you will give data to the model and tell it to perform a fit. Your model can then be used to make predictions.
###Code
from sklearn.linear_model import LinearRegression
###Output
_____no_output_____
###Markdown
Now that you have imported the model, you can read more about it either on the [SciKitLearn](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.htmlsklearn.linear_model.LinearRegression) website, or by using the built-in Python `help` function.
###Code
help(LinearRegression)
###Output
Help on class LinearRegression in module sklearn.linear_model._base:
class LinearRegression(sklearn.base.MultiOutputMixin, sklearn.base.RegressorMixin, LinearModel)
| LinearRegression(*, fit_intercept=True, normalize=False, copy_X=True, n_jobs=None, positive=False)
|
| Ordinary least squares Linear Regression.
|
| LinearRegression fits a linear model with coefficients w = (w1, ..., wp)
| to minimize the residual sum of squares between the observed targets in
| the dataset, and the targets predicted by the linear approximation.
|
| Parameters
| ----------
| fit_intercept : bool, default=True
| Whether to calculate the intercept for this model. If set
| to False, no intercept will be used in calculations
| (i.e. data is expected to be centered).
|
| normalize : bool, default=False
| This parameter is ignored when ``fit_intercept`` is set to False.
| If True, the regressors X will be normalized before regression by
| subtracting the mean and dividing by the l2-norm.
| If you wish to standardize, please use
| :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
| on an estimator with ``normalize=False``.
|
| copy_X : bool, default=True
| If True, X will be copied; else, it may be overwritten.
|
| n_jobs : int, default=None
| The number of jobs to use for the computation. This will only provide
| speedup for n_targets > 1 and sufficient large problems.
| ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
| ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
| for more details.
|
| positive : bool, default=False
| When set to ``True``, forces the coefficients to be positive. This
| option is only supported for dense arrays.
|
| .. versionadded:: 0.24
|
| Attributes
| ----------
| coef_ : array of shape (n_features, ) or (n_targets, n_features)
| Estimated coefficients for the linear regression problem.
| If multiple targets are passed during the fit (y 2D), this
| is a 2D array of shape (n_targets, n_features), while if only
| one target is passed, this is a 1D array of length n_features.
|
| rank_ : int
| Rank of matrix `X`. Only available when `X` is dense.
|
| singular_ : array of shape (min(X, y),)
| Singular values of `X`. Only available when `X` is dense.
|
| intercept_ : float or array of shape (n_targets,)
| Independent term in the linear model. Set to 0.0 if
| `fit_intercept = False`.
|
| See Also
| --------
| Ridge : Ridge regression addresses some of the
| problems of Ordinary Least Squares by imposing a penalty on the
| size of the coefficients with l2 regularization.
| Lasso : The Lasso is a linear model that estimates
| sparse coefficients with l1 regularization.
| ElasticNet : Elastic-Net is a linear regression
| model trained with both l1 and l2 -norm regularization of the
| coefficients.
|
| Notes
| -----
| From the implementation point of view, this is just plain Ordinary
| Least Squares (scipy.linalg.lstsq) or Non Negative Least Squares
| (scipy.optimize.nnls) wrapped as a predictor object.
|
| Examples
| --------
| >>> import numpy as np
| >>> from sklearn.linear_model import LinearRegression
| >>> X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
| >>> # y = 1 * x_0 + 2 * x_1 + 3
| >>> y = np.dot(X, np.array([1, 2])) + 3
| >>> reg = LinearRegression().fit(X, y)
| >>> reg.score(X, y)
| 1.0
| >>> reg.coef_
| array([1., 2.])
| >>> reg.intercept_
| 3.0000...
| >>> reg.predict(np.array([[3, 5]]))
| array([16.])
|
| Method resolution order:
| LinearRegression
| sklearn.base.MultiOutputMixin
| sklearn.base.RegressorMixin
| LinearModel
| sklearn.base.BaseEstimator
| builtins.object
|
| Methods defined here:
|
| __init__(self, *, fit_intercept=True, normalize=False, copy_X=True, n_jobs=None, positive=False)
| Initialize self. See help(type(self)) for accurate signature.
|
| fit(self, X, y, sample_weight=None)
| Fit linear model.
|
| Parameters
| ----------
| X : {array-like, sparse matrix} of shape (n_samples, n_features)
| Training data
|
| y : array-like of shape (n_samples,) or (n_samples, n_targets)
| Target values. Will be cast to X's dtype if necessary
|
| sample_weight : array-like of shape (n_samples,), default=None
| Individual weights for each sample
|
| .. versionadded:: 0.17
| parameter *sample_weight* support to LinearRegression.
|
| Returns
| -------
| self : returns an instance of self.
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| __abstractmethods__ = frozenset()
|
| ----------------------------------------------------------------------
| Data descriptors inherited from sklearn.base.MultiOutputMixin:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
|
| ----------------------------------------------------------------------
| Methods inherited from sklearn.base.RegressorMixin:
|
| score(self, X, y, sample_weight=None)
| Return the coefficient of determination :math:`R^2` of the
| prediction.
|
| The coefficient :math:`R^2` is defined as :math:`(1 - \frac{u}{v})`,
| where :math:`u` is the residual sum of squares ``((y_true - y_pred)
| ** 2).sum()`` and :math:`v` is the total sum of squares ``((y_true -
| y_true.mean()) ** 2).sum()``. The best possible score is 1.0 and it
| can be negative (because the model can be arbitrarily worse). A
| constant model that always predicts the expected value of `y`,
| disregarding the input features, would get a :math:`R^2` score of
| 0.0.
|
| Parameters
| ----------
| X : array-like of shape (n_samples, n_features)
| Test samples. For some estimators this may be a precomputed
| kernel matrix or a list of generic objects instead with shape
| ``(n_samples, n_samples_fitted)``, where ``n_samples_fitted``
| is the number of samples used in the fitting for the estimator.
|
| y : array-like of shape (n_samples,) or (n_samples, n_outputs)
| True values for `X`.
|
| sample_weight : array-like of shape (n_samples,), default=None
| Sample weights.
|
| Returns
| -------
| score : float
| :math:`R^2` of ``self.predict(X)`` wrt. `y`.
|
| Notes
| -----
| The :math:`R^2` score used when calling ``score`` on a regressor uses
| ``multioutput='uniform_average'`` from version 0.23 to keep consistent
| with default value of :func:`~sklearn.metrics.r2_score`.
| This influences the ``score`` method of all the multioutput
| regressors (except for
| :class:`~sklearn.multioutput.MultiOutputRegressor`).
|
| ----------------------------------------------------------------------
| Methods inherited from LinearModel:
|
| predict(self, X)
| Predict using the linear model.
|
| Parameters
| ----------
| X : array-like or sparse matrix, shape (n_samples, n_features)
| Samples.
|
| Returns
| -------
| C : array, shape (n_samples,)
| Returns predicted values.
|
| ----------------------------------------------------------------------
| Methods inherited from sklearn.base.BaseEstimator:
|
| __getstate__(self)
|
| __repr__(self, N_CHAR_MAX=700)
| Return repr(self).
|
| __setstate__(self, state)
|
| get_params(self, deep=True)
| Get parameters for this estimator.
|
| Parameters
| ----------
| deep : bool, default=True
| If True, will return the parameters for this estimator and
| contained subobjects that are estimators.
|
| Returns
| -------
| params : dict
| Parameter names mapped to their values.
|
| set_params(self, **params)
| Set the parameters of this estimator.
|
| The method works on simple estimators as well as on nested objects
| (such as :class:`~sklearn.pipeline.Pipeline`). The latter have
| parameters of the form ``<component>__<parameter>`` so that it's
| possible to update each component of a nested object.
|
| Parameters
| ----------
| **params : dict
| Estimator parameters.
|
| Returns
| -------
| self : estimator instance
| Estimator instance.
###Markdown
Before we do the fit, we first create the model. When we create this model, we specify settings for it, such as whether we want the linear model to have an intercept. It will have one by default, but if you wanted to do an ordinary least squares fit without an intercept, you would specify it when you create the model. After we create the model, we give it data and call the `fit` method. Then, the model will contain information about coefficients and an intercept. We will fit an equation for `log P` based on some other variable in the data frame. First, we have to get our data as NumPy arrays. Next, we will create the linear model using `LinearRegression()`. To perform the fit, we use the `fit` method on the linear model we created. As we have it now, it will not quite work. The error message is shown below for discussion.
###Code
linear_model = LinearRegression()
linear_model.fit(X, Y)
###Output
_____no_output_____
###Markdown
It is at this point that the array's **shape** becomes important. Typically if you print the shape of a pandas Series or a one dimensional slice of a NumPy array, you will see something like `(n, )`. For example,
###Code
X.shape
###Output
_____no_output_____
###Markdown
This array is one dimensional, meaning that its shape is specified with only one number. You can sort of think of this as a vector. Even though a shape of `(33, 1)` might not seem very different, it is necessary for fitting with scikitlearn. You will see that scikitlearn even tells us this in the error message.```Reshape your data either using array.reshape(-1, 1) if your data has a single feature or array.reshape(1, -1) if it contains a single sample.```In NumPy, when we use a `-1` in a dimension it basically translates into whatever number is required in order for the array to have the other specified dimensions. Otherwise, we would have to specify the number of rows for each reshape command. We don't really know or care about the number of rows; what's important for us is that the data be in a single column.
###Code
X = X.reshape(-1, 1)
X.shape
Y = Y.reshape(-1, 1)
linear_model.fit(X, Y)
###Output
_____no_output_____
###Markdown
The `linear_model` variable now contains our linear fit. We can check our fit coefficient and intercept.
###Code
print(f"The intercept is {linear_model.coef_} and the intercept is {linear_model.intercept_}.")
###Output
The coefficient is [[0.02705618]] and the intercept is [-7.24687081].
###Markdown
Check your understanding Perform a second linear fit for `logP` vs `pi`. Save your variables as `X_pi`, `Y_pi`, and `linear_pi`. ```{admonition} Solution:class: dropdown```pythonfit_data_pi = df.dropna(subset=["log P", "pi"], how="any")linear_pi = LinearRegression()X_pi = fit_data_pi["pi"].to_numpy().reshape(-1, 1)Y_pi = fit_data_pi["log P"].to_numpy().reshape(-1, 1)linear_pi.fit(X_pi, Y_pi)print(f"The coefficient is {linear_pi.coef_} and the intercept is {linear_pi.intercept_}.")```
###Code
fit_data_pi = df.dropna(subset=["log P", "pi"], how="any")
linear_pi = LinearRegression()
X_pi = fit_data_pi["pi"].to_numpy().reshape(-1, 1)
Y_pi = fit_data_pi["log P"].to_numpy().reshape(-1, 1)
linear_pi.fit(X_pi, Y_pi)
print(f"The intercept is {linear_pi.coef_} and the intercept is {linear_pi.intercept_}.")
###Output
The coefficient is [[0.06416467]] and the intercept is [-5.59119251].
###Markdown
We might next be interested in how good these fits are, or fit metrics. We might evaluate that using an `R2` value. In scikitlearn for the linear model, this is accessible through `model.score`.
###Code
r2 = linear_model.score(X, Y)
print(f"The r2 score for log P vs MV is {r2}")
r2_pi = linear_pi.score(X_pi, Y_pi)
print(f"The r2 score for log P vs pi is {r2_pi}")
###Output
The r2 score for log P vs pi is 0.000529506196607854
###Markdown
Making Predictions
###Code
fit_data["model_prediction"] = linear_model.predict(X)
fit_data.head()
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib notebook
sns.set_theme(font_scale=1.25)
g = sns.lmplot(x="log P", y="model_prediction", data=fit_data)
g.ax.annotate(rf"$r^2$={r2_pi:.3f}", xy=(-6, -4))
g.tight_layout()
import numpy as np
# Predict for single made up value
linear_model.predict(np.array([0.75]).reshape(1, -1))
###Output
_____no_output_____
###Markdown
Multiple Linear Regression
###Code
fit_data = df[["log P", "MV", "pi", "Ha", "Hd", "R_2"]].copy()
fit_data.head()
fit_data.dropna(axis=0, inplace=True)
X = fit_data[["MV", "pi", "Ha", "Hd", "R_2"]].to_numpy()
Y = fit_data["log P"].to_numpy()
multiple_reg = LinearRegression().fit(X, Y)
print(multiple_reg.coef_)
print(multiple_reg.intercept_)
r2_m = multiple_reg.score(X, Y)
fit_data["model_prediction"] = multiple_reg.predict(X)
g = sns.lmplot(x="log P", y="model_prediction", data=fit_data)
g.ax.annotate(rf"$r^2$={r2_m:.3f}", xy=(-6, -4))
g.tight_layout()
g.savefig("session3.png", dpi=250)
###Output
_____no_output_____ |
Day_6/K_Nearest_Neighbors.ipynb | ###Markdown
K Nearest Neighbors Imports
###Code
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn import neighbors
###Output
_____no_output_____
###Markdown
Load Data
###Code
iris = datasets.load_iris()
###Output
_____no_output_____
###Markdown
Build Model Assign X and y to hold data and target
###Code
X = iris.data
y = iris.target
###Output
_____no_output_____
###Markdown
Split X and y into training and test sets
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state = 2)
###Output
_____no_output_____
###Markdown
Aside: Viualize data
###Code
# Shows the different classes present in training data
plt.scatter(X_train[:,0],X_train[:,1], c=y_train)
# Shows the points that we are trying to classify in "magenta", notice how they are scattered throughout?
# Our model will classify each of those magenta dots as belonging to one of the existing classes,
# based on the colored points around the point being tested.
plt.scatter(X_test[:,0], X_test[:,1], c='m')
plt.show()
###Output
_____no_output_____
###Markdown
Create Model
###Code
m = neighbors.KNeighborsClassifier(n_neighbors=5)
###Output
_____no_output_____
###Markdown
Fit model to training data
###Code
m.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Model Score
###Code
#Higher score is good! :)
m.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
Make predictions!
###Code
predictions = m.predict(X_test)
###Output
_____no_output_____
###Markdown
"Eyeball" the accuracy of model.
###Code
print("predictions:", predictions)
print("actual clss:", y_test)
# I adjusted the model to 5 nearest neighbors, now it's a perfect match!
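# (Added sketch, not in the original notebook) a more systematic way to choose k:
# mean cross-validated accuracy over a range of k values on the training split.
from sklearn.model_selection import cross_val_score
for k in range(1, 11):
    cv_acc = cross_val_score(neighbors.KNeighborsClassifier(n_neighbors=k),
                             X_train, y_train, cv=5).mean()
    print(f"k={k}: CV accuracy = {cv_acc:.3f}")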
###Output
_____no_output_____
###Markdown
Classification Report
###Code
classification_report = metrics.classification_report(y_test, predictions)
print(classification_report)
###Output
_____no_output_____
###Markdown
Confusion Matrix
###Code
confusion_matrix = metrics.confusion_matrix(y_test, predictions)
print(confusion_matrix)
###Output
_____no_output_____ |
Parameter_Search_Graph_5years.ipynb | ###Markdown
Parameter Search
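For reference (added here, and using synthetic data rather than the game dataset), scikit-learn's `validation_curve` performs a comparable single-parameter sweep when plain cross-validated accuracy is an acceptable score; the custom `ParameterSearch` below instead scores each setting with `gamePrediction` on real games.

```python
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import validation_curve

X_demo, y_demo = make_classification(n_samples=500, random_state=0)
train_scores, valid_scores = validation_curve(
    LogisticRegression(max_iter=300), X_demo, y_demo,
    param_name="C", param_range=[0.01, 0.1, 1, 10], cv=5)
print(valid_scores.mean(axis=1))
```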
###Code
def ParameterSearch(X=None, Y=None, dateStart='2018-04-14', dateEnd='2018-06-01', Team_A=None, Team_B=None,
param_name=None, param_list=None, model=None, title=None, **other_setting):
total_acc = []
max_acc = -1
max_param_val = -1
total_len = len(param_list)
count_prog = 0
for val in param_list:
other_setting[param_name] = val
model.set_params(**other_setting)
model.fit(X, Y.values.ravel())
modelsLUT = {
'model': model
}
ret_acc = gamePrediction(dfFile, modelsLUT, dateStart, dateEnd, period, Team_A, Team_B, featureSel, 0)
for model_name in modelsLUT:
total_acc.append(ret_acc[model_name])
if(total_acc[-1] > max_acc):
max_acc = total_acc[-1]
max_param_val = val
count_prog += 1
print('Progress = {x}%'.format(x = count_prog/total_len*100))
print(f'max_acc = {max_acc}, with {param_name} = {max_param_val}')
plt.plot(param_list, total_acc)
plt.xlabel(f' {title} ')
plt.ylabel(' Accuracy ')
plt.show()
###Output
_____no_output_____
###Markdown
Logistic Regressor
###Code
ParameterSearch(X, Y, dateStart='2018-04-14', dateEnd='2018-06-01'
, Team_A=None, Team_B=None, param_name='C'
, param_list=[x/100 for x in range(1, 100, 5)]+[x for x in range(1, 1000, 50)], model=LogisticRegression()
, title = 'C', max_iter=300)
ParameterSearch(X, Y, dateStart='2018-04-14', dateEnd='2018-06-01'
, Team_A=None, Team_B=None, param_name='max_iter'
, param_list=[0.01, 0.1, 1, 10, 100, 1000, 10000], model=LogisticRegression()
, title = 'max_iter', C=0.01)
###Output
Progress = 14.285714285714285%
Progress = 28.57142857142857%
Progress = 42.857142857142854%
Progress = 57.14285714285714%
Progress = 71.42857142857143%
Progress = 85.71428571428571%
Progress = 100.0%
max_acc = 0.7088607594936709, with max_iter = 10
###Markdown
Result : C = 0.01, max_iter = 10 SVM
###Code
ParameterSearch(X, Y, dateStart='2018-04-14', dateEnd='2018-06-01'
, Team_A=None, Team_B=None, param_name='C'
, param_list=[0.01, 0.1, 1, 10, 100, 1000, 10000], model=SVC()
, title='C, kernal=linear', kernel='linear', probability=True)
ParameterSearch(X, Y, dateStart='2018-04-14', dateEnd='2018-06-01'
, Team_A=None, Team_B=None, param_name='C'
, param_list=[x/100 for x in range(1, 200, 5)], model=SVC()
, title='C, kernal=linear', kernel='linear', probability=True)
ParameterSearch(X, Y, dateStart='2018-04-14', dateEnd='2018-06-01'
, Team_A=None, Team_B=None, param_name='C'
, param_list=[0.01, 0.1, 1, 10, 100, 1000, 10000]
, model=SVC(), title='C, kernal=rbf'
, kernel='rbf', gamma=1, probability=True)
ParameterSearch(X, Y, dateStart='2018-04-14', dateEnd='2018-06-01'
, Team_A=None, Team_B=None, param_name='gamma'
, param_list=[0.01, 0.1, 1, 10, 100, 1000, 10000], model=SVC()
, title='gamma, kernal=rbf', C=0.01, kernel='rbf', probability=True)
ParameterSearch(X, Y, dateStart='2018-04-14', dateEnd='2018-06-01'
, Team_A=None, Team_B=None, param_name='gamma'
, param_list=[x/100 for x in range(1, 100, 5)], model=SVC()
, title='gamma, kernal=rbf', C=0.01, kernel='rbf', probability=True)
ParameterSearch(X, Y, dateStart='2018-04-14', dateEnd='2018-06-01'
, Team_A=None, Team_B=None, param_name='C'
, param_list=[0.01, 0.1, 1, 10, 100, 1000, 10000]
, model=SVC(), title='C, kernal=rbf'
, kernel='rbf', gamma=0.11, probability=True)
###Output
Progress = 14.285714285714285%
Progress = 28.57142857142857%
Progress = 42.857142857142854%
Progress = 57.14285714285714%
Progress = 71.42857142857143%
Progress = 85.71428571428571%
Progress = 100.0%
max_acc = 0.6835443037974683, with C = 0.01
###Markdown
Result = kernel='linear', C = 0.06 XGBClassifier
###Code
ParameterSearch(X, Y, dateStart='2018-04-14', dateEnd='2018-06-01'
, Team_A=None, Team_B=None, param_name='max_depth'
, param_list=[1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31], model=xgb.XGBClassifier()
, title='max_depth', learning_rate=0.1, n_estimators=100)
ParameterSearch(X, Y, dateStart='2018-04-14', dateEnd='2018-06-01'
, Team_A=None, Team_B=None, param_name='n_estimators'
, param_list=[1, 10, 100, 1000, 10000], model=xgb.XGBClassifier()
, title='n_estimators', learning_rate=0.1, max_depth=5)
ParameterSearch(X, Y, dateStart='2018-04-14', dateEnd='2018-06-01'
, Team_A=None, Team_B=None, param_name='n_estimators'
, param_list=range(10, 500, 10), model=xgb.XGBClassifier()
, title='n_estimators', learning_rate=0.1, max_depth=5)
ParameterSearch(X, Y, dateStart='2018-04-14', dateEnd='2018-06-01'
, Team_A=None, Team_B=None, param_name='max_depth'
, param_list=[1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31], model=xgb.XGBClassifier()
, title='max_depth', learning_rate=0.1, n_estimators=230)
ParameterSearch(X, Y, dateStart='2018-04-14', dateEnd='2018-06-01'
, Team_A=None, Team_B=None, param_name='learning_rate'
, param_list=[0.01, 0.1, 1, 10, 100, 1000, 10000], model=xgb.XGBClassifier()
, title='learning_rate', max_depth=5, n_estimators=230)
ParameterSearch(X, Y, dateStart='2018-04-14', dateEnd='2018-06-01'
, Team_A=None, Team_B=None, param_name='learning_rate'
, param_list=[x/100 for x in range(1, 100, 5)], model=xgb.XGBClassifier()
, title='learning_rate', max_depth=5, n_estimators=230)
ParameterSearch(X, Y, dateStart='2018-04-14', dateEnd='2018-06-01'
, Team_A=None, Team_B=None, param_name='max_depth'
, param_list=[1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31], model=xgb.XGBClassifier()
, title='max_depth', learning_rate=0.06, n_estimators=230)
ParameterSearch(X, Y, dateStart='2018-04-14', dateEnd='2018-06-01'
, Team_A=None, Team_B=None, param_name='n_estimators'
, param_list=range(10, 500, 10), model=xgb.XGBClassifier()
, title='n_estimators', learning_rate=0.06, max_depth=3)
ParameterSearch(X, Y, dateStart='2018-04-14', dateEnd='2018-06-01'
, Team_A=None, Team_B=None, param_name='max_depth'
, param_list=[1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31], model=xgb.XGBClassifier()
, title='max_depth', learning_rate=0.06, n_estimators=270)
ParameterSearch(X, Y, dateStart='2018-04-14', dateEnd='2018-06-01'
, Team_A=None, Team_B=None, param_name='learning_rate'
, param_list=[0.01, 0.1, 1, 10, 1000, 10000], model=xgb.XGBClassifier()
, title='learning_rate', max_depth=3, n_estimators=270)
ParameterSearch(X, Y, dateStart='2018-04-14', dateEnd='2018-06-01'
, Team_A=None, Team_B=None, param_name='learning_rate'
, param_list=range(1000, 3000, 50), model=xgb.XGBClassifier()
, title='learning_rate', max_depth=3, n_estimators=270)
###Output
/home/coslate/anaconda3/lib/python3.6/site-packages/sklearn/preprocessing/label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.
if diff:
|
scripts/d21-en/pytorch/chapter_natural-language-processing-pretraining/word-embedding-dataset.ipynb | ###Markdown
The Dataset for Pretraining Word Embedding:label:`sec_word2vec_data`In this section, we will introduce how to preprocess a dataset with negative sampling :numref:`sec_approx_train` and load it into minibatches for word2vec training. The dataset we use is [Penn Tree Bank (PTB)]( https://catalog.ldc.upenn.edu/LDC99T42), which is a small but commonly-used corpus. It takes samples from Wall Street Journal articles and includes training sets, validation sets, and test sets. First, import the packages and modules required for the experiment.
###Code
import math
import os
import random
import torch
from d2l import torch as d2l
###Output
_____no_output_____
###Markdown
Reading and Preprocessing the DatasetThis dataset has already been preprocessed. Each line of the dataset acts as a sentence. All the words in a sentence are separated by spaces. In the word embedding task, each word is a token.
###Code
#@save
d2l.DATA_HUB['ptb'] = (d2l.DATA_URL + 'ptb.zip',
'319d85e578af0cdc590547f26231e4e31cdf1e42')
#@save
def read_ptb():
data_dir = d2l.download_extract('ptb')
with open(os.path.join(data_dir, 'ptb.train.txt')) as f:
raw_text = f.read()
return [line.split() for line in raw_text.split('\n')]
sentences = read_ptb()
f'# sentences: {len(sentences)}'
###Output
Downloading ../data/ptb.zip from http://d2l-data.s3-accelerate.amazonaws.com/ptb.zip...
###Markdown
Next we build a vocabulary, mapping words that appear fewer than 10 times into a "<unk>" token. Note that the preprocessed PTB data also contains "<unk>" tokens representing rare words.
###Code
vocab = d2l.Vocab(sentences, min_freq=10)
f'vocab size: {len(vocab)}'
###Output
_____no_output_____
###Markdown
SubsamplingIn text data, there are generally some words that appear at high frequencies, such as "the", "a", and "in" in English. Generally speaking, in a context window, it is better to train the word embedding model when a word (such as "chip") and a lower-frequency word (such as "microprocessor") appear at the same time, rather than when a word appears with a higher-frequency word (such as "the"). Therefore, when training the word embedding model, we can perform subsampling on the words :cite:`Mikolov.Sutskever.Chen.ea.2013`. Specifically, each indexed word $w_i$ in the dataset will drop out at a certain probability. The dropout probability is given as:$$ P(w_i) = \max\left(1 - \sqrt{\frac{t}{f(w_i)}}, 0\right),$$Here, $f(w_i)$ is the ratio of the instances of word $w_i$ to the total number of words in the dataset, and the constant $t$ is a hyperparameter (set to $10^{-4}$ in this experiment). As we can see, it is only possible to drop out the word $w_i$ in subsampling when $f(w_i) > t$. The higher the word's frequency, the higher its dropout probability.
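As a quick numerical illustration (added, not part of the original text): with $t = 10^{-4}$, a token that makes up 1% of the corpus is kept with probability $\sqrt{10^{-4}/0.01} = 0.1$, i.e. dropped about 90% of the time, while tokens with $f(w_i) \leq t$ are never dropped.

```python
import math

t = 1e-4
for f in (1e-2, 1e-3, 1e-4, 1e-5):
    p_drop = max(1 - math.sqrt(t / f), 0)
    print(f"f(w_i) = {f:g}: drop probability = {p_drop:.2f}")
```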
###Code
#@save
def subsampling(sentences, vocab):
# Map low frequency words into <unk>
sentences = [[vocab.idx_to_token[vocab[tk]] for tk in line]
for line in sentences]
# Count the frequency for each word
counter = d2l.count_corpus(sentences)
num_tokens = sum(counter.values())
# Return True if to keep this token during subsampling
def keep(token):
return (random.uniform(0, 1) < math.sqrt(
1e-4 / counter[token] * num_tokens))
# Now do the subsampling
return [[tk for tk in line if keep(tk)] for line in sentences]
subsampled = subsampling(sentences, vocab)
###Output
_____no_output_____
###Markdown
Comparing the sequence lengths before and after sampling, we can see that subsampling significantly reduced the sequence lengths.
###Code
d2l.set_figsize()
d2l.plt.hist([[len(line) for line in sentences],
[len(line) for line in subsampled]])
d2l.plt.xlabel('# tokens per sentence')
d2l.plt.ylabel('count')
d2l.plt.legend(['origin', 'subsampled']);
###Output
_____no_output_____
###Markdown
For individual tokens, the sampling rate of the high-frequency word "the" is less than 1/20.
###Code
def compare_counts(token):
return (f'# of "{token}": '
f'before={sum([line.count(token) for line in sentences])}, '
f'after={sum([line.count(token) for line in subsampled])}')
compare_counts('the')
###Output
_____no_output_____
###Markdown
But the low-frequency word "join" is completely preserved.
###Code
compare_counts('join')
###Output
_____no_output_____
###Markdown
Last, we map each token into an index to construct the corpus.
###Code
corpus = [vocab[line] for line in subsampled]
corpus[0:3]
###Output
_____no_output_____
###Markdown
Loading the Dataset. Next we read the corpus with token indices into data batches for training. Extracting Central Target Words and Context Words. We use the words whose distance from the central target word does not exceed the context window size as the context words of that central target word. The following function extracts all the central target words and their context words. It uniformly and randomly samples an integer between 1 and `max_window_size` (the maximum context window) to be used as the context window size.
###Code
#@save
def get_centers_and_contexts(corpus, max_window_size):
centers, contexts = [], []
for line in corpus:
# Each sentence needs at least 2 words to form a "central target word
# - context word" pair
if len(line) < 2:
continue
centers += line
for i in range(len(line)): # Context window centered at i
window_size = random.randint(1, max_window_size)
indices = list(
range(max(0, i - window_size),
min(len(line), i + 1 + window_size)))
# Exclude the central target word from the context words
indices.remove(i)
contexts.append([line[idx] for idx in indices])
return centers, contexts
###Output
_____no_output_____
###Markdown
Next, we create an artificial dataset containing two sentences of 7 and 3 words, respectively. Assume the maximum context window is 2 and print all the central target words and their context words.
###Code
tiny_dataset = [list(range(7)), list(range(7, 10))]
print('dataset', tiny_dataset)
for center, context in zip(*get_centers_and_contexts(tiny_dataset, 2)):
print('center', center, 'has contexts', context)
###Output
dataset [[0, 1, 2, 3, 4, 5, 6], [7, 8, 9]]
center 0 has contexts [1]
center 1 has contexts [0, 2]
center 2 has contexts [0, 1, 3, 4]
center 3 has contexts [2, 4]
center 4 has contexts [3, 5]
center 5 has contexts [3, 4, 6]
center 6 has contexts [5]
center 7 has contexts [8, 9]
center 8 has contexts [7, 9]
center 9 has contexts [7, 8]
###Markdown
We set the maximum context window size to 5. The following extracts all the central target words and their context words in the dataset.
###Code
all_centers, all_contexts = get_centers_and_contexts(corpus, 5)
f'# center-context pairs: {len(all_centers)}'
###Output
_____no_output_____
###Markdown
Negative Sampling. We use negative sampling for approximate training. For a central and context word pair, we randomly sample $K$ noise words ($K=5$ in the experiment). According to the suggestion in the word2vec paper, the noise word sampling probability $P(w)$ is proportional to the ratio of the word frequency of $w$ to the total word frequency, raised to the power of 0.75 :cite:`Mikolov.Sutskever.Chen.ea.2013`. We first define a class to draw a candidate according to the sampling weights. It caches a bank of 10000 random numbers instead of calling `random.choices` on every draw.
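To see why the 0.75 exponent helps, consider two words whose counts are 16 and 1 (an illustrative calculation only): with raw frequencies their sampling weights would differ by a factor of 16, but after raising to the power of 0.75 the weights become $16^{0.75} = 8$ and $1^{0.75} = 1$, so very frequent words are down-weighted relative to rare ones.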
###Code
#@save
class RandomGenerator:
"""Draw a random int in [0, n] according to n sampling weights."""
def __init__(self, sampling_weights):
self.population = list(range(len(sampling_weights)))
self.sampling_weights = sampling_weights
self.candidates = []
self.i = 0
def draw(self):
if self.i == len(self.candidates):
self.candidates = random.choices(self.population,
self.sampling_weights, k=10000)
self.i = 0
self.i += 1
return self.candidates[self.i - 1]
generator = RandomGenerator([2, 3, 4])
[generator.draw() for _ in range(10)]
#@save
def get_negatives(all_contexts, corpus, K):
counter = d2l.count_corpus(corpus)
sampling_weights = [counter[i]**0.75 for i in range(len(counter))]
all_negatives, generator = [], RandomGenerator(sampling_weights)
for contexts in all_contexts:
negatives = []
while len(negatives) < len(contexts) * K:
neg = generator.draw()
# Noise words cannot be context words
if neg not in contexts:
negatives.append(neg)
all_negatives.append(negatives)
return all_negatives
all_negatives = get_negatives(all_contexts, corpus, 5)
###Output
_____no_output_____
###Markdown
Reading into Batches. We extract all central target words `all_centers`, and the context words `all_contexts` and noise words `all_negatives` of each central target word from the dataset. We will read them in random minibatches. In a minibatch of data, the $i^\mathrm{th}$ example includes a central word and its corresponding $n_i$ context words and $m_i$ noise words. Since the context window size of each example may be different, the sum of context words and noise words, $n_i+m_i$, will be different. When constructing a minibatch, we concatenate the context words and noise words of each example, and add 0s for padding until the lengths of the concatenations are the same, that is, the length of all concatenations is $\max_i n_i+m_i$ (`max_len`). In order to avoid the effect of padding on the loss function calculation, we construct the mask variable `masks`, each element of which corresponds to an element in the concatenation of context and noise words, `contexts_negatives`. When an element in the variable `contexts_negatives` is a padding, the element in the mask variable `masks` at the same position will be 0. Otherwise, it takes the value 1. In order to distinguish between positive and negative examples, we also need to distinguish the context words from the noise words in the `contexts_negatives` variable. Based on the construction of the mask variable, we only need to create a label variable `labels` with the same shape as the `contexts_negatives` variable and set the elements corresponding to context words (positive examples) to 1, and the rest to 0. Next, we will implement the minibatch reading function `batchify`. Its minibatch input `data` is a list whose length is the batch size, each element of which contains a central target word `center`, its context words `context`, and its noise words `negative`. The minibatch data returned by this function conforms to the format we need, for example, it includes the mask variable.
###Code
#@save
def batchify(data):
max_len = max(len(c) + len(n) for _, c, n in data)
centers, contexts_negatives, masks, labels = [], [], [], []
for center, context, negative in data:
cur_len = len(context) + len(negative)
centers += [center]
contexts_negatives += [context + negative + [0] * (max_len - cur_len)]
masks += [[1] * cur_len + [0] * (max_len - cur_len)]
labels += [[1] * len(context) + [0] * (max_len - len(context))]
return (torch.tensor(centers).reshape(
(-1, 1)), torch.tensor(contexts_negatives), torch.tensor(masks),
torch.tensor(labels))
###Output
_____no_output_____
###Markdown
Construct two simple examples:
###Code
x_1 = (1, [2, 2], [3, 3, 3, 3])
x_2 = (1, [2, 2, 2], [3, 3])
batch = batchify((x_1, x_2))
names = ['centers', 'contexts_negatives', 'masks', 'labels']
for name, data in zip(names, batch):
print(name, '=', data)
###Output
centers = tensor([[1],
[1]])
contexts_negatives = tensor([[2, 2, 3, 3, 3, 3],
[2, 2, 2, 3, 3, 0]])
masks = tensor([[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 0]])
labels = tensor([[1, 1, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0]])
###Markdown
We use the `batchify` function just defined to specify the minibatch reading method in the `DataLoader` instance. Putting All Things Together. Last, we define the `load_data_ptb` function that reads the PTB dataset and returns the data iterator.
###Code
#@save
def load_data_ptb(batch_size, max_window_size, num_noise_words):
num_workers = d2l.get_dataloader_workers()
sentences = read_ptb()
vocab = d2l.Vocab(sentences, min_freq=10)
subsampled = subsampling(sentences, vocab)
corpus = [vocab[line] for line in subsampled]
all_centers, all_contexts = get_centers_and_contexts(
corpus, max_window_size)
all_negatives = get_negatives(all_contexts, corpus, num_noise_words)
class PTBDataset(torch.utils.data.Dataset):
def __init__(self, centers, contexts, negatives):
assert len(centers) == len(contexts) == len(negatives)
self.centers = centers
self.contexts = contexts
self.negatives = negatives
def __getitem__(self, index):
return (self.centers[index], self.contexts[index],
self.negatives[index])
def __len__(self):
return len(self.centers)
dataset = PTBDataset(all_centers, all_contexts, all_negatives)
data_iter = torch.utils.data.DataLoader(dataset, batch_size, shuffle=True,
collate_fn=batchify,
num_workers=num_workers)
return data_iter, vocab
###Output
_____no_output_____
###Markdown
Let us print the first minibatch of the data iterator.
###Code
data_iter, vocab = load_data_ptb(512, 5, 5)
for batch in data_iter:
for name, data in zip(names, batch):
print(name, 'shape:', data.shape)
break
###Output
centers shape: torch.Size([512, 1])
contexts_negatives shape: torch.Size([512, 60])
masks shape: torch.Size([512, 60])
labels shape: torch.Size([512, 60])
|
III_DataEngineer_BDSE10/1905_MachineLearning/PySpark.ipynb | ###Markdown
Pandas vs. Pyspark
###Code
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
The Pandas DataFrame Object
###Code
%%time
flight = pd.read_csv("C:/temp/data2.csv")
flight.dtypes
flight.head()
flight['TaxiOut']
flight['TaxiOut'].mean()
###Output
_____no_output_____
###Markdown
Spark DataFrame
###Code
from pyspark.sql import SparkSession
# local mode
spark = SparkSession\
.builder\
.appName("demo")\
.getOrCreate()
%%time
df = spark.read.csv("file:///C:/temp/data2.csv", header=True, inferSchema=True)
df.printSchema()
df.head()
df.select('FlightDate','TaxiOut').show()
df.select('TaxiOut').show()
%%time
df.createOrReplaceTempView('flight')
spark.sql('select avg(TaxiOut) as average from flight').show()
###Output
+------------------+
| average|
+------------------+
|15.774657031107376|
+------------------+
Wall time: 14.6 s
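###Markdown
The same average can also be computed with the DataFrame API instead of registering a temp view and writing SQL — a minimal sketch using the `df` loaded above (the column name `TaxiOut` comes from that data):
###Code
from pyspark.sql import functions as F

# Aggregate directly on the DataFrame; no temp view or SQL string needed
df.agg(F.avg('TaxiOut').alias('average')).show()
###Output
_____no_output_____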
|
Covid19_Data_Analysis/Covid19_Data_Analysis.ipynb | ###Markdown
Covid19 Data Analysis 1. Importing the modules
###Code
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
print('Modules are imported.')
###Output
Modules are imported.
###Markdown
2.1 Importing the covid19 dataset: loading "Covid19_dataset.csv" from the "./datasets" folder.
###Code
# Import dataset
covid19_data = pd.read_csv("C:/Users/Arvind/Desktop/Covid19_Data_Analysis/datasets/covid19_dataset.csv")
# Print the first 5 rows of dataframe
covid19_data.head()
###Output
_____no_output_____
###Markdown
Let's check the shape of the dataframe
###Code
covid19_data.shape
###Output
_____no_output_____
###Markdown
2.2 Delete the useless columns
###Code
covid19_data.drop(["Lat","Long"], axis = 1, inplace = True)
covid19_data.head()
###Output
_____no_output_____
###Markdown
2.3 Aggregating the rows by the country
###Code
covid19_data_aggregated = covid19_data.groupby("Country/Region").sum()
covid19_data_aggregated.head()
covid19_data_aggregated.shape
###Output
_____no_output_____
###Markdown
2.4 Visualizing data related to a country, for example China. Visualization always helps us understand our data better.
###Code
covid19_data_aggregated.loc["China"].plot()
covid19_data_aggregated.loc["Egypt"].plot()
covid19_data_aggregated.loc["Italy"].plot()
plt.legend()
###Output
_____no_output_____
###Markdown
3. Calculating a good measure. We need to find a good measure, represented as a number, that describes the spread of the virus in a country.
###Code
covid19_data_aggregated.loc['China'][:3].plot()
###Output
_____no_output_____
###Markdown
3.1 Calculating the first derivative of the curve
###Code
# Calculating the rate of curve by finding the 1st derivative
# This plot shows the change in infection rate day by day
covid19_data_aggregated.loc['China'].diff().plot()
###Output
_____no_output_____
###Markdown
3.2 Finding the maximum infection rate for China
###Code
# In one day, a maximum of 15136 new cases was recorded
covid19_data_aggregated.loc['China'].diff().max()
covid19_data_aggregated.loc['Italy'].diff().max()
covid19_data_aggregated.loc['Egypt'].diff().max()
###Output
_____no_output_____
###Markdown
3.3 Finding maximum infection rate for all of the countries
###Code
# List of all countries
countries = list(covid19_data_aggregated.index)
# Calculate max infection rate for each country
max_infection_rates = []
for country in countries:
max_infection_rates.append(covid19_data_aggregated.loc[country].diff().max())
# Add max infection rate column to dataframe
covid19_data_aggregated["Maximum infection rate"] = max_infection_rates
covid19_data_aggregated.head()
###Output
_____no_output_____
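###Markdown
The same column can also be computed without an explicit Python loop — a minimal sketch using pandas' vectorized `diff`/`max` along the date axis, which should reproduce the loop result above:
###Code
# Vectorized alternative to the loop above (sketch).
# Drop the derived column first so it does not feed back into the diff.
date_cols = covid19_data_aggregated.drop(columns=["Maximum infection rate"])
max_rates_vectorized = date_cols.diff(axis=1).max(axis=1)
max_rates_vectorized.head()
###Output
_____no_output_____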
###Markdown
3.4 Create a new dataframe with only the needed column
###Code
covid19_data_processed = pd.DataFrame(covid19_data_aggregated["Maximum infection rate"])
covid19_data_processed.head()
###Output
_____no_output_____
###Markdown
Task 4: - importing the WorldHappinessReport.csv dataset - selecting the columns needed for our analysis - joining the datasets - calculating the correlations as the result of our analysis 4.1 Importing the dataset
###Code
happiness_report_data = pd.read_csv("C:/Users/Arvind/Desktop/Covid19_Data_Analysis/datasets/worldwide_happiness_report.csv")
happiness_report_data.head()
###Output
_____no_output_____
###Markdown
4.2 Delete the useless columns
###Code
drop = ["Score","Overall rank","Generosity", "Perceptions of corruption"]
happiness_report_data.drop(drop, axis = 1, inplace = True)
happiness_report_data.head()
###Output
_____no_output_____
###Markdown
4.3 Changing the indices of the dataframe
###Code
happiness_report_data.set_index("Country or region", inplace = True)
happiness_report_data.head()
###Output
_____no_output_____
###Markdown
4.4 Joining both datasets. Corona dataset:
###Code
covid19_data_processed.head()
covid19_data_processed.shape
###Output
_____no_output_____
###Markdown
World happiness report Dataset :
###Code
happiness_report_data.head()
happiness_report_data.shape
# Use inner join to join the two datasets
# Because their rows aren't the same
data = covid19_data_processed.join(happiness_report_data, how = "inner")
data.head()
###Output
_____no_output_____
###Markdown
4.5 Correlation Matrix
###Code
data.corr()
###Output
_____no_output_____
###Markdown
5. Visualization of the results. Our analysis is not finished unless we visualize the results in terms of figures and graphs so that everyone can understand what we get out of our analysis.
###Code
data.head()
###Output
_____no_output_____
###Markdown
5.1 Plotting GDP vs Maximum Infection Rate
###Code
x = data["GDP per capita"]
y = data["Maximum infection rate"]
y_scaled = np.log(y)
sns.scatterplot(x, y_scaled)
sns.regplot(x,y_scaled)
# The analysis shows that people living in developed countries are more prone to getting infected with coronavirus compared to those in less developed countries
###Output
C:\Users\Arvind\anaconda3\lib\site-packages\seaborn\_decorators.py:36: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
warnings.warn(
###Markdown
5.2 Plotting Social Support vs Maximum Infection Rate
###Code
x = data["Social support"]
y = data["Maximum infection rate"]
y_scaled = np.log(y)
sns.scatterplot(x, y_scaled)
sns.regplot(x,y_scaled)
###Output
C:\Users\Arvind\anaconda3\lib\site-packages\seaborn\_decorators.py:36: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
warnings.warn(
###Markdown
5.3 Plotting Healthy life Expectancy vs Maximum Infection Rate
###Code
x = data["Healthy life expectancy"]
y = data["Maximum infection rate"]
y_scaled = np.log(y)
sns.scatterplot(x, y_scaled)
sns.regplot(x,y_scaled)
###Output
C:\Users\Arvind\anaconda3\lib\site-packages\seaborn\_decorators.py:36: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
warnings.warn(
###Markdown
5.4 Plotting Freedom To Make Life Choices vs Maximum Infection Rate
###Code
x = data["Freedom to make life choices"]
y = data["Maximum infection rate"]
y_scaled = np.log(y)
sns.scatterplot(x, y_scaled)
sns.regplot(x,y_scaled)
###Output
C:\Users\Arvind\anaconda3\lib\site-packages\seaborn\_decorators.py:36: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
warnings.warn(
|
diffpriv/ecg_train_2conv_diffpriv_ep1.ipynb | ###Markdown
1D-CNN Model for ECG Classification - The model used has 2 Conv. layers and 2 FC layers. - This code repeatedly runs the training process and produces all the data that can be reported, such as the data needed for drawing the loss and accuracy graphs over epochs, and the maximum test accuracy for each run. Get permission for Google Drive access
###Code
root_path = '.'
###Output
_____no_output_____
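###Markdown
If the data actually lives on Google Drive (for example, when running this notebook in Colab), the drive can be mounted first — a minimal sketch; the Drive path below is a hypothetical example, and for local runs the `root_path = '.'` above is all that is needed.
###Code
# Optional: mount Google Drive when running in Colab (sketch; skip for local runs)
from google.colab import drive
drive.mount('/content/drive')
# root_path = '/content/drive/MyDrive/ecg_project'  # hypothetical Drive location
###Output
_____no_output_____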
###Markdown
File name settings
###Code
data_dir = 'mitdb'
train_name = 'train_ecg.hdf5'
test_name = 'test_ecg.hdf5'
all_name = 'all_ecg.hdf5'
model_dir = 'model'
model_name = 'conv2'
model_ext = '.pth'
csv_dir = 'csv'
csv_ext = '.csv'
csv_name = 'conv2'
csv_accs_name = 'accs_conv2'
###Output
_____no_output_____
###Markdown
Import required packages
###Code
import os
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
import numpy as np
import pandas as pd
import h5py
from tqdm import tqdm
import matplotlib.pyplot as plt
from diffprivlib.mechanisms import Laplace
###Output
_____no_output_____
###Markdown
GPU settings
###Code
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
if torch.cuda.is_available():
print(torch.cuda.get_device_name(0))
###Output
_____no_output_____
###Markdown
Define `ECG` `Dataset` class
###Code
class ECG(Dataset):
def __init__(self, mode='train'):
if mode == 'train':
with h5py.File(os.path.join(root_path, data_dir, train_name), 'r') as hdf:
self.x = hdf['x_train'][:]
self.y = hdf['y_train'][:]
elif mode == 'test':
with h5py.File(os.path.join(root_path, data_dir, test_name), 'r') as hdf:
self.x = hdf['x_test'][:]
self.y = hdf['y_test'][:]
elif mode == 'all':
with h5py.File(os.path.join(root_path, data_dir, all_name), 'r') as hdf:
self.x = hdf['x'][:]
self.y = hdf['y'][:]
else:
raise ValueError('Argument of mode should be train, test, or all.')
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
return torch.tensor(self.x[idx], dtype=torch.float), torch.tensor(self.y[idx])
###Output
_____no_output_____
###Markdown
Make Batch Generator. Batch size: you can change it if you want.
###Code
batch_size = 32
###Output
_____no_output_____
###Markdown
`DataLoader` for batch generation: `torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False)`
###Code
train_dataset = ECG(mode='train')
test_dataset = ECG(mode='test')
train_loader = DataLoader(train_dataset, batch_size=batch_size)
test_loader = DataLoader(test_dataset, batch_size=batch_size)
###Output
_____no_output_____
###Markdown
Size check for single batch
###Code
x_train, y_train = next(iter(train_loader))
print(x_train.size())
print(y_train.size())
###Output
torch.Size([32, 1, 128])
torch.Size([32])
###Markdown
Number of total batches
###Code
total_batch = len(train_loader)
print(total_batch)
###Output
414
###Markdown
Pytorch layer modules for **Conv1D** Network `Conv1d` layer- `torch.nn.Conv1d(in_channels, out_channels, kernel_size)` `MaxPool1d` layer- `torch.nn.MaxPool1d(kernel_size, stride=None)`- Parameter `stride` follows `kernel_size`. `ReLU` layer- `torch.nn.ReLU()` `Linear` layer- `torch.nn.Linear(in_features, out_features, bias=True)` `Softmax` layer- `torch.nn.Softmax(dim=None)`- Parameter `dim` is usually set to `1`. Training process settings
###Code
run = 1
epoch = 400
lr = 0.001
epsilon = 1
min_diff = 1e-5
###Output
_____no_output_____
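###Markdown
Before constructing the model, a quick shape check of the `Conv1d`/`MaxPool1d` modules listed above — a minimal sketch on random data shaped like one batch from the loader:
###Code
# Sketch: how the convolution and pooling layers transform a (batch, channel, length) tensor
import torch
import torch.nn as nn

x = torch.randn(32, 1, 128)               # same shape as one batch of ECG segments
x = nn.Conv1d(1, 16, 7, padding=3)(x)     # -> (32, 16, 128): padding=3 preserves the length
x = nn.MaxPool1d(2)(x)                    # -> (32, 16, 64): pooling halves the length
print(x.shape)
###Output
_____no_output_____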
###Markdown
Construct 1D CNN ECG classification model
###Code
class ECGConv1D(nn.Module):
def __init__(self):
super(ECGConv1D, self).__init__()
self.conv1 = nn.Conv1d(1, 16, 7, padding=3) # 128 x 16
self.relu1 = nn.LeakyReLU()
self.pool1 = nn.MaxPool1d(2) # 64 x 16
self.conv2 = nn.Conv1d(16, 16, 5, padding=2) # 64 x 16
self.relu2 = nn.LeakyReLU()
self.pool2 = nn.MaxPool1d(2) # 32 x 16
self.linear3 = nn.Linear(32 * 16, 128)
self.relu3 = nn.LeakyReLU()
self.linear4 = nn.Linear(128, 5)
self.softmax4 = nn.Softmax(dim=1)
def forward(self, x):
x = self.conv1(x)
x = self.relu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.relu2(x)
x = self.pool2(x)
# differential privacy
x_data = x.data.cpu().numpy()
bound = np.dstack((np.max(x_data, axis=0), np.min(x_data, axis=0)))
for i in range(bound.shape[0]):
for j in range(bound.shape[1]):
interval = bound[i][j]
if interval[0] - interval[1] < min_diff:
interval[0] += min_diff * 0.5
interval[1] -= min_diff * 0.5
for j in range(x_data.shape[1]):
for k in range(x_data.shape[2]):
dp = Laplace()
dp = dp.set_epsilon(epsilon)
dp = dp.set_sensitivity(bound[j][k][0] - bound[j][k][1])
for i in range(x_data.shape[0]):
x_data[i][j][k] = dp.randomise(x_data[i][j][k])
x.data = torch.tensor(x_data).requires_grad_(True).to(device)
x = x.view(-1, 32 * 16)
x = self.linear3(x)
x = self.relu3(x)
x = self.linear4(x)
x = self.softmax4(x)
return x
###Output
_____no_output_____
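###Markdown
For reference, the Laplace mechanism used in the forward pass above draws noise with scale $b = \text{sensitivity}/\epsilon$, where the sensitivity is taken as the observed max-min range of each feature across the batch. With $\epsilon = 1$, a feature whose range is 2.0 therefore receives noise of scale 2.0 (an illustrative calculation; smaller $\epsilon$ means proportionally larger noise).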
###Markdown
Training function
###Code
def train(nrun, model):
criterion = nn.CrossEntropyLoss()
optimizer = Adam(model.parameters(), lr=lr)
train_losses = list()
train_accs = list()
test_losses = list()
test_accs = list()
best_test_acc = 0 # best test accuracy
for e in range(epoch):
print("Epoch {} - ".format(e+1), end='')
# train
train_loss = 0.0
correct, total = 0, 0
for _, batch in enumerate(train_loader):
x, label = batch # get feature and label from a batch
x, label = x.to(device), label.to(device) # send to device
optimizer.zero_grad() # init all grads to zero
output = model(x) # forward propagation
loss = criterion(output, label) # calculate loss
loss.backward() # backward propagation
optimizer.step() # weight update
train_loss += loss.item()
correct += torch.sum(output.argmax(dim=1) == label).item()
total += len(label)
train_losses.append(train_loss / len(train_loader))
train_accs.append(correct / total)
print("loss: {:.4f}, acc: {:.2f}%".format(train_losses[-1], train_accs[-1]*100), end=' / ')
# test
with torch.no_grad():
test_loss = 0.0
correct, total = 0, 0
for _, batch in enumerate(test_loader):
x, label = batch
x, label = x.to(device), label.to(device)
output = model(x)
loss = criterion(output, label)
test_loss += loss.item()
correct += torch.sum(output.argmax(dim=1) == label).item()
total += len(label)
test_losses.append(test_loss / len(test_loader))
test_accs.append(correct / total)
print("test_loss: {:.4f}, test_acc: {:.2f}%".format(test_losses[-1], test_accs[-1]*100))
# save the model with the best test accuracy
if test_accs[-1] > best_test_acc:
best_test_acc = test_accs[-1]
torch.save(model.state_dict(), os.path.join(root_path, model_dir, '_'.join([model_name, str(nrun), 'best']) + model_ext))
# save model for each 10 epochs
if (e + 1) % 10 == 0:
torch.save(model.state_dict(), os.path.join(root_path, model_dir, '_'.join([model_name, str(nrun), str(e+1)]) + model_ext))
return train_losses, train_accs, test_losses, test_accs
###Output
_____no_output_____
###Markdown
Training process. Repeat the training `run` times
###Code
best_test_accs = list()
for i in range(run):
print('Run', i+1)
seed = 0
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
ecgnet = ECGConv1D() # init new model
train_losses, train_accs, test_losses, test_accs = train(i, ecgnet.to(device)) # train
best_test_accs.append(max(test_accs)) # get best test accuracy
best_test_acc_epoch = np.array(test_accs).argmax() + 1
print('Best test accuracy {:.2f}% in epoch {}.'.format(best_test_accs[-1]*100, best_test_acc_epoch))
print('-' * 100)
df = pd.DataFrame({ # save model training process into csv file
'loss': train_losses,
'test_loss': test_losses,
'acc': train_accs,
'test_acc': test_accs
})
df.to_csv(os.path.join(root_path, csv_dir, '_'.join([csv_name, str(i+1)]) + csv_ext))
df = pd.DataFrame({'best_test_acc': best_test_accs}) # save best test accuracy of each run
df.to_csv(os.path.join(root_path, csv_dir, csv_accs_name + csv_ext))
###Output
Run 1
Epoch 1 - loss: 1.4663, acc: 41.82% / test_loss: 1.4075, test_acc: 48.89%
Epoch 2 - loss: 1.4083, acc: 48.94% / test_loss: 1.4236, test_acc: 47.40%
Epoch 3 - loss: 1.4148, acc: 48.49% / test_loss: 1.4119, test_acc: 48.74%
Epoch 4 - loss: 1.4099, acc: 49.09% / test_loss: 1.4112, test_acc: 49.08%
Epoch 5 - loss: 1.4264, acc: 47.51% / test_loss: 1.4097, test_acc: 49.23%
Epoch 6 - loss: 1.4201, acc: 48.23% / test_loss: 1.3970, test_acc: 50.59%
Epoch 7 - loss: 1.4198, acc: 48.34% / test_loss: 1.4181, test_acc: 48.51%
Epoch 8 - loss: 1.4112, acc: 49.21% / test_loss: 1.4052, test_acc: 49.86%
Epoch 9 - loss: 1.4205, acc: 48.27% / test_loss: 1.4241, test_acc: 47.90%
Epoch 10 - loss: 1.4392, acc: 46.38% / test_loss: 1.4298, test_acc: 47.35%
Epoch 11 - loss: 1.4187, acc: 48.48% / test_loss: 1.4195, test_acc: 48.43%
Epoch 12 - loss: 1.4256, acc: 47.87% / test_loss: 1.4206, test_acc: 48.32%
Epoch 13 - loss: 1.4326, acc: 47.12% / test_loss: 1.4191, test_acc: 48.49%
Epoch 14 - loss: 1.4392, acc: 46.49% / test_loss: 1.4408, test_acc: 46.34%
Epoch 15 - loss: 1.4254, acc: 47.88% / test_loss: 1.4179, test_acc: 48.66%
Epoch 16 - loss: 1.4346, acc: 46.95% / test_loss: 1.4150, test_acc: 48.93%
Epoch 17 - loss: 1.4412, acc: 46.30% / test_loss: 1.4439, test_acc: 46.08%
Epoch 18 - loss: 1.4347, acc: 46.96% / test_loss: 1.4213, test_acc: 48.24%
Epoch 19 - loss: 1.5004, acc: 40.38% / test_loss: 1.5005, test_acc: 40.42%
Epoch 20 - loss: 1.4951, acc: 40.95% / test_loss: 1.4926, test_acc: 41.19%
Epoch 21 - loss: 1.4762, acc: 42.82% / test_loss: 1.4228, test_acc: 48.16%
Epoch 22 - loss: 1.4253, acc: 47.92% / test_loss: 1.4175, test_acc: 48.68%
Epoch 23 - loss: 1.4403, acc: 46.39% / test_loss: 1.4583, test_acc: 44.62%
Epoch 24 - loss: 1.4618, acc: 44.27% / test_loss: 1.4335, test_acc: 47.10%
Epoch 25 - loss: 1.4447, acc: 45.96% / test_loss: 1.4426, test_acc: 46.20%
Epoch 26 - loss: 1.4366, acc: 46.76% / test_loss: 1.4457, test_acc: 45.90%
Epoch 27 - loss: 1.4421, acc: 46.27% / test_loss: 1.4317, test_acc: 47.31%
Epoch 28 - loss: 1.4369, acc: 46.76% / test_loss: 1.4507, test_acc: 45.40%
Epoch 29 - loss: 1.4499, acc: 45.49% / test_loss: 1.4669, test_acc: 43.79%
Epoch 30 - loss: 1.4393, acc: 46.53% / test_loss: 1.4391, test_acc: 46.56%
Epoch 31 - loss: 1.4401, acc: 46.45% / test_loss: 1.5109, test_acc: 39.38%
Epoch 32 - loss: 1.4392, acc: 46.54% / test_loss: 1.4213, test_acc: 48.34%
Epoch 33 - loss: 1.4291, acc: 47.57% / test_loss: 1.4206, test_acc: 48.43%
Epoch 34 - loss: 1.4256, acc: 47.90% / test_loss: 1.4509, test_acc: 45.38%
Epoch 35 - loss: 1.4724, acc: 43.24% / test_loss: 1.4679, test_acc: 43.68%
Epoch 36 - loss: 1.4575, acc: 44.72% / test_loss: 1.4257, test_acc: 47.89%
Epoch 37 - loss: 1.4328, acc: 47.19% / test_loss: 1.4390, test_acc: 46.58%
Epoch 38 - loss: 1.4621, acc: 44.26% / test_loss: 1.4392, test_acc: 46.56%
Epoch 39 - loss: 1.4574, acc: 44.72% / test_loss: 1.4392, test_acc: 46.55%
Epoch 40 - loss: 1.4324, acc: 47.23% / test_loss: 1.4259, test_acc: 47.89%
Epoch 41 - loss: 1.4415, acc: 46.32% / test_loss: 1.4448, test_acc: 45.99%
Epoch 42 - loss: 1.4550, acc: 44.97% / test_loss: 1.5006, test_acc: 40.42%
Epoch 43 - loss: 1.4562, acc: 44.85% / test_loss: 1.4398, test_acc: 46.50%
Epoch 44 - loss: 1.4478, acc: 45.70% / test_loss: 1.4604, test_acc: 44.43%
Epoch 45 - loss: 1.4502, acc: 45.44% / test_loss: 1.4233, test_acc: 48.15%
Epoch 46 - loss: 1.4385, acc: 46.62% / test_loss: 1.4236, test_acc: 48.10%
Epoch 47 - loss: 1.4728, acc: 43.20% / test_loss: 1.5071, test_acc: 39.76%
Epoch 48 - loss: 1.4839, acc: 42.08% / test_loss: 1.4713, test_acc: 43.34%
Epoch 49 - loss: 1.4549, acc: 44.99% / test_loss: 1.4318, test_acc: 47.30%
Epoch 50 - loss: 1.4510, acc: 45.36% / test_loss: 1.4439, test_acc: 46.09%
Epoch 51 - loss: 1.4444, acc: 46.05% / test_loss: 1.4511, test_acc: 45.36%
Epoch 52 - loss: 1.4548, acc: 45.01% / test_loss: 1.4606, test_acc: 44.39%
Epoch 53 - loss: 1.4993, acc: 40.54% / test_loss: 1.4886, test_acc: 41.62%
Epoch 54 - loss: 1.4899, acc: 41.47% / test_loss: 1.4530, test_acc: 45.18%
Epoch 55 - loss: 1.4711, acc: 43.37% / test_loss: 1.4875, test_acc: 41.73%
Epoch 56 - loss: 1.5009, acc: 40.38% / test_loss: 1.4991, test_acc: 40.57%
Epoch 57 - loss: 1.4698, acc: 43.50% / test_loss: 1.4629, test_acc: 44.20%
Epoch 58 - loss: 1.4564, acc: 44.84% / test_loss: 1.4491, test_acc: 45.56%
Epoch 59 - loss: 1.4664, acc: 43.83% / test_loss: 1.4640, test_acc: 44.08%
Epoch 60 - loss: 1.4804, acc: 42.43% / test_loss: 1.4427, test_acc: 46.21%
Epoch 61 - loss: 1.4432, acc: 46.16% / test_loss: 1.4360, test_acc: 46.89%
Epoch 62 - loss: 1.4432, acc: 46.15% / test_loss: 1.4475, test_acc: 45.72%
Epoch 63 - loss: 1.4553, acc: 44.94% / test_loss: 1.4311, test_acc: 47.37%
Epoch 64 - loss: 1.4765, acc: 42.84% / test_loss: 1.5340, test_acc: 37.07%
Epoch 65 - loss: 1.5220, acc: 38.27% / test_loss: 1.5364, test_acc: 36.85%
Epoch 66 - loss: 1.5083, acc: 39.65% / test_loss: 1.4818, test_acc: 42.30%
Epoch 67 - loss: 1.4713, acc: 43.35% / test_loss: 1.4557, test_acc: 44.90%
Epoch 68 - loss: 1.4717, acc: 43.31% / test_loss: 1.5169, test_acc: 38.78%
Epoch 69 - loss: 1.4508, acc: 45.39% / test_loss: 1.4661, test_acc: 43.87%
Epoch 70 - loss: 1.4570, acc: 44.79% / test_loss: 1.4899, test_acc: 41.49%
Epoch 71 - loss: 1.5056, acc: 39.92% / test_loss: 1.4760, test_acc: 42.88%
Epoch 72 - loss: 1.4845, acc: 42.02% / test_loss: 1.4980, test_acc: 40.66%
Epoch 73 - loss: 1.4933, acc: 41.16% / test_loss: 1.4753, test_acc: 42.94%
Epoch 74 - loss: 1.4635, acc: 44.13% / test_loss: 1.4480, test_acc: 45.69%
Epoch 75 - loss: 1.4697, acc: 43.51% / test_loss: 1.5182, test_acc: 38.66%
Epoch 76 - loss: 1.4957, acc: 40.90% / test_loss: 1.4569, test_acc: 44.80%
Epoch 77 - loss: 1.4703, acc: 43.45% / test_loss: 1.4761, test_acc: 42.87%
Epoch 78 - loss: 1.4802, acc: 42.46% / test_loss: 1.4668, test_acc: 43.79%
Epoch 79 - loss: 1.4784, acc: 42.65% / test_loss: 1.4441, test_acc: 46.06%
Epoch 80 - loss: 1.4821, acc: 42.27% / test_loss: 1.4941, test_acc: 41.08%
Epoch 81 - loss: 1.4816, acc: 42.33% / test_loss: 1.4740, test_acc: 43.08%
Epoch 82 - loss: 1.4815, acc: 42.34% / test_loss: 1.4530, test_acc: 45.19%
Epoch 83 - loss: 1.4868, acc: 41.80% / test_loss: 1.5220, test_acc: 38.29%
Epoch 84 - loss: 1.5052, acc: 39.95% / test_loss: 1.5028, test_acc: 40.20%
Epoch 85 - loss: 1.5130, acc: 39.18% / test_loss: 1.5336, test_acc: 37.12%
Epoch 86 - loss: 1.5421, acc: 36.27% / test_loss: 1.5320, test_acc: 37.29%
Epoch 87 - loss: 1.5050, acc: 39.97% / test_loss: 1.4766, test_acc: 42.82%
Epoch 88 - loss: 1.4879, acc: 41.69% / test_loss: 1.4640, test_acc: 44.07%
Epoch 89 - loss: 1.4923, acc: 41.25% / test_loss: 1.4696, test_acc: 43.52%
Epoch 90 - loss: 1.4562, acc: 44.86% / test_loss: 1.4675, test_acc: 43.73%
Epoch 91 - loss: 1.4597, acc: 44.51% / test_loss: 1.4563, test_acc: 44.85%
Epoch 92 - loss: 1.4591, acc: 44.57% / test_loss: 1.4489, test_acc: 45.59%
Epoch 93 - loss: 1.4484, acc: 45.63% / test_loss: 1.4412, test_acc: 46.36%
Epoch 94 - loss: 1.4750, acc: 42.97% / test_loss: 1.5308, test_acc: 37.40%
Epoch 95 - loss: 1.4618, acc: 44.30% / test_loss: 1.4547, test_acc: 45.01%
Epoch 96 - loss: 1.4429, acc: 46.18% / test_loss: 1.4297, test_acc: 47.51%
Epoch 97 - loss: 1.4297, acc: 47.52% / test_loss: 1.4335, test_acc: 47.13%
Epoch 98 - loss: 1.5115, acc: 39.34% / test_loss: 1.5295, test_acc: 37.53%
Epoch 99 - loss: 1.4649, acc: 43.99% / test_loss: 1.4487, test_acc: 45.61%
Epoch 100 - loss: 1.4705, acc: 43.43% / test_loss: 1.5228, test_acc: 38.20%
Epoch 101 - loss: 1.4798, acc: 42.49% / test_loss: 1.4455, test_acc: 45.93%
Epoch 102 - loss: 1.4494, acc: 45.54% / test_loss: 1.4505, test_acc: 45.44%
Epoch 103 - loss: 1.4918, acc: 41.30% / test_loss: 1.5221, test_acc: 38.26%
Epoch 104 - loss: 1.4993, acc: 40.56% / test_loss: 1.4966, test_acc: 40.82%
Epoch 105 - loss: 1.4857, acc: 41.91% / test_loss: 1.4506, test_acc: 45.42%
Epoch 106 - loss: 1.4371, acc: 46.76% / test_loss: 1.4292, test_acc: 47.56%
Epoch 107 - loss: 1.4427, acc: 46.22% / test_loss: 1.4409, test_acc: 46.39%
Epoch 108 - loss: 1.4598, acc: 44.51% / test_loss: 1.4591, test_acc: 44.57%
Epoch 109 - loss: 1.4805, acc: 42.43% / test_loss: 1.4811, test_acc: 42.38%
###Markdown
Print the best test accuracy of each run
###Code
for i, a in enumerate(best_test_accs):
print('Run {}: {:.2f}%'.format(i+1, a*100))
fig, ax = plt.subplots(1, 2, figsize=(16, 4))
ax[0].plot(train_losses)
ax[0].plot(test_losses)
ax[0].set_xticks([0, 50, 100, 150, 200, 250, 300, 350, 400])
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('Loss')
ax[0].grid()
ax[0].legend(['train', 'test'], loc='upper right')
ax[1].plot(np.asarray(train_accs) * 100)
ax[1].plot(np.asarray(test_accs) * 100)
ax[1].set_xticks([0, 50, 100, 150, 200, 250, 300, 350, 400])
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('Accuracy (%)')
ax[1].set_ylim(top=100)
ax[1].grid()
ax[1].legend(['train', 'test'], loc='upper left')
fig.savefig('conv2_split.pdf', bbox_inches='tight')
###Output
_____no_output_____ |
udemy-ds-bc/TensorFlow_FILES/ANNs/04-Keras-Project-Exercise-Solutions.ipynb | ###Markdown
Copyright by Pierian Data Inc. Created by Jose Marcial Portilla. Keras API Project Exercise - Solutions The Data. We will be using a subset of the LendingClub DataSet obtained from Kaggle: https://www.kaggle.com/wordsforthewise/lending-club NOTE: Do not download the full zip from the link! We provide a special version of this file that has some extra feature engineering for you to do. You won't be able to follow along with the original file! LendingClub is a US peer-to-peer lending company, headquartered in San Francisco, California. It was the first peer-to-peer lender to register its offerings as securities with the Securities and Exchange Commission (SEC), and to offer loan trading on a secondary market. LendingClub is the world's largest peer-to-peer lending platform. Our Goal: Given historical data on loans given out with information on whether or not the borrower defaulted (charge-off), can we build a model that can predict whether or not a borrower will pay back their loan? This way, in the future when we get a new potential customer, we can assess whether or not they are likely to pay back the loan. Keep in mind classification metrics when evaluating the performance of your model! The "loan_status" column contains our label. Data Overview --------- There are many LendingClub data sets on Kaggle. Here is the information on this particular data set: LoanStatNew Description 0 loan_amnt The listed amount of the loan applied for by the borrower. If at some point in time, the credit department reduces the loan amount, then it will be reflected in this value. 1 term The number of payments on the loan. Values are in months and can be either 36 or 60. 2 int_rate Interest Rate on the loan 3 installment The monthly payment owed by the borrower if the loan originates. 4 grade LC assigned loan grade 5 sub_grade LC assigned loan subgrade 6 emp_title The job title supplied by the Borrower when applying for the loan.* 7 emp_length Employment length in years. Possible values are between 0 and 10 where 0 means less than one year and 10 means ten or more years. 8 home_ownership The home ownership status provided by the borrower during registration or obtained from the credit report. Our values are: RENT, OWN, MORTGAGE, OTHER 9 annual_inc The self-reported annual income provided by the borrower during registration. 10 verification_status Indicates if income was verified by LC, not verified, or if the income source was verified 11 issue_d The month in which the loan was funded 12 loan_status Current status of the loan 13 purpose A category provided by the borrower for the loan request. 14 title The loan title provided by the borrower 15 zip_code The first 3 numbers of the zip code provided by the borrower in the loan application. 16 addr_state The state provided by the borrower in the loan application 17 dti A ratio calculated using the borrower's total monthly debt payments on the total debt obligations, excluding mortgage and the requested LC loan, divided by the borrower's self-reported monthly income. 18 earliest_cr_line The month the borrower's earliest reported credit line was opened 19 open_acc The number of open credit lines in the borrower's credit file. 20 pub_rec Number of derogatory public records 21 revol_bal Total credit revolving balance 22 revol_util Revolving line utilization rate, or the amount of credit the borrower is using relative to all available revolving credit. 
23 total_acc The total number of credit lines currently in the borrower's credit file 24 initial_list_status The initial listing status of the loan. Possible values are – W, F 25 application_type Indicates whether the loan is an individual application or a joint application with two co-borrowers 26 mort_acc Number of mortgage accounts. 27 pub_rec_bankruptcies Number of public record bankruptcies ------- Starter Code Note: We also provide feature information on the data as a .csv file for easy lookup throughout the notebook:
###Code
import tensorflow as tf
print(tf.__version__)
import pandas as pd
data_info = pd.read_csv('../DATA/lending_club_info.csv',index_col='LoanStatNew')
print(data_info.loc['revol_util']['Description'])
def feat_info(col_name):
print(data_info.loc[col_name]['Description'])
feat_info('mort_acc')
###Output
Number of mortgage accounts.
###Markdown
Loading the data and other imports
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# might be needed depending on your version of Jupyter
%matplotlib inline
df = pd.read_csv('../DATA/lending_club_loan_two.csv')
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 396030 entries, 0 to 396029
Data columns (total 27 columns):
loan_amnt 396030 non-null float64
term 396030 non-null object
int_rate 396030 non-null float64
installment 396030 non-null float64
grade 396030 non-null object
sub_grade 396030 non-null object
emp_title 373103 non-null object
emp_length 377729 non-null object
home_ownership 396030 non-null object
annual_inc 396030 non-null float64
verification_status 396030 non-null object
issue_d 396030 non-null object
loan_status 396030 non-null object
purpose 396030 non-null object
title 394275 non-null object
dti 396030 non-null float64
earliest_cr_line 396030 non-null object
open_acc 396030 non-null float64
pub_rec 396030 non-null float64
revol_bal 396030 non-null float64
revol_util 395754 non-null float64
total_acc 396030 non-null float64
initial_list_status 396030 non-null object
application_type 396030 non-null object
mort_acc 358235 non-null float64
pub_rec_bankruptcies 395495 non-null float64
address 396030 non-null object
dtypes: float64(12), object(15)
memory usage: 81.6+ MB
###Markdown
Project Tasks**Complete the tasks below! Keep in mind there is usually more than one way to complete each task! Enjoy**----------- Section 1: Exploratory Data Analysis**OVERALL GOAL: Get an understanding of which variables are important, view summary statistics, and visualize the data**---- **TASK: Since we will be attempting to predict loan_status, create a countplot as shown below.**
###Code
# CODE HERE
sns.countplot(x='loan_status',data=df)
###Output
_____no_output_____
###Markdown
**TASK: Create a histogram of the loan_amnt column.**
###Code
# CODE HERE
plt.figure(figsize=(12,4))
sns.distplot(df['loan_amnt'],kde=False,bins=40)
plt.xlim(0,45000)
###Output
_____no_output_____
###Markdown
**TASK: Let's explore correlation between the continuous feature variables. Calculate the correlation between all continuous numeric variables using .corr() method.**
###Code
# CODE HERE
df.corr()
###Output
_____no_output_____
###Markdown
**TASK: Visualize this using a heatmap. Depending on your version of matplotlib, you may need to manually adjust the heatmap.*** [Heatmap info](https://seaborn.pydata.org/generated/seaborn.heatmap.htmlseaborn.heatmap)* [Help with resizing](https://stackoverflow.com/questions/56942670/matplotlib-seaborn-first-and-last-row-cut-in-half-of-heatmap-plot)
###Code
# CODE HERE
plt.figure(figsize=(12,7))
sns.heatmap(df.corr(),annot=True,cmap='viridis')
plt.ylim(10, 0)
###Output
_____no_output_____
###Markdown
**TASK: You should have noticed almost perfect correlation with the "installment" feature. Explore this feature further. Print out their descriptions and perform a scatterplot between them. Does this relationship make sense to you? Do you think there is duplicate information here?**
###Code
# CODE HERE
feat_info('installment')
feat_info('loan_amnt')
sns.scatterplot(x='installment',y='loan_amnt',data=df,)
###Output
_____no_output_____
###Markdown
**TASK: Create a boxplot showing the relationship between the loan_status and the Loan Amount.**
###Code
# CODE HERE
sns.boxplot(x='loan_status',y='loan_amnt',data=df)
###Output
_____no_output_____
###Markdown
**TASK: Calculate the summary statistics for the loan amount, grouped by the loan_status.**
###Code
# CODE HERE
df.groupby('loan_status')['loan_amnt'].describe()
###Output
_____no_output_____
###Markdown
**TASK: Let's explore the Grade and SubGrade columns that LendingClub attributes to the loans. What are the unique possible grades and subgrades?**
###Code
# CODE HERE
sorted(df['grade'].unique())
sorted(df['sub_grade'].unique())
###Output
_____no_output_____
###Markdown
**TASK: Create a countplot per grade. Set the hue to the loan_status label.**
###Code
sns.countplot(x='grade',data=df,hue='loan_status')
###Output
_____no_output_____
###Markdown
**TASK: Display a count plot per subgrade. You may need to resize for this plot and reorder the x axis. Feel free to edit the color palette. Explore both all loans made per subgrade as well being separated based on the loan_status**
###Code
#CODE HERE
plt.figure(figsize=(12,4))
subgrade_order = sorted(df['sub_grade'].unique())
sns.countplot(x='sub_grade',data=df,order = subgrade_order,palette='coolwarm' )
# CODE HERE
plt.figure(figsize=(12,4))
subgrade_order = sorted(df['sub_grade'].unique())
sns.countplot(x='sub_grade',data=df,order = subgrade_order,palette='coolwarm' ,hue='loan_status')
###Output
_____no_output_____
###Markdown
**TASK: It looks like F and G subgrades don't get paid back that often. Isolate those and recreate the countplot just for those subgrades.**
###Code
# CODE HERE
f_and_g = df[(df['grade']=='G') | (df['grade']=='F')]
plt.figure(figsize=(12,4))
subgrade_order = sorted(f_and_g['sub_grade'].unique())
sns.countplot(x='sub_grade',data=f_and_g,order = subgrade_order,hue='loan_status')
###Output
_____no_output_____
###Markdown
**TASK: Create a new column called 'loan_repaid' which will contain a 1 if the loan status was "Fully Paid" and a 0 if it was "Charged Off".**
###Code
# CODE HERE
df['loan_status'].unique()
df['loan_repaid'] = df['loan_status'].map({'Fully Paid':1,'Charged Off':0})
df[['loan_repaid','loan_status']]
###Output
_____no_output_____
###Markdown
**CHALLENGE TASK: (Note this is hard, but can be done in one line!) Create a bar plot showing the correlation of the numeric features to the new loan_repaid column. [Helpful Link](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.bar.html)**
###Code
#CODE HERE
df.corr()['loan_repaid'].sort_values().drop('loan_repaid').plot(kind='bar')
###Output
_____no_output_____
###Markdown
------ Section 2: Data PreProcessing**Section Goals: Remove or fill any missing data. Remove unnecessary or repetitive features. Convert categorical string features to dummy variables.**
###Code
df.head()
###Output
_____no_output_____
###Markdown
Missing Data**Let's explore these missing data columns. We use a variety of factors to decide whether or not they would be useful, to see if we should keep, discard, or fill in the missing data.**
###Code
# CODE HERE
len(df)
###Output
_____no_output_____
###Markdown
**TASK: Create a Series that displays the total count of missing values per column.**
###Code
# CODE HERE
df.isnull().sum()
###Output
_____no_output_____
###Markdown
**TASK: Convert this Series to be in term of percentage of the total DataFrame**
###Code
# CODE HERE
100* df.isnull().sum()/len(df)
###Output
_____no_output_____
###Markdown
**TASK: Let's examine emp_title and emp_length to see whether it will be okay to drop them. Print out their feature information using the feat_info() function from the top of this notebook.**
###Code
# CODE HERE
feat_info('emp_title')
print('\n')
feat_info('emp_length')
###Output
The job title supplied by the Borrower when applying for the loan.*
Employment length in years. Possible values are between 0 and 10 where 0 means less than one year and 10 means ten or more years.
###Markdown
**TASK: How many unique employment job titles are there?**
###Code
# CODE HERE
df['emp_title'].nunique()
df['emp_title'].value_counts()
###Output
_____no_output_____
###Markdown
**TASK: Realistically there are too many unique job titles to try to convert this to a dummy variable feature. Let's remove that emp_title column.**
###Code
# CODE HERE
df = df.drop('emp_title',axis=1)
###Output
_____no_output_____
###Markdown
**TASK: Create a count plot of the emp_length feature column. Challenge: Sort the order of the values.**
###Code
# CODE HERE
sorted(df['emp_length'].dropna().unique())
emp_length_order = [ '< 1 year',
'1 year',
'2 years',
'3 years',
'4 years',
'5 years',
'6 years',
'7 years',
'8 years',
'9 years',
'10+ years']
plt.figure(figsize=(12,4))
sns.countplot(x='emp_length',data=df,order=emp_length_order)
###Output
_____no_output_____
###Markdown
**TASK: Plot out the countplot with a hue separating Fully Paid vs Charged Off**
###Code
# CODE HERE
plt.figure(figsize=(12,4))
sns.countplot(x='emp_length',data=df,order=emp_length_order,hue='loan_status')
###Output
_____no_output_____
###Markdown
**CHALLENGE TASK: This still doesn't really inform us if there is a strong relationship between employment length and being charged off, what we want is the percentage of charge offs per category. Essentially informing us what percent of people per employment category didn't pay back their loan. There are a multitude of ways to create this Series. Once you've created it, see if visualize it with a [bar plot](https://pandas.pydata.org/pandas-docs/version/0.23.4/generated/pandas.DataFrame.plot.html). This may be tricky, refer to solutions if you get stuck on creating this Series.**
###Code
# CODE HERE
emp_co = df[df['loan_status']=="Charged Off"].groupby("emp_length").count()['loan_status']
emp_fp = df[df['loan_status']=="Fully Paid"].groupby("emp_length").count()['loan_status']
emp_len = emp_co/emp_fp
emp_len
emp_len.plot(kind='bar')
###Output
_____no_output_____
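###Markdown
Note that `emp_len` above is the ratio of charged-off to fully-paid loans in each category; if you want the literal percentage of loans per employment length that charged off, a small variation works — a sketch reusing `emp_co` from the cell above:
###Code
# Percentage of all loans per employment length that were charged off (sketch)
emp_total = df.groupby("emp_length").count()['loan_status']
charge_off_pct = 100 * emp_co / emp_total
charge_off_pct.plot(kind='bar')
###Output
_____no_output_____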
###Markdown
**TASK: Charge off rates are extremely similar across all employment lengths. Go ahead and drop the emp_length column.**
###Code
# CODE HERE
df = df.drop('emp_length',axis=1)
###Output
_____no_output_____
###Markdown
**TASK: Revisit the DataFrame to see what feature columns still have missing data.**
###Code
df.isnull().sum()
###Output
_____no_output_____
###Markdown
**TASK: Review the title column vs the purpose column. Is this repeated information?**
###Code
# CODE HERE
df['purpose'].head(10)
df['title'].head(10)
###Output
_____no_output_____
###Markdown
**TASK: The title column is simply a string subcategory/description of the purpose column. Go ahead and drop the title column.**
###Code
# CODE HERE
df = df.drop('title',axis=1)
###Output
_____no_output_____
###Markdown
---**NOTE: This is one of the hardest parts of the project! Refer to the solutions video if you need guidance, feel free to fill or drop the missing values of the mort_acc however you see fit! Here we're going with a very specific approach.**---**TASK: Find out what the mort_acc feature represents**
###Code
# CODE HERE
feat_info('mort_acc')
###Output
Number of mortgage accounts.
###Markdown
**TASK: Create a value_counts of the mort_acc column.**
###Code
# CODE HERE
df['mort_acc'].value_counts()
###Output
_____no_output_____
###Markdown
**TASK: There are many ways we could deal with this missing data. We could attempt to build a simple model to fill it in, such as a linear model, we could just fill it in based on the mean of the other columns, or we could even bin the columns into categories and then set NaN as its own category. There is no 100% correct approach! Let's review the other columns to see which most highly correlates with mort_acc**
###Code
print("Correlation with the mort_acc column")
df.corr()['mort_acc'].sort_values()
###Output
Correlation with the mort_acc column
###Markdown
**TASK: Looks like the total_acc feature correlates with mort_acc; this makes sense! Let's try this fillna() approach. We will group the dataframe by total_acc and calculate the mean value for mort_acc per total_acc entry, to get the result below:**
###Code
print("Mean of mort_acc column per total_acc")
df.groupby('total_acc').mean()['mort_acc']
###Output
Mean of mort_acc column per total_acc
###Markdown
**CHALLENGE TASK: Let's fill in the missing mort_acc values based on their total_acc value. If the mort_acc is missing, then we will fill in that missing value with the mean value corresponding to its total_acc value from the Series we created above. This involves using an .apply() method with two columns. Check out the link below for more info, or review the solutions video/notebook.**[Helpful Link](https://stackoverflow.com/questions/13331698/how-to-apply-a-function-to-two-columns-of-pandas-dataframe)
###Code
# CODE HERE
total_acc_avg = df.groupby('total_acc').mean()['mort_acc']
total_acc_avg[2.0]
def fill_mort_acc(total_acc,mort_acc):
'''
Accepts the total_acc and mort_acc values for the row.
Checks if the mort_acc is NaN , if so, it returns the avg mort_acc value
for the corresponding total_acc value for that row.
total_acc_avg here should be a Series or dictionary containing the mapping of the
groupby averages of mort_acc per total_acc values.
'''
if np.isnan(mort_acc):
return total_acc_avg[total_acc]
else:
return mort_acc
df['mort_acc'] = df.apply(lambda x: fill_mort_acc(x['total_acc'], x['mort_acc']), axis=1)
df.isnull().sum()
###Output
_____no_output_____
###Markdown
**TASK: revol_util and the pub_rec_bankruptcies have missing data points, but they account for less than 0.5% of the total data. Go ahead and remove the rows that are missing those values in those columns with dropna().**
###Code
# CODE HERE
df = df.dropna()
df.isnull().sum()
###Output
_____no_output_____
###Markdown
Categorical Variables and Dummy Variables**We're done working with the missing data! Now we just need to deal with the string values in the categorical columns.****TASK: List all the columns that are currently non-numeric. [Helpful Link](https://stackoverflow.com/questions/22470690/get-list-of-pandas-dataframe-columns-based-on-data-type)**[Another very useful method call](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.select_dtypes.html)
###Code
# CODE HERE
df.select_dtypes(['object']).columns
###Output
_____no_output_____
###Markdown
---**Let's now go through all the string features to see what we should do with them.**--- term feature**TASK: Convert the term feature into either a 36 or 60 integer numeric data type using .apply() or .map().**
###Code
# CODE HERE
df['term'].value_counts()
# Or just use .map()
df['term'] = df['term'].apply(lambda term: int(term[:3]))
###Output
_____no_output_____
###Markdown
grade feature**TASK: We already know grade is part of sub_grade, so just drop the grade feature.**
###Code
# CODE HERE
df = df.drop('grade',axis=1)
###Output
_____no_output_____
###Markdown
**TASK: Convert the subgrade into dummy variables. Then concatenate these new columns to the original dataframe. Remember to drop the original subgrade column and to add drop_first=True to your get_dummies call.**
###Code
# CODE HERE
subgrade_dummies = pd.get_dummies(df['sub_grade'],drop_first=True)
df = pd.concat([df.drop('sub_grade',axis=1),subgrade_dummies],axis=1)
df.columns
df.select_dtypes(['object']).columns
###Output
_____no_output_____
###Markdown
verification_status, application_type,initial_list_status,purpose **TASK: Convert these columns: ['verification_status', 'application_type','initial_list_status','purpose'] into dummy variables and concatenate them with the original dataframe. Remember to set drop_first=True and to drop the original columns.**
###Code
# CODE HERE
dummies = pd.get_dummies(df[['verification_status', 'application_type','initial_list_status','purpose' ]],drop_first=True)
df = df.drop(['verification_status', 'application_type','initial_list_status','purpose'],axis=1)
df = pd.concat([df,dummies],axis=1)
###Output
_____no_output_____
###Markdown
home_ownership**TASK:Review the value_counts for the home_ownership column.**
###Code
#CODE HERE
df['home_ownership'].value_counts()
###Output
_____no_output_____
###Markdown
**TASK: Convert these to dummy variables, but [replace](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.replace.html) NONE and ANY with OTHER, so that we end up with just 4 categories, MORTGAGE, RENT, OWN, OTHER. Then concatenate them with the original dataframe. Remember to set drop_first=True and to drop the original columns.**
###Code
#CODE HERE
df['home_ownership']=df['home_ownership'].replace(['NONE', 'ANY'], 'OTHER')
dummies = pd.get_dummies(df['home_ownership'],drop_first=True)
df = df.drop('home_ownership',axis=1)
df = pd.concat([df,dummies],axis=1)
###Output
_____no_output_____
###Markdown
address**TASK: Let's feature engineer a zip code column from the address in the data set. Create a column called 'zip_code' that extracts the zip code from the address column.**
###Code
#CODE HERE
df['zip_code'] = df['address'].apply(lambda address:address[-5:])
###Output
_____no_output_____
###Markdown
**TASK: Now make this zip_code column into dummy variables using pandas. Concatenate the result and drop the original zip_code column along with dropping the address column.**
###Code
dummies = pd.get_dummies(df['zip_code'],drop_first=True)
df = df.drop(['zip_code','address'],axis=1)
df = pd.concat([df,dummies],axis=1)
###Output
_____no_output_____
###Markdown
issue_d **TASK: This would be data leakage, we wouldn't know beforehand whether or not a loan would be issued when using our model, so in theory we wouldn't have an issue_date, drop this feature.**
###Code
#CODE HERE
df = df.drop('issue_d',axis=1)
###Output
_____no_output_____
###Markdown
earliest_cr_line**TASK: This appears to be a historical time stamp feature. Extract the year from this feature using a .apply function, then convert it to a numeric feature. Set this new data to a feature column called 'earliest_cr_year'.Then drop the earliest_cr_line feature.**
###Code
#CODE HERE
df['earliest_cr_year'] = df['earliest_cr_line'].apply(lambda date:int(date[-4:]))
df = df.drop('earliest_cr_line',axis=1)
df.select_dtypes(['object']).columns
###Output
_____no_output_____
###Markdown
Train Test Split **TASK: Import train_test_split from sklearn.**
###Code
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
**TASK: drop the loan_status column we created earlier, since it's a duplicate of the loan_repaid column. We'll use the loan_repaid column since it's already in 0s and 1s.**
###Code
# CODE HERE
df = df.drop('loan_status',axis=1)
###Output
_____no_output_____
###Markdown
**TASK: Set X and y variables to the .values of the features and label.**
###Code
#CODE HERE
X = df.drop('loan_repaid',axis=1).values
y = df['loan_repaid'].values
###Output
_____no_output_____
###Markdown
-------- OPTIONAL Grabbing a Sample for Training Time OPTIONAL: Use .sample() to grab a sample of the 490k+ entries to save time on training. Highly recommended for lower RAM computers or if you are not using GPU.--------
###Code
# df = df.sample(frac=0.1,random_state=101)
print(len(df))
###Output
395219
###Markdown
**TASK: Perform a train/test split with test_size=0.2 and a random_state of 101.**
###Code
#CODE HERE
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=101)
###Output
_____no_output_____
###Markdown
Normalizing the Data**TASK: Use a MinMaxScaler to normalize the feature data X_train and X_test. Recall we don't want data leakage from the test set so we only fit on the X_train data.**
###Code
# CODE HERE
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
###Output
_____no_output_____
###Markdown
Creating the Model**TASK: Run the cell below to import the necessary Keras functions.**
###Code
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation,Dropout
from tensorflow.keras.constraints import max_norm
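# Note: max_norm is only needed if you choose to add a weight constraint to a layer,
# e.g. Dense(39, activation='relu', kernel_constraint=max_norm(3)); it is optional and not used below.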
###Output
_____no_output_____
###Markdown
**TASK: Build a sequential model that will be trained on the data. You have unlimited options here, but here is what the solution uses: a model that goes 78 --> 39 --> 19 --> 1 output neuron. OPTIONAL: Explore adding [Dropout layers](https://keras.io/layers/core/) [1](https://en.wikipedia.org/wiki/Dropout_(neural_networks)) [2](https://towardsdatascience.com/machine-learning-part-20-dropout-keras-layers-explained-8c9f6dc4c9ab)**
###Code
# CODE HERE
# Choose whatever number of layers/neurons you want.
# https://stats.stackexchange.com/questions/181/how-to-choose-the-number-of-hidden-layers-and-nodes-in-a-feedforward-neural-netw
# Remember to compile()
model = Sequential()
# input layer
model.add(Dense(78, activation='relu'))
model.add(Dropout(0.2))
# hidden layer
model.add(Dense(39, activation='relu'))
model.add(Dropout(0.2))
# hidden layer
model.add(Dense(19, activation='relu'))
model.add(Dropout(0.2))
# output layer
model.add(Dense(units=1,activation='sigmoid'))
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adam')
###Output
_____no_output_____
###Markdown
**TASK: Fit the model to the training data for at least 25 epochs. Also add in the validation data for later plotting. Optional: add in a batch_size of 256.**
###Code
# CODE HERE
model.fit(x=X_train,
y=y_train,
epochs=25,
batch_size=256,
validation_data=(X_test, y_test),
)
###Output
Train on 316175 samples, validate on 79044 samples
Epoch 1/25
316175/316175 [==============================] - 4s 13us/sample - loss: 0.2959 - val_loss: 0.2652
Epoch 2/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2652 - val_loss: 0.2643
Epoch 3/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2628 - val_loss: 0.2626
Epoch 4/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2613 - val_loss: 0.2621
Epoch 5/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2609 - val_loss: 0.2621
Epoch 6/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2603 - val_loss: 0.2618
Epoch 7/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2600 - val_loss: 0.2616
Epoch 8/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2595 - val_loss: 0.2616
Epoch 9/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2593 - val_loss: 0.2620
Epoch 10/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2589 - val_loss: 0.2609
Epoch 11/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2588 - val_loss: 0.2613
Epoch 12/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2584 - val_loss: 0.2607
Epoch 13/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2581 - val_loss: 0.2613
Epoch 14/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2580 - val_loss: 0.2605
Epoch 15/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2580 - val_loss: 0.2607
Epoch 16/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2574 - val_loss: 0.2609
Epoch 17/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2575 - val_loss: 0.2606
Epoch 18/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2573 - val_loss: 0.2614
Epoch 19/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2572 - val_loss: 0.2611
Epoch 20/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2567 - val_loss: 0.2606
Epoch 21/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2569 - val_loss: 0.2606
Epoch 22/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2565 - val_loss: 0.2608
Epoch 23/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2564 - val_loss: 0.2612
Epoch 24/25
316175/316175 [==============================] - 3s 10us/sample - loss: 0.2561 - val_loss: 0.2609
Epoch 25/25
316175/316175 [==============================] - 3s 11us/sample - loss: 0.2560 - val_loss: 0.2612
###Markdown
**TASK: OPTIONAL: Save your model.**
###Code
# CODE HERE
from tensorflow.keras.models import load_model
model.save('full_data_project_model.h5')
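# To reload the saved model later (sketch; path taken from the save call above):
# loaded_model = load_model('full_data_project_model.h5')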
###Output
_____no_output_____
###Markdown
Section 3: Evaluating Model Performance.**TASK: Plot out the validation loss versus the training loss.**
###Code
# CODE HERE
losses = pd.DataFrame(model.history.history)
losses[['loss','val_loss']].plot()
###Output
_____no_output_____
###Markdown
**TASK: Create predictions from the X_test set and display a classification report and confusion matrix for the X_test set.**
###Code
# CODE HERE
from sklearn.metrics import classification_report,confusion_matrix
predictions = model.predict_classes(X_test)
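# Note: predict_classes() was removed in newer TensorFlow releases; with this sigmoid output an
# equivalent would be: predictions = (model.predict(X_test) > 0.5).astype("int32")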
print(classification_report(y_test,predictions))
confusion_matrix(y_test,predictions)
###Output
_____no_output_____
###Markdown
**TASK: Given the customer below, would you offer this person a loan?**
###Code
import random
random.seed(101)
random_ind = random.randint(0,len(df))
new_customer = df.drop('loan_repaid',axis=1).iloc[random_ind]
new_customer
# CODE HERE
model.predict_classes(new_customer.values.reshape(1,78))
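# Note: the network was trained on MinMax-scaled features, so arguably the input should be
# transformed with the fitted scaler first, e.g.:
# model.predict_classes(scaler.transform(new_customer.values.reshape(1, 78)))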
###Output
_____no_output_____
###Markdown
**TASK: Now check, did this person actually end up paying back their loan?**
###Code
# CODE HERE
df.iloc[random_ind]['loan_repaid']
###Output
_____no_output_____ |
PDR_ResNet152V2_Testing.ipynb | ###Markdown
Plant Disease Recognition using ResNet152V2 on a modified version of the PlantVillage Dataset. Importing necessary libraries
###Code
import tensorflow as tf
print(tf.__version__)
from tensorflow.keras.layers import Input, Dense, Flatten
from tensorflow.keras.applications.resnet_v2 import ResNet152V2 as PretrainedModel, preprocess_input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import BatchNormalization
from glob import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys, os
###Output
_____no_output_____
###Markdown
Downloading and unzipping the modified dataset available on Google Drive. If you don't have the gdown module, install it using pip.
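For example, in a Colab-style notebook:

```python
!pip install gdown
```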
###Code
#capture command here suppresses the large output
%%capture
!gdown --id 1Mj6wsKBZN2ycAyyIMs2lI361deuCJqBI --output pv0.zip
!unzip pv0.zip
###Output
_____no_output_____
###Markdown
Check if the folder has been unzipped.
###Code
!ls
###Output
_____no_output_____
###Markdown
Setting up paths for the Keras data generators
###Code
train_path = '/content/pv0/train'
valid_path = '/content/pv0/test'
# useful for getting number of files
image_files = glob(train_path + '/*/*.JPG')
valid_image_files = glob(valid_path + '/*/*.JPG')
# useful for getting number of classes
folders = glob(train_path + '/*')
len(folders)
###Output
_____no_output_____
###Markdown
Specify input image size.
###Code
IMAGE_SIZE = [256, 256]
#sneak peek at a random image
plt.imshow(image.load_img(np.random.choice(image_files)))
plt.show()
###Output
_____no_output_____
###Markdown
Configuring the pretrained model as per our needs.
###Code
ptm = PretrainedModel(
input_shape=IMAGE_SIZE + [3],
weights='imagenet',
include_top=False)
# freeze pretrained model weights
ptm.trainable = False
K = len(folders) # number of classes
#model definition
x = Flatten()(ptm.output)
x= BatchNormalization()(x)
x= Dense(512,activation='relu')(x)
x = Dense(K, activation='softmax')(x)
# create a model object
model = Model(inputs=ptm.input, outputs=x)
# view the structure of the model
model.summary()
#view the number of layers in the model
len(model.layers)
# create an instance of ImageDataGenerator
#Keras generators return one-hot encoded labels and provide data augmentation.
gen_train = ImageDataGenerator(
rotation_range=90,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.1,
zoom_range=0.2,
horizontal_flip=True,
preprocessing_function=preprocess_input
)
gen_test = ImageDataGenerator(
preprocessing_function=preprocess_input
)
#batch size is the number of examples that are run through the model at once.
batch_size = 300
# create generators
train_generator = gen_train.flow_from_directory(
train_path,
shuffle=True,
target_size=IMAGE_SIZE,
batch_size=batch_size,
)
valid_generator = gen_test.flow_from_directory(
valid_path,
target_size=IMAGE_SIZE,
batch_size=batch_size,
)
###Output
_____no_output_____
###Markdown
Since Keras no longer provides some metrics out of the box, we define those metrics ourselves. Here, we define F1 score, precision and recall.
###Code
from keras import backend as Ke
def recall_m(y_true, y_pred):
true_positives = Ke.sum(Ke.round(Ke.clip(y_true * y_pred, 0, 1)))
possible_positives = Ke.sum(Ke.round(Ke.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + Ke.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = Ke.sum(Ke.round(Ke.clip(y_true * y_pred, 0, 1)))
predicted_positives = Ke.sum(Ke.round(Ke.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + Ke.epsilon())
return precision
def f1_m(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+Ke.epsilon()))
###Output
_____no_output_____
###Markdown
This block creates a learning-rate scheduler. Since the scheduler was not as effective as using Adam directly, it is left commented out for experimentation.
###Code
# from keras.optimizers import SGD
# import math
# def step_decay(epoch):
# initial_lrate = 1e-4
# drop = 0.5
# epochs_drop = 10.0
# lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))
# return lrate
# sgd = SGD(lr=0.0, momentum=0.9)
# # learning schedule callback
# from keras.callbacks import LearningRateScheduler
# lrate = LearningRateScheduler(step_decay)
# callbacks_list = [lrate]
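# To experiment with the scheduler (a sketch, not run here), compile with the sgd optimizer
# defined above and pass the callback list to fit, e.g.:
# model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
# model.fit(train_generator, validation_data=valid_generator, epochs=5, callbacks=callbacks_list)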
###Output
_____no_output_____
###Markdown
Compiling our model with loss, optimizer and metrics (including our custom defined ones).
###Code
model.compile(
loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy',f1_m,precision_m, recall_m]
)
###Output
_____no_output_____
###Markdown
The fit function is called to start training.
###Code
# fit the model
r = model.fit(
train_generator,
validation_data=valid_generator,
epochs=5,
steps_per_epoch=int(np.ceil(len(image_files) / batch_size)),
validation_steps=int(np.ceil(len(valid_image_files) / batch_size)),
)
###Output
_____no_output_____
###Markdown
Saving our model in HDF5 format.
###Code
model.save("model.h5")
print("Saved model to disk")
###Output
_____no_output_____
###Markdown
Graphs for our metrics
###Code
# loss
plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='val loss')
plt.legend()
plt.show()
# accuracies
plt.plot(r.history['accuracy'], label='train acc')
plt.plot(r.history['val_accuracy'], label='val acc')
plt.legend()
plt.show()
# f1_score
plt.plot(r.history['f1_m'], label='train f1_m')
plt.plot(r.history['val_f1_m'], label='val f1_m')
plt.legend()
plt.show()
# precision
plt.plot(r.history['precision_m'], label='train precision_m')
plt.plot(r.history['val_precision_m'], label='val precision_m')
plt.legend()
plt.show()
# recall
plt.plot(r.history['recall_m'], label='train recall_m')
plt.plot(r.history['val_recall_m'], label='val recall_m')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Next we evaluate the model on our test set again.
###Code
#Load saved model from training
from keras.models import load_model
# pass the custom metric functions via custom_objects so load_model can deserialize the compiled model
amod = load_model('/content/model.h5', custom_objects={'f1_m': f1_m, 'precision_m': precision_m, 'recall_m': recall_m})
# evaluate the model
valid_generator = gen_test.flow_from_directory(valid_path,target_size=IMAGE_SIZE,batch_size=batch_size,)
loss, accuracy, f1_score, precision, recall = amod.evaluate(valid_generator, steps=int(np.ceil(len(valid_image_files)/ batch_size)))
###Output
_____no_output_____
###Markdown
Printing our metrics
###Code
print('loss : ',loss)
print('accuracy : ',accuracy)
print('f1_score :',f1_score)
print('precision:',precision)
print('recall :',recall)
###Output
_____no_output_____ |
T/Linear+Model+Wine+Data.ipynb | ###Markdown
OLS - Ordinary Least Squares
###Code
lm1.params
print(lm1.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: quality R-squared: 0.366
Model: OLS Adj. R-squared: 0.361
Method: Least Squares F-statistic: 75.01
Date: Fri, 18 Jan 2019 Prob (F-statistic): 5.96e-133
Time: 07:23:16 Log-Likelihood: -1400.0
No. Observations: 1439 AIC: 2824.
Df Residuals: 1427 BIC: 2887.
Df Model: 11
Covariance Type: nonrobust
========================================================================================
coef std err t P>|t| [0.025 0.975]
----------------------------------------------------------------------------------------
Intercept 26.3869 22.052 1.197 0.232 -16.871 69.644
fixed_acidity 0.0296 0.028 1.070 0.285 -0.025 0.084
volatile_acidity -1.0464 0.125 -8.341 0.000 -1.292 -0.800
citric_acid -0.1220 0.156 -0.783 0.434 -0.428 0.184
residual_sugar 0.0171 0.016 1.053 0.292 -0.015 0.049
chlorides -1.7726 0.430 -4.124 0.000 -2.616 -0.929
free_sulfur_dioxide 0.0038 0.002 1.628 0.104 -0.001 0.008
total_sulfur_dioxide -0.0035 0.001 -4.550 0.000 -0.005 -0.002
density -22.7863 22.516 -1.012 0.312 -66.954 21.382
pH -0.2531 0.199 -1.272 0.204 -0.643 0.137
sulphates 0.8855 0.116 7.612 0.000 0.657 1.114
alcohol 0.2668 0.027 9.720 0.000 0.213 0.321
==============================================================================
Omnibus: 25.784 Durbin-Watson: 1.763
Prob(Omnibus): 0.000 Jarque-Bera (JB): 39.208
Skew: -0.168 Prob(JB): 3.06e-09
Kurtosis: 3.735 Cond. No. 1.14e+05
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The condition number is large, 1.14e+05. This might indicate that there are
strong multicollinearity or other numerical problems.
|
1_Goniometro/Python/Dra.MarianaV2.ipynb | ###Markdown
Requirements

```python
import time                      # default Python library
import serial                    # pip install pyserial
import matplotlib.pyplot as plt  # pip install matplotlib
import numpy as np               # pip install numpy
import xlrd                      # pip install xlrd
import xlsxwriter                # pip install xlsxwriter
import cv2                       # pip install opencv-python
```

How to retrieve data from any microcontroller?

DataAdquisition is compatible with any microcontroller with UART capabilities. To send information, format it as follows:

```c
"[sending label]: value,value,value,value"
```

Example:

```c
s:182.00,184.00,184.00,188.00,187.00,179.00
```

The default sending label is s (for send).
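A minimal sketch of how a line in this format could be parsed on the Python side (the helper `parse_uart_line` is illustrative only and is not part of `DataAdquisition`):

```python
def parse_uart_line(line: str, label: str = "s"):
    """Parse a line like 's:182.00,184.00,184.00' into a list of floats."""
    prefix = label + ":"
    if not line.startswith(prefix):
        return None  # not a data line for this label
    return [float(v) for v in line[len(prefix):].split(",") if v.strip()]

# parse_uart_line("s:182.00,184.00,184.00,188.00,187.00,179.00")
# -> [182.0, 184.0, 184.0, 188.0, 187.0, 179.0]
```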
###Code
from DataAdquisition import bcolors,DataAdquisition,UserInputValidator
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
import json
def MainLoopProtocol():
keys = ["sensor_0","sensor_1","sensor_2","sensor_3","sensor_4","sensor_5"] # List containig number of sensors
Data1,Data2,Data3 = [" ", " ", " "] # Intialize empty values to prevent error
Microcontrolador = DataAdquisition(com="COM12",baudrate=38400,timeout=1) # instantiate Communication with protocol
### First Adquisition ###
message = bcolors.OKGREEN + "Instruction: Balance the weight on both legs Duration: 20s"+ bcolors.ENDC
Forward = UserInputValidator(message) # Wait for user input
if Forward:
OPENED = Microcontrolador.initiateSerialCommunication() # initate Communication with protocol
Data1 = Microcontrolador.Dynamicprotocol(duration=20,message="Adquiring Data",keys=keys)
Microcontrolador.CloseSerialCommunication()
print(" ")
### Second Adquisition ###
message= bcolors.OKGREEN + "Instrucion: Stand on the prosthesis Duration: 20s" + bcolors.ENDC
Forward = UserInputValidator(message) # Wait for user input
if Forward:
OPENED = Microcontrolador.initiateSerialCommunication() # initate Communication with protocol
Data2 = Microcontrolador.Dynamicprotocol(duration=20,message="Adquiring Data",keys=keys)
Microcontrolador.CloseSerialCommunication()
print(" ")
### Third Adquisition ###
message = bcolors.OKGREEN + "Instrucion: Balance the weight on both legs Duration: 20s" + bcolors.ENDC
Forward = UserInputValidator(message) # Wait for user input
if Forward:
OPENED = Microcontrolador.initiateSerialCommunication() # initate Communication with protocol
Data3 = Microcontrolador.Dynamicprotocol(duration=20,message="Adquiring Data",keys=keys)
Microcontrolador.CloseSerialCommunication()
print(" ")
return Data1,Data2,Data3
if __name__ == "__main__":
Data1,Data2,Data3 = MainLoopProtocol()
plt.plot(Data1["sensor_1"])
plt.plot(Data1["sensor_1"])
plt.plot(Data2["sensor_1"])
Data1.keys()
plt.plot(Data2["time"],Data2["sensor_1"])
len(Data2["sensor_1"])
1996/30
def saveDictionary(data: dict,filename:str = "dfnone") -> None:
### Handle Filename ###
if filename == "dfnone":
filename = "report_" + (datetime.now()).strftime("%H_%M_%S") + ".json"
elif filename != "dfnone":
if ".json" in filename:
pass
else:
filename = filename + ".json"
with open(filename, "w") as handler:
json.dump(data,handler)
print(f"Data has been saved as {filename}")
def OpenDictionary(filename:str) -> dict:
with open(filename, "r") as handler:
data = json.load(handler)
return data
a_file = open("data.json", "r")
output = a_file.read()
data = OpenDictionary("report_22_21_45.json")
###Output
_____no_output_____ |
notebooks/validation_sun.ipynb | ###Markdown
Reproduce [Chaplin 2010](https://ui.adsabs.harvard.edu/abs/2010ApJ...713L.169C/abstract) Figure 1
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import kplr
import numpy as np
import sys
sys.path.insert(0, '../')
from astropy.io import fits
# data = fits.getdata('ftp://ftp.pmodwrc.ch/pub/data/irradiance/virgo/'
# '1-minute_Data/VIRGO_1min_0083-7404.fits', cache=False)
data = fits.getdata('../data/VIRGO_1min_0083-7404.fits.gz', cache=False)
from shocksgo import interpolate_missing_data
import numpy as np
time = np.arange(len(data))
#flux = data[data != 99]
times, fluxes = interpolate_missing_data(time[data != -99], data[data != -99])
fluxes /= np.median(fluxes)
plt.plot(times, fluxes)
len(fluxes)//20
#from scipy.signal import periodogram
import os
from shocksgo import power_spectrum
freqs, powers = power_spectrum(fluxes[:len(fluxes)//10], d=60)
freqs *= 1e6
# periodogram_path = 'periodogram.npy'
# if os.path.exists(periodogram_path):
# freqs, powers = np.load(periodogram_path)
# else:
# freqs, powers = periodogram(fluxes, fs=1/60)
# freqs *= 1e6
# np.save(periodogram_path, np.vstack([freqs, powers]))
plt.loglog(freqs, powers, marker=',', lw=0)
plt.xlim([0.1, 1e4])
plt.ylim([1e-10, 1e-2])
from scipy.stats import binned_statistic
cutoff_freq = 1e5
bs = binned_statistic(np.log(freqs[freqs != 0]), powers[freqs != 0], statistic=np.nanmedian, bins=1000)
bincenters = 0.5 * (bs.bin_edges[:-1] + bs.bin_edges[1:])
binned_power = bs.statistic[np.exp(bincenters) < cutoff_freq]
binned_freq = np.exp(bincenters)[np.exp(bincenters) < cutoff_freq]
plt.loglog(freqs, powers, ',', alpha=0.5)
plt.loglog(binned_freq, binned_power)
plt.xlabel('Freq [$\mu$Hz]')
plt.ylabel('Power')
plt.xlim([1e2, 6e3])
plt.ylim([1e-10, 1e-4])
from scipy.ndimage import gaussian_filter1d
plt.semilogy(freqs, powers, ',', alpha=0.5)
# plt.semilogy(freqs[np.argsort(freqs)], gaussian_filter1d(powers[np.argsort(freqs)], 100))
plt.semilogy(binned_freq, binned_power)
plt.xlabel('Freq [$\mu$Hz]')
plt.ylabel('Power')
plt.xlim([2000, 4000])
plt.ylim([1e-10, 1e-4])
from shocksgo import generate_solar_fluxes
from astropy.constants import M_sun, L_sun
import astropy.units as u
times, fluxes, kernel = generate_solar_fluxes(duration=10*u.min)
psd = kernel.get_psd(2*np.pi*freqs*1e-6) /2/np.pi
fig, ax = plt.subplots(1, 2, figsize=(8, 2.5))
# ax[0].loglog(binned_freq, binned_power)
ax[0].loglog(freqs, powers, marker=',', lw=0, alpha=0.3, rasterized=True, color='k')
ax[0].loglog(freqs, psd, color='r')
ax[0].set_xlim([1e-2, 1e4])
ax[0].set_ylim([1e-10, 1e-2])
ax[0].set_xlabel('Frequency [$\mu$Hz]')
ax[0].set_ylabel('Power [(flux)$^2$/Hz]')
# ax[1].semilogy(binned_freq, binned_power)
ax[1].semilogy(freqs, powers, marker=',', lw=0, alpha=0.3, rasterized=True, color='k')
ax[1].semilogy(freqs, psd, color='r')
ax[1].set_xlim([2000, 4000])
ax[1].set_ylim([1e-10, 1e-6])
ax[1].set_xlabel('Frequency [$\mu$Hz]')
ax[1].set_ylabel('Power [(flux)$^2$/Hz]')
for s in ['right', 'top']:
for axis in ax:
axis.spines[s].set_visible(False)
fig.tight_layout()
fig.suptitle('Sun (SOHO/VIRGO SPM)', va='bottom')
fig.savefig('paper_plots/sun.pdf', bbox_inches='tight', dpi=300)
durations = []
for i in np.arange(2, 7):
duration = %timeit -o -r 1 generate_solar_fluxes(duration=10**i * u.min)
durations.append(duration)
number_points = 10**np.arange(2, 7)
duration_best = np.array([duration.best for duration in durations])
plt.figure(figsize=(3, 3))
plt.loglog(number_points, duration_best, color='k')
plt.xlabel('Sim. Duration [min]')
plt.ylabel('Runtime [s]')
plt.grid(ls=':')
for s in ['right', 'top']:
plt.gca().spines[s].set_visible(False)
plt.savefig('paper_plots/runtime.pdf', bbox_inches='tight')
###Output
_____no_output_____ |
project-brainwave/project-brainwave-transfer-learning.ipynb | ###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Model Development This example shows how to build, train, evaluate and deploy a model running on FPGA. Only Windows is supported. We use TensorFlow and Keras to build our model. We are going to use transfer learning, with ResNet152 as a featurizer. We don't use the last layer of ResNet152 in this case and instead add and train our own classification layer.We will use the Kaggle Cats and Dogs dataset to train the classifier. The dataset can be downloaded [here](https://www.microsoft.com/en-us/download/details.aspx?id=54765). Download the zip and extract to a directory named 'catsanddogs' under your user directory ("~/catsanddogs").Please set up your environment as described in the [quick start](project-brainwave-quickstart.ipynb).
###Code
import os
import tensorflow as tf
import numpy as np
###Output
_____no_output_____
###Markdown
Model ConstructionLoad the files we are going to use for training and testing. By default this notebook uses only a very small subset of the Cats and Dogs dataset. That makes it run quickly, but doesn't create a very accurate classifier. You can improve the classifier by using more of the dataset.
###Code
import glob
import imghdr
datadir = os.path.expanduser("~/catsanddogs")
cat_files = glob.glob(os.path.join(datadir, 'PetImages', 'Cat', '*.jpg'))
dog_files = glob.glob(os.path.join(datadir, 'PetImages', 'Dog', '*.jpg'))
# Limit the data set to make the notebook execute quickly.
cat_files = cat_files[:64]
dog_files = dog_files[:64]
# The data set has a few images that are not jpeg. Remove them.
cat_files = [f for f in cat_files if imghdr.what(f) == 'jpeg']
dog_files = [f for f in dog_files if imghdr.what(f) == 'jpeg']
if(not len(cat_files) or not len(dog_files)):
print("Please download the Kaggle Cats and Dogs dataset form https://www.microsoft.com/en-us/download/details.aspx?id=54765 and extract the zip to " + datadir)
raise ValueError("Data not found")
else:
print(cat_files[0])
print(dog_files[0])
# constructing a numpy array as labels
image_paths = cat_files + dog_files
total_files = len(cat_files) + len(dog_files)
labels = np.zeros(total_files)
labels[len(cat_files):] = 1
###Output
_____no_output_____
###Markdown
We need to preprocess the input file to get it into the form expected by ResNet152. We've provided a default implementation of the preprocessing that you can use.
###Code
# Input images as a two-dimensional tensor containing an arbitrary number of images represented as strings
import azureml.contrib.brainwave.models.utils as utils
in_images = tf.placeholder(tf.string)
image_tensors = utils.preprocess_array(in_images)
print(image_tensors.shape)
###Output
_____no_output_____
###Markdown
Alternatively, if you would like to customize the preprocessing, you can write your own preprocessor using TensorFlow operations. The input to the classifier we are training is the set of features produced by ResNet152. To train the classifier we need to featurize the images using ResNet152. You can also run the featurizer locally on CPU or GPU. We import the featurizer as frozen, so that we are only training the classifier.
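If you go the custom-preprocessing route mentioned above, a rough TF 1.x-style sketch could look like the one below. The 224x224 size and the absence of any mean subtraction are assumptions for illustration; match whatever input the quantized featurizer actually expects.

```python
def my_preprocess(jpeg_strings):
    """Decode a 1-D tensor of JPEG byte strings into a float32 image batch."""
    def _decode_one(img_bytes):
        img = tf.image.decode_jpeg(img_bytes, channels=3)
        img = tf.image.resize_images(img, [224, 224])  # returns float32
        return img
    return tf.map_fn(_decode_one, jpeg_strings, dtype=tf.float32)

# image_tensors = my_preprocess(in_images)
```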
###Code
from azureml.contrib.brainwave.models import QuantizedResnet152
model_path = os.path.expanduser('~/models')
bwmodel = QuantizedResnet152(model_path, is_frozen = True)
print(bwmodel.version)
###Output
_____no_output_____
###Markdown
Calling import_graph_def on the featurizer will create a service that runs the featurizer on FPGA.
###Code
features = bwmodel.import_graph_def(input_tensor=image_tensors)
###Output
_____no_output_____
###Markdown
Pre-compute featuresLoad the data set and compute the features. These can be precomputed because they don't change during training. This can take a while to run on CPU.
###Code
from tqdm import tqdm
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def read_files(files):
contents = []
for path in files:
with open(path, 'rb') as f:
contents.append(f.read())
return contents
feature_list = []
with tf.Session() as sess:
for chunk in tqdm(chunks(image_paths, 5)):
contents = read_files(chunk)
result = sess.run([features], feed_dict={in_images: contents})
feature_list.extend(result[0])
feature_results = np.array(feature_list)
print(feature_results.shape)
###Output
_____no_output_____
###Markdown
Add and Train the classifierWe use Keras to define and train a simple classifier.
###Code
from keras.models import Sequential
from keras.layers import Dropout, Dense, Flatten
from keras import optimizers
FC_SIZE = 1024
NUM_CLASSES = 2
model = Sequential()
model.add(Dropout(0.2, input_shape=(1, 1, 2048,)))
model.add(Dense(FC_SIZE, activation='relu', input_dim=(1, 1, 2048,)))
model.add(Flatten())
model.add(Dense(NUM_CLASSES, activation='sigmoid', input_dim=FC_SIZE))
model.compile(optimizer=optimizers.SGD(lr=1e-4,momentum=0.9), loss='binary_crossentropy', metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Prepare the train and test data.
###Code
from sklearn.model_selection import train_test_split
onehot_labels = np.array([[0,1] if i else [1,0] for i in labels])
X_train, X_test, y_train, y_test = train_test_split(feature_results, onehot_labels, random_state=42, shuffle=True)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
###Output
_____no_output_____
###Markdown
Train the classifier.
###Code
model.fit(X_train, y_train, epochs=16, batch_size=32)
###Output
_____no_output_____
###Markdown
Test the ClassifierLet's test the classifier and see how well it does. Since we only trained on a few images, we are not expecting to win a Kaggle competition, but it will likely get most of the images correct.
###Code
from numpy import argmax
y_probs = model.predict(X_test)
y_prob_max = np.argmax(y_probs, 1)
y_test_max = np.argmax(y_test, 1)
print(y_prob_max)
print(y_test_max)
from sklearn.metrics import confusion_matrix, roc_auc_score, accuracy_score, precision_score, recall_score, f1_score
import itertools
import matplotlib
from matplotlib import pyplot as plt
# compute a bunch of classification metrics
def classification_metrics(y_true, y_pred, y_prob):
cm_dict = {}
cm_dict['Accuracy'] = accuracy_score(y_true, y_pred)
cm_dict['Precision'] = precision_score(y_true, y_pred)
cm_dict['Recall'] = recall_score(y_true, y_pred)
cm_dict['F1'] = f1_score(y_true, y_pred)
cm_dict['AUC'] = roc_auc_score(y_true, y_prob[:,0])
cm_dict['Confusion Matrix'] = confusion_matrix(y_true, y_pred).tolist()
return cm_dict
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
"""Plots a confusion matrix.
Source: http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
New BSD License - see appendix
"""
cm_max = cm.max()
cm_min = cm.min()
if cm_min > 0: cm_min = 0
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
cm_max = 1
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
thresh = cm_max / 2.
plt.clim(cm_min, cm_max)
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i,
round(cm[i, j], 3), # round to 3 decimals if they are float
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
cm_dict = classification_metrics(y_test_max, y_prob_max, y_probs)
for m in cm_dict:
print(m, cm_dict[m])
cm = np.asarray(cm_dict['Confusion Matrix'])
plot_confusion_matrix(cm, ['fail','pass'], normalize=False)
###Output
_____no_output_____
###Markdown
Service DefinitionLike in the QuickStart notebook our service definition pipeline consists of three stages. Because the preprocessing and featurizing stage don't contain any variables, we can use a default session.Here we use the Keras classifier as the final stage.
###Code
from azureml.contrib.brainwave.pipeline import ModelDefinition, TensorflowStage, BrainWaveStage, KerasStage
model_def = ModelDefinition()
model_def.pipeline.append(TensorflowStage(tf.Session(), in_images, image_tensors))
model_def.pipeline.append(BrainWaveStage(tf.Session(), bwmodel))
model_def.pipeline.append(KerasStage(model))
model_def_path = os.path.join(datadir, 'save', 'model_def')
model_def.save(model_def_path)
print(model_def_path)
###Output
_____no_output_____
###Markdown
Deploy
###Code
from azureml.core.model import Model
from azureml.core import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
model_name = "catsanddogs-model"
service_name = "modelbuild-service"
registered_model = Model.register(ws, model_def_path, model_name)
###Output
_____no_output_____
###Markdown
The first time the code below runs it will create a new service running your model. If you want to change the model you can make changes above in this notebook and save a new service definition. Then this code will update the running service in place to run the new model.
###Code
from azureml.core.webservice import Webservice
from azureml.exceptions import WebserviceException
from azureml.contrib.brainwave import BrainwaveWebservice, BrainwaveImage
try:
    service = Webservice(ws, service_name)
except WebserviceException:
    image_config = BrainwaveImage.image_configuration()
    deployment_config = BrainwaveWebservice.deploy_configuration()
    service = Webservice.deploy_from_model(ws, service_name, [registered_model], image_config, deployment_config)
    service.wait_for_deployment(True)
###Output
_____no_output_____
###Markdown
The service is now running in Azure and ready to serve requests. We can check the address and port.
###Code
print(service.ipAddress + ':' + str(service.port))
###Output
_____no_output_____
###Markdown
ClientThere is a simple test client at amlrealtimeai.PredictionClient which can be used for testing. We'll use this client to score an image with our new service.
###Code
from azureml.contrib.brainwave.client import PredictionClient
client = PredictionClient(service.ipAddress, service.port)
###Output
_____no_output_____
###Markdown
You can adapt the client [code](../../pythonlib/amlrealtimeai/client.py) to meet your needs. There is also an example C# [client](../../sample-clients/csharp). The service provides an API that is compatible with TensorFlow Serving. There are instructions to download a sample client [here](https://www.tensorflow.org/serving/setup). RequestLet's see how our service does on a few images. It may get a few wrong.
###Code
# Specify an image to classify
print('CATS')
for image_file in cat_files[:8]:
results = client.score_image(image_file)
result = 'CORRECT ' if results[0] > results[1] else 'WRONG '
print(result + str(results))
print('DOGS')
for image_file in dog_files[:8]:
results = client.score_image(image_file)
result = 'CORRECT ' if results[1] > results[0] else 'WRONG '
print(result + str(results))
###Output
_____no_output_____
###Markdown
CleanupRun the cell below to delete your service. In the [next notebook](project-brainwave-custom-weights.ipynb) you will learn how to retrain all the weights of one of the models
###Code
service.delete()
registered_model.delete()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Model Development This example shows how to build, train, evaluate and deploy a model running on FPGA. Only Windows is supported. We use TensorFlow and Keras to build our model. We are going to use transfer learning, with ResNet152 as a featurizer. We don't use the last layer of ResNet152 in this case and instead add and train our own classification layer.We will use the Kaggle Cats and Dogs dataset to train the classifier. The dataset can be downloaded [here](https://www.microsoft.com/en-us/download/details.aspx?id=54765). Download the zip and extract to a directory named 'catsanddogs' under your user directory ("~/catsanddogs").Please set up your environment as described in the [quick start](project-brainwave-quickstart.ipynb).
###Code
import os
import tensorflow as tf
import numpy as np
###Output
_____no_output_____
###Markdown
Model ConstructionLoad the files we are going to use for training and testing. By default this notebook uses only a very small subset of the Cats and Dogs dataset. That makes it run quickly, but doesn't create a very accurate classifier. You can improve the classifier by using more of the dataset.
###Code
import glob
import imghdr
datadir = os.path.expanduser("~/catsanddogs")
cat_files = glob.glob(os.path.join(datadir, 'PetImages', 'Cat', '*.jpg'))
dog_files = glob.glob(os.path.join(datadir, 'PetImages', 'Dog', '*.jpg'))
# Limit the data set to make the notebook execute quickly.
cat_files = cat_files[:64]
dog_files = dog_files[:64]
# The data set has a few images that are not jpeg. Remove them.
cat_files = [f for f in cat_files if imghdr.what(f) == 'jpeg']
dog_files = [f for f in dog_files if imghdr.what(f) == 'jpeg']
if(not len(cat_files) or not len(dog_files)):
print("Please download the Kaggle Cats and Dogs dataset form https://www.microsoft.com/en-us/download/details.aspx?id=54765 and extract the zip to " + datadir)
raise ValueError("Data not found")
else:
print(cat_files[0])
print(dog_files[0])
# constructing a numpy array as labels
image_paths = cat_files + dog_files
total_files = len(cat_files) + len(dog_files)
labels = np.zeros(total_files)
labels[len(cat_files):] = 1
###Output
_____no_output_____
###Markdown
We need to preprocess the input file to get it into the form expected by ResNet152. We've provided a default implementation of the preprocessing that you can use.
###Code
# Input images as a two-dimensional tensor containing an arbitrary number of images represented a strings
import azureml.contrib.brainwave.models.utils as utils
in_images = tf.placeholder(tf.string)
image_tensors = utils.preprocess_array(in_images)
print(image_tensors.shape)
###Output
_____no_output_____
###Markdown
Alternatively, if you would like to customize the preprocessing, you can write your own preprocessor using TensorFlow operations.The input to the classifier we are training is the set of features produced by ResNet50. To train the classifier we need to featurize the images using ResNet50. You can also run the featurizer locally on CPU or GPU. We import the featurizer as frozen, so that we are only training the classifier.
###Code
from azureml.contrib.brainwave.models import QuantizedResnet152
model_path = os.path.expanduser('~/models')
bwmodel = QuantizedResnet152(model_path, is_frozen = True)
print(bwmodel.version)
###Output
_____no_output_____
###Markdown
Calling import_graph_def on the featurizer will create a service that runs the featurizer on FPGA.
###Code
features = bwmodel.import_graph_def(input_tensor=image_tensors)
###Output
_____no_output_____
###Markdown
Pre-compute featuresLoad the data set and compute the features. These can be precomputed because they don't change during training. This can take a while to run on CPU.
###Code
from tqdm import tqdm
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def read_files(files):
contents = []
for path in files:
with open(path, 'rb') as f:
contents.append(f.read())
return contents
feature_list = []
with tf.Session() as sess:
for chunk in tqdm(chunks(image_paths, 5)):
contents = read_files(chunk)
result = sess.run([features], feed_dict={in_images: contents})
feature_list.extend(result[0])
feature_results = np.array(feature_list)
print(feature_results.shape)
###Output
_____no_output_____
###Markdown
Add and Train the classifierWe use Keras to define and train a simple classifier.
###Code
from keras.models import Sequential
from keras.layers import Dropout, Dense, Flatten
from keras import optimizers
FC_SIZE = 1024
NUM_CLASSES = 2
model = Sequential()
model.add(Dropout(0.2, input_shape=(1, 1, 2048,)))
model.add(Dense(FC_SIZE, activation='relu', input_dim=(1, 1, 2048,)))
model.add(Flatten())
model.add(Dense(NUM_CLASSES, activation='sigmoid', input_dim=FC_SIZE))
model.compile(optimizer=optimizers.SGD(lr=1e-4,momentum=0.9), loss='binary_crossentropy', metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Prepare the train and test data.
###Code
from sklearn.model_selection import train_test_split
onehot_labels = np.array([[0,1] if i else [1,0] for i in labels])
X_train, X_test, y_train, y_test = train_test_split(feature_results, onehot_labels, random_state=42, shuffle=True)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
###Output
_____no_output_____
###Markdown
Train the classifier.
###Code
model.fit(X_train, y_train, epochs=16, batch_size=32)
###Output
_____no_output_____
###Markdown
Test the ClassifierLet's test the classifier and see how well it does. Since we only trained on a few images, we are not expecting to win a Kaggle competition, but it will likely get most of the images correct.
###Code
from numpy import argmax
y_probs = model.predict(X_test)
y_prob_max = np.argmax(y_probs, 1)
y_test_max = np.argmax(y_test, 1)
print(y_prob_max)
print(y_test_max)
from sklearn.metrics import confusion_matrix, roc_auc_score, accuracy_score, precision_score, recall_score, f1_score
import itertools
import matplotlib
from matplotlib import pyplot as plt
# compute a bunch of classification metrics
def classification_metrics(y_true, y_pred, y_prob):
cm_dict = {}
cm_dict['Accuracy'] = accuracy_score(y_true, y_pred)
cm_dict['Precision'] = precision_score(y_true, y_pred)
cm_dict['Recall'] = recall_score(y_true, y_pred)
cm_dict['F1'] = f1_score(y_true, y_pred)
cm_dict['AUC'] = roc_auc_score(y_true, y_prob[:,0])
cm_dict['Confusion Matrix'] = confusion_matrix(y_true, y_pred).tolist()
return cm_dict
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
"""Plots a confusion matrix.
Source: http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
New BSD License - see appendix
"""
cm_max = cm.max()
cm_min = cm.min()
if cm_min > 0: cm_min = 0
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
cm_max = 1
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
thresh = cm_max / 2.
plt.clim(cm_min, cm_max)
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i,
round(cm[i, j], 3), # round to 3 decimals if they are float
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
cm_dict = classification_metrics(y_test_max, y_prob_max, y_probs)
for m in cm_dict:
print(m, cm_dict[m])
cm = np.asarray(cm_dict['Confusion Matrix'])
plot_confusion_matrix(cm, ['fail','pass'], normalize=False)
###Output
_____no_output_____
###Markdown
Service DefinitionLike in the QuickStart notebook our service definition pipeline consists of three stages. Because the preprocessing and featurizing stage don't contain any variables, we can use a default session.Here we use the Keras classifier as the final stage.
###Code
from azureml.contrib.brainwave.pipeline import ModelDefinition, TensorflowStage, BrainWaveStage, KerasStage
model_def = ModelDefinition()
model_def.pipeline.append(TensorflowStage(tf.Session(), in_images, image_tensors))
model_def.pipeline.append(BrainWaveStage(tf.Session(), bwmodel))
model_def.pipeline.append(KerasStage(model))
model_def_path = os.path.join(datadir, 'save', 'model_def')
model_def.save(model_def_path)
print(model_def_path)
###Output
_____no_output_____
###Markdown
Deploy
###Code
from azureml.core.model import Model
from azureml.core import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
model_name = "catsanddogs-model"
service_name = "modelbuild-service"
registered_model = Model.register(ws, model_def_path, model_name)
###Output
_____no_output_____
###Markdown
The first time the code below runs it will create a new service running your model. If you want to change the model you can make changes above in this notebook and save a new service definition. Then this code will update the running service in place to run the new model.
###Code
from azureml.core.webservice import Webservice
from azureml.exceptions import WebserviceException
from azureml.contrib.brainwave import BrainwaveWebservice, BrainwaveImage
try:
service = Webservice(ws, service_name)
except WebserviceException:
image_config = BrainwaveImage.image_configuration()
deployment_config = BrainwaveWebservice.deploy_configuration()
service = Webservice.deploy_from_model(ws, service_name, [registered_model], image_config, deployment_config)
service.wait_for_deployment(True)
###Output
_____no_output_____
###Markdown
The service is now running in Azure and ready to serve requests. We can check the address and port.
###Code
print(service.ipAddress + ':' + str(service.port))
###Output
_____no_output_____
###Markdown
ClientThere is a simple test client at amlrealtimeai.PredictionClient which can be used for testing. We'll use this client to score an image with our new service.
###Code
from azureml.contrib.brainwave.client import PredictionClient
client = PredictionClient(service.ipAddress, service.port)
###Output
_____no_output_____
###Markdown
You can adapt the client [code](../../pythonlib/amlrealtimeai/client.py) to meet your needs. There is also an example C [client](../../sample-clients/csharp).The service provides an API that is compatible with TensorFlow Serving. There are instructions to download a sample client [here](https://www.tensorflow.org/serving/setup). RequestLet's see how our service does on a few images. It may get a few wrong.
###Code
# Specify an image to classify
print('CATS')
for image_file in cat_files[:8]:
results = client.score_image(image_file)
result = 'CORRECT ' if results[0] > results[1] else 'WRONG '
print(result + str(results))
print('DOGS')
for image_file in dog_files[:8]:
results = client.score_image(image_file)
result = 'CORRECT ' if results[1] > results[0] else 'WRONG '
print(result + str(results))
###Output
_____no_output_____
###Markdown
CleanupRun the cell below to delete your service. In the [next notebook](project-brainwave-custom-weights.ipynb) you will learn how to retrain all the weights of one of the models
###Code
service.delete()
registered_model.delete()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Model Development This example shows how to build, train, evaluate and deploy a model running on FPGA. Only Windows is supported. We use TensorFlow and Keras to build our model. We are going to use transfer learning, with ResNet152 as a featurizer. We don't use the last layer of ResNet152 in this case and instead add and train our own classification layer.We will use the Kaggle Cats and Dogs dataset to train the classifier. The dataset can be downloaded [here](https://www.microsoft.com/en-us/download/details.aspx?id=54765). Download the zip and extract to a directory named 'catsanddogs' under your user directory ("~/catsanddogs").Please set up your environment as described in the [quick start](project-brainwave-quickstart.ipynb).
###Code
import os
import tensorflow as tf
import numpy as np
###Output
_____no_output_____
###Markdown
Model ConstructionLoad the files we are going to use for training and testing. By default this notebook uses only a very small subset of the Cats and Dogs dataset. That makes it run quickly, but doesn't create a very accurate classifier. You can improve the classifier by using more of the dataset.
###Code
import glob
import imghdr
datadir = os.path.expanduser("~/catsanddogs")
cat_files = glob.glob(os.path.join(datadir, 'PetImages', 'Cat', '*.jpg'))
dog_files = glob.glob(os.path.join(datadir, 'PetImages', 'Dog', '*.jpg'))
# Limit the data set to make the notebook execute quickly.
cat_files = cat_files[:64]
dog_files = dog_files[:64]
# The data set has a few images that are not jpeg. Remove them.
cat_files = [f for f in cat_files if imghdr.what(f) == 'jpeg']
dog_files = [f for f in dog_files if imghdr.what(f) == 'jpeg']
if(not len(cat_files) or not len(dog_files)):
print("Please download the Kaggle Cats and Dogs dataset form https://www.microsoft.com/en-us/download/details.aspx?id=54765 and extract the zip to " + datadir)
raise ValueError("Data not found")
else:
print(cat_files[0])
print(dog_files[0])
# constructing a numpy array as labels
image_paths = cat_files + dog_files
total_files = len(cat_files) + len(dog_files)
labels = np.zeros(total_files)
labels[len(cat_files):] = 1
###Output
_____no_output_____
###Markdown
We need to preprocess the input file to get it into the form expected by ResNet152. We've provided a default implementation of the preprocessing that you can use.
###Code
# Input images as a two-dimensional tensor containing an arbitrary number of images represented a strings
import azureml.contrib.brainwave.models.utils as utils
in_images = tf.placeholder(tf.string)
image_tensors = utils.preprocess_array(in_images)
print(image_tensors.shape)
###Output
_____no_output_____
###Markdown
Alternatively, if you would like to customize the preprocessing, you can write your own preprocessor using TensorFlow operations.The input to the classifier we are training is the set of features produced by ResNet50. To train the classifier we need to featurize the images using ResNet50. You can also run the featurizer locally on CPU or GPU. We import the featurizer as frozen, so that we are only training the classifier.
###Code
from azureml.contrib.brainwave.models import QuantizedResnet152
model_path = os.path.expanduser('~/models')
bwmodel = QuantizedResnet152(model_path, is_frozen = True)
print(bwmodel.version)
###Output
_____no_output_____
###Markdown
Calling import_graph_def on the featurizer will create a service that runs the featurizer on FPGA.
###Code
features = bwmodel.import_graph_def(input_tensor=image_tensors)
###Output
_____no_output_____
###Markdown
Pre-compute featuresLoad the data set and compute the features. These can be precomputed because they don't change during training. This can take a while to run on CPU.
###Code
from tqdm import tqdm
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def read_files(files):
contents = []
for path in files:
with open(path, 'rb') as f:
contents.append(f.read())
return contents
feature_list = []
with tf.Session() as sess:
for chunk in tqdm(chunks(image_paths, 5)):
contents = read_files(chunk)
result = sess.run([features], feed_dict={in_images: contents})
feature_list.extend(result[0])
feature_results = np.array(feature_list)
print(feature_results.shape)
###Output
_____no_output_____
###Markdown
Add and Train the classifierWe use Keras to define and train a simple classifier.
###Code
from keras.models import Sequential
from keras.layers import Dropout, Dense, Flatten
from keras import optimizers
FC_SIZE = 1024
NUM_CLASSES = 2
model = Sequential()
model.add(Dropout(0.2, input_shape=(1, 1, 2048,)))
model.add(Dense(FC_SIZE, activation='relu', input_dim=(1, 1, 2048,)))
model.add(Flatten())
model.add(Dense(NUM_CLASSES, activation='sigmoid', input_dim=FC_SIZE))
model.compile(optimizer=optimizers.SGD(lr=1e-4,momentum=0.9), loss='binary_crossentropy', metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Prepare the train and test data.
###Code
from sklearn.model_selection import train_test_split
onehot_labels = np.array([[0,1] if i else [1,0] for i in labels])
X_train, X_test, y_train, y_test = train_test_split(feature_results, onehot_labels, random_state=42, shuffle=True)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
###Output
_____no_output_____
###Markdown
Train the classifier.
###Code
model.fit(X_train, y_train, epochs=16, batch_size=32)
###Output
_____no_output_____
###Markdown
Test the ClassifierLet's test the classifier and see how well it does. Since we only trained on a few images, we are not expecting to win a Kaggle competition, but it will likely get most of the images correct.
###Code
from numpy import argmax
y_probs = model.predict(X_test)
y_prob_max = np.argmax(y_probs, 1)
y_test_max = np.argmax(y_test, 1)
print(y_prob_max)
print(y_test_max)
from sklearn.metrics import confusion_matrix, roc_auc_score, accuracy_score, precision_score, recall_score, f1_score
import itertools
import matplotlib
from matplotlib import pyplot as plt
# compute a bunch of classification metrics
def classification_metrics(y_true, y_pred, y_prob):
cm_dict = {}
cm_dict['Accuracy'] = accuracy_score(y_true, y_pred)
cm_dict['Precision'] = precision_score(y_true, y_pred)
cm_dict['Recall'] = recall_score(y_true, y_pred)
cm_dict['F1'] = f1_score(y_true, y_pred)
cm_dict['AUC'] = roc_auc_score(y_true, y_prob[:,0])
cm_dict['Confusion Matrix'] = confusion_matrix(y_true, y_pred).tolist()
return cm_dict
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
"""Plots a confusion matrix.
Source: http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
New BSD License - see appendix
"""
cm_max = cm.max()
cm_min = cm.min()
if cm_min > 0: cm_min = 0
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
cm_max = 1
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
thresh = cm_max / 2.
plt.clim(cm_min, cm_max)
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i,
round(cm[i, j], 3), # round to 3 decimals if they are float
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
cm_dict = classification_metrics(y_test_max, y_prob_max, y_probs)
for m in cm_dict:
print(m, cm_dict[m])
cm = np.asarray(cm_dict['Confusion Matrix'])
plot_confusion_matrix(cm, ['fail','pass'], normalize=False)
###Output
_____no_output_____
###Markdown
Service DefinitionLike in the QuickStart notebook our service definition pipeline consists of three stages. Because the preprocessing and featurizing stage don't contain any variables, we can use a default session.Here we use the Keras classifier as the final stage.
###Code
from azureml.contrib.brainwave.pipeline import ModelDefinition, TensorflowStage, BrainWaveStage, KerasStage
model_def = ModelDefinition()
model_def.pipeline.append(TensorflowStage(tf.Session(), in_images, image_tensors))
model_def.pipeline.append(BrainWaveStage(tf.Session(), bwmodel))
model_def.pipeline.append(KerasStage(model))
model_def_path = os.path.join(datadir, 'save', 'model_def')
model_def.save(model_def_path)
print(model_def_path)
###Output
_____no_output_____
###Markdown
Deploy
###Code
from azureml.core.model import Model
from azureml.core import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
model_name = "catsanddogs-model"
service_name = "modelbuild-service"
registered_model = Model.register(ws, model_def_path, model_name)
###Output
_____no_output_____
###Markdown
The first time the code below runs it will create a new service running your model. If you want to change the model you can make changes above in this notebook and save a new service definition. Then this code will update the running service in place to run the new model.
###Code
from azureml.core.webservice import Webservice
from azureml.exceptions import WebserviceException
from azureml.contrib.brainwave import BrainwaveWebservice, BrainwaveImage
try:
service = Webservice(ws, service_name)
except WebserviceException:
image_config = BrainwaveImage.image_configuration()
deployment_config = BrainwaveWebservice.deploy_configuration()
service = Webservice.deploy_from_model(ws, service_name, [registered_model], image_config, deployment_config)
service.wait_for_deployment(true)
###Output
_____no_output_____
###Markdown
The service is now running in Azure and ready to serve requests. We can check the address and port.
###Code
print(service.ipAddress + ':' + str(service.port))
###Output
_____no_output_____
###Markdown
ClientThere is a simple test client at amlrealtimeai.PredictionClient which can be used for testing. We'll use this client to score an image with our new service.
###Code
from azureml.contrib.brainwave.client import PredictionClient
client = PredictionClient(service.ipAddress, service.port)
###Output
_____no_output_____
###Markdown
You can adapt the client [code](../../pythonlib/amlrealtimeai/client.py) to meet your needs. There is also an example C [client](../../sample-clients/csharp).The service provides an API that is compatible with TensorFlow Serving. There are instructions to download a sample client [here](https://www.tensorflow.org/serving/setup). RequestLet's see how our service does on a few images. It may get a few wrong.
###Code
# Specify an image to classify
print('CATS')
for image_file in cat_files[:8]:
results = client.score_image(image_file)
result = 'CORRECT ' if results[0] > results[1] else 'WRONG '
print(result + str(results))
print('DOGS')
for image_file in dog_files[:8]:
results = client.score_image(image_file)
result = 'CORRECT ' if results[1] > results[0] else 'WRONG '
print(result + str(results))
###Output
_____no_output_____
###Markdown
CleanupRun the cell below to delete your service. In the [next notebook](project-brainwave-custom-weights.ipynb) you will learn how to retrain all the weights of one of the models.
###Code
service.delete()
registered_model.delete()
###Output
_____no_output_____
EDA_aku/EDA_HT_ANOVA_aku.ipynb | ###Markdown
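###Markdown
The notebook's import cell is not shown here; the following minimal set of imports is reconstructed from the calls used below, so treat it as an assumption rather than the original cell.
###Code
# libraries used throughout this notebook (reconstructed from the code below)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
import statsmodels.api as sm
from statsmodels.graphics.mosaicplot import mosaic
from statsmodels.stats import oneway
from statsmodels.stats.multicomp import pairwise_tukeyhsd
import pingouin
import scikit_posthocs as sp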
Načtení dat ze souboru
###Code
aku = pd.read_csv('aku.csv', sep=';', index_col=0) #sep: oddělovač, index_col: číslo sloupce s indexem řádku (soubor jej nemusí obsahovat)
aku
aku.columns # názvy slupců v datovém souboru
aku.head() # výpis prvních řádků souboru
aku['vyrobce'].dtype # data ve slpupci jsou v obecného typu "object"
# sloupec lze převést na typ Category
aku['vyrobce']=aku['vyrobce'].astype('category')
# pro data typu Category můžeme zvolit vlastní pořadí kategorií (hodnot)
aku['vyrobce']=aku['vyrobce'].cat.as_ordered()
aku['vyrobce']=aku['vyrobce'].cat.reorder_categories(['C', 'B', 'A', 'D'])
aku['vyrobce'] # v posledním řádku vidíme, že kategorie jsou uspořádány podle zadaného pořadí
###Output
_____no_output_____
###Markdown
Explorační analýza Explorační analýza kategoriální proměnné
###Code
# četnosti výskytu jednotlivých kategorií
aku['vyrobce'].value_counts()
# koláčový graf
aku['vyrobce'].value_counts().plot.pie()
# sloupcový graf
aku['vyrobce'].value_counts().plot.bar()
plt.xlabel("výrobce")
plt.ylabel("počet akumulátorů")
###Output
_____no_output_____
###Markdown
Explorační analýza kvantitativních proměnných
###Code
# základní numerické charakteristiky
aku.describe()
# histogramy
aku.plot.hist(alpha=0.5)
plt.xlabel("kapacita")
plt.ylabel("četnost výskytu")
# krabicové grafy
aku.plot.box()
plt.ylabel("kapacita")
# výpis řádků obsahujících odlehlá pozorování ve sloupci kapacit po 5 cyklech (podle metody vnitřních hradeb)
Q1_5=aku['kapacita_5'].quantile(0.25)
Q3_5=aku['kapacita_5'].quantile(0.75)
IQR_5 = Q3_5-Q1_5
out_5 = (aku['kapacita_5']<(Q1_5-1.5*IQR_5)) | (aku['kapacita_5']>(Q3_5+1.5*IQR_5))
aku[out_5]
# ponecháme jen řádky obsahující v obou sloupcích hodnoty nad 1500 mAh (zbylé akumulároty jsou považovány za vadné)
aku = aku[(aku['kapacita_5']>=1500)&(aku['kapacita_100']>=1500)].copy()
# histogramy po odstranění vadných akumulátorů
aku.plot.hist(alpha=0.5)
plt.xlabel("kapacita")
plt.ylabel("četnost výskytu")
# krabicové grafy po odstranění vadných akumulátorů
aku.plot.box()
plt.ylabel("kapacita")
# základní charakteristiky pro data očištěná od odlehých pozorování
aku.describe()
###Output
_____no_output_____
###Markdown
Závislost dvou kvantitativních proměnných
###Code
# bodový graf závislosti kapacit po 5 a kapacit po 100 cyklech
aku.plot.scatter(x='kapacita_5', y='kapacita_100')
# z rozložení bodů v grafu lze usoudit, že hodnoty kapacit po 5 a po 100 cyklech jsou vzájemně závislé
# Pearsonvy korelační koeficienty (bodové odhady korelací)
aku.corr() # pomocí pandas
np.corrcoef(aku['kapacita_5'], aku['kapacita_100']) # totéž pomocí numpy
###Output
_____no_output_____
###Markdown
Explorační analýza závislosti kvantitativní a kategoriální proměnné
###Code
# krabicové grafy kapacit po 5 cyklech v zavislosti na vyrobci
aku.boxplot(column=['kapacita_5'], by=['vyrobce']);
plt.title('Kapacity po 5 cyklech')
plt.xlabel('výrobce')
plt.ylabel('kapacita')
plt.tight_layout() # upravi mezery kolem grafu
# výrazné posunutí krabicových grafů (např. B proti D) naznačuje závislost kapacity po 5 cyklech na výrobci
###Output
_____no_output_____
###Markdown
Explorační analýza závislosti dvou kategoriálních proměnných
###Code
# pokles kapacit mezi 5. a 100. cyklem (přidáme jako další sloupec datového rámce)
aku['pokles']=aku['kapacita_5']-aku['kapacita_100']
# relativní pokles
aku['rel_pokles']=aku['pokles']/aku['kapacita_5']
# je relativní pokles > 0.1 ?
aku['pokles_nad10p'] = aku['rel_pokles']>0.1
# kontingenční tabulka (tabulka absolutních četností dvojic (výrobce, pokles > 0.1))
crt = pd.crosstab(aku['vyrobce'], aku['pokles_nad10p'])
crt
# mozaikový graf (grafické znázornění kontingeční tabulky)
mosaic(aku, ['vyrobce', 'pokles_nad10p']);
# výrazně rozdílné poměry výšek buněk v jednotlivých sloupcích nazačují závislsot mezi poklesem a výrobcem
###Output
_____no_output_____
###Markdown
Ověření normality
###Code
# pomocí QQ grafu
sm.qqplot(aku['kapacita_5'][aku['vyrobce']=='A'], line='45', fit=True)
plt.show()
# rozložení bodů podél přímky ukazuje na normální rozdělení dat
# Obdobný graf pro data zexponenciálního rozdělení ukazuje výrazný odklad od přímky. Z toho lze usuzovat, že data nemají normální rozdělení.
x = np.random.exponential(5, size=50)
sm.qqplot(x, line='45', fit=True)
plt.show()
# normalitu lze testovat např. Shapirovým-Wilkovým testem (H0: data mají normální rozdělení, H1: data nemají normální rozdělení)
stats.shapiro(aku['kapacita_5'][aku['vyrobce']=='A'])
# vyspoká p-hodnota ukazuje, že data nevykazují významnou odchylku od normality
###Output
_____no_output_____
###Markdown
Testy hypotéz, intervalové odhady Ověření normality pro jednotlivé kapacity a výrobce
###Code
aku[['kapacita_5', 'vyrobce']].groupby('vyrobce').apply(stats.shapiro)
aku[['kapacita_100', 'vyrobce']].groupby('vyrobce').apply(stats.shapiro)
###Output
_____no_output_____
###Markdown
Všechny p-hodnoty jsou vyšší než 0.05. Jednotlivé výběry můžeme považovat za výbery z normálního rozdělení. Jednovýběrový t-test (test střední hodnoty v normálním rozdělení) Dosahují akumulátory výrobce A po 5 cyklech deklarované kapacity?
###Code
kap5A = aku['kapacita_5'][aku['vyrobce']=='A']
kap5A.plot.box();
# krabicový graf nenaznačuje výraznou odchylku od deklarované hodnoty
###Output
_____no_output_____
###Markdown
Testujeme H0: $\mu_A=2000$ proti H1: $\mu_A\neq 2000$
###Code
# oboustranný jednovýběrový t-test
result = stats.ttest_1samp(kap5A, popmean=2000)
print("Jednovýběrový t-test: p-hodnota={:f}".format(result.pvalue))
###Output
Jednovýběrový t-test: p-hodnota=0.048518
###Markdown
Na hladině významnosti 0.05 zamítáme H0. Střední hodnota kapacit akumumulátorů výrobce A po 5 cyklech vykazuje statisticky význanou odchylku od deklarované kapacity.
###Code
# jednostranný jednovýběrový t-test H0: \mu_A=2000, H1: \mu_A>2000
result = stats.ttest_1samp(kap5A, popmean=2000, alternative='greater')
print("Jednovýběrový t-test: p-hodnota={:f}".format(result.pvalue))
# pro H1: \mu_A<2000 ... alternative='less'
# pro H1: \mu_A<>2000 ... alternative='two-sided' (výchozí hodnota parametru)
###Output
Jednovýběrový t-test: p-hodnota=0.024259
###Markdown
--- Dvouvýběrový t-test (test rovnosti středních hodnot vzájemně neávislých výběrů z normálních rozdělení) Liší se střední hodnoty kapacit akumulátorů výrobců B a D po 100 cyklech?
###Code
kapB100 = aku['kapacita_100'][aku['vyrobce']=='B']
kapD100 = aku['kapacita_100'][aku['vyrobce']=='D']
# krabicové grafy pomocí matplotlib
plt.boxplot([kapB100, kapD100])
plt.xticks(ticks=[1, 2], labels=['B', 'D'])
plt.xlabel('výrobce')
plt.ylabel('kapacita')
###Output
_____no_output_____
###Markdown
Krabicové grafy naznačují, že kapacity akamulátorů výrobce D jsou vyšší než výrobce B. Proto volíme jednostrannou alternativní hypotézu.Krabicové grafy nenaznačjí rozdíl v rozptylech. Použijeme jednostranný t-test pro výběry se shodnými rozptyly. Test H0: $\mu_B=\mu_D$ proti H1: $\mu_B<\mu_D$
###Code
# dvouvýběrový t-test
test = stats.ttest_ind(kapB100, kapD100, equal_var=True) # pro rozdílné rozptyly: equal_var=False
print("Dvouvýběrový t-test: p-hodnota={:f}".format(test.pvalue))
###Output
Dvouvýběrový t-test: p-hodnota=0.000003
###Markdown
Na hladině významnosti 0.05 zamítáme $H_0$. Střední hodnoty kapacit po 100 cyklech akumulátorů výrobce D jsou vyšší než střední hodnoty kapacit akumulátorů výrobce B. --- Test shody rozptylů pro dva výběry z normálních rozdělení Liší se rozptyly hodnoty kapacit akuamulátorů výrobců B a D po 100 cyklech? H_0: $\sigma_B^2=\sigma_D^2$, H_1: $\sigma_B^2>\sigma_D^2$
###Code
# test rovnosti rozptylů není dostupný v použitých knihovnách, provedeme jej ručně
SB100 = kapB100.var()
SD100 = kapD100.var()
F = SB100/SD100
pval = 1-stats.f.cdf(F, len(kapB100)-1, len(kapD100)-1)
print("F-test: p-hodnota={:f}".format(pval))
###Output
F-test: p-hodnota=0.225281
###Markdown
Na hladině významnosti 0.05 nezamítáme $H_0$. U akamulátorů výrobců B a D nebyl po 100 cyklech prokázán rozdíl v rozptylech kapacit. --- Párový t-test (test rovnosti středních hodnot pro párová data) Liší se kapacity akumulátorů výrobce A po 5 a po 100 cyklech? POZOR: Nejde o nezávislé výběry! Data jsou párová. Měření kapacit po 5 a po 100 cyklech je prováděno se stejnými akumulátory. Posuzujeme stř. hodnotu rozdílů po 5 a po 100 cyklech.
###Code
aku['pokles'][aku['vyrobce']=='A'].plot.box();
###Output
_____no_output_____
###Markdown
Krabicový graf ukazuje, že pokles kapacity u výrobce A je výrazně vyšší než 0. Ověření normality poklesu kapacit Shapirovým-Wilkovým testem.
###Code
stats.shapiro(aku['pokles'])
###Output
_____no_output_____
###Markdown
Hypotézu o normalitě nezamítáme. Poklesy kapacit mají normální rozdělení. Pro test stř. hodnoty poklesu použijeme t-test. Označme střední hodnotu poklesu kapacity jako $\mu_{pA}$.Testujeme $H_0$: $\mu_{pA}=0$ proti $H_1$: $\mu_{pA}>0$.
###Code
# Chybně (jako nezávislá data):
stats.ttest_ind(aku['kapacita_5'][aku['vyrobce']=='A'], aku['kapacita_100'][aku['vyrobce']=='A'])
# Správně pomocí párového t-testu (jako párová data):
stats.ttest_rel(aku['kapacita_5'][aku['vyrobce']=='A'], aku['kapacita_100'][aku['vyrobce']=='A'])
# Správně pomocí jednovýběrového t-testu poklesů:
stats.ttest_1samp(aku['pokles'][aku['vyrobce']=='A'], popmean=0)
result = stats.ttest_rel(aku['kapacita_5'][aku['vyrobce']=='A'], aku['kapacita_100'][aku['vyrobce']=='A'])
print("párový t-test: p-hodnota={:.10f}".format(result.pvalue))
###Output
párový t-test: p-hodnota=0.0000000004
###Markdown
Na hladině významnosti 0.05 zamítáme H0. U akamulátorů výrobce A dochází mezi 5 a 100 cyklem k poklesu stř. hodnoty kapacity. --- Intervalový odhad střední hodnoty výběru z normálního rozdělení Intervalové odhady stř. hodot kapacit po 5 cyklech
###Code
# ruční výpočet intervalového odhadu
alfa = 0.05
print('95% intervalové odhady stř. hodnot kapacit po 5 cyklech')
for v in aku['vyrobce'].unique():
aku_v = aku['kapacita_5'][aku['vyrobce']==v]
m = aku_v.mean() # výběrový průměr
s = aku_v.std() # výběrová směrodatná odchylka
n = len(aku)
t = stats.t.ppf(1-alfa/2, n-1) # 1-alfa/2 kvantil studentova rozdělení
d = t*s/n**0.5 # polovina délky konfidenčního intervalu
td = m-d # dolní mez
th = m+d # horn9 mez
print('výrobce {}: ({:.0f}, {:.0f}) mAh'.format(v, td, th))
###Output
95% intervalové odhady stř. hodnot kapacit po 5 cyklech
výrobce A: (2010, 2027) mAh
výrobce B: (1962, 1977) mAh
výrobce C: (1984, 2009) mAh
výrobce D: (2028, 2039) mAh
###Markdown
ANOVA (test rovnosti stř. hodnot více výběrů z normálních rozdělení)
###Code
sloupec = 'pokles' # sloupec s daty pro ANOVu
vyrobci = aku.vyrobce.cat.categories # hodnoty vysvětlující kategoriální proměnné
sloupce = [aku[sloupec][aku['vyrobce']==v] for v in vyrobci] # seznam posloupností s hodnotami v jednotlivych sloupcich
###Output
_____no_output_____
###Markdown
Exploratory analysis
###Code
# exploratory analysis using box plots
plt.boxplot(sloupce, labels=vyrobci)
plt.ylabel(sloupec)
###Output
_____no_output_____
###Markdown
There are substantial differences between the samples (e.g. between C and D, C and B, A and D). We can therefore tentatively expect that the means are not equal. Checking the assumptions
###Code
# checking the normality of the individual samples with the Shapiro-Wilk test
aku[[sloupec, 'vyrobce']].groupby('vyrobce').apply(stats.shapiro)
###Output
_____no_output_____
###Markdown
The p-values are greater than 0.05. We do not reject the hypotheses of normality.
###Code
# checking the equality of variances of the individual samples with Bartlett's test
stats.bartlett(*sloupce)
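# Illustrative alternative (assumption: Levene's test is often preferred when normality is doubtful,
# because it is less sensitive to departures from normality than Bartlett's test):
# stats.levene(*sloupce)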
###Output
_____no_output_____
###Markdown
The p-value 0.003 is lower than 0.05. At the 0.05 significance level we reject the null hypothesis of equal variances. For the test of equality of means we therefore use Welch's ANOVA (for samples with unequal variances). We test $H_0:\mu_1=\mu_2=\mu_3=\mu_4$ against $H_1$: $H_0$ does not hold.
###Code
res_anova = pingouin.welch_anova(aku, dv='pokles', between='vyrobce')
print(res_anova)
print("Welchova ANOVA: p-hodnota={:f}".format(res_anova['p-unc'][0]))
###Output
Welch's ANOVA: p-value=0.000007
###Markdown
At the 0.05 significance level we reject $H_0$. The means are not all equal.
###Code
# post-hoc analysis
pingouin.pairwise_gameshowell(aku, dv='pokles', between='vyrobce')
###Output
_____no_output_____
###Markdown
There is a statistically significant difference in the capacity drop between the manufacturer pairs (C, B), (C, D) and (A, D). Further options for multiple comparisons of means
###Code
# ANOVA for samples with unequal variances
oneway.anova_oneway(aku[sloupec], aku['vyrobce'], use_var='unequal')
# ANOVA for samples with equal variances (using scipy.stats)
stats.f_oneway(*sloupce)
# ANOVA for samples with equal variances (using pingouin)
pingouin.anova(aku, dv='pokles', between='vyrobce')
# post-hoc analysis - pairwise comparison of the means, assuming equal variances
tukey_res = pairwise_tukeyhsd(aku[sloupec], aku['vyrobce'])
print(tukey_res)
# Kruskal-Wallis test (an alternative to ANOVA when the samples do not come from normal distributions)
#
# The Kruskal-Wallis test checks whether the distributions of the individual samples are identical.
# If the distributions of the individual samples have the same "shape" and can differ only in location (i.e. they are shifted by constants),
# then the null hypothesis of identical distributions is equivalent to the hypothesis of equal means.
#
stats.kruskal(*sloupce)
# post-hoc analysis for the Kruskal-Wallis test
sp.posthoc_dunn(sloupce)
#%load_ext watermark
#%watermark --iversions
###Output
The watermark extension is already loaded. To reload it, use:
%reload_ext watermark
pingouin : 0.5.1
numpy : 1.21.3
scipy : 1.7.1
statsmodels : 0.13.2
matplotlib : 3.4.3
scikit_posthocs: 0.7.0
pandas : 1.3.4
|
notebooks/utility/jetraecr/vae_viewer.ipynb | ###Markdown
VAE viewer notebook for JetBot===This notebook visualizes the trained VAE. It uses the JetBot's real camera.
###Code
import sys
import PIL
import numpy as np
import cv2
import traitlets
import ipywidgets.widgets as widgets
from IPython.display import display
import torch
from torchvision.transforms import transforms
from learning_racer.vae import VAE
from jetcam.csi_camera import CSICamera
from jetcam.utils import bgr8_to_jpeg
###Output
_____no_output_____
###Markdown
Setting Parameter|Name | Description| Default||:----|:-----------|:-------||IMAGE_CHANNELS | Image channel such as RGB | 3 Not change||VARIANTS_SIZE | Variants size of VAE | 32 ||MODEL_PATH | Trained VAE model file path | ../../vae.torch|
###Code
IMAGE_CHANNELS = 3
VARIANTS_SIZE = 32
MODEL_PATH = '../../../vae.torch'
###Output
_____no_output_____
###Markdown
Load the trained VAE model.The trained VAE model is loaded into GPU memory.
###Code
device = torch.device('cuda')
vae = VAE(image_channels=IMAGE_CHANNELS, z_dim=VARIANTS_SIZE)
vae.load_state_dict(torch.load(MODEL_PATH, map_location=torch.device(device)))
vae.to(device).eval()
###Output
_____no_output_____
###Markdown
Create camera Capture size is W=320, H=240.
###Code
CAMERA_WIDTH = 320
CAMERA_HEIGHT = 240
FPS = 60
camera = CSICamera(width=CAMERA_WIDTH, height=CAMERA_HEIGHT, capture_width=CAMERA_WIDTH,
capture_height=CAMERA_HEIGHT, capture_fps=FPS)
camera.running = True
image = widgets.Image(format='jpeg', width=CAMERA_WIDTH, height=CAMERA_HEIGHT)
camera_link = traitlets.dlink((camera,'value'), (image,'value'), transform=bgr8_to_jpeg)
###Output
_____no_output_____
###Markdown
Define preprocess and postprocess
###Code
def preprocess(image):
observe = PIL.Image.fromarray(image)
observe = observe.resize((160,120))
croped = observe.crop((0, 40, 160, 120))
tensor = transforms.ToTensor()(croped)
return tensor
def rgb8_to_jpeg(image):
return bytes(cv2.imencode('.jpg', image)[1])
###Output
_____no_output_____
###Markdown
Visualize latent space function
###Code
ABS_LATENT_MAX_VALUE = 3
PANEL_HEIGHT = 10
PANEL_WIDTH = 10
def sigmoid(x, gain=1, offset_x=0):
return ((np.tanh(((x+offset_x)*gain)/2)+1)/2)
def color_bar_rgb(x):
gain = 10
offset_x= 0.2
offset_green = 0.6
x = (x * 2) - 1
red = sigmoid(x, gain, -1*offset_x)
blue = 1-sigmoid(x, gain, offset_x)
green = sigmoid(x, gain, offset_green) + (1-sigmoid(x,gain,-1*offset_green))
green = green - 1.0
return [blue * 255,green * 255,red * 255]
def _get_color(value):
t = (value + ABS_LATENT_MAX_VALUE) / (ABS_LATENT_MAX_VALUE * 2.0)
color = color_bar_rgb(t)
return color
def create_color_panel(latent_spaces):
images = []
for z in latent_spaces:
p = np.zeros((PANEL_HEIGHT, PANEL_WIDTH, 3))
color = _get_color(z)
p += color[::-1]
p = np.clip(p, 0, 255)
images.append(p)
panel = np.concatenate(images, axis=1)
return panel
###Output
_____no_output_____
###Markdown
Create GUI
###Code
image = widgets.Image(format='jpeg', width=320, height=240)
resize = widgets.Image(format='jpeg', width=160, height=80)
result = widgets.Image(format='jpeg', width=160, height=80)
camera_link = traitlets.dlink((camera,'value'), (image,'value'), transform=bgr8_to_jpeg)
color_bar = widgets.Image(format='jpeg', width=32*PANEL_WIDTH, height=10*PANEL_HEIGHT)
display(image)
display(widgets.HBox([resize,result]))
display(color_bar)
###Output
_____no_output_____
###Markdown
Start main process
###Code
def vae_process(change):
image = change['new']
image = preprocess(image)
resize.value = rgb8_to_jpeg(np.transpose(np.uint8(image*255),[1,2,0]))
z, _ ,_ = vae.encode(torch.unsqueeze(image,dim=0).to(device))
reconst = vae.decode(z)
reconst = reconst.detach().cpu()[0].numpy()
    # Note: the decoder output is RGB, but cv2.imencode (in rgb8_to_jpeg) expects BGR, so reverse the channel order
reconst = np.transpose(np.uint8(reconst*255),[1,2,0])[:,:,::-1]
result.value = rgb8_to_jpeg(reconst)
latent_space = z.detach().cpu().numpy()[0]
color_bar.value = rgb8_to_jpeg(create_color_panel(latent_space))
vae_process({'new': camera.value})
camera.observe(vae_process, names='value')
###Output
_____no_output_____
###Markdown
Cleanup process
###Code
camera.unobserve(vae_process, names='value')
camera_link.unlink()
###Output
_____no_output_____
###Markdown
VAE viewer notebook for JetBot===This notebook visualizes the trained VAE. It uses the JetBot's real camera.
###Code
import sys
import PIL
import numpy as np
import cv2
import traitlets
import ipywidgets.widgets as widgets
from IPython.display import display
import torch
from torchvision.transforms import transforms
from learning_racer.vae import VAE
from jetcam.csi_camera import CSICamera
from jetcam.utils import bgr8_to_jpeg
###Output
_____no_output_____
###Markdown
Setting Parameter|Name | Description| Default||:----|:-----------|:-------||IMAGE_CHANNELS | Image channel such as RGB | 3 Not change||VARIANTS_SIZE | Variants size of VAE | 32 ||MODEL_PATH | Trained VAE model file path | ../../vae.torch|
###Code
IMAGE_CHANNELS = 3
VARIANTS_SIZE = 32
MODEL_PATH = '../../../vae.torch'
###Output
_____no_output_____
###Markdown
Load the trained VAE model.The trained VAE model is loaded into GPU memory.
###Code
device = torch.device('cuda')
vae = VAE(image_channels=IMAGE_CHANNELS, z_dim=VARIANTS_SIZE)
vae.load_state_dict(torch.load(MODEL_PATH, map_location=torch.device(device)))
vae.to(device).eval()
###Output
_____no_output_____
###Markdown
Create camera Capture size is W=320, H=240.
###Code
CAMERA_WIDTH = 320
CAMERA_HEIGHT = 240
FPS = 60
camera = CSICamera(width=CAMERA_WIDTH, height=CAMERA_HEIGHT, capture_width=CAMERA_WIDTH,
capture_height=CAMERA_HEIGHT, capture_fps=FPS)
camera.running = True
image = widgets.Image(format='jpeg', width=CAMERA_WIDTH, height=CAMERA_HEIGHT)
camera_link = traitlets.dlink((camera,'value'), (image,'value'), transform=bgr8_to_jpeg)
###Output
_____no_output_____
###Markdown
Define preprocess and postprocess
###Code
def preprocess(image):
observe = PIL.Image.fromarray(image)
observe = observe.resize((160,120))
croped = observe.crop((0, 40, 160, 120))
tensor = transforms.ToTensor()(croped)
return tensor
def rgb8_to_jpeg(image):
return bytes(cv2.imencode('.jpg', image)[1])
###Output
_____no_output_____
###Markdown
Visualize latent space function
###Code
ABS_LATENT_MAX_VALUE = 10
PANEL_HEIGHT = 10
PANEL_WIDTH = 10
def sigmoid(x, gain=1, offset_x=0):
return ((np.tanh(((x+offset_x)*gain)/2)+1)/2)
def color_bar_rgb(x):
gain = 10
offset_x= 0.2
offset_green = 0.6
x = (x * 2) - 1
red = sigmoid(x, gain, -1*offset_x)
blue = 1-sigmoid(x, gain, offset_x)
green = sigmoid(x, gain, offset_green) + (1-sigmoid(x,gain,-1*offset_green))
green = green - 1.0
return [blue * 255,green * 255,red * 255]
def _get_color(value):
t = (value + ABS_LATENT_MAX_VALUE) / (ABS_LATENT_MAX_VALUE * 2.0)
color = color_bar_rgb(t)
return color
def create_color_panel(latent_spaces):
images = []
for z in latent_spaces:
p = np.zeros((PANEL_HEIGHT, PANEL_WIDTH, 3))
color = _get_color(z)
p += color[::-1]
p = np.clip(p, 0, 255)
images.append(p)
panel = np.concatenate(images, axis=1)
return panel
###Output
_____no_output_____
###Markdown
Create GUI
###Code
image = widgets.Image(format='jpeg', width=320, height=240)
resize = widgets.Image(format='jpeg', width=160, height=80)
result = widgets.Image(format='jpeg', width=160, height=80)
camera_link = traitlets.dlink((camera,'value'), (image,'value'), transform=bgr8_to_jpeg)
color_bar = widgets.Image(format='jpeg', width=32*PANEL_WIDTH, height=10*PANEL_HEIGHT)
display(image)
display(widgets.HBox([resize,result]))
display(color_bar)
###Output
_____no_output_____
###Markdown
Start main process
###Code
def vae_process(change):
image = change['new']
image = preprocess(image)
resize.value = rgb8_to_jpeg(np.transpose(np.uint8(image*255),[1,2,0]))
z, _ ,_ = vae.encode(torch.stack((image,image),dim=0)[:-1].to(device))
reconst = vae.decode(z)
reconst = reconst.detach().cpu()[0].numpy()
reconst = np.transpose(np.uint8(reconst*255),[1,2,0])
result.value = rgb8_to_jpeg(reconst)
latent_space = z.detach().cpu().numpy()[0]
color_bar.value = rgb8_to_jpeg(create_color_panel(latent_space))
vae_process({'new': camera.value})
camera.observe(vae_process, names='value')
###Output
_____no_output_____
###Markdown
Cleanup process
###Code
camera.unobserve(vae_process, names='value')
camera_link.unlink()
###Output
_____no_output_____
###Markdown
VAE viewer notebook for JetBot===This notebook visualizes the trained VAE. It uses the JetBot's real camera.
###Code
import sys
import PIL
import numpy as np
import cv2
import traitlets
import ipywidgets.widgets as widgets
from IPython.display import display
import torch
from torchvision.transforms import transforms
sys.path.append('../../../vae')
from vae import VAE
from jetcam.csi_camera import CSICamera
from jetcam.utils import bgr8_to_jpeg
###Output
_____no_output_____
###Markdown
Setting Parameter|Name | Description| Default||:----|:-----------|:-------||IMAGE_CHANNELS | Image channel such as RGB | 3 Not change||VARIANTS_SIZE | Variants size of VAE | 32 ||MODEL_PATH | Trained VAE model file path | ../../vae.torch|
###Code
IMAGE_CHANNELS = 3
VARIANTS_SIZE = 32
MODEL_PATH = '../../../vae.torch'
###Output
_____no_output_____
###Markdown
Load the trained VAE model.The trained VAE model is loaded into GPU memory.
###Code
device = torch.device('cuda')
vae = VAE(image_channels=IMAGE_CHANNELS, z_dim=VARIANTS_SIZE)
vae.load_state_dict(torch.load(MODEL_PATH, map_location=torch.device(device)))
vae.to(device).eval()
###Output
_____no_output_____
###Markdown
Create camera Capture size is W=320, H=240.
###Code
CAMERA_WIDTH = 320
CAMERA_HEIGHT = 240
FPS = 60
camera = CSICamera(width=CAMERA_WIDTH, height=CAMERA_HEIGHT, capture_width=CAMERA_WIDTH,
capture_height=CAMERA_HEIGHT, capture_fps=FPS)
camera.running = True
image = widgets.Image(format='jpeg', width=CAMERA_WIDTH, height=CAMERA_HEIGHT)
camera_link = traitlets.dlink((camera,'value'), (image,'value'), transform=bgr8_to_jpeg)
###Output
_____no_output_____
###Markdown
Define preprocess and postprocess
###Code
def preprocess(image):
observe = PIL.Image.fromarray(image)
observe = observe.resize((160,120))
croped = observe.crop((0, 40, 160, 120))
tensor = transforms.ToTensor()(croped)
return tensor
def rgb8_to_jpeg(image):
return bytes(cv2.imencode('.jpg', image)[1])
###Output
_____no_output_____
###Markdown
Visualize latent space function
###Code
ABS_LATENT_MAX_VALUE = 10
PANEL_HEIGHT = 10
PANEL_WIDTH = 10
def sigmoid(x, gain=1, offset_x=0):
return ((np.tanh(((x+offset_x)*gain)/2)+1)/2)
def color_bar_rgb(x):
gain = 10
offset_x= 0.2
offset_green = 0.6
x = (x * 2) - 1
red = sigmoid(x, gain, -1*offset_x)
blue = 1-sigmoid(x, gain, offset_x)
green = sigmoid(x, gain, offset_green) + (1-sigmoid(x,gain,-1*offset_green))
green = green - 1.0
return [blue * 255,green * 255,red * 255]
def _get_color(value):
t = (value + ABS_LATENT_MAX_VALUE) / (ABS_LATENT_MAX_VALUE * 2.0)
color = color_bar_rgb(t)
return color
def create_color_panel(latent_spaces):
images = []
for z in latent_spaces:
p = np.zeros((PANEL_HEIGHT, PANEL_WIDTH, 3))
color = _get_color(z)
p += color[::-1]
p = np.clip(p, 0, 255)
images.append(p)
panel = np.concatenate(images, axis=1)
return panel
###Output
_____no_output_____
###Markdown
Create GUI
###Code
image = widgets.Image(format='jpeg', width=320, height=240)
resize = widgets.Image(format='jpeg', width=160, height=80)
result = widgets.Image(format='jpeg', width=160, height=80)
camera_link = traitlets.dlink((camera,'value'), (image,'value'), transform=bgr8_to_jpeg)
color_bar = widgets.Image(format='jpeg', width=32*PANEL_WIDTH, height=10*PANEL_HEIGHT)
display(image)
display(widgets.HBox([resize,result]))
display(color_bar)
###Output
_____no_output_____
###Markdown
Start main process
###Code
def vae_process(change):
image = change['new']
image = preprocess(image)
resize.value = rgb8_to_jpeg(np.transpose(np.uint8(image*255),[1,2,0]))
z, _ ,_ = vae.encode(torch.stack((image,image),dim=0)[:-1].to(device))
reconst = vae.decode(z)
reconst = reconst.detach().cpu()[0].numpy()
reconst = np.transpose(np.uint8(reconst*255),[1,2,0])
result.value = rgb8_to_jpeg(reconst)
latent_space = z.detach().cpu().numpy()[0]
color_bar.value = rgb8_to_jpeg(create_color_panel(latent_space))
vae_process({'new': camera.value})
camera.observe(vae_process, names='value')
###Output
_____no_output_____
###Markdown
Cleanup process
###Code
camera.unobserve(vae_process, names='value')
camera_link.unlink()
###Output
_____no_output_____ |
notebooks/todo/DopplerSolveTwoComponentsVeryHard.ipynb | ###Markdown
Doppler Solve: Two Components Setup
###Code
%matplotlib inline
%run notebook_setup.py
import starry
from pathlib import Path
starry_path = Path(starry.__file__).parents[0]
starry.config.lazy = True
starry.config.quiet = True
import numpy as np
import matplotlib.pyplot as plt
import starry
import george
import pymc3 as pm
import pymc3_ext as pmx
import theano.tensor as tt
from tqdm.auto import tqdm
###Output
_____no_output_____
###Markdown
Generate
###Code
# Settings
flux_err = 1e-4
ydeg = 15
nt = 16
inc = 40
veq = 60000
wav = np.linspace(642.85, 643.15, 200)
wav0 = np.linspace(642.75, 643.25, 200)
u = [0.5, 0.25]
# True intensity ratio (spot / photosphere)
ratio = 0.5
# True spectra (photosphere and spot)
spectrum1 = 1.0 - 0.925 * np.exp(-0.5 * (wav0 - 643.0) ** 2 / 0.0085 ** 2)
spectrum2 = (
1.0
- 0.63 * np.exp(-0.5 * (wav0 - 642.97) ** 2 / 0.0085 ** 2)
- 0.6 * np.exp(-0.5 * (wav0 - 643.08) ** 2 / 0.0085 ** 2)
)
# Prior on the spectra
spectral_mean1 = np.ones_like(wav0)
spectral_mean2 = np.ones_like(wav0)
spectral_cov1 = 1e-3 * np.ones_like(wav0)
spectral_cov2 = 1e-3 * np.ones_like(wav0)
# Plot them
fig, ax = plt.subplots(2, figsize=(12, 6), sharex=True, sharey=True)
ax[0].plot(wav0, spectrum1, "k-", label="true")
ax[0].plot(wav0, spectral_mean1, "C0-", label="prior")
ax[0].fill_between(
wav0,
spectral_mean1 - np.sqrt(spectral_cov1),
spectral_mean1 + np.sqrt(spectral_cov1),
color="C0",
alpha=0.3,
)
ax[0].legend()
ax[1].plot(wav0, spectrum2, "k-", label="true")
ax[1].plot(wav0, spectral_mean2, "C1-", label="prior")
ax[1].fill_between(
wav0,
spectral_mean2 - np.sqrt(spectral_cov2),
spectral_mean2 + np.sqrt(spectral_cov2),
color="C1",
alpha=0.3,
)
ax[1].legend()
ax[1].set_xlabel("rest wavelength [nm]")
ax[0].set_ylabel("spectrum 1")
ax[1].set_ylabel("spectrum 2");
# Maps
image1 = np.mean(
np.flipud(plt.imread(starry_path / "img" / "spot.png"))[:, :, :3], axis=2
)
image2 = 1 - image1
# Plot them
fig, ax = plt.subplots(1, 2)
ax[0].imshow(image1, origin="lower", cmap="plasma", vmin=0, vmax=1)
im = ax[1].imshow(image2, origin="lower", cmap="plasma", vmin=0, vmax=1)
for axis in ax:
axis.set_xticks([])
axis.set_yticks([])
ax[0].set_title("map 1")
ax[1].set_title("map 2")
plt.colorbar(im, ax=ax, shrink=0.55);
# Instantiate
map = starry.DopplerMap(
ydeg=ydeg,
udeg=len(u),
nc=2,
veq=veq,
inc=inc,
nt=nt,
wav=wav,
wav0=wav0,
lazy=False,
vsini_max=40000,
)
map.load(
maps=[image1, image2],
spectra=[spectrum1, ratio * spectrum2],
smoothing=0.075,
)
for n in range(len(u)):
map[1 + n] = u[n]
map.show()
# Generate the dataset
flux = map.flux(normalize=True)
flux += flux_err * np.random.randn(*flux.shape)
# Plot it
plt.figure(figsize=(3, 6))
plt.plot(
wav, flux.T + np.linspace(0, 1, map.nt).reshape(1, -1), color="k", lw=1
)
plt.xlabel("wavelength [nm]")
plt.ylabel("intensity");
plt.plot(flux.T);
###Output
_____no_output_____
###Markdown
Solve: Uniform prior
###Code
with pm.Model() as model:
# Instantiate a uniform map
map = starry.DopplerMap(
ydeg=ydeg,
udeg=len(u),
nc=2,
veq=veq,
inc=inc,
nt=nt,
wav=wav,
wav0=wav0,
lazy=True,
vsini_max=40000,
)
for n in range(len(u)):
map[1 + n] = u[n]
# SHT matrix: converts from pixels to Ylms
A = map.sht_matrix(smoothing=0.075)
npix = A.shape[1]
# Prior on the maps
p = pm.Uniform("p", lower=0.0, upper=1.0, shape=(npix,))
amp = pm.Uniform("amp", lower=0.0, upper=1.0)
y1 = amp * tt.dot(A, p)
y2 = amp * tt.dot(A, (1 - p))
map._y = tt.concatenate(
(tt.reshape(y1, (-1, 1)), tt.reshape(y2, (-1, 1))), axis=1
)
# Prior on the intensity ratio
r = pm.Uniform("r", lower=0.0, upper=1.0)
# Prior on the spectra
np.random.seed(0)
"""
spectrum1 = pm.Bound(pm.Normal, upper=1.05)(
"spectrum1",
mu=spectral_mean1,
sigma=np.sqrt(spectral_cov1),
shape=(map.nw0,),
testval=1 - np.sqrt(spectral_cov1) * np.abs(np.random.randn(map.nw0)),
)
spectrum2 = pm.Bound(pm.Normal, upper=1.05)(
"spectrum2",
mu=spectral_mean2,
sigma=np.sqrt(spectral_cov2),
shape=(map.nw0,),
testval=1 - np.sqrt(spectral_cov1) * np.abs(np.random.randn(map.nw0)),
)
"""
spectrum1 = pm.Bound(pm.Laplace, lower=0, upper=1 + 1e-4)(
"spectrum1",
mu=1.0,
b=1e-3,
shape=(map.nw0,),
testval=1 - 3e-2 * np.abs(np.random.randn(map.nw0)),
)
spectrum2 = pm.Bound(pm.Laplace, lower=0, upper=1 + 1e-4)(
"spectrum2",
mu=1.0,
b=1e-3,
shape=(map.nw0,),
testval=1 - 3e-2 * np.abs(np.random.randn(map.nw0)),
)
map.spectrum = tt.concatenate(
(tt.reshape(spectrum1, (1, -1)), r * tt.reshape(spectrum2, (1, -1))),
axis=0,
)
# Compute the model
flux_model = map.flux()
# Likelihood term
pm.Normal(
"obs",
mu=tt.reshape(flux_model, (-1,)),
sd=flux_err,
observed=flux.reshape(
-1,
),
)
niter = 5000
lr = 1e-1
loss = []
best_loss = np.inf
map_soln = model.test_point
with model:
for obj, point in tqdm(
pmx.optim.optimize_iterator(
pmx.optim.Adam(lr=lr), niter, start=map_soln
),
total=niter,
):
loss.append(obj)
if obj < best_loss:
best_loss = obj
map_soln = point
loss = np.array(loss)
logloss = np.log10(loss)
logloss[loss < 0] = -np.log10(-loss[loss < 0])
plt.plot(np.arange(len(loss)), logloss, lw=1);
with model:
map.visualize(backend="matplotlib", point=map_soln);
###Output
_____no_output_____ |
python_basico.ipynb | ###Markdown
Source: https://www.w3schools.com/python/python_examples.asp
###Code
#docstring
"""
This is how to write a
multi-line string
"""
# display
print("hello")
###Output
hello
###Markdown
variables
###Code
x = 5
y = "Juan"
print(x)
print(y)
###Output
5
Juan
<class 'int'>
<class 'float'>
<class 'complex'>
###Markdown
variable types
###Code
x1 = 1
y1 = 2.8
z1 = 1j
print(type(x1))
print(type(y1))
print(type(z1))
###Output
<class 'int'>
<class 'float'>
<class 'complex'>
###Markdown
scientific notation
###Code
x = 35e3
y = 12E4
z = -87.7e100
print(x)
print(type(x))
print(y)
print(type(y))
print(z)
print(type(z))
###Output
35000.0
<class 'float'>
120000.0
<class 'float'>
-8.77e+101
<class 'float'>
###Markdown
casting to int / converting types to integer
###Code
x = int(1)
y = int(2.8)
z = int("3")
print(x)
print(y)
print(z)
###Output
1
2
3
###Markdown
casting to float / converting to a floating-point number
###Code
x = float(1)
y = float(2.8)
z = float("3")
w = float("2.3")
print(x)
print(y)
print(z)
print(w)
###Output
1.0
2.8
3.0
2.3
###Markdown
cast string & convertir a cadena o palabra
###Code
x = str("1")
y = str(2)
z = str(3.0)
print(x)
print(y)
print(z)
###Output
1
2
3.0
###Markdown
String
###Code
a = "la primera letra"
print(a[1])
b = "devuelve un substring"
print(b[2:5])
c = " elimina los espacios al principio y al final "
print(c.strip())
d = "longitud del string"
print(len(d))
e = "minusculas"
print(e.lower())
d = "mayusculas"
print(d.upper())
f = "reemplaza"
print(f.replace("reempl", "most"))
g = "corta, por un simbolo y devuelve un diccionario"
print(g.split(","))
###Output
a
vue
elimina los espacios al principio y al final
19
minusculas
MAYUSCULAS
mostaza
['corta', ' por un simbolo y devuelve un diccionario']
###Markdown
Operators
###Code
x = 5
y = 3
print(x + y)
x1 = 5
y1 = 3
print(x1 - y1)
x2 = 5
y2 = 3
print(x2 * y2)
x3 = 12
y3 = 3
print(x3 / y3)
x4 = 5
y4 = 2
print(x4 % y4)
# assignment
x = 5
print(x)
###Output
8
2
15
4.0
1
5
###Markdown
Python Operators Arithmetic operators
###Code
2+2 # addition
3-1 # subtraction
2*2 # multiplication
5/2 # division
2**2 # exponentiation
10%3 # remainder (modulo)
10//3 # integer (floor) division
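# Note: a notebook cell only displays the value of its last expression automatically;
# wrap the earlier ones in print() to see every result, e.g. print(2 + 2).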
###Output
_____no_output_____
###Markdown
Comparison operators
###Code
4<5 #menor que
4>5 #mayor que
4<=5 #menor o igual
4>=5 #mayor o igual
4==5 #igual a
4!=5 #diferente de
###Output
_____no_output_____
###Markdown
Logical operators```and or not```and: all conditions must be true for the statement to be true. or: at least one condition must be true for the statement to be true. not: negates the truth value.
###Code
4<5 and 5>6
4<5 and 5<6
4<5 or 5>6
4>5 or 5>6
not 4<5
###Output
_____no_output_____
###Markdown
Data types ingresos: number; float. edad: number; int. estrato: number; int. trabajo: binary; bool. nivel_formacion: text; str. años_escolaridad: number; int. regimen_salud: text; str.
###Code
type(4) # integer
type(4.5) # float (decimal)
type("carro") # string (sequence of characters)
type(False) # boolean
###Output
_____no_output_____
###Markdown
Variables* Variable names in lowercase* Words separated by an underscore* Do not start with a number* The name should be descriptive
###Code
x = 4
y = True
z = 4.5
m = 'carro'
x, y, z, m
n = 'azul'
m + n
###Output
_____no_output_____
###Markdown
Data structures Lists```[]```
###Code
lista_vacia = []
type(lista_vacia)
lista = [1,3,6,7,8,12,4,7,2,4,98]
lista.append(7) # add to the end of the list
lista
lista.count(7) # count how many times the element appears in the list
lista.extend([3,4,5,6,7,8]) # add more than one element
lista
lista.index(4) # ask for the position of the element
lista[4]
lista.insert(0, 10) # insert an element at a position (position, element)
lista
lista.remove(4) # remove the element
lista
lista.pop() # remove the last element
lista.pop(4) # remove an element by position
lista.sort() # sort in ascending order
lista
lista.sort(reverse=True) # sort in descending order
lista
lista.clear()
lista
###Output
_____no_output_____
###Markdown
Tuples```()```
###Code
tupla = (3,4,6,7,21,9,9,9,5,3,6)
tupla.count(9) # count how many times the element appears in the tuple
tupla.index(4)
tupla[10]
type(tupla)
###Output
_____no_output_____
###Markdown
Dictionaries```{'key':'values'}```
###Code
dict_1 = {'key1':'values_1','key2':'values_2','key3':'values_3'}
dict_1['key3']
dict_2 = {'Idiomas':['es','en','it','po','fr','de','sw'],
'Países':['España','USA','Italia','Brasil','Francia','Alemania','Kenia'],
'Capitales':['Madrid','Washington','Roma','Brasilia','Paris','Berlín','Nairobi']}
dict_2['Idiomas'].append('ru')
dict_2['Países'].append('Rusia')
dict_2['Capitales'].append('Moscú')
dict_2.items()
dict_2.keys()
dict_2.values()
dict_2.clear()
import pandas as pd
df = pd.DataFrame(dict_2)
df
###Output
_____no_output_____
###Markdown
Functions Built-in functions
###Code
print('Hola, mundo') # print the element
print(4)
print(x)
input()
nombre = input()
apellido = input()
edad = input()
edad
int(edad) # convert to an integer
float(edad) # convert to a float
str(3241243465) # convert to a string
list(range(0,10,2)) # start, stop, step size
list(range(10))
list(range(2,10))
lista_valores = list(range(20))
lista_valores
max(lista_valores) # largest number
min(lista_valores) # smallest number
sum(lista_valores) # sum of the elements
abs(-3) # absolute value
n = 12341.234625865
round(n, 3) # rounding function (number to round, number of digits)
len(lista_valores) # how many elements the list has
###Output
_____no_output_____
###Markdown
Functions```def function_name(arguments): function body```
###Code
def suma(a,b):
return a+b
suma(3,4)
def descriptivo(a):
media = sum(a)/len(a)
return sum(a), len(a), media
descriptivo(lista_valores)
###Output
_____no_output_____
###Markdown
Conditionals```if elif else```
###Code
a = 2
b = 4
if a>b:
print(f'{a} es mayor que {b}')
elif a<b:
print(f'{a} es menor que {b}')
else:
print(f'{a} es igual a {b}')
def tipo(a):
if type(a)==int:
return 'entero'
elif type(a)==str:
return 'texto'
elif type(a)==float:
return 'decimal'
else:
return 'tipo diferente de dato'
def par_impar(a):
if type(a)==int:
if a%2==0:
print(f'{a} es par')
if a<10:
print(f'{a} es menor a 10.')
elif a>=10 and a<25:
print(f'{a} es mayor o igual a 10.')
else:
print(f'{a} es mayor a 25.')
else:
print(f'{a} es impar')
else:
print(f'{a} no es un entero es un {tipo(a)}. La función solo trabaja con enteros.')
par_impar(26)
5%2
par_impar(8.8)
tipo(7.9)
###Output
_____no_output_____
###Markdown
For loops```for i in iterable: loop body```
###Code
lista = list(range(10))
lista
for numero in lista:
print(numero)
lista_materias = ['mate1','mate2','mate3','micro1','micro2','micro3','macro1','macro2','macro3']
lista_vistas = ['mate1','mate2','micro1','micro2','macro1']
for materia in lista_materias:
if materia in lista_vistas:
print(f'Ya vi {materia}')
else:
print(f'No he visto {materia}')
for numero in lista:
par_impar(numero)
semestre_materias = {'primero':['mate1','fundamentos','historia'],
'segundo':['mate2','micro1','historia del pensamiento'],
'tercero':['mate3','micro2','macro1'],
'cuarto':['economate','marcro2','tecnicas']}
for semestre in semestre_materias.keys():
for materia in semestre_materias[semestre]:
print(semestre,materia)
for i in range(3):
for j in range(3):
print(i,j)
semestre_materias['segundo']
semestre_materias.values()
###Output
_____no_output_____
###Markdown
```break continue```
###Code
for numero in range(20):
if numero%2==0:
continue
elif numero==17:
break
else:
print(f'{numero} si va')
lista = [1,2,3,4]
lista_2 = [5,6,7,8]
for i,j in zip(lista,lista_2):
print(i,j)
for i in enumerate(lista_2):
print(i)
###Output
(0, 5)
(1, 6)
(2, 7)
(3, 8)
###Markdown
While loops```while condition: loop body```
###Code
contador = 0
lista = []
while True:
lista.append(1)
if len(lista)==30:
break
len(lista)
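# Equivalent condition-based loop (illustrative sketch):
# contador = 0
# while contador < 30:
#     contador += 1
# contador  # -> 30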
###Output
_____no_output_____
###Markdown
Catching errors```try:except:```
###Code
def dividir(a,b):
try:
print(a/b)
except ZeroDivisionError:
print('Error')
print('-------')
print('El segundo elemento no puede ser cero.')
except TypeError:
print('Error')
print('-------')
print('Ambos elementos deben ser números.')
print('División realizada')
dividir(4,2)
dividir(60,2)
dividir(120,60)
dividir(23.45,3.4)
dividir('u',0)
import math as mt
mt.sqrt(64)
import statistics as st
lista = [1,23,4,5,6,7,6,5,4,34,5,6,7,8,34,23,21,45]
st.mean(lista)
st.stdev(lista)
st.variance(lista)
###Output
_____no_output_____ |
ITIDeep1.ipynb | ###Markdown
###Code
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from keras.models import model_from_yaml
batch_size = 128
num_classes = 10
epochs = 2
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
model.save("model.h5")
print("Saved model to disk")
# make a prediction for a new image.
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.models import load_model
# load and prepare the image
def load_image(filename):
# load the image
img = load_img(filename, grayscale=True, target_size=(28, 28))
# convert to array
img = img_to_array(img)
# reshape into a single sample with 1 channel
img = img.reshape(1, 28, 28, 1)
# prepare pixel data
img = img.astype('float32')
img = img / 255.0
return img
# load an image and predict the class
def run_example():
# load the image
img = load_image('/content/sample_image.png')
# load model
model = load_model('/content/model.h5')
# predict the class
result = model.predict_classes(img)
print(result[0])
# entry point, run the example
run_example()
###Output
_____no_output_____
###Markdown
Let's now move on to Fashion-MNIST.
###Code
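# Illustrative sketch (assumption: keras.datasets.fashion_mnist is available in this Keras version).
# Fashion-MNIST images are 28x28 grayscale like MNIST, so the same preprocessing and model apply;
# the reshape below assumes the 'channels_last' image data format.
from keras.datasets import fashion_mnist

(fx_train, fy_train), (fx_test, fy_test) = fashion_mnist.load_data()
fx_train = fx_train.reshape(fx_train.shape[0], img_rows, img_cols, 1).astype('float32') / 255
fx_test = fx_test.reshape(fx_test.shape[0], img_rows, img_cols, 1).astype('float32') / 255
fy_train = keras.utils.to_categorical(fy_train, num_classes)
fy_test = keras.utils.to_categorical(fy_test, num_classes)
# the CNN defined above could then be retrained, e.g.:
# model.fit(fx_train, fy_train, batch_size=batch_size, epochs=epochs, validation_data=(fx_test, fy_test))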
###Output
_____no_output_____ |
lab02.1_PdM_Model_Development/train_basic_PdM_model.ipynb | ###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Building a basic predictive maintenance model Simply put, **predictive maintenance (PdM)** is about pre-emptively finding and fixing flaws in a system (as long as it collects data over time, using sensors for example) in order to reduce downtime. Given a failure in some component or part of the system, we are asking how likely it is that this would result in system failure and downtime soon after. Loading and examining the data
###Code
import os # standard lib for OS operations
import urllib.request # for downloading data
# plotting libs
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(rc={'figure.figsize':(15,8)}) # set figure size
# ML classifiers and the like
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn import svm
# metrics for evaluating results
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
os.makedirs('../data', exist_ok = True)
container = 'https://sethmottstore.blob.core.windows.net/predmaint/'
urllib.request.urlretrieve(container + 'telemetry.csv', filename='../data/telemetry.csv')
urllib.request.urlretrieve(container + 'maintenance.csv', filename='../data/maintenance.csv')
urllib.request.urlretrieve(container + 'machines.csv', filename='../data/machines.csv')
urllib.request.urlretrieve(container + 'failures.csv', filename='../data/failures.csv')
# urllib.request.urlretrieve(container + 'errors.csv', filename='../data/errors.csv')
urllib.request.urlretrieve(container + 'anoms.csv', filename='../data/anoms.csv')
urllib.request.urlretrieve(container + 'telemetry_w_anoms.csv', filename='../data/telemetry_w_anoms.csv')
###Output
_____no_output_____
###Markdown
The relevant data sources for predictive maintenance include, but are not limited to: - **Machine operating conditions:** data of the equipment health over time (usually sensor-based and streaming). We will refer to this data as machine *telemetry data*. - **Error history:** this data contains logs of *non-breaking* errors that happen throughout a machine's operation and which parts of the machine they came from - **Failure history:** this data contains logs of severe errors that broke the machine down (requiring maintenance to operate again) and parts of the machine that caused it - **Maintenance/repair history:** what parts were fixed/replaced (as part of scheduled maintenance or due to failure) and when - **Equipment metadata:** anything we know about equipment and parts (such as make, model, etc.) Quiz Pick two of the use cases [mentioned earlier](#usecases), and provide examples of the four kinds of data needed to perform PdM for those use cases.
###Code
# write solution here
###Output
_____no_output_____
###Markdown
From now on we will adopt the following consistent terminology to avoid confusion:- A system as a whole will be called a **machine** and its parts are called **components**- A machine can experience **errors** when anomalies happen. Errors do NOT result in shutdown, and they are NOT tied to any particular components, but they can cause one or several components to *eventually* fail.- A machine can experience **failure** when one of its components shuts down. This requires the component to be replaced before the machine can be operational again.- For our purposes, **maintenance** means a component was replaced. This can be either as part of a routine schedule or because the component failed (prior to its scheduled maintenance). Let's now begin loading all the data and looking at the kind of information it contains. We begin with the telemetry data.
###Code
import pandas as pd
df_telemetry = pd.read_csv('../data/telemetry.csv', header=0)
df_telemetry['datetime'] = pd.to_datetime(df_telemetry['datetime'], format="%m/%d/%Y %I:%M:%S %p")
df_telemetry.head()
###Output
_____no_output_____
###Markdown
Here's an example of the voltage for one machine over time.
###Code
ax = sns.lineplot(x="datetime", y="volt", data=df_telemetry.loc[df_telemetry['machineID'] == 1, ])
###Output
_____no_output_____
###Markdown
Next we have the error logs, which contains information about **non-breaking** errors that happened over the course of the machine running.
###Code
df_errors = pd.read_csv('../data/errors.csv', header=0)
df_errors['datetime'] = pd.to_datetime(df_errors['datetime'])
df_errors.head()
###Output
_____no_output_____
###Markdown
We used **anomaly detection** to find errors in the above dataset. There are four kinds of errors, one for each of the telemetry variables we collect, namely voltage, rotation, pressure and vibration. There is a lot to be said about the topic of error detection. For example, the errors we have here are univariate, meaning that we detect anomalies separately for each telemetry variable. We can also try a multi-variate anomaly detection algorithm. In this case, we could use a method like principal component analysis (PCA) to detect anomalies on the most important principal component(s). Lab A simple question we can ask is this: Do some errors happen more often in some machines than others? In other words, what is the distribution of errors across machines? Use `pd.crosstab` to answer the above question. Hint: use the `normalize` argument.
###Code
# write solution here
# %cat ../solutions/crosstab.py
###Output
_____no_output_____
###Markdown
With so many machines, it may be easier to answer our question visually. We can pass the output of `pd.crosstab` directly to `sns.heatmap` to generate a heat map. How would you answer the question based on the heatmap below? Please provide examples.
###Code
# write solution here
# %cat ../solutions/heatmap.py
###Output
_____no_output_____
###Markdown
End of lab We can visualize the errors that happen on a given machine to get a sense of how they spread over time.
###Code
df_subset = df_errors.loc[(df_errors.datetime.between('2015-01-01', '2016-01-01')) & (df_errors.machineID == 1)]
df_subset.head()
ax = sns.stripplot(x="datetime", y="errorID", data=df_subset, jitter=0)
del df_subset
###Output
_____no_output_____
###Markdown
Let's now move on to the dataset that logs failures. As we can see, failures are logged by component (although any component failing will result in the machine as a whole failing).
###Code
df_fails = pd.read_csv('../data/failures.csv', header=0)
df_fails['datetime'] = pd.to_datetime(df_fails['datetime'], format="%m/%d/%Y %I:%M:%S %p")
df_fails.head()
###Output
_____no_output_____
###Markdown
Now we look at the dataset of maintenance logs, which is also organized by component.
###Code
df_maint = pd.read_csv('../data/maintenance.csv', header=0)
df_maint['datetime'] = pd.to_datetime(df_maint['datetime'], format="%m/%d/%Y %I:%M:%S %p")
df_maint.head()
###Output
_____no_output_____
###Markdown
Lab For each component, find the percentage of replacements that are due to component failure (as opposed to scheduled maintenance).
###Code
# write solution here
# %cat ../solutions/percent_replacements.py
###Output
_____no_output_____
###Markdown
End of lab We can obtain the same answer in a more detailed way by doing an **outer join** of the maintenance logs and the failure logs to see how many records matched and where they came from (in `pd.merge` we can use the `indicator=True` argument to get a column called `_merge` that indicates if the keys were present in the left, right, or both datasets.
###Code
df_join = pd.merge(left=df_maint, right=df_fails.rename(columns={'failure':'comp'}), how = 'outer', indicator=True,
on=['datetime', 'machineID', 'comp'], validate='one_to_one')
df_join.head()
###Output
_____no_output_____
###Markdown
- If a record is present in the left dataset only, it represents a working component being replaced due to scheduled maintenance.- If a record is present in the right dataset only, it represents a failed component that was not replaced immediately. This case should be rare since it would result in downtime.- If a record is present in both datasets, it represents a failed component that was immediately replaced (we can also call this **un-**scheduled maintenance). We can run `pd.crosstab` to get counts for each of the above categories, broken up by component.
###Code
ct = pd.crosstab(df_join['comp'], df_join['_merge'], margins=True)
ct.rename(columns={"left_only":"not_failed_but_replaced", "right_only":"failed_not_replaced", "both":"failed_and_replaced"})
###Output
_____no_output_____
###Markdown
We can confirm that the second category is rare. This is usually the case in cases where downtime can result in significant costs. The last dataset we look at is the machine metadata. In this case, we only have information about the model and age of the machine.
###Code
df_machines = pd.read_csv('../data/machines.csv', header=0)
df_machines.head()
###Output
_____no_output_____
###Markdown
We are now ready to move on to the next phase, where we gradually combine our datasets into one dataset that will be used for modeling and contains the features we think will be useful. Feature engineering Our approach to getting the data ready for modeling will consist mainly of two things:- for the telemetry data, we get rolling aggregates (means and standard deviations) - for the error, failure and maintenance logs, we obtain the number of hours since each of these events happened. We then combine the results of the above two steps into one dataset, and add the machine metadata at the end. For the most part the feature engineering steps described above are relatively straightforward, but in some cases we need to process the data in creative ways to get the results we want.
###Code
df_left = df_telemetry.loc[:, ['datetime', 'machineID']] # we set this table aside so we can join all our results back onto it
# this will make it easier to automatically create features with the right column names
df_errors['errorID'] = df_errors['errorID'].apply(lambda x: int(x[-1]))
df_maint['comp'] = df_maint['comp'].apply(lambda x: int(x[-1]))
df_fails['failure'] = df_fails['failure'].apply(lambda x: int(x[-1]))
###Output
_____no_output_____
###Markdown
Let's begin with a function that will give us rolling mean and standard deviation for the telemetry data.
###Code
import numpy as np
def get_rolling_aggregates(df, colnames, suffixes, window, on, groupby, lagon = None):
"""
calculates rolling averages and standard deviations
Arguments:
df -- dataframe to run it on
colnames -- names of columns we want rolling statistics for
suffixes -- suffixes attached to the new columns (provide a list with strings)
window -- the lag over which rolling statistics are calculated
on -- the interval at which rolling statistics are calculated
groupby -- the column used to group results by
lagon -- the name of the datetime column used to compute lags (if none specified it defaults to row number)
Returns:
a dataframe with rolling statistics over a specified lag calculated over a specified interval
"""
rolling_colnames = [c + suffixes[0] for c in colnames]
df_rolling_mean = df.groupby(groupby).rolling(window=window, on=lagon)[colnames].mean()
df_rolling_mean.columns = rolling_colnames
df_rolling_mean.reset_index(inplace=True)
rolling_colnames = [c + suffixes[1] for c in colnames]
df_rolling_sd = df.groupby(groupby).rolling(window=window, on=lagon)[colnames].var()
df_rolling_sd.columns = rolling_colnames
df_rolling_sd = df_rolling_sd.apply(np.sqrt)
df_rolling_sd.reset_index(inplace=True, drop=True)
df_res = pd.concat([df_rolling_mean, df_rolling_sd], axis=1)
df_res = df_res.loc[df_res.index % on == on-1]
return df_res
###Output
_____no_output_____
###Markdown
We will apply this function twice, once to get rolling aggregates using a sliding window of 3 hours collected every 3 hours, and a second time to get rolling aggregates using a sliding window of 12 hours also collected every 3 hours.
###Code
cols_to_average = df_telemetry.columns[-4:]
df_telemetry_rolling_3h = get_rolling_aggregates(df_telemetry, cols_to_average,
suffixes = ['_ma_3', '_sd_3'],
window = 3, on = 3,
groupby = 'machineID', lagon = 'datetime')
# df_telemetry_rolling_3h.head(20)
df_telemetry_rolling_12h = get_rolling_aggregates(df_telemetry, cols_to_average,
suffixes = ['_ma_12', '_sd_12'],
window = 12, on = 3,
groupby = 'machineID', lagon = 'datetime')
# df_telemetry_rolling_12h.head(20)
###Output
_____no_output_____
###Markdown
We can combine both results into a single table and back-fill any missing values.
###Code
df_telemetry_rolling = pd.concat([df_telemetry_rolling_3h, df_telemetry_rolling_12h.drop(['machineID', 'datetime'], axis=1)],
axis=1, sort = True)
# df_telemetry_rolling.head()
df_telemetry_feat_roll = df_left.merge(df_telemetry_rolling, how="inner", on=['machineID', 'datetime'], validate = "one_to_one")
df_telemetry_feat_roll.fillna(method='bfill', inplace=True)
df_telemetry_feat_roll.head()
del df_telemetry_rolling, df_telemetry_rolling_3h, df_telemetry_rolling_12h
###Output
_____no_output_____
###Markdown
We now write a function that takes care of extracting features showing when events (errors, failures, replacements) occured. The data is then passed to the same 3-hour sliding filter as the telemetry data. Using a rolling max function, we compute if there was an event sometime in the last 3 hours. Finally we compute time elapsed since the last event. We use the following naming convention for the column names in the final dataset. For a given machine at a given date and time:- `e_1` is a flag indicating if error 1 occured, likewise for `e_2` through `e_5`- `de_1` is a numeric feature that represents the hours elapsed since the last time error 1 occured, likewise for `de_2` through `de_5`- `m_1` is a flag indicating if component 1 was replaced, likewise for `m_2` through `m_4`- `dm_1` is a numeric feature that represents the hours elapsed since the last time component 1 was replaced, likewise for `dm_2` through `dm_4`- `f_1` is a flag indicating if component 1 failed, likewise for `f_2` through `f_4`- `df_1` is a numeric feature that represents the hours elapsed since the last time component 1 failed, likewise for `df_2` through `df_4`Finally, we will use `f_1` through `f_4` to create the targets `y_1` through `y_4`:- `y_1` is a flag indicating if component 1 is about to fail, likewise for `y_2` through `y_4`
###Code
def get_datetime_diffs(df_left, df_right, catvar, prefix, window, on, lagon = None, diff_type = 'timedelta64[h]', validate = 'one_to_one', show_example = True):
"""
finds the last time an event (error, failure, maintenance) happened over a sliding window and the time elapsed since
Arguments:
df_left -- dataframe with keys
df_right -- dataframe with events (in this case: errors, failures, or maintenance)
catvar -- the column in df_right which encodes events
prefix -- prefix to add to new column names
window -- the lag over which rolling max is calculated
on -- the interval at which rolling max are calculated
lagon -- the name of the datetime column used to compute lags (if none specified it defaults to row number)
diff_type -- the format to convert time differences to (hours is the default)
validate -- set to 'one_to_one' to ensure the validity of the ensuing merge operation
show_example -- prints an example so we can check results
Returns:
the dataframe with the following columns for each event:
- a dummy column showing which event happened
- a corresponding difference column showing the time elapsed since the event last occured
"""
# create dummy columns and merge them with left data
keys = ['machineID', 'datetime']
df_dummies = pd.get_dummies(df_right[catvar], prefix=prefix)
df_wide = pd.concat([df_right.loc[:, keys], df_dummies], axis=1)
df_wide = df_wide.groupby(keys).sum().reset_index()
df = df_left.merge(df_wide, how="left", on=keys, validate = validate).fillna(0)
# run a rolling window through event flags to aggregate data
dummy_col_names = df_dummies.columns
df = df.groupby('machineID').rolling(window=window, on=lagon)[dummy_col_names].max()
df.reset_index(inplace=True)
df = df.loc[df.index % on == on-1]
df.reset_index(inplace=True, drop=True)
df_first = df.groupby('machineID', as_index=False).nth(0)
# calculate the time of the last event and the time elapsed since
for col in dummy_col_names:
whenlast, diffcol = 'last_' + col, 'd' + col
df.loc[:, col].fillna(value = 0, inplace=True)
# let's assume an event happened in row 0, so we don't have missing values for the time elapsed
df.iloc[df_first.index, df.columns.get_loc(col)] = 1
df.loc[df[col] == 1, whenlast] = df.loc[df[col] == 1, 'datetime']
# for the first occurence we don't know when it last happened, so we assume it happened then
df.iloc[df_first.index, df.columns.get_loc(whenlast)] = df.iloc[df_first.index, df.columns.get_loc('datetime')]
df[whenlast].fillna(method='ffill', inplace=True)
# df.loc[df[whenlast] > df['datetime'], whenlast] = np.nan
df.loc[df[whenlast] <= df['datetime'], diffcol] = (df['datetime'] - df[whenlast]).astype(diff_type)
df.drop(columns = whenlast, inplace=True)
if show_example == True:
col = np.random.choice(dummy_col_names, size = 1)[0]
idx = np.random.choice(df.loc[df[col] == 1, :].index.tolist(), size = 1)[0]
print('Example:\n')
print(df.loc[df.index.isin(range(idx-3, idx+5)), ['datetime', col, 'd' + col]])
return df
df_errors_feat_roll = get_datetime_diffs(df_left, df_errors, catvar='errorID', prefix='e', window = 6, lagon = 'datetime', on = 3)
df_errors_feat_roll.tail()
df_errors_feat_roll.loc[df_errors_feat_roll['machineID'] == 2, :].head()
df_maint_feat_roll = get_datetime_diffs(df_left, df_maint, catvar='comp', prefix='m',
window = 6, lagon = 'datetime', on = 3, show_example=False)
df_maint_feat_roll.tail()
df_maint_feat_roll.loc[df_maint_feat_roll['machineID'] == 2, :].head()
df_fails_feat_roll = get_datetime_diffs(df_left, df_fails, catvar='failure', prefix='f',
window = 6, lagon = 'datetime', on = 3, show_example=False)
df_fails_feat_roll.tail()
###Output
_____no_output_____
###Markdown
Combine features in one datasetWe now combine all four datasets into one dataset called `df_all`. First we check of course that all data frames have the same dimensions.
###Code
assert(df_errors_feat_roll.shape[0] == df_fails_feat_roll.shape[0] == df_maint_feat_roll.shape[0] == df_telemetry_feat_roll.shape[0])
df_all = pd.concat([df_telemetry_feat_roll,
df_errors_feat_roll.drop(columns=['machineID', 'datetime']),
df_maint_feat_roll.drop(columns=['machineID', 'datetime']),
df_fails_feat_roll.drop(columns=['machineID', 'datetime'])], axis = 1, verify_integrity=True)
# df_all = pd.merge(left=df_telemetry_feat_roll, right=df_all, on = ['machineID', 'datetime'], validate='one_to_one')
df_all = pd.merge(left=df_all, right=df_machines, how="left", on='machineID', validate = 'many_to_one')
del df_join, df_left
del df_telemetry_feat_roll, df_errors_feat_roll, df_fails_feat_roll, df_maint_feat_roll
###Output
_____no_output_____
###Markdown
Lab This may be a good place to stop and look at the correlation matrix for all the features we have in the data. We expect some obvious correlations, but let's see if we get any less obvious ones too. We will use `sns.heatmap` to visualize the correlation matrix.
###Code
# write solution here
# %cat ../solutions/correlation_matrix.py
###Output
_____no_output_____
###Markdown
Describe what you see in the correlation matrix. What would relatively high correlations between `m_1` through `m_4` suggest? What about the relatively high correlations between `m_1` through `m_4` and `f_1` through `f_4`? We can export the data for one of the machines to a CSV file. Export the subset of the data corresponding to the machine with ID 51 to CSV, then download the CSV file and open it in Excel to examine its content. Comment on what you see.
###Code
# write solution here
# %cat ../solutions/export_csv.py
###Output
_____no_output_____
###Markdown
End of lab Let's look at all the features we've so far added to the data.
###Code
df_all.info()
###Output
_____no_output_____
###Markdown
The last step in data prep is for us to create labels for the PdM model. You might wonder why we don't just use `f_1` through `f_4` as our labels, since they indicate when a machine failed. In fact we could, but PdM is not about predicting when a machine fails, but predicting when it's **about to fail**. So it's better to create labels indicate the state of the machine shortly prior to failure (how far back we want to go is something we need to determine). Lab This is a difficult coding exercise, so we've done part of the work for you already. So far we know that we each machine has four components, and we have a feature column for each, called `f_1`, `f_2`, `f_3`, and `f_4` which tell us when a component failed. Using these features, we want to create four labels called `y_1`, `y_2`, `y_3`, and `y_4` which tell us when a component is about to fail. To get more precise, initiate with `y_1 = 0` and for a given machine, let `y_1 = 1` whenever the date and time is anywhere between 3 hours and 2 days prior to a failure occuring. Similary compute `y_2`, `y_3`, and `y_4`. Places where you need to enter code are marked as ` YOUR CODE GOES HERE`. HINT: Use the `Timedelta` method for `datetime` column types.
###Code
for i in range(1, 5): # iterate over the four components
# find all the times a component failed for a given machine
df_temp = df_all.loc[df_all['f_' + str(i)] == 1, ['machineID', 'datetime']]
label = 'y_' + str(i) # name of target column (one per component)
## YOUR CODE GOES HERE (initialize y_i = 0)
for n in range(df_temp.shape[0]): # iterate over all the failure times
machineID, datetime = df_temp.iloc[n, :]
## YOUR CODE GOES HERE (set y_i = 1 whenever datetime is between 2 days and 3 hours prior to failure)
# %load ../solutions/compute_labels.py
###Output
_____no_output_____
###Markdown
To run the above script change the magic `%cat` to `%load` which will load the content of the script into the cell. Then select the cell a second time and run it. End of lab
###Code
import itertools
ct = pd.concat([pd.crosstab(df_all['y_' + str(i)], df_all['f_' + str(i)]) for i in range(1, 5)], axis=1)
ct.columns = ['f' + str(i) + '=' + str(j) for i, j in itertools.product(range(1, 5), range(2))]
ct
###Output
_____no_output_____
###Markdown
A word of caution here is in order. We should more carefully examine the distribution of the labels across machines. A brief glance at it for 10 randomly chosen machines shows that the labels `y_3` and `y_4` are not evenly distributed across machines: many machines contain only negative labels (because `f_3` and `f_4` are 0) while the machines with positive labels show a large number of failures. Problems like this can cause bias in our models, even when such differences can be legitimately explained away by differences in the underlying components. As an example of the kind of problem we may run into, consider this: If in the modeling phase we choose to split the data into training and testing by machine ID (assign some machine IDs to training and the rest to testing), we will need to ensure that machines with both positive and negative labels are well represented in both datasets.
###Code
import itertools
ct = pd.concat([pd.crosstab(df_all['machineID'],
df_all['y_' + str(i)]) for i in range(1, 5)], axis=1)
ct.columns = ['y_' + str(i) + '=' + str(j) for i, j in itertools.product(range(1, 5), range(2))]
ct.loc[np.random.randint(1, 100, 10)]
###Output
_____no_output_____
###Markdown
Modeling See [here](https://docs.microsoft.com/en-us/azure/machine-learning/team-data-science-process/cortana-analytics-playbook-predictive-maintenance#modeling-techniques-for-predictive-maintenance) for more about modeling approaches for PdM. We constructed a binary label that can be used to predict the probability that the system will fail in the next $T$ time steps (48 hours, based on the choice we made above). If explainability is also a goal here, then we should prefer models that can also help us explain the root cause of a failure. We have two ways of splitting the data into training and testing:- we choose a cut-off time $T_c$ such that the training data is all the data before $T_c - w$ and the test data is all the data after $T_c$, where $w$ is a safe margin that makes sure we avoid leakage into the training data when we label it- we split the training and test sets based on machine ID so that assets show up in one or the other split For your benefit, here is a list of [solution templates for predictive maintenance](https://docs.microsoft.com/en-us/azure/machine-learning/team-data-science-process/cortana-analytics-playbook-predictive-maintenance#solution-templates-for-predictive-maintenance).
###Code
df_all.columns
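# Illustrative sketch of the second splitting option mentioned above: split by machine ID with
# scikit-learn's GroupShuffleSplit so that every machine appears in only one of the two sets
# (the cells below use the time-based cut-off instead; the variable names here are illustrative).
from sklearn.model_selection import GroupShuffleSplit

gss = GroupShuffleSplit(n_splits=1, test_size=0.3, random_state=42)
train_idx, test_idx = next(gss.split(df_all, groups=df_all['machineID']))
df_train_by_machine, df_test_by_machine = df_all.iloc[train_idx], df_all.iloc[test_idx]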
###Output
_____no_output_____
###Markdown
Let's begin by splitting the data into training and test sets, based on a date cut-off.
###Code
X_drop = ['datetime', 'machineID', 'f_1', 'f_2', 'f_3', 'f_4', 'y_1', 'y_2', 'y_3', 'y_4', 'model']
Y_keep = ['y_1', 'y_2', 'y_3', 'y_4']
X_train = df_all.loc[df_all['datetime'] < '2015-10-01', ].drop(X_drop, axis=1)
y_train = df_all.loc[df_all['datetime'] < '2015-10-01', Y_keep]
X_test = df_all.loc[df_all['datetime'] > '2015-10-15', ].drop(X_drop, axis=1)
y_test = df_all.loc[df_all['datetime'] > '2015-10-15', Y_keep]
%store X_train ../data
%store X_test ../data
%store y_train ../data
%store y_test ../data
###Output
_____no_output_____
###Markdown
Lab Report the number of failures that occur in the training and test data. Do you think the split is adequate, or should we split based on a different cut-off? If so, do you recommend an earlier or a later cut-off?
###Code
# write solution here
# %cat ../solutions/train_test_failures.py
###Output
_____no_output_____
###Markdown
End of lab We can now train our model. We have chosen an MLP (multi-layer perceptron) as our model, which is a basic neural network.
###Code
pipeline = Pipeline([('scaler', StandardScaler()), ('classifier', MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 10), random_state=1))])
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_test)
###Output
_____no_output_____
###Markdown
Lab Print the confusion matrix and the precision and recall for each of the four failure types. You can use the functions `confusion_matrix` and `classification_report` to do the computation for you. The rows in the matrix represent actual cases of non-failure and failure. The columns represent predicted cases. How are precision and recall calculated from the confusion matrix?
###Code
# write solution here
# %load ../solutions/confusion_matrix.py
###Output
_____no_output_____
###Markdown
End of lab Finally, let's create ROC plots for each type of failure.
###Code
sns.set(rc={'figure.figsize':(18,5)})
from sklearn.metrics import auc, roc_curve
plt.close('all')
fig, axs = plt.subplots(ncols=4, sharex=True, sharey=True)
for y_idx in range(4): # choose one of the outcomes
fpr, tpr, thresholds = roc_curve(y_test.values[:, y_idx], y_pred[:, y_idx])
roc_auc = auc(fpr, tpr)
    axs[y_idx].set_title('ROC of y_' + str(y_idx + 1))
axs[y_idx].set_ylabel('TPR')
axs[y_idx].set_xlabel('FPR')
axs[y_idx].plot(fpr, tpr, 'b', label = 'AUC = {0:.2f}'.format(roc_auc))
axs[y_idx].legend(loc = 'lower right')
axs[y_idx].plot([0, 1], [0, 1],'r--')
plt.show()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Building a basic predictive maintenance model Simply put, **predictive maintenance (PdM)** is about pre-emptively finding and fixing flaws in a system (as long as it collects data over time, using sensors for example) in order to reduce downtime. Given a failure in some component or part of the system, we are asking how likely it is that this would result in system failure and downtime soon after. Loading and examining the data
###Code
import os # standard lib for OS operations
import urllib.request # for downloading data
# plotting libs
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(rc={'figure.figsize':(15,8)}) # set figure size
# ML classifiers and the like
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn import svm
# metrics for evaluating results
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
os.makedirs('../data', exist_ok = True)
container = 'https://sethmottstore.blob.core.windows.net/predmaint/'
urllib.request.urlretrieve(container + 'telemetry.csv', filename='../data/telemetry.csv')
urllib.request.urlretrieve(container + 'maintenance.csv', filename='../data/maintenance.csv')
urllib.request.urlretrieve(container + 'machines.csv', filename='../data/machines.csv')
urllib.request.urlretrieve(container + 'failures.csv', filename='../data/failures.csv')
urllib.request.urlretrieve(container + 'errors.csv', filename='../data/errors.csv')
urllib.request.urlretrieve(container + 'anoms.csv', filename='../data/anoms.csv')
#urllib.request.urlretrieve(container + 'telemetry_w_anoms.csv', filename='../data/telemetry_w_anoms.csv')
###Output
_____no_output_____
###Markdown
The relevant data sources for predictive maintenance include, but are not limited to: - **Machine operating conditions:** data on the equipment's health over time (usually sensor-based and streaming). We will refer to this data as machine *telemetry data*. - **Error history:** this data contains logs of *non-breaking* errors that happen throughout a machine's operation and which parts of the machine they came from - **Failure history:** this data contains logs of severe errors that broke the machine down (requiring maintenance to operate again) and parts of the machine that caused it - **Maintenance/repair history:** what parts were fixed/replaced (as part of scheduled maintenance or due to failure) and when - **Equipment metadata:** anything we know about equipment and parts (such as make, model, etc.) Quiz Pick two of the use cases [mentioned earlier](usecases), and provide examples of the four kinds of data needed to perform PdM for those use cases.
###Code
# write solution here
###Output
_____no_output_____
###Markdown
From now on we will adopt the following consistent terminology to avoid confusion:- A system as a whole will be called a **machine** and its parts are called **components**- A machine can experience **errors** when anomalies happen. Errors do NOT result in shutdown, and they are NOT tied to any particular components, but they can cause one or several components to *eventually* fail.- A machine can experience **failure** when one of its components shuts down. This requires the component to be replaced before the machine can be operational again.- For our purposes, **maintenance** means a component was replaced. This can be either as part of a routine schedule or because the component failed (prior to its scheduled maintenance). Let's now begin loading all the data and looking at the kind of information it contains. We begin with the telemetry data.
###Code
import pandas as pd
df_telemetry = pd.read_csv('../data/telemetry.csv', header=0)
df_telemetry['datetime'] = pd.to_datetime(df_telemetry['datetime'], format="%m/%d/%Y %I:%M:%S %p")
df_telemetry.head()
###Output
_____no_output_____
###Markdown
Here's an example of the voltage for one machine over time.
###Code
ax = sns.lineplot(x="datetime", y="volt", data=df_telemetry.loc[df_telemetry['machineID'] == 1, ])
###Output
_____no_output_____
###Markdown
Next we have the error logs, which contain information about **non-breaking** errors that happened over the course of the machine's operation.
###Code
df_errors = pd.read_csv('../data/anoms.csv', header=0)
df_errors['datetime'] = pd.to_datetime(df_errors['datetime'])
df_errors.head()
###Output
_____no_output_____
###Markdown
We used **anomaly detection** to find errors in the above dataset. There are four kinds of errors, one for each of the telemetry variables we collect, namely voltage, rotation, pressure and vibration. There is a lot to be said about the topic of error detection. For example, the errors we have here are univariate, meaning that we detect anomalies separately for each telemetry variable. We can also try a multi-variate anomaly detection algorithm. In this case, we could use a method like principal component analysis (PCA) to detect anomalies on the most important principal component(s). Lab A simple question we can ask is this: Do some errors happen more often in some machines than others? In other words, what is the distribution of errors across machines?Use `pd.crosstab` to answer the above question. Hint: use the `normalize` argument.
###Code
rep_dir = {"volt":"error1", "rotate":"error2","pressure":"error3","vibration":"error4"}
df_errors = df_errors.replace({"errorID": rep_dir})
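# normalize='columns' turns the raw counts into each machine's share of every error type (each column sums to 1)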
ct = pd.crosstab(df_errors['machineID'], df_errors['errorID'], rownames=['device'], colnames=['error'], normalize='columns')
%cat ../solutions/crosstab.py
###Output
ct = pd.crosstab(df_errors['machineID'], df_errors['errorID'], rownames=['device'], colnames=['error'], normalize='columns')
###Markdown
With so many machines, it may be easier to answer our question visually. We can pass the output of `pd.crosstab` directly to `sns.heatmap` to generate a heat map. How would you answer the question based on the heatmap below? Please provide examples.
###Code
ax = sns.heatmap(ct, xticklabels=2, yticklabels=False)
%cat ../solutions/heatmap.py
###Output
ax = sns.heatmap(ct, xticklabels=2, yticklabels=False)
###Markdown
End of lab We can visualize the errors that happen on a given machine to get a sense of how they spread over time.
###Code
df_subset = df_errors.loc[(df_errors.datetime.between('2015-01-01', '2016-01-01')) & (df_errors.machineID == 1)]
df_subset.head()
ax = sns.stripplot(x="datetime", y="errorID", data=df_subset, jitter=0)
del df_subset
###Output
_____no_output_____
###Markdown
Let's now move on to the dataset that logs failures. As we can see, failures are logged by component (although any component failing will result in the machine as a whole failing).
###Code
df_fails = pd.read_csv('../data/failures.csv', header=0)
df_fails['datetime'] = pd.to_datetime(df_fails['datetime'], format="%m/%d/%Y %I:%M:%S %p")
df_fails.head()
###Output
_____no_output_____
###Markdown
Now we look at the maintenance log dataset, which is also organized by component.
###Code
df_maint = pd.read_csv('../data/maintenance.csv', header=0)
df_maint['datetime'] = pd.to_datetime(df_maint['datetime'], format="%m/%d/%Y %I:%M:%S %p")
df_maint.head()
###Output
_____no_output_____
###Markdown
Lab For each component, find the percentage of replacements that are due to component failure (as opposed to scheduled maintenance).
###Code
df_counts = pd.DataFrame({'replacements' : df_maint.groupby(['comp']).count()['machineID'],
'failures' : df_fails.groupby(['failure']).count()['machineID']})
df_counts['percent_due_to_failure'] = df_counts['failures'] / df_counts['replacements']
df_counts
%cat ../solutions/percent_replacements.py
###Output
df_counts = pd.DataFrame({'replacements' : df_maint.groupby(['comp']).count()['machineID'],
'failures' : df_fails.groupby(['failure']).count()['machineID']})
df_counts['percent_due_to_failure'] = df_counts['failures'] / df_counts['replacements']
df_counts
###Markdown
End of lab We can obtain the same answer in a more detailed way by doing an **outer join** of the maintenance logs and the failure logs to see how many records matched and where they came from (in `pd.merge` we can use the `indicator=True` argument to get a column called `_merge` that indicates if the keys were present in the left, right, or both datasets).
###Code
df_join = pd.merge(left=df_maint, right=df_fails.rename(columns={'failure':'comp'}), how = 'outer', indicator=True,
on=['datetime', 'machineID', 'comp'], validate='one_to_one')
df_join.head()
###Output
_____no_output_____
###Markdown
- If a record is present in the left dataset only, it represents a working component being replaced due to scheduled maintenance.- If a record is present in the right dataset only, it represents a failed component that was not replaced immediately. This case should be rare since it would result in downtime.- If a record is present in both datasets, it represents a failed component that was immediately replaced (we can also call this **un-**scheduled maintenance). We can run `pd.crosstab` to get counts for each of the above categories, broken up by component.
###Code
ct = pd.crosstab(df_join['comp'], df_join['_merge'], margins=True)
ct.rename(columns={"left_only":"not_failed_but_replaced", "right_only":"failed_not_replaced", "both":"failed_and_replaced"})
###Output
_____no_output_____
###Markdown
We can confirm that the second category is rare. This is usually the case when downtime can result in significant costs. The last dataset we look at is the machine metadata. In this case, we only have information about the model and age of the machine.
###Code
df_machines = pd.read_csv('../data/machines.csv', header=0)
df_machines.head()
###Output
_____no_output_____
###Markdown
We are now ready to move on to the next phase, where we gradually combine our datasets into one dataset that will be used for modeling and contains the features we think will be useful. Feature engineering Our approach to getting the data ready for modeling will consist mainly of two things:- for the telemetry data, we get rolling aggregates (means and standard deviations) - for the error, failure and maintenance logs, we obtain the number of hours since each of these events happenedWe then combine the results of the above two steps into one dataset, and add the machine metadata at the end. For the most part the feature engineering steps described above are relatively straightforward, but in some cases we need to process the data in creative ways to get the results we want.
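As a quick illustration of the first idea, here is a minimal sketch on made-up numbers (not the real telemetry) of what a 3-point rolling mean and standard deviation look like:
###Code
# minimal sketch on made-up numbers: a 3-point rolling mean and rolling standard deviation
import pandas as pd
s = pd.Series([10.0, 12.0, 11.0, 15.0, 14.0, 13.0])
pd.DataFrame({'value': s,
              'rolling_mean_3': s.rolling(window=3).mean(),
              'rolling_sd_3': s.rolling(window=3).std()})
###Output
_____no_output_____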
###Code
df_left = df_telemetry.loc[:, ['datetime', 'machineID']] # we set this aside to this table to join all our results with
# this will make it easier to automatically create features with the right column names
df_errors['error'] = df_errors['errorID'].apply(lambda x: int(x[-1]))
df_maint['comp'] = df_maint['comp'].apply(lambda x: int(x[-1]))
df_fails['failure'] = df_fails['failure'].apply(lambda x: int(x[-1]))
###Output
_____no_output_____
###Markdown
Let's begin with a function that will give us rolling mean and standard deviation for the telemetry data.
###Code
import numpy as np
def get_rolling_aggregates(df, colnames, suffixes, window, on, groupby, lagon = None):
"""
calculates rolling averages and standard deviations
Arguments:
df -- dataframe to run it on
colnames -- names of columns we want rolling statistics for
suffixes -- suffixes attached to the new columns (provide a list with strings)
window -- the lag over which rolling statistics are calculated
on -- the interval at which rolling statistics are calculated
groupby -- the column used to group results by
lagon -- the name of the datetime column used to compute lags (if none specified it defaults to row number)
Returns:
a dataframe with rolling statistics over a specified lag calculated over a specified interval
"""
rolling_colnames = [c + suffixes[0] for c in colnames]
df_rolling_mean = df.groupby(groupby).rolling(window=window, on=lagon)[colnames].mean()
df_rolling_mean.columns = rolling_colnames
df_rolling_mean.reset_index(inplace=True)
rolling_colnames = [c + suffixes[1] for c in colnames]
df_rolling_sd = df.groupby(groupby).rolling(window=window, on=lagon)[colnames].var()
df_rolling_sd.columns = rolling_colnames
df_rolling_sd = df_rolling_sd.apply(np.sqrt)
df_rolling_sd.reset_index(inplace=True, drop=True)
df_res = pd.concat([df_rolling_mean, df_rolling_sd], axis=1)
df_res = df_res.loc[df_res.index % on == on-1]
return df_res
###Output
_____no_output_____
###Markdown
We will apply this function twice, once to get rolling aggregates using a sliding window of 3 hours collected every 3 hours, and a second time to get rolling aggregates using a sliding window of 12 hours also collected every 3 hours.
###Code
cols_to_average = df_telemetry.columns[-4:]
df_telemetry_rolling_3h = get_rolling_aggregates(df_telemetry, cols_to_average,
suffixes = ['_ma_3', '_sd_3'],
window = 3, on = 3,
groupby = 'machineID', lagon = 'datetime')
# df_telemetry_rolling_3h.head(20)
df_telemetry_rolling_12h = get_rolling_aggregates(df_telemetry, cols_to_average,
suffixes = ['_ma_12', '_sd_12'],
window = 12, on = 3,
groupby = 'machineID', lagon = 'datetime')
# df_telemetry_rolling_12h.head(20)
###Output
_____no_output_____
###Markdown
We can combine both results into a single table and back-fill any missing values.
###Code
df_telemetry_rolling = pd.concat([df_telemetry_rolling_3h, df_telemetry_rolling_12h.drop(['machineID', 'datetime'], axis=1)],
axis=1, sort = True)
# df_telemetry_rolling.head()
df_telemetry_feat_roll = df_left.merge(df_telemetry_rolling, how="inner", on=['machineID', 'datetime'], validate = "one_to_one")
df_telemetry_feat_roll.fillna(method='bfill', inplace=True)
df_telemetry_feat_roll.head()
del df_telemetry_rolling, df_telemetry_rolling_3h, df_telemetry_rolling_12h
###Output
_____no_output_____
###Markdown
We now write a function that takes care of extracting features showing when events (errors, failures, replacements) occurred. The data is then passed through the same 3-hour sliding filter as the telemetry data. Using a rolling max function, we compute whether there was an event sometime in the last few hours. Finally, we compute the time elapsed since the last event. We use the following naming convention for the column names in the final dataset. For a given machine at a given date and time:- `e_error1` is a flag indicating if error 1 occurred, likewise for `e_error2` through `e_error4`- `de_error1` is a numeric feature that represents the hours elapsed since the last time error 1 occurred, likewise for `de_error2` through `de_error4`- `m_1` is a flag indicating if component 1 was replaced, likewise for `m_2` through `m_4`- `dm_1` is a numeric feature that represents the hours elapsed since the last time component 1 was replaced, likewise for `dm_2` through `dm_4`- `f_1` is a flag indicating if component 1 failed, likewise for `f_2` through `f_4`- `df_1` is a numeric feature that represents the hours elapsed since the last time component 1 failed, likewise for `df_2` through `df_4`Finally, we will use `f_1` through `f_4` to create the targets `y_1` through `y_4`:- `y_1` is a flag indicating if component 1 is about to fail, likewise for `y_2` through `y_4`
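Before reading the full function below, here is a minimal sketch on made-up data (not part of the pipeline) of the two ideas it implements: a rolling max to flag whether an event happened in a recent window, and a forward-fill of the last event time to compute the hours elapsed since it.
###Code
# minimal sketch on made-up data: rolling-max event flag and hours elapsed since the last event
import pandas as pd
toy = pd.DataFrame({'datetime': pd.date_range('2015-01-01', periods=8, freq='H'),
                    'event': [0, 1, 0, 0, 0, 1, 0, 0]})
toy['event_in_window'] = toy['event'].rolling(window=3, min_periods=1).max()  # 1 if an event occurred in the last 3 rows
toy['last_event_time'] = toy.loc[toy['event'] == 1, 'datetime']  # event rows keep their timestamp, other rows are NaT
toy['last_event_time'] = toy['last_event_time'].fillna(method='ffill')  # carry the last event time forward
toy['hours_since_event'] = (toy['datetime'] - toy['last_event_time']).astype('timedelta64[h]')
toy
###Output
_____no_output_____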
###Code
def get_datetime_diffs(df_left, df_right, catvar, prefix, window, on, lagon = None, diff_type = 'timedelta64[h]', validate = 'one_to_one', show_example = True):
"""
finds the last time an event (error, failure, maintenance) happened over a sliding window and the time elapsed since
Arguments:
df_left -- dataframe with keys
df_right -- dataframe with events (in this case: errors, failures, or maintenance)
catvar -- the column in df_right which encodes events
prefix -- prefix to add to new column names
window -- the lag over which rolling max is calculated
on -- the interval at which rolling max are calculated
lagon -- the name of the datetime column used to compute lags (if none specified it defaults to row number)
diff_type -- the format to convert time differences to (hours is the default)
validate -- set to 'one_to_one' to ensure the validity of the ensuing merge operation
show_example -- prints an example so we can check results
Returns:
the dataframe with the following columns for each event:
- a dummy column showing which event happened
- a corresponding difference column showing the time elapsed since the event last occured
"""
# create dummy columns and merge them with left data
keys = ['machineID', 'datetime']
df_dummies = pd.get_dummies(df_right[catvar], prefix=prefix)
df_wide = pd.concat([df_right.loc[:, keys], df_dummies], axis=1)
df_wide = df_wide.groupby(keys).sum().reset_index()
df = df_left.merge(df_wide, how="left", on=keys, validate = validate).fillna(0)
# run a rolling window through event flags to aggregate data
dummy_col_names = df_dummies.columns
df = df.groupby('machineID').rolling(window=window, on=lagon)[dummy_col_names].max()
df.reset_index(inplace=True)
df = df.loc[df.index % on == on-1]
df.reset_index(inplace=True, drop=True)
df_first = df.groupby('machineID', as_index=False).nth(0)
# calculate the time of the last event and the time elapsed since
for col in dummy_col_names:
whenlast, diffcol = 'last_' + col, 'd' + col
df.loc[:, col].fillna(value = 0, inplace=True)
# let's assume an event happened in row 0, so we don't have missing values for the time elapsed
df.iloc[df_first.index, df.columns.get_loc(col)] = 1
df.loc[df[col] == 1, whenlast] = df.loc[df[col] == 1, 'datetime']
# for the first occurence we don't know when it last happened, so we assume it happened then
df.iloc[df_first.index, df.columns.get_loc(whenlast)] = df.iloc[df_first.index, df.columns.get_loc('datetime')]
df[whenlast].fillna(method='ffill', inplace=True)
# df.loc[df[whenlast] > df['datetime'], whenlast] = np.nan
df.loc[df[whenlast] <= df['datetime'], diffcol] = (df['datetime'] - df[whenlast]).astype(diff_type)
df.drop(columns = whenlast, inplace=True)
if show_example == True:
col = np.random.choice(dummy_col_names, size = 1)[0]
idx = np.random.choice(df.loc[df[col] == 1, :].index.tolist(), size = 1)[0]
print('Example:\n')
print(df.loc[df.index.isin(range(idx-3, idx+5)), ['datetime', col, 'd' + col]])
return df
df_errors_feat_roll = get_datetime_diffs(df_left, df_errors, catvar='errorID', prefix='e', window = 6, lagon = 'datetime', on = 3)
df_errors_feat_roll.tail()
df_errors_feat_roll.loc[df_errors_feat_roll['machineID'] == 2, :].head()
df_maint_feat_roll = get_datetime_diffs(df_left, df_maint, catvar='comp', prefix='m',
window = 6, lagon = 'datetime', on = 3, show_example=False)
df_maint_feat_roll.tail()
df_maint_feat_roll.loc[df_maint_feat_roll['machineID'] == 2, :].head()
df_fails_feat_roll = get_datetime_diffs(df_left, df_fails, catvar='failure', prefix='f',
window = 6, lagon = 'datetime', on = 3, show_example=False)
df_fails_feat_roll.tail()
###Output
_____no_output_____
###Markdown
Combine features in one datasetWe now combine all four datasets into one dataset called `df_all`. First, of course, we check that all data frames have the same number of rows.
###Code
assert(df_errors_feat_roll.shape[0] == df_fails_feat_roll.shape[0] == df_maint_feat_roll.shape[0] == df_telemetry_feat_roll.shape[0])
df_all = pd.concat([df_telemetry_feat_roll,
df_errors_feat_roll.drop(columns=['machineID', 'datetime']),
df_maint_feat_roll.drop(columns=['machineID', 'datetime']),
df_fails_feat_roll.drop(columns=['machineID', 'datetime'])], axis = 1, verify_integrity=True)
# df_all = pd.merge(left=df_telemetry_feat_roll, right=df_all, on = ['machineID', 'datetime'], validate='one_to_one')
df_all = pd.merge(left=df_all, right=df_machines, how="left", on='machineID', validate = 'many_to_one')
del df_join, df_left
del df_telemetry_feat_roll, df_errors_feat_roll, df_fails_feat_roll, df_maint_feat_roll
###Output
_____no_output_____
###Markdown
Lab This may be a good place to stop and look at the correlation matrix for all the features we have in the data. We expect some obvious correlations, but let's see if we get any less obvious ones too. We will use `sns.heatmap` to visualize the correlation matrix.
###Code
import seaborn as sns
corr = df_all.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
%cat ../solutions/correlation_matrix.py
###Output
import seaborn as sns
corr = df_all.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
###Markdown
Describe what you see in the correlation matrix. What would relatively high correlations between `m_1` through `m_4` suggest? What about the relatively high correlations between `m_1` through `m_4` and `f_1` through `f_4`? We can export the data for one of the machines to a CSV file. Export the subset of the data corresponding to the machine with ID 51 to CSV, then download the CSV file and open it in Excel to examine its content. Comment on what you see.
###Code
df_all.loc[(df_all['machineID'] == 51), :].sort_values(['datetime', 'machineID']).to_csv('bla.csv')
%cat ../solutions/export_csv.py
###Output
df_all.loc[(df_all['machineID'] == 51), :].sort_values(['datetime', 'machineID']).to_csv('bla.csv')
###Markdown
End of lab Let's look at all the features we've so far added to the data.
###Code
df_all.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 292033 entries, 0 to 292032
Data columns (total 44 columns):
datetime 292033 non-null datetime64[ns]
machineID 292033 non-null int64
volt_ma_3 292033 non-null float64
rotate_ma_3 292033 non-null float64
pressure_ma_3 292033 non-null float64
vibration_ma_3 292033 non-null float64
volt_sd_3 292033 non-null float64
rotate_sd_3 292033 non-null float64
pressure_sd_3 292033 non-null float64
vibration_sd_3 292033 non-null float64
volt_ma_12 292033 non-null float64
rotate_ma_12 292033 non-null float64
pressure_ma_12 292033 non-null float64
vibration_ma_12 292033 non-null float64
volt_sd_12 292033 non-null float64
rotate_sd_12 292033 non-null float64
pressure_sd_12 292033 non-null float64
vibration_sd_12 292033 non-null float64
e_error1 292033 non-null float64
e_error2 292033 non-null float64
e_error3 292033 non-null float64
e_error4 292033 non-null float64
de_error1 292033 non-null float64
de_error2 292033 non-null float64
de_error3 292033 non-null float64
de_error4 292033 non-null float64
m_1 292033 non-null float64
m_2 292033 non-null float64
m_3 292033 non-null float64
m_4 292033 non-null float64
dm_1 292033 non-null float64
dm_2 292033 non-null float64
dm_3 292033 non-null float64
dm_4 292033 non-null float64
f_1 292033 non-null float64
f_2 292033 non-null float64
f_3 292033 non-null float64
f_4 292033 non-null float64
df_1 292033 non-null float64
df_2 292033 non-null float64
df_3 292033 non-null float64
df_4 292033 non-null float64
model 292033 non-null object
age 292033 non-null int64
dtypes: datetime64[ns](1), float64(40), int64(2), object(1)
memory usage: 100.3+ MB
###Markdown
The last step in data prep is for us to create labels for the PdM model. You might wonder why we don't just use `f_1` through `f_4` as our labels, since they indicate when a machine failed. In fact we could, but PdM is not about predicting when a machine fails, but predicting when it's **about to fail**. So it's better to create labels that indicate the state of the machine shortly prior to failure (how far back we want to go is something we need to determine). Lab This is a difficult coding exercise, so we've done part of the work for you already. So far we know that each machine has four components, and we have a feature column for each, called `f_1`, `f_2`, `f_3`, and `f_4`, which tell us when a component failed. Using these features, we want to create four labels called `y_1`, `y_2`, `y_3`, and `y_4` which tell us when a component is about to fail. To be more precise, initialize `y_1 = 0` and, for a given machine, let `y_1 = 1` whenever the date and time is anywhere between 3 hours and 2 days prior to a failure occurring. Similarly compute `y_2`, `y_3`, and `y_4`. Places where you need to enter code are marked as ` YOUR CODE GOES HERE`. HINT: Use the `Timedelta` method for `datetime` column types.
###Code
for i in range(1, 5): # iterate over the four components
# find all the times a component failed for a given machine
df_temp = df_all.loc[df_all['f_' + str(i)] == 1, ['machineID', 'datetime']]
label = 'y_' + str(i) # name of target column (one per component)
## YOUR CODE GOES HERE (initialize y_i = 0)
for n in range(df_temp.shape[0]): # iterate over all the failure times
machineID, datetime = df_temp.iloc[n, :]
## YOUR CODE GOES HERE (set y_i = 1 whenever datetime is between 2 days and 3 hours prior to failure)
# %load ../solutions/compute_labels.py
for i in range(1, 5): # iterate over the four components
# find all the times a component failed for a given machine
df_temp = df_all.loc[df_all['f_' + str(i)] == 1, ['machineID', 'datetime']]
label = 'y_' + str(i) # name of target column (one per component)
df_all[label] = 0
for n in range(df_temp.shape[0]): # iterate over all the failure times
machineID, datetime = df_temp.iloc[n, :]
dt_end = datetime - pd.Timedelta('3 hours') # 3 hours prior to failure
dt_start = datetime - pd.Timedelta('2 days') # n days prior to failure
if n % 500 == 0:
print("a failure occured on machine {0} at {1}, so {2} is set to 1 between {4} and {3}".format(machineID, datetime, label, dt_end, dt_start))
df_all.loc[(df_all['machineID'] == machineID) &
(df_all['datetime'].between(dt_start, dt_end)), label] = 1
###Output
a failure occured on machine 1 at 2015-01-01 08:00:00, so y_1 is set to 1 between 2014-12-30 08:00:00 and 2015-01-01 05:00:00
a failure occured on machine 1 at 2015-01-01 08:00:00, so y_2 is set to 1 between 2014-12-30 08:00:00 and 2015-01-01 05:00:00
a failure occured on machine 82 at 2015-12-09 08:00:00, so y_2 is set to 1 between 2015-12-07 08:00:00 and 2015-12-09 05:00:00
a failure occured on machine 1 at 2015-01-01 08:00:00, so y_3 is set to 1 between 2014-12-30 08:00:00 and 2015-01-01 05:00:00
a failure occured on machine 1 at 2015-01-01 08:00:00, so y_4 is set to 1 between 2014-12-30 08:00:00 and 2015-01-01 05:00:00
###Markdown
To run the above script change the magic `%cat` to `%load` which will load the content of the script into the cell. Then select the cell a second time and run it. End of lab
###Code
import itertools
ct = pd.concat([pd.crosstab(df_all['y_' + str(i)], df_all['f_' + str(i)]) for i in range(1, 5)], axis=1)
ct.columns = ['f' + str(i) + '=' + str(j) for i, j in itertools.product(range(1, 5), range(2))]
ct
###Output
_____no_output_____
###Markdown
A word of caution is in order here. We should examine the distribution of the labels across machines more carefully. A brief glance at it for 10 randomly chosen machines shows that the labels `y_3` and `y_4` are unevenly distributed: many machines contain only negative labels (because `f_3` and `f_4` are 0), while the machines with positive labels show a large number of failures. Problems like this can cause bias in our models, even when such differences can be legitimately explained away by differences in the underlying components. As an example of the kind of problem we may run into, consider this: if in the modeling phase we choose to split the data into training and testing by machine ID (assign some machine IDs to training and the rest to testing), we will need to ensure that machines with both positive and negative labels are well represented in both datasets.
###Code
import itertools
ct = pd.concat([pd.crosstab(df_all['machineID'],
df_all['y_' + str(i)]) for i in range(1, 5)], axis=1)
ct.columns = ['y_' + str(i) + '=' + str(j) for i, j in itertools.product(range(1, 5), range(2))]
ct.loc[np.random.randint(1, 100, 10)]
###Output
_____no_output_____
###Markdown
Modeling See [here](https://docs.microsoft.com/en-us/azure/machine-learning/team-data-science-process/cortana-analytics-playbook-predictive-maintenancemodeling-techniques-for-predictive-maintenance) for more about modeling approaches for PdM. We constructed a binary label that can be used to predict the probability that the system will fail in the next $T$ time steps (48 hours, based on our specified choice). If explainability is also a goal here, then we should prefer models that can also help us explain the root cause of a failure.We have two ways of splitting the data into training and testing:- we choose a cut-off time $T_c$ such that the training data is all the data before $T_c - w$ and the test data is all the data after $T_c$, where $w$ is a safe margin that makes sure we avoid leakage into the training data when we label it- we split the training and test sets based on machine ID so that each asset shows up in only one of the two splits For your benefit, here is a list of [solution templates for predictive maintenance](https://docs.microsoft.com/en-us/azure/machine-learning/team-data-science-process/cortana-analytics-playbook-predictive-maintenancesolution-templates-for-predictive-maintenance).
###Code
df_all.columns
###Output
_____no_output_____
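###Markdown
As an aside, here is a minimal sketch of the second splitting strategy described above (splitting by machine ID rather than by date), using scikit-learn's `GroupShuffleSplit`. We do not use it in what follows, but it shows how each machine would be assigned entirely to either the training or the test set.
###Code
# sketch only (not used below): split by machine ID so that no machine appears in both sets
from sklearn.model_selection import GroupShuffleSplit
gss = GroupShuffleSplit(n_splits=1, test_size=0.2, random_state=0)
train_idx, test_idx = next(gss.split(df_all, groups=df_all['machineID']))
print(df_all.iloc[train_idx]['machineID'].nunique(), 'machines in training,',
      df_all.iloc[test_idx]['machineID'].nunique(), 'machines in testing')
###Output
_____no_output_____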
###Markdown
Let's begin by splitting the data into training and test sets, based on a date cut-off.
###Code
X_drop = ['datetime', 'machineID', 'f_1', 'f_2', 'f_3', 'f_4', 'y_1', 'y_2', 'y_3', 'y_4', 'model']
Y_keep = ['y_1', 'y_2', 'y_3', 'y_4']
X_train = df_all.loc[df_all['datetime'] < '2015-10-01', ].drop(X_drop, axis=1)
y_train = df_all.loc[df_all['datetime'] < '2015-10-01', Y_keep]
X_test = df_all.loc[df_all['datetime'] > '2015-10-15', ].drop(X_drop, axis=1)
y_test = df_all.loc[df_all['datetime'] > '2015-10-15', Y_keep]
%store X_train ../data
%store X_test ../data
%store y_train ../data
%store y_test ../data
###Output
Stored 'X_train' (DataFrame)
Stored 'X_test' (DataFrame)
Stored 'y_train' (DataFrame)
Stored 'y_test' (DataFrame)
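###Markdown
Note that the split above leaves a two-week gap between the training cut-off (October 1) and the start of the test period (October 15). This gap plays the role of the safe margin $w$ described earlier: the labels only look two days ahead, so no training label depends on anything that happens during the test period.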
###Markdown
Lab Report the number of failures that occur in the training and test data. Do you think the split is adequate, or should we split based on a different cut-off? If so, do you recommend an earlier or a later cut-off?
###Code
print(pd.DataFrame({"train": y_train.apply(sum, axis = 0), "test": y_test.apply(sum, axis = 0)}))
%cat ../solutions/train_test_failures.py
###Output
print(pd.DataFrame({"train": y_train.apply(sum, axis = 0), "test": y_test.apply(sum, axis = 0)}))
###Markdown
End of lab We can now train our model. We have chosen an MLP (multi-layer perceptron) as our model, which is a basic neural network.
###Code
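# scale the features, then fit a small MLP; y_train has four binary columns, so this is treated as a multilabel problem (one output per component)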
pipeline = Pipeline([('scaler', StandardScaler()), ('classifier', MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 10), random_state=1))])
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_test)
###Output
_____no_output_____
###Markdown
Lab Print the confusion matrix and the precision and recall for each of the four failure types. You can use the functions `confusion_matrix` and `classification_report` to do the computation for you. The rows in the matrix represent actual cases of non-failure and failure. The columns represent predicted cases. How are precision and recall calculated from the confusion matrix?
###Code
# write solution here
# %load ../solutions/confusion_matrix.py
print("confusion matrix:")
for y_idx in range(4):
print("---------------- for y_" + str(y_idx+1))
print(confusion_matrix(y_test.values[:, y_idx], y_pred[:, y_idx]))
print("\nclassification report:")
print(classification_report(y_test, y_pred))
print("AUC = {}".format(roc_auc_score(y_test, y_pred, average='weighted')))
###Output
confusion matrix:
---------------- for y_1
[[61907 115]
[ 431 147]]
---------------- for y_2
[[61534 76]
[ 926 64]]
---------------- for y_3
[[62022 170]
[ 250 158]]
---------------- for y_4
[[61803 168]
[ 408 221]]
classification report:
precision recall f1-score support
0 0.56 0.25 0.35 578
1 0.46 0.06 0.11 990
2 0.48 0.39 0.43 408
3 0.57 0.35 0.43 629
avg / total 0.51 0.23 0.29 2605
AUC = 0.6122623056675458
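###Markdown
To connect the two outputs above: in each confusion matrix the rows are the actual classes (non-failure, failure) and the columns the predicted ones, so the entries are [[TN, FP], [FN, TP]]. Precision is TP / (TP + FP) and recall is TP / (TP + FN). For `y_1`, for example, precision = 147 / (147 + 115) ≈ 0.56 and recall = 147 / (147 + 431) ≈ 0.25, which matches the first row of the classification report.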
###Markdown
End of lab Finally, let's create ROC plots for each type of failure.
###Code
sns.set(rc={'figure.figsize':(18,5)})
from sklearn.metrics import auc, roc_curve
plt.close('all')
fig, axs = plt.subplots(ncols=4, sharex=True, sharey=True)
for y_idx in range(4): # choose one of the outcomes
fpr, tpr, thresholds = roc_curve(y_test.values[:, y_idx], y_pred[:, y_idx])
roc_auc = auc(fpr, tpr)
    axs[y_idx].set_title('ROC of y_' + str(y_idx + 1))
axs[y_idx].set_ylabel('TPR')
axs[y_idx].set_xlabel('FPR')
axs[y_idx].plot(fpr, tpr, 'b', label = 'AUC = {0:.2f}'.format(roc_auc))
axs[y_idx].legend(loc = 'lower right')
axs[y_idx].plot([0, 1], [0, 1],'r--')
plt.show()
###Output
_____no_output_____ |
titanic/titaniclearningqi.ipynb | ###Markdown
Titanic data: Learning from disaster**Task**: predict survival of a passenger given his/her ticket class, name, gender, age, number of siblings / spouses aboard, number of parents / children aboard, ticket number, cabin number and port of embarkation**Notes:** - Based on the tutorial - Fix some bugs- Add cross-validation and grid search- Add Validation and Learning curvesPart I : Exploratory Data Analysis-------------------------
###Code
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
# visualization
import seaborn as sns
import matplotlib.pyplot as plt
# machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import GridSearchCV
#Learning curve
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import validation_curve
###Output
_____no_output_____
###Markdown
Step 1: Load data
###Code
#-----------------------------------------------------------
# Step 01: load data using panda
#-----------------------------------------------------------
train_df = pd.read_csv('../input/train.csv') # train set
test_df = pd.read_csv('../input/test.csv') # test set
combine = [train_df, test_df]
###Output
_____no_output_____
###Markdown
Step 2: Acquire and clean data
###Code
#-----------------------------------------------------------
# Step 02: Acquire and clean data
#-----------------------------------------------------------
train_df.head(5)
train_df.info()
train_df.describe()
train_df.describe(include=['O'])
###Output
_____no_output_____
###Markdown
Training data statistics: - 891 training samples - Age, Cabin, Embarked: incomplete data - Data type: - object: Name, Sex, Ticket, Cabin, Embarked - int64: PassengerId, Survived, Pclass, SibSp, Parch - float64: Age, Fare - Survival rate: 0.383838
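As an optional sanity check, these numbers can be recomputed directly from the training frame:
###Code
# optional sanity check: missing values per column and the overall survival rate
print(train_df.isnull().sum())
print(train_df['Survived'].mean())
###Output
_____no_output_____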
###Code
# remove Features: Ticket, Cabin
#train_df = train_df.drop(['Ticket', 'Cabin'], axis=1)
#test_df = test_df.drop(['Ticket', 'Cabin'], axis=1)
#combine = [train_df, test_df]
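# instead of dropping Cabin, reduce it to a flag: 0 = cabin letter known, 1 = unknown ('U')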
for dataset in combine:
dataset['Cabin'] = dataset['Cabin'].fillna('U')
dataset['Cabin'] = dataset.Cabin.str.extract('([A-Za-z])', expand=False)
for dataset in combine:
dataset['Cabin'] = dataset['Cabin'].map( {'A': 0, 'B': 0, 'C': 0, 'D': 0, 'E':0,
'F':0, 'G':0, 'T':0, 'U':1} ).astype(int)
train_df.head()
train_df = train_df.drop(['Ticket'], axis=1)
test_df = test_df.drop(['Ticket'], axis=1)
combine = [train_df, test_df]
# survival rate distribtion as a function of Pclass
train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# obtain Title from name (Mr, Mrs, Miss etc)
for dataset in combine:
dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\.', expand=False)
for dataset in combine:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Dona'],'Royalty')
dataset['Title'] = dataset['Title'].replace(['Mme'], 'Mrs')
dataset['Title'] = dataset['Title'].replace(['Mlle','Ms'], 'Miss')
dataset['Title'] = dataset['Title'].replace(['Capt', 'Col', 'Major','Rev'], 'Officer')
dataset['Title'] = dataset['Title'].replace(['Jonkheer', 'Don','Sir'], 'Royalty')
dataset.loc[(dataset.Sex == 'male') & (dataset.Title == 'Dr'),'Title'] = 'Mr'
dataset.loc[(dataset.Sex == 'female') & (dataset.Title == 'Dr'),'Title'] = 'Mrs'
# count the survival rate for different titles
train_df[['Title', 'Survived']].groupby(['Title'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# Covert 'Title' to numbers (Mr->1, Miss->2 ...)
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Royalty":5, "Officer": 6}
for dataset in combine:
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
# Remove 'Name' and 'PassengerId' in training data, and 'Name' in testing data
train_df = train_df.drop(['Name', 'PassengerId'], axis=1)
test_df = test_df.drop(['Name'], axis=1)
combine = [train_df, test_df]
# if age < 16, set 'Sex' to Child
for dataset in combine:
dataset.loc[(dataset.Age < 16),'Sex'] = 'Child'
# Covert 'Sex' to numbers (female:1, male:2)
for dataset in combine:
dataset['Sex'] = dataset['Sex'].map( {'female': 1, 'male': 0, 'Child': 2} ).astype(int)
train_df.head()
# Age distribution for different values of Pclass and gender
#grid = sns.FacetGrid(train_df, row='Pclass', col='Sex', size=2.2, aspect=1.6)
#grid.map(plt.hist, 'Age', bins=20)
#grid.add_legend()
# Fill missing Age values with the mean age for each Sex and Pclass combination
for dataset in combine:
dataset['Age']=dataset.groupby(['Sex', 'Pclass'])['Age'].transform(lambda x: x.fillna(x.mean())).astype(int)
# create Age bands and determine correlations with Survived
train_df['AgeBand'] = pd.cut(train_df['Age'], 5)
train_df[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False).mean().sort_values(by='AgeBand', ascending=True)
for dataset in combine:
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
dataset.loc[ dataset['Age'] > 64, 'Age'] = 4
train_df = train_df.drop(['AgeBand'], axis=1)
combine = [train_df, test_df]
train_df.head()
# Create family size from SibSp + Parch + 1
for dataset in combine:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
train_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean().sort_values(by='Survived', ascending=False)
#create another feature called IsAlone
for dataset in combine:
dataset['IsAlone'] = 0
dataset.loc[(dataset['FamilySize'] == 1), 'IsAlone'] = 1
dataset.loc[(dataset['FamilySize'] > 4), 'IsAlone'] = 2
train_df[['IsAlone','Survived']].groupby(['IsAlone'], as_index=False).mean()
#drop Parch, SibSp, and FamilySize features in favor of IsAlone
train_df = train_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
test_df = test_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
combine = [train_df, test_df]
train_df.head()
# Create an artificial feature combining Pclass and Age.
for dataset in combine:
dataset['Age*Class'] = dataset.Age * dataset.Pclass
train_df.loc[:, ['Age*Class', 'Age', 'Pclass']].head()
# fill the missing values of the Embarked feature with the most common occurrence
freq_port = train_df.Embarked.dropna().mode()[0]
for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].fillna(freq_port)
train_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False)
for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int)
train_df.head()
# fill the missing values of Fare
test_df['Fare'].fillna(test_df['Fare'].dropna().median(), inplace=True)
# Create FareBand
train_df['FareBand'] = pd.qcut(train_df['Fare'], 4)
train_df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean().sort_values(by='FareBand', ascending=True)
# Convert the Fare feature to ordinal values based on the FareBand
for dataset in combine:
dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
dataset['Fare'] = dataset['Fare'].astype(int)
train_df = train_df.drop(['FareBand'], axis=1)
combine = [train_df, test_df]
train_df.head()
train_df.describe()
#correlation matrix
f, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(train_df.corr(), vmax=.8, square=True);
###Output
_____no_output_____
###Markdown
Part II : Learning Model-------------------
###Code
#------------------------------------------------------------------
# Step 03: Learning model
#------------------------------------------------------------------
X_data = train_df.drop("Survived", axis=1) # data: Features
Y_data = train_df["Survived"] # data: Labels
X_test_kaggle = test_df.drop("PassengerId", axis=1).copy() # test data (kaggle)
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
# grid search
def grid_search_model(X, Y, model, parameters, cv):
CV_model = GridSearchCV(estimator=model, param_grid=parameters, cv=cv)
CV_model.fit(X, Y)
CV_model.cv_results_
print("Best Score:", CV_model.best_score_," / Best parameters:", CV_model.best_params_)
#validation curve
def validation_curve_model(X, Y, model, param_name, parameters, cv, ylim, log=True):
train_scores, test_scores = validation_curve(model, X, Y, param_name=param_name, param_range=parameters,cv=cv, scoring="accuracy")
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.figure()
plt.title("Validation curve")
plt.fill_between(parameters, train_scores_mean - train_scores_std,train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(parameters, test_scores_mean - test_scores_std,test_scores_mean + test_scores_std, alpha=0.1, color="g")
if log==True:
plt.semilogx(parameters, train_scores_mean, 'o-', color="r",label="Training score")
plt.semilogx(parameters, test_scores_mean, 'o-', color="g",label="Cross-validation score")
else:
plt.plot(parameters, train_scores_mean, 'o-', color="r",label="Training score")
plt.plot(parameters, test_scores_mean, 'o-', color="g",label="Cross-validation score")
#plt.ylim([0.55, 0.9])
if ylim is not None:
plt.ylim(*ylim)
plt.ylabel('Score')
    plt.xlabel(param_name)
plt.legend(loc="best")
return plt
# Learning curve
def Learning_curve_model(X, Y, model, cv, train_sizes):
plt.figure()
plt.title("Learning curve")
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(model, X, Y, cv=cv, n_jobs=4, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",label="Cross-validation score")
plt.legend(loc="best")
return plt
# fit the model, predict on the test data, compute the cross-validation score, and write a submission file
def predict_model(X, Y, model, Xtest, submit_name):
model.fit(X, Y)
Y_pred = model.predict(Xtest)
score = cross_val_score(model, X, Y, cv=cv)
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_pred
})
submission.to_csv(submit_name, index=False)
return score
###Output
_____no_output_____
###Markdown
Logistic Regression
###Code
search_param = 0 # 1 -- grid search / 0 -- don't search
plot_vc = 0 # 1--display validation curve/ 0-- don't display
plot_lc = 1 # 1--display learning curve/ 0 -- don't display
#grid search: Logistic Regression
model = LogisticRegression()
if search_param==1:
param_range = np.logspace(-6, 5, 12)
param_grid = dict(C=param_range)
grid_search_model(X_data, Y_data, model, param_grid, cv)
#Validation Curve: Logistic Regression
if plot_vc == 1:
param_range = np.logspace(-6, 3, 10)
param_name="C"
ylim=[0.55, 0.9]
validation_curve_model(X_data, Y_data, model, "C", param_range, cv, ylim)
#learn curve
logreg = LogisticRegression(C=1000)
if plot_lc==1:
train_size=np.linspace(.1, 1.0, 15)
Learning_curve_model(X_data, Y_data, logreg, cv, train_size)
# Logistic Regression
acc_log = predict_model(X_data, Y_data, logreg, X_test_kaggle, 'submission_Logistic.csv')
###Output
_____no_output_____
###Markdown
Support Vector Machines
###Code
search_param = 0 # 1 -- grid search / 0 -- don't search
plot_vc = 0 # 1--display validation curve/ 0-- don't display
plot_lc = 1 # 1--display learning curve/ 0 -- don't display
#grid search: SVM
search_param = 0
if search_param==1:
param_range = np.linspace(0.5, 5, 9)
param_grid = dict(C=param_range)
grid_search_model(X_data, Y_data, SVC(), param_grid, cv)
#Validation Curve: SVC
if plot_vc == 1:
param_range = np.linspace(0.1, 10, 10)
param_name="C"
ylim=[0.78, 0.90]
validation_curve_model(X_data, Y_data, SVC(), "C", param_range, cv, ylim, log=False)
#learn curve: SVC
svc = SVC(C=1, probability=True)
if plot_lc == 1:
train_size=np.linspace(.1, 1.0, 15)
Learning_curve_model(X_data, Y_data, svc, cv, train_size)
# Support Vector Machines
acc_svc = predict_model(X_data, Y_data, svc, X_test_kaggle, 'submission_SVM.csv')
###Output
_____no_output_____
###Markdown
KNN
###Code
search_param = 0 # 1 -- grid search / 0 -- don't search
plot_vc = 0 # 1--display validation curve/ 0-- don't display
plot_lc = 1 # 1--display learning curve/ 0 -- don't display
#grid search: KNN
if search_param==1:
param_range = (np.linspace(1, 10, 10)).astype(int)
param_grid = dict(n_neighbors=param_range)
grid_search_model(X_data, Y_data, KNeighborsClassifier(), param_grid, cv)
#Validation Curve: KNN
if plot_vc==1:
param_range = np.linspace(2, 20, 10).astype(int)
param_name="n_neighbors"
ylim=[0.75, 0.90]
validation_curve_model(X_data, Y_data, KNeighborsClassifier(), "n_neighbors", param_range, cv, ylim, log=False)
#learn curve: KNN
knn = KNeighborsClassifier(n_neighbors = 10)
if plot_lc==1:
train_size=np.linspace(.1, 1.0, 15)
Learning_curve_model(X_data, Y_data, knn, cv, train_size)
# KNN
acc_knn = predict_model(X_data, Y_data, knn, X_test_kaggle, 'submission_KNN.csv')
###Output
_____no_output_____
###Markdown
Naive Bayes
###Code
# Gaussian Naive Bayes
gaussian = GaussianNB()
acc_gaussian = predict_model(X_data, Y_data, gaussian, X_test_kaggle, 'submission_Gassian_Naive_Bayes.csv')
###Output
_____no_output_____
###Markdown
Perceptron
###Code
# Perceptron
perceptron = Perceptron()
acc_perceptron = predict_model(X_data, Y_data, perceptron, X_test_kaggle, 'submission_Perception.csv')
###Output
/opt/conda/lib/python3.6/site-packages/sklearn/linear_model/stochastic_gradient.py:84: FutureWarning: max_iter and tol parameters have been added in <class 'sklearn.linear_model.perceptron.Perceptron'> in 0.19. If both are left unset, they default to max_iter=5 and tol=None. If tol is not None, max_iter defaults to max_iter=1000. From 0.21, default max_iter will be 1000, and default tol will be 1e-3.
"and default tol will be 1e-3." % type(self), FutureWarning)
###Markdown
Linear SVC
###Code
# Linear SVC
linear_svc = LinearSVC()
acc_linear_svc = predict_model(X_data, Y_data, linear_svc, X_test_kaggle, 'submission_Linear_SVC.csv')
###Output
_____no_output_____
###Markdown
Stochastic Gradient Descent
###Code
# Stochastic Gradient Descent
sgd = SGDClassifier()
acc_sgd = predict_model(X_data, Y_data, sgd, X_test_kaggle, 'submission_stochastic_Gradient_Descent.csv')
###Output
/opt/conda/lib/python3.6/site-packages/sklearn/linear_model/stochastic_gradient.py:84: FutureWarning: max_iter and tol parameters have been added in <class 'sklearn.linear_model.stochastic_gradient.SGDClassifier'> in 0.19. If both are left unset, they default to max_iter=5 and tol=None. If tol is not None, max_iter defaults to max_iter=1000. From 0.21, default max_iter will be 1000, and default tol will be 1e-3.
"and default tol will be 1e-3." % type(self), FutureWarning)
###Markdown
Decision Tree
###Code
# Decision Tree
decision_tree = DecisionTreeClassifier()
acc_decision_tree = predict_model(X_data, Y_data, decision_tree, X_test_kaggle, 'submission_Decision_Tree.csv')
###Output
_____no_output_____
###Markdown
Random Forest
###Code
search_param = 0 # 1 -- grid search / 0 -- don't search
plot_vc = 0 # 1--display validation curve/ 0-- don't display
plot_lc = 1 # 1--display learning curve/ 0 -- don't display
#grid search: Random Forest (this step is very slow)
#param_range = (np.linspace(10, 110, 10)).astype(int)
#param_leaf = (np.linspace(1, 2, 2)).astype(int)
#param_grid = {'n_estimators':param_range, 'min_samples_leaf':param_leaf}
#grid_search_model(X_data, Y_data, RandomForestClassifier(), param_grid, cv)
if plot_vc==1:
param_range = np.linspace(10, 110, 10).astype(int)
ylim=[0.75, 0.90]
validation_curve_model(X_data, Y_data, RandomForestClassifier(min_samples_leaf=12), "n_estimators", param_range, cv, ylim, log=False)
if plot_vc==1:
param_range = np.linspace(1, 21, 10).astype(int)
ylim=[0.75, 0.90]
validation_curve_model(X_data, Y_data, RandomForestClassifier(n_estimators=80), "min_samples_leaf", param_range, cv, ylim, log=False)
# Random Forest
random_forest = RandomForestClassifier(n_estimators=80, random_state =0, min_samples_leaf = 12)
acc_random_forest = predict_model(X_data, Y_data, random_forest, X_test_kaggle, 'submission_random_forest.csv')
###Output
_____no_output_____
###Markdown
Ensemble voting
###Code
# ensemble voting
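# voting='soft' averages the predicted class probabilities of the four base models (which is why the SVC above was created with probability=True)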
ensemble_voting = VotingClassifier(estimators=[('lg', logreg), ('sv', svc), ('rf', random_forest),('kn',knn)], voting='soft')
acc_ensemble_voting = predict_model(X_data, Y_data, ensemble_voting, X_test_kaggle, 'submission_ensemble_voting.csv')
models = pd.DataFrame({'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression',
'Random Forest', 'Naive Bayes', 'Perceptron',
'Stochastic Gradient Decent', 'Linear SVC',
'Decision Tree', 'ensemble_voting'],'KFoldScore': [acc_svc.mean(), acc_knn.mean(), acc_log.mean(),
acc_random_forest.mean(), acc_gaussian.mean(), acc_perceptron.mean(),
acc_sgd.mean(), acc_linear_svc.mean(), acc_decision_tree.mean(), acc_ensemble_voting.mean()],
'Std': [acc_svc.std(), acc_knn.std(), acc_log.std(),
acc_random_forest.std(), acc_gaussian.std(), acc_perceptron.std(),
acc_sgd.std(), acc_linear_svc.std(), acc_decision_tree.std(), acc_ensemble_voting.std()]})
models.sort_values(by='KFoldScore', ascending=False)
###Output
_____no_output_____ |
PyCitySchools/PyCitySchools.ipynb | ###Markdown
Observations:1. Overall Subject-wise % Stats: Students have a better pass percentage in reading (85.8%) than in math (74.9%). 2. School Type & Performance: Charter schools (% Overall Passing - 90.5%) have a higher pass percentage than District schools (% Overall Passing - 50.7%). From the given data, it looks like even though both types of schools have a similar budget amount per student, Charter schools have comparatively fewer students than District schools, which certainly helps the overall quality of education.3. Budget & Performance: Schools with the smallest budgets (less than $629 per student) have a better overall pass percentage. Thus, a larger budget per student doesn't mean better performance for the students. It is possible that other factors (school size, school type etc.) contribute to the better performance of schools with a smaller budget per student. 4. School Size & Performance: Schools with small (<1000) and medium (1000-2000) sizes have better overall pass percentages than larger schools. Hence the number of students is inversely related to the overall pass percentage. Conclusion:Schools with fewer students have a better overall pass percentage. Hence Charter schools, with comparatively fewer students, are doing better than District schools. Overall, students have a better pass percentage in reading than in math.
###Code
# Dependencies and Setup
import pandas as pd
import numpy as np
# File to Load (Remember to Change These)
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"
# Read School and Student Data File and store into Pandas DataFrames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
# Combine the data into a single dataset.
school_data_complete = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"])
#school_data_complete
#school_data
#student_data
#Calculate the total number of schools
count_schools=school_data["school_name"].nunique()
#Calculate the total number of students
count_students=student_data["Student ID"].nunique()
#Calculate the total budget
total_budget=school_data["budget"].sum()
#Calculate the average math score
avg_math_score=school_data_complete["math_score"].mean()
#Calculate the average reading score
avg_reading_score=school_data_complete["reading_score"].mean()
#Calculate the percentage of students with a passing math score (70 or greater)
percent_pass_math=((student_data.loc[(student_data['math_score']>=70)]['Student ID'].count())/count_students)*100
#Calculate the percentage of students with a passing reading score (70 or greater)
percent_pass_reading=((student_data.loc[(student_data['reading_score']>=70)]['Student ID'].count())/count_students)*100
#Calculate the percentage of students who passed math and reading (% Overall Passing)
percent_pass_all=((student_data.loc[(student_data['reading_score']>=70) & (student_data['math_score']>=70)]['Student ID'].count())/count_students)*100
#Create a dataframe to hold the above results
District_Summary_df=pd.DataFrame({"Total Schools":[count_schools]
,"Total Students":[count_students]
,"Total Budget":[total_budget]
,"Average Math Score":[avg_math_score]
,"Average Reading Score":[avg_reading_score]
,"% Passing Math":[percent_pass_math]
,"% Passing Reading":[percent_pass_reading]
,"% Overall Passing":[percent_pass_all]
})
#Optional: give the displayed data cleaner formatting
District_Summary_df.style.format({"Total Students":'{:,}'
,"Total Budget":'${:,.2f}'
,"Average Math Score":'{:,.2f}'
,"Average Reading Score":'{:,.2f}'
,"% Passing Math":'{:,.2f}%'
,"% Passing Reading":'{:,.2f}%'
,"% Overall Passing":'{:,.2f}%'
})
#Create an overview table that summarizes key metrics about each school:
#group by School Name
school_group_df=school_data_complete.set_index('school_name').groupby(["school_name"])
#School Type
school_type=school_data.set_index('school_name')["type"]
#Total Students
total_students=school_group_df["Student ID"].nunique()
#Total School Budget
school_budget=school_data.set_index('school_name')["budget"]
#Per Student Budget
student_budget=school_data.set_index('school_name')["budget"]/school_data.set_index('school_name')["size"]
#Average Math Score
avg_math_score=school_group_df["math_score"].mean()
#Average Reading Score
avg_reading_score=school_group_df["reading_score"].mean()
#% Passing Math
per_pass_math=((student_data.loc[(student_data['math_score']>=70)].groupby("school_name")['Student ID'].count())/total_students)*100
#% Passing Reading
per_pass_reading=((student_data.loc[(student_data['reading_score']>=70)].groupby("school_name")['Student ID'].count())/total_students)*100
#% Overall Passing (The percentage of students that passed math and reading.)
per_pass_all=((student_data.loc[(student_data['reading_score']>=70) & (student_data['math_score']>=70)].groupby("school_name")['Student ID'].count())/total_students)*100
#Create a dataframe to hold the above results
School_Summary_df=pd.DataFrame({"School Type":school_type
,"Total Students":total_students
,"Total School Budget":school_budget
,"Per Student Budget":student_budget
,"Average Math Score":avg_math_score
,"Average Reading Score":avg_reading_score
,"% Passing Math":per_pass_math
,"% Passing Reading":per_pass_reading
,"% Overall Passing":per_pass_all
})
#formatting
School_Summary_df.style.format({"Total School Budget":'${:,.2f}'
,"Per Student Budget":'${:,.2f}'
,"Average Math Score":'{:,.2f}'
,"Average Reading Score":'{:,.2f}'
,"% Passing Math":'{:,.2f}%'
,"% Passing Reading":'{:,.2f}%'
,"% Overall Passing":'{:,.2f}%'
})
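# Hedged aside (not part of the original assignment): a quick numeric check of the
# budget-vs-performance observation above. Both columns are still numeric in
# School_Summary_df here, so Series.corr gives the Pearson correlation directly.
budget_passing_corr = School_Summary_df["Per Student Budget"].corr(School_Summary_df["% Overall Passing"])
print("Per-student budget vs % overall passing correlation: " + str(round(budget_passing_corr, 2)))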
#Sort and display the top five performing schools by % overall passing
top_5_schools_df=School_Summary_df.sort_values("% Overall Passing",ascending=False)
#formatting
top_5_schools_df.head().style.format({"Total School Budget":'${:,.2f}'
,"Per Student Budget":'${:,.2f}'
,"Average Math Score":'{:,.2f}'
,"Average Reading Score":'{:,.2f}'
,"% Passing Math":'{:,.2f}%'
,"% Passing Reading":'{:,.2f}%'
,"% Overall Passing":'{:,.2f}%'
})
#Sort and display the five worst-performing schools by % overall passing
bottom_5_schools_df=School_Summary_df.sort_values("% Overall Passing",ascending=True)
#formatting
bottom_5_schools_df.head().style.format({"Total School Budget":'${:,.2f}'
,"Per Student Budget":'${:,.2f}'
,"Average Math Score":'{:,.2f}'
,"Average Reading Score":'{:,.2f}'
,"% Passing Math":'{:,.2f}%'
,"% Passing Reading":'{:,.2f}%'
,"% Overall Passing":'{:,.2f}%'
})
#Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school.
#Create a pandas series for each grade. Hint: use a conditional statement.
#Group each series by school
ninth_math_avg = student_data.loc[student_data['grade'] == '9th'].groupby('school_name')["math_score"].mean()
tenth_math_avg = student_data.loc[student_data['grade'] == '10th'].groupby('school_name')["math_score"].mean()
eleventh_math_avg = student_data.loc[student_data['grade'] == '11th'].groupby('school_name')["math_score"].mean()
twelfth_math_avg = student_data.loc[student_data['grade'] == '12th'].groupby('school_name')["math_score"].mean()
#Combine the series into a dataframe
Math_Scores_by_Grade_df=pd.DataFrame({"9th":ninth_math_avg
,"10th":tenth_math_avg
,"11th":eleventh_math_avg
,"12th":twelfth_math_avg
})
#Optional: give the displayed data cleaner formatting
Math_Scores_by_Grade_df.style.format({"9th": '{:.2f}',
"10th": '{:.2f}',
"11th": "{:.2f}",
"12th": "{:.2f}"})
#Create a table that lists the average reading Score for students of each grade level (9th, 10th, 11th, 12th) at each school.
#Create a pandas series for each grade. Hint: use a conditional statement.
#Group each series by school
ninth_reading_avg = student_data.loc[student_data['grade'] == '9th'].groupby('school_name')["reading_score"].mean()
tenth_reading_avg = student_data.loc[student_data['grade'] == '10th'].groupby('school_name')["reading_score"].mean()
eleventh_reading_avg = student_data.loc[student_data['grade'] == '11th'].groupby('school_name')["reading_score"].mean()
twelfth_reading_avg = student_data.loc[student_data['grade'] == '12th'].groupby('school_name')["reading_score"].mean()
#Combine the series into a dataframe
reading_Scores_by_Grade_df=pd.DataFrame({"9th":ninth_reading_avg
,"10th":tenth_reading_avg
,"11th":eleventh_reading_avg
,"12th":twelfth_reading_avg
})
#Optional: give the displayed data cleaner formatting
reading_Scores_by_Grade_df.style.format({"9th": '{:.2f}',
"10th": '{:.2f}',
"11th": "{:.2f}",
"12th": "{:.2f}"})
#Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following:
#find max budget to set max bin limit
budget_max=max(school_data_complete['budget']/school_data_complete['size'])+1
budget_bins = [0, 584.99, 629.99, 644.99, budget_max]
budget_labels = ['<$585', "$585-629", "$630-644", "$645-675"]
school_data_complete['Spending Ranges (Per Student)'] = pd.cut(school_data_complete['budget']/school_data_complete['size'], budget_bins, labels = budget_labels)
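# Hedged aside with hypothetical sample values: pd.cut assigns each value to the label of
# the interval it falls into, e.g. a per-student budget of 600 maps to "$585-629".
_bin_demo = pd.cut(pd.Series([580.0, 600.0, 640.0, 650.0]), budget_bins, labels=budget_labels)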
#group by bins
spending_groups_df=school_data_complete.groupby('Spending Ranges (Per Student)')
#total student counts by bin range
totStudents=spending_groups_df["Student ID"].count()
#Average Math Score
avg_math_score=spending_groups_df['math_score'].mean()
#Average Reading Score
avg_reading_score=spending_groups_df['reading_score'].mean()
#% Passing Math
percentage_math=((school_data_complete.loc[(school_data_complete['math_score']>=70)].groupby("Spending Ranges (Per Student)")['Student ID'].count())/totStudents)*100
#% Passing Reading
percentage_reading=((school_data_complete.loc[(school_data_complete['reading_score']>=70)].groupby("Spending Ranges (Per Student)")['Student ID'].count())/totStudents)*100
#Overall Passing Rate
percentage_overall_pass=((school_data_complete.loc[(school_data_complete['reading_score']>=70)
& (school_data_complete['math_score']>=70)].groupby("Spending Ranges (Per Student)")['Student ID'].count())/totStudents)*100
Scores_by_School_Spending_df = pd.DataFrame({
"Average Math Score": avg_math_score,
"Average Reading Score": avg_reading_score,
'% Passing Math': percentage_math,
'% Passing Reading': percentage_reading,
"% Overall Passing": percentage_overall_pass
})
#formatting
Scores_by_School_Spending_df.style.format({"Average Math Score":'{:,.2f}'
,"Average Reading Score":'{:,.2f}'
,"% Passing Math":'{:,.2f}%'
,"% Passing Reading":'{:,.2f}%'
,"% Overall Passing":'{:,.2f}%'
})
#Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following:
#find max size of bin
size_max=max(school_data_complete['size'])+1
size_bins = [0, 999, 1999, size_max]
size_labels = ['Small (<1000)', 'Medium (1000-2000)', 'Large (2000-5000)']
school_data_complete['School Size'] = pd.cut(school_data_complete['size'], size_bins, labels = size_labels)
school_size_groups_df=school_data_complete.groupby("School Size")
tot_Students=school_size_groups_df["Student ID"].nunique()
#Average Math Score
avg_math_score1=school_size_groups_df['math_score'].mean()
#Average Reading Score
avg_reading_score1=school_size_groups_df['reading_score'].mean()
#% Passing Math
percentage_math1=((school_data_complete.loc[(school_data_complete['math_score']>=70)].groupby("School Size")['Student ID'].count())/tot_Students)*100
#% Passing Reading
percentage_reading1=((school_data_complete.loc[(school_data_complete['reading_score']>=70)].groupby("School Size")['Student ID'].count())/tot_Students)*100
#Overall Passing Rate
percentage_overall_pass1=((school_data_complete.loc[(school_data_complete['reading_score']>=70)
& (school_data_complete['math_score']>=70)].groupby("School Size")['Student ID'].count())/tot_Students)*100
Scores_by_School_Size_df = pd.DataFrame({
"Average Math Score": avg_math_score1,
"Average Reading Score": avg_reading_score1,
'% Passing Math': percentage_math1,
'% Passing Reading': percentage_reading1,
"% Overall Passing": percentage_overall_pass1
})
#formatting
Scores_by_School_Size_df.style.format({"Average Math Score":'{:,.2f}'
,"Average Reading Score":'{:,.2f}'
,"% Passing Math":'{:,.2f}%'
,"% Passing Reading":'{:,.2f}%'
,"% Overall Passing":'{:,.2f}%'
})
#Create a table that breaks down school performances based on School Type. Use 4 reasonable bins to group school spending. Include in the table each of the following:
school_type_groups_df=school_data_complete.groupby("type")
tot_Students1=school_type_groups_df["Student ID"].nunique()
#Average Math Score
avg_math_score2=school_type_groups_df['math_score'].mean()
#Average Reading Score
avg_reading_score2=school_type_groups_df['reading_score'].mean()
#% Passing Math
percentage_math2=((school_data_complete.loc[(school_data_complete['math_score']>=70)].groupby("type")['Student ID'].count())/tot_Students1)*100
#% Passing Reading
percentage_reading2=((school_data_complete.loc[(school_data_complete['reading_score']>=70)].groupby("type")['Student ID'].count())/tot_Students1)*100
#Overall Passing Rate
percentage_overall_pass2=((school_data_complete.loc[(school_data_complete['reading_score']>=70)
& (school_data_complete['math_score']>=70)].groupby("type")['Student ID'].count())/tot_Students1)*100
Scores_by_School_Type_df = pd.DataFrame({
"Average Math Score": avg_math_score2,
"Average Reading Score": avg_reading_score2,
'% Passing Math': percentage_math2,
'% Passing Reading': percentage_reading2,
"% Overall Passing": percentage_overall_pass2
})
Scores_by_School_Type_df.index.name = "School Type"
#formatting
Scores_by_School_Type_df.style.format({"Average Math Score":'{:,.2f}'
,"Average Reading Score":'{:,.2f}'
,"% Passing Math":'{:,.2f}%'
,"% Passing Reading":'{:,.2f}%'
,"% Overall Passing":'{:,.2f}%'
})
###Output
_____no_output_____
###Markdown
Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"
# Read School and Student Data File and store into Pandas Data Frames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
school_df = pd.DataFrame(school_data)
school_df.head()
school_data = school_data.rename(columns={"name":"school_name"})
student_data = student_data.rename(columns={"school":"school_name"})
# Combine the data into a single dataset
school_data_complete = pd.merge(school_data, student_data, how="left", on=["school_name"])
school_data_complete.head()
###Output
_____no_output_____
###Markdown
District Summary* Calculate the total number of schools* Calculate the total number of students* Calculate the total budget* Calculate the average math score * Calculate the average reading score* Calculate the overall passing rate (overall average score), i.e. (avg. math score + avg. reading score)/2* Calculate the percentage of students with a passing math score (70 or greater)* Calculate the percentage of students with a passing reading score (70 or greater)* Create a dataframe to hold the above results* Optional: give the displayed data cleaner formatting
###Code
# Calculate the total number of schools
total_schools = school_df["School ID"].count()
# Print
total_schools
# Calculate the total number of students
total_students = school_df["size"].sum()
# Print
total_students
# Calculate the total budget
total_budget= school_df["budget"].sum()
# Print
total_budget
# Calculate the average math score
avg_mathscore = school_data_complete["math_score"].mean()
# Calculate the average reading score
avg_readscore = school_data_complete["reading_score"].mean()
# Calculate the overall passing rate (overall average score), i.e.
# (avg. math score + avg. reading score)/2
passing_rate = (avg_mathscore + avg_readscore)/2
# Calculate the percentage of students with a passing
# math score (70 or greater)
passing_math = school_data_complete.query('math_score >= 70')["School ID"].count()/total_students*100
# Calculate the percentage of students with a passing
# reading score (70 or greater)
passing_read = school_data_complete.query('reading_score >= 70')["School ID"].count()/total_students*100
# Create a dataframe
new_df = pd.DataFrame({"Total Schools":[total_schools],
"Total Students":[total_students],
"Total Budget":[total_budget],
"Average Math Score":[avg_mathscore],
"Average Reading Score":[avg_readscore],
"Passing Math":[passing_math],
"Passing Reading":[passing_read],
"Overall Passing Rate":[passing_rate]
})
new_df
###Output
_____no_output_____
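###Markdown
Hedged aside (not part of the original instructions): the same pass percentages can be computed more directly from boolean masks, since the mean of a boolean Series is the fraction of True values. A minimal sketch, assuming `school_data_complete` from the cells above:
###Code
# Equivalent pass percentages via boolean-mask means (should match passing_math / passing_read)
alt_passing_math = (school_data_complete["math_score"] >= 70).mean() * 100
alt_passing_read = (school_data_complete["reading_score"] >= 70).mean() * 100
alt_passing_math, alt_passing_read
###Output
_____no_output_____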
###Markdown
School Summary * Create an overview table that summarizes key metrics about each school, including: * School Name * School Type * Total Students * Total School Budget * Per Student Budget * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * Overall Passing Rate (Average of the above two) * Create a dataframe to hold the above results
###Code
# Create an overview table
school_data = school_data_complete[["School ID", "school_name",
"type", "size", "budget",
"Student ID", "student_name",
"gender", "grade", "reading_score",
"math_score"]].copy()
school_data.head()
# Group by school name
school = school_data.groupby(['school_name'])
# Total Students per school
total_students_sum = school["Student ID"].count()
# Total Budget per school
total_budget_sum = school["budget"].mean()
# Total Budget per student
bgd_per_stu = total_budget_sum/total_students_sum
# Avg Math Score per School
avg_mathscore_sum = school["math_score"].mean()
# Avg Read Score per School
avg_readscore_sum = school["reading_score"].mean()
# Percentage of students passing per school (score of 70 or greater)
passing_math = school_data.query('math_score >= 70').groupby("school_name")["School ID"].count()/total_students_sum*100
passing_reading = school_data.query('reading_score >= 70').groupby("school_name")["School ID"].count()/total_students_sum*100
# Pass Rate (average of the two passing percentages above)
passrate_grp = ((passing_math + passing_reading)/2)
# Creating a DataFrame
school_data_summary = pd.DataFrame({"Total Students":total_students_sum,
"Total School Budget": total_budget_sum,
"Per Student Budget": bgd_per_stu,
"Average Math Score": avg_mathscore_sum,
"Average Reading Score": avg_readscore_sum,
"% Passing Math": passing_math,
"% Passing Reading": passing_reading,
"% Overall Passing Rate": passrate_grp
})
# Remove Decimal after dot
school_data_summary["% Passing Math"] = school_data_summary["% Passing Math"].round(2)
school_data_summary["% Passing Reading"] = school_data_summary["% Passing Reading"].round(2)
school_data_summary["% Overall Passing Rate"] = school_data_summary["% Overall Passing Rate"].round(2)
# Print DataFrame
school_data_summary.head(2)
# Add dollar sign and percent sign
school_data_summary["Total School Budget"] = school_data_summary["Total Students"].map("${:,.2f}".format)
school_data_summary["Per Student Budget"] = school_data_summary["Per Student Budget"].map("${:,.2f}".format)
school_data_summary["% Passing Math"] = school_data_summary["% Passing Math"].astype(str) + '%'
school_data_summary["% Passing Reading"] = school_data_summary["% Passing Reading"].astype(str) + '%'
school_data_summary["% Overall Passing Rate"] = school_data_summary["% Overall Passing Rate"].astype(str) + '%'
school_data_summary.head()
###Output
_____no_output_____
###Markdown
Top Performing Schools (By Passing Rate) * Sort and display the top five schools in overall passing rate
###Code
Top_schools = school_data_summary.sort_values(
["% Overall Passing Rate"], ascending=False)
Top_schools.head()
###Output
_____no_output_____
###Markdown
Bottom Performing Schools (By Passing Rate) * Sort and display the five worst-performing schools
###Code
Bottom_schools = school_data_summary.sort_values(
["% Overall Passing Rate"], ascending=True)
Bottom_schools.head()
###Output
_____no_output_____
###Markdown
Math Scores by Grade * Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school. * Create a pandas series for each grade. Hint: use a conditional statement. * Group each series by school * Combine the series into a dataframe * Optional: give the displayed data cleaner formatting
###Code
# Create a pandas series for each grade
nineth_graders = school_data[(school_data["grade"] == "9th")]
tenth_graders = school_data[(school_data["grade"] == "10th")]
eleventh_graders = school_data[(school_data["grade"] == "11th")]
twelfth_graders = school_data[(school_data["grade"] == "12th")]
# Group each series by school
nineth_graders_score = nineth_graders.groupby(["school_name"]).mean()["math_score"]
tenth_graders_score = tenth_graders.groupby(["school_name"]).mean()["math_score"]
eleventh_graders_score = eleventh_graders.groupby(["school_name"]).mean()["math_score"]
twelfth_graders_score = twelfth_graders.groupby(["school_name"]).mean()["math_score"]
# Combine the series into a dataframe
mathscore_by_grade = pd.DataFrame({"9th": nineth_graders_score,
"10th":tenth_graders_score,
"11th":eleventh_graders_score,
"12th":twelfth_graders_score
})
# Remove Decimal after dot
mathscore_by_grade["9th"] = mathscore_by_grade["9th"].round(2)
mathscore_by_grade["10th"] = mathscore_by_grade["10th"].round(2)
mathscore_by_grade["11th"] = mathscore_by_grade["11th"].round(2)
mathscore_by_grade["12th"] = mathscore_by_grade["12th"].round(2)
mathscore_by_grade.head()
###Output
_____no_output_____
###Markdown
Reading Score by Grade * Perform the same operations as above for reading scores
###Code
# Group each series by school
nineth_grade = nineth_graders.groupby(["school_name"]).mean()["reading_score"]
tenth_grade = tenth_graders.groupby(["school_name"]).mean()["reading_score"]
eleventh_grade = eleventh_graders.groupby(["school_name"]).mean()["reading_score"]
twelfth_grade = twelfth_graders.groupby(["school_name"]).mean()["reading_score"]
# Combine the series into a dataframe
readscore_by_grade = pd.DataFrame({"9th": nineth_grade,
"10th":tenth_grade,
"11th":eleventh_grade,
"12th":twelfth_grade
})
# Remove Decimal after dot
readscore_by_grade["9th"] = readscore_by_grade["9th"].round(2)
readscore_by_grade["10th"] = readscore_by_grade["10th"].round(2)
readscore_by_grade["11th"] = readscore_by_grade["11th"].round(2)
readscore_by_grade["12th"] = readscore_by_grade["12th"].round(2)
readscore_by_grade.head()
###Output
_____no_output_____
###Markdown
Observable Trends* Charter schools significantly outperform District schools: this is especially apparent in the percentage of students who pass math, reading, and both. The average scores for math and reading are higher as well. * Large schools (i.e. those with 2,000-5,000 students) significantly underperform, in both average scores and percentage pass rates, relative to those with 2,000 or fewer students.* There seems to be a negative correlation between school spending per student and performance in terms of average scores and percentage pass rates. This would be an interesting subject to explore further in order to explain what other variables may be contributing to this apparent relationship (a quick correlation check is sketched after the spending summary below).
###Code
# Dependencies and Setup
import pandas as pd
import numpy as np
# File to Load (Remember to Change These)
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"
# Read School and Student Data File and store into Pandas DataFrames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
# Combine the data into a single dataset.
school_student_df = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"])
school_student_df.head()
###Output
_____no_output_____
###Markdown
District Summary* Calculate the total number of schools* Calculate the total number of students* Calculate the total budget* Calculate the average math score * Calculate the average reading score* Calculate the percentage of students with a passing math score (70 or greater)* Calculate the percentage of students with a passing reading score (70 or greater)* Calculate the percentage of students who passed math **and** reading (% Overall Passing)* Create a dataframe to hold the above results* Optional: give the displayed data cleaner formatting
###Code
# Calculate the total number of schools
school_count = len(school_student_df["school_name"].unique())
school_count
# Calculate the total number of students
student_count = len(school_student_df["Student ID"].unique())
student_count
# Calculate the total budget
total_budget = school_data["budget"].sum()
total_budget
# Calculate the average score for math & reading, respectively
average_math_score = school_student_df["math_score"].mean()
average_math_score
average_reading_score = school_student_df["reading_score"].mean()
average_reading_score
# Calculate the percentage of students who passed math, reading, and both (overall)
passing_math_count = school_student_df[(school_student_df["math_score"] >= 70)].count()["student_name"]
passing_math_count
passing_math_percentage = passing_math_count / float(student_count) * 100
passing_math_percentage
passing_reading_count = school_student_df[(school_student_df["reading_score"] >= 70)].count()["student_name"]
passing_reading_count
passing_reading_percentage = passing_reading_count / float(student_count) * 100
passing_reading_percentage
overall_passing_count = school_student_df[(school_student_df["math_score"] >= 70) & (school_student_df["reading_score"] >= 70)].count()["student_name"]
overall_passing_percentage = overall_passing_count / float(student_count) * 100
# Create a dataframe to hold results & display in cleaner formatting
# Minor Data Cleanup
district_summary_df = pd.DataFrame({"Total Schools": [school_count],
"Total Students": [student_count],
"Total Budget": [total_budget],
"Average Math Score": [average_math_score],
"Average Reading Score": [average_reading_score],
"% Passing Math": [passing_math_percentage],
"% Passing Reading": [passing_reading_percentage],
"% Overall Passing": [overall_passing_percentage]})
district_summary_df = district_summary_df[["Total Schools", "Total Students", "Total Budget",
"Average Math Score",
"Average Reading Score",
"% Passing Math",
"% Passing Reading",
"% Overall Passing"]]
district_summary_df["Total Students"] = district_summary_df["Total Students"].map("{:,}".format)
district_summary_df["Total Budget"] = district_summary_df["Total Budget"].map("${:,.2f}".format)
district_summary_df.head()
###Output
_____no_output_____
###Markdown
School Summary * Create an overview table that summarizes key metrics about each school, including: * School Name * School Type * Total Students * Total School Budget * Per Student Budget * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * % Overall Passing (The percentage of students that passed math **and** reading.) * Create a dataframe to hold the above results
###Code
# Set the index and determine School Type
school_type = school_data.set_index(["school_name"])["type"]
# Calculate the total students per school
students_per_school = school_student_df.groupby(["school_name"]).count()["Student ID"]
# Calculate total and per student budgets for each school
per_school_budget = school_student_df.groupby(["school_name"]).mean()["budget"]
per_student_budget = per_school_budget / students_per_school
# Calculate the average math score for each school
school_math_score = school_student_df.groupby(["school_name"]).mean()["math_score"]
# Calculate the average reading score for each school
school_reading_score = school_student_df.groupby(["school_name"]).mean()["reading_score"]
# Calculate the percentage students passing math for each school
school_passing_math = school_student_df[(school_student_df["math_score"] >= 70)].groupby("school_name").count()["student_name"]
school_math_percent = (school_passing_math / students_per_school) * 100
# Calculate the percentage of students passing reading for each school
school_passing_reading = school_student_df[(school_student_df["reading_score"] >= 70)].groupby("school_name").count()["student_name"]
school_reading_percent = (school_passing_reading / students_per_school) * 100
# Calculate the percentage of students passing both subjects (overall)
school_passing_overall = school_student_df[(school_student_df["math_score"] >= 70) & (school_student_df["reading_score"] >= 70)].groupby("school_name").count()["student_name"]
school_overall_percent = (school_passing_overall / students_per_school) * 100
# Create a data frame to display school summary data
school_summary = pd.DataFrame({"School Type": school_type,
"Total Students": students_per_school,
"Total School Budget": per_school_budget,
"Per Student Budget": per_student_budget,
"Average Math Score": school_math_score,
"Average Reading Score": school_reading_score,
"% Passing Math": school_math_percent,
"% Passing Reading": school_reading_percent,
"% Overall Passing Rate": school_overall_percent})
# Cleaning and reformatting
school_summary_df = school_summary[["School Type", "Total Students", "Total School Budget", "Per Student Budget",
"Average Math Score", "Average Reading Score",
"% Passing Math", "% Passing Reading", "% Overall Passing Rate"]]
school_summary_df["Total Students"] = school_summary["Total Students"].map("{:,}".format)
school_summary_df["Total School Budget"] = school_summary["Total School Budget"].map("${:,.2f}".format)
school_summary_df["Per Student Budget"] = school_summary["Per Student Budget"].map("${:,.2f}".format)
# Display the data frame
school_summary_df
###Output
_____no_output_____
###Markdown
Top Performing Schools (By % Overall Passing) * Sort and display the top five performing schools by % overall passing.
###Code
top_performing_schools = school_summary_df.sort_values(["% Overall Passing Rate"], ascending=False)
top_performing_schools.head(5)
###Output
_____no_output_____
###Markdown
Bottom Performing Schools (By % Overall Passing) * Sort and display the five worst-performing schools by % overall passing.
###Code
bottom_performing_schools = school_summary_df.sort_values(["% Overall Passing Rate"], ascending=True)
bottom_performing_schools.head(5)
###Output
_____no_output_____
###Markdown
Math Scores by Grade * Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school. * Create a pandas series for each grade. Hint: use a conditional statement. * Group each series by school * Combine the series into a dataframe * Optional: give the displayed data cleaner formatting
###Code
# Create a panda series for each grade using conditional statements
nineth_graders = school_student_df[(school_student_df["grade"] == "9th")]
tenth_graders = school_student_df[(school_student_df["grade"] == "10th")]
eleventh_graders = school_student_df[(school_student_df["grade"] == "11th")]
twelfth_graders = school_student_df[(school_student_df["grade"] == "12th")]
# Group each series by school name
nineth_math_scores = nineth_graders.groupby(["school_name"]).mean()["math_score"]
tenth_math_scores = tenth_graders.groupby(["school_name"]).mean()["math_score"]
eleventh_math_scores = eleventh_graders.groupby(["school_name"]).mean()["math_score"]
twelfth_math_scores = twelfth_graders.groupby(["school_name"]).mean()["math_score"]
# Combine the series into a dataframe
math_scores_grade_df = pd.DataFrame({"9th": nineth_math_scores, "10th": tenth_math_scores,
"11th": eleventh_math_scores, "12th": twelfth_math_scores})
# Clean the formatting
math_scores_grade_df = math_scores_grade_df[["9th", "10th", "11th", "12th"]]
math_scores_grade_df.index.name = None
# Display the dataframe
math_scores_grade_df
###Output
_____no_output_____
###Markdown
Reading Score by Grade * Perform the same operations as above for reading scores
###Code
# Create a panda series for each grade using conditional statements
nineth_graders = school_student_df[(school_student_df["grade"] == "9th")]
tenth_graders = school_student_df[(school_student_df["grade"] == "10th")]
eleventh_graders = school_student_df[(school_student_df["grade"] == "11th")]
twelfth_graders = school_student_df[(school_student_df["grade"] == "12th")]
# Group each series by school name
nineth_reading_scores = nineth_graders.groupby(["school_name"]).mean()["reading_score"]
tenth_reading_scores = tenth_graders.groupby(["school_name"]).mean()["reading_score"]
eleventh_reading_scores = eleventh_graders.groupby(["school_name"]).mean()["reading_score"]
twelfth_reading_scores = twelfth_graders.groupby(["school_name"]).mean()["reading_score"]
# Combine the series into a dataframe
reading_scores_grade_df = pd.DataFrame({"9th": nineth_reading_scores, "10th": tenth_reading_scores,
"11th": eleventh_reading_scores, "12th": twelfth_reading_scores})
# Clean the formatting
reading_scores_grade_df = reading_scores_grade_df[["9th", "10th", "11th", "12th"]]
reading_scores_grade_df.index.name = None
# Display the dataframe
reading_scores_grade_df
###Output
_____no_output_____
###Markdown
Scores by School Spending * Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following: * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * Overall Passing Rate (Average of the above two)
###Code
# Create the bins to group school spending
school_spending_bins = [0, 585, 630, 645, 680]
group_names = ["<$585", "$585-630", "$630-645", "$645-680"]
# Categorize the school spending based on the bins
school_summary_df["Spending Ranges (Per Student)"] = pd.cut(per_student_budget, school_spending_bins, labels=group_names)
# Calculate the scores based on school spending
spending_math_scores = school_summary_df.groupby(["Spending Ranges (Per Student)"]).mean()["Average Math Score"]
spending_reading_scores = school_summary_df.groupby(["Spending Ranges (Per Student)"]).mean()["Average Reading Score"]
spending_passing_math = school_summary_df.groupby(["Spending Ranges (Per Student)"]).mean()["% Passing Math"]
spending_passing_reading = school_summary_df.groupby(["Spending Ranges (Per Student)"]).mean()["% Passing Reading"]
overall_passing_rate = school_summary_df.groupby(["Spending Ranges (Per Student)"]).mean()["% Overall Passing Rate"]
# Assemble into data frame
spending_summary_df = pd.DataFrame({"Average Math Score" : spending_math_scores,
"Average Reading Score": spending_reading_scores,
"% Passing Math": spending_passing_math,
"% Passing Reading": spending_passing_reading,
"% Overall Passing Rate": overall_passing_rate})
spending_summary_df = spending_summary_df[["Average Math Score",
"Average Reading Score",
"% Passing Math", "% Passing Reading",
"% Overall Passing Rate"]]
# Display results
spending_summary_df
###Output
_____no_output_____
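###Markdown
Hedged exploratory sketch (not part of the original instructions), following up on the spending trend noted in the Observable Trends cell: a Pearson correlation between per-student budget and the per-school metrics built earlier in this notebook (`per_student_budget`, `school_math_score`, `school_reading_score`, and `school_overall_percent` are assumed to still be in scope).
###Code
# Correlations of per-student spending with the numeric per-school metrics
exploration_df = pd.DataFrame({"Per Student Budget": per_student_budget,
                               "Average Math Score": school_math_score,
                               "Average Reading Score": school_reading_score,
                               "% Overall Passing Rate": school_overall_percent})
exploration_df.corr()["Per Student Budget"]
###Output
_____no_output_____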
###Markdown
Scores by School Size * Perform the same operations as above, based on school size.
###Code
# Create the bins to group school size
school_size_bins = [0, 1000, 2000, 5000]
group_names = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"]
# Categorize the spending based on the bins
school_summary["School Size"] = pd.cut(school_summary["Total Students"], school_size_bins, labels=group_names)
# Calculate the scores based on school size
size_math_scores = school_summary.groupby(["School Size"]).mean()["Average Math Score"]
size_reading_scores = school_summary.groupby(["School Size"]).mean()["Average Reading Score"]
size_passing_math = school_summary.groupby(["School Size"]).mean()["% Passing Math"]
size_passing_reading = school_summary.groupby(["School Size"]).mean()["% Passing Reading"]
overall_passing_rate = school_summary.groupby(["School Size"]).mean()["% Overall Passing Rate"]
# Assemble into data frame
size_summary_df = pd.DataFrame({"Average Math Score" : size_math_scores,
"Average Reading Score": size_reading_scores,
"% Passing Math": size_passing_math,
"% Passing Reading": size_passing_reading,
"% Overall Passing Rate": overall_passing_rate})
size_summary_df = size_summary_df[["Average Math Score",
"Average Reading Score",
"% Passing Math", "% Passing Reading",
"% Overall Passing Rate"]]
# Display results
size_summary_df
###Output
_____no_output_____
###Markdown
Scores by School Type * Perform the same operations as above, based on school type
###Code
# Calculate the scores based on school type
type_math_scores = school_summary_df.groupby(["School Type"]).mean()["Average Math Score"]
type_reading_scores = school_summary_df.groupby(["School Type"]).mean()["Average Reading Score"]
type_passing_math = school_summary_df.groupby(["School Type"]).mean()["% Passing Math"]
type_passing_reading = school_summary_df.groupby(["School Type"]).mean()["% Passing Reading"]
overall_passing_rate = school_summary_df.groupby(["School Type"]).mean()["% Overall Passing Rate"]
# Assemble into data frame
type_summary_df = pd.DataFrame({"Average Math Score" : type_math_scores,
"Average Reading Score": type_reading_scores,
"% Passing Math": type_passing_math,
"% Passing Reading": type_passing_reading,
"% Overall Passing Rate": overall_passing_rate})
type_summary_df = type_summary_df[["Average Math Score",
"Average Reading Score",
"% Passing Math", "% Passing Reading",
"% Overall Passing Rate"]]
# Display results
type_summary_df
###Output
_____no_output_____
###Markdown
Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
school_data_df = "Resources/schools_complete.csv"
student_data_df = "Resources/students_complete.csv"
# Read School and Student Data File and store into Pandas DataFrames
school_data = pd.read_csv(school_data_df)
student_data = pd.read_csv(student_data_df)
# Combine the data into a single dataset.
school_data_combined = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"])
###Output
_____no_output_____
###Markdown
District Summary* Calculate the total number of schools* Calculate the total number of students* Calculate the total budget* Calculate the average math score * Calculate the average reading score* Calculate the percentage of students with a passing math score (70 or greater)* Calculate the percentage of students with a passing reading score (70 or greater)* Calculate the percentage of students who passed math **and** reading (% Overall Passing)* Create a dataframe to hold the above results* Optional: give the displayed data cleaner formatting
###Code
# Calculate Totals
school_count = len(school_data_combined["school_name"].unique())
student_count = school_data_combined["Student ID"].count()
total_budget = school_data["budget"].sum()
# Calculate the Average Scores
average_math_score = school_data_combined["math_score"].mean()
average_reading_score = school_data_combined["reading_score"].mean()
# Calculate the Percentage Pass Rates
passing_math_count = school_data_combined[(school_data_combined["math_score"] >= 70)].count()["student_name"]
passing_math_percentage = passing_math_count / float(student_count) * 100
passing_reading_count = school_data_combined[(school_data_combined["reading_score"] >= 70)].count()["student_name"]
passing_reading_percentage = passing_reading_count / float(student_count) * 100
passing_math_reading_count = school_data_combined[(school_data_combined["math_score"] >= 70)
& (school_data_combined["reading_score"] >= 70)].count()["student_name"]
overall_passing_rate = passing_math_reading_count / float(student_count) * 100
# Data Cleanup
district_summary = pd.DataFrame({"Total Schools": [school_count],
"Total Students": [student_count],
"Total Budget": [total_budget],
"Average Math Score": [average_math_score],
"Average Reading Score": [average_reading_score],
"% Passing Math": [passing_math_percentage],
"% Passing Reading": [passing_reading_percentage],
"% Overall Passing": [overall_passing_rate]})
district_summary = district_summary[["Total Schools", "Total Students", "Total Budget",
"Average Math Score",
"Average Reading Score",
"% Passing Math",
"% Passing Reading",
"% Overall Passing"]]
# Formatting
district_summary["Total Students"] = district_summary["Total Students"].map("{:,}".format)
district_summary["Total Budget"] = district_summary["Total Budget"].map("${:,.2f}".format)
# Display the DataFrame
district_summary
###Output
_____no_output_____
###Markdown
School Summary * Create an overview table that summarizes key metrics about each school, including: * School Name * School Type * Total Students * Total School Budget * Per Student Budget * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * % Overall Passing (The percentage of students that passed math **and** reading.) * Create a dataframe to hold the above results
###Code
# Determine the School Type
school_types = school_data.set_index(["school_name"])["type"]
# Calculate the total student count
per_school_counts = school_data_combined["school_name"].value_counts()
# Calculate the total school budget and per capita spending
per_school_budget = school_data_combined.groupby(["school_name"]).mean()["budget"]
per_school_capita = per_school_budget / per_school_counts
# Calculate the average test scores
per_school_math = school_data_combined.groupby(["school_name"]).mean()["math_score"]
per_school_reading = school_data_combined.groupby(["school_name"]).mean()["reading_score"]
###Output
_____no_output_____
###Markdown
Top Performing Schools (By % Overall Passing) * Sort and display the top five performing schools by % overall passing.
###Code
# Get the students who passed math and passed reading by creating separate filtered DataFrames.
school_passing_math = school_data_combined[(school_data_combined["math_score"] >= 70)]
school_passing_reading = school_data_combined[(school_data_combined["reading_score"] >= 70)]
# Get the the students who passed both reading and math in a separate DataFrame.
passing_math_and_reading = school_data_combined[(school_data_combined["reading_score"] >= 70)
& (school_data_combined["math_score"] >= 70)]
# Calculate the Percentage Pass Rates
per_school_passing_math = school_passing_math.groupby(["school_name"]).count()["student_name"] / per_school_counts * 100
per_school_passing_reading = school_passing_reading.groupby(["school_name"]).count()["student_name"] / per_school_counts * 100
overall_passing_rate = passing_math_and_reading.groupby(["school_name"]).count()["student_name"] / per_school_counts * 100
# Convert to DataFrame
per_school_summary = pd.DataFrame({"School Type": school_types,
"Total Students": per_school_counts,
"Total School Budget": per_school_budget,
"Per Student Budget": per_school_capita,
"Average Math Score": per_school_math,
"Average Reading Score": per_school_reading,
"% Passing Math": per_school_passing_math,
"% Passing Reading": per_school_passing_reading,
"% Overall Passing": overall_passing_rate})
# Minor data munging
per_school_summary = per_school_summary[["School Type", "Total Students", "Total School Budget", "Per Student Budget",
"Average Math Score", "Average Reading Score",
"% Passing Math", "% Passing Reading",
"% Overall Passing"]]
per_school_summary["Total School Budget"] = per_school_summary["Total School Budget"].map("${:,.2f}".format)
per_school_summary["Per Student Budget"] = per_school_summary["Per Student Budget"].map("${:,.2f}".format)
# Display the DataFrame
per_school_summary
###Output
_____no_output_____
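###Markdown
Hedged aside (not part of the original instructions): the per-school score averages above can also be produced in a single pass with `groupby().agg()`; the result should match `per_school_math` and `per_school_reading`.
###Code
# One-pass alternative for the average scores per school (illustrative only)
score_means = school_data_combined.groupby("school_name").agg({"math_score": "mean",
                                                               "reading_score": "mean"})
score_means.head()
###Output
_____no_output_____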
###Markdown
Bottom Performing Schools (By % Overall Passing) * Sort and display the five worst-performing schools by % overall passing.
###Code
# Sort and show top five schools
top_schools = per_school_summary.sort_values(["% Overall Passing"], ascending=False)
top_schools.head(5)
# Sort and show bottom five schools
bottom_schools = per_school_summary.sort_values(["% Overall Passing"], ascending=True)
bottom_schools.head(5)
###Output
_____no_output_____
###Markdown
Math Scores by Grade * Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school. * Create a pandas series for each grade. Hint: use a conditional statement. * Group each series by school * Combine the series into a dataframe * Optional: give the displayed data cleaner formatting
###Code
# Create data series of scores by grade levels using conditionals
ninth_graders = school_data_combined[(school_data_combined["grade"] == "9th")]
tenth_graders = school_data_combined[(school_data_combined["grade"] == "10th")]
eleventh_graders = school_data_combined[(school_data_combined["grade"] == "11th")]
twelfth_graders = school_data_combined[(school_data_combined["grade"] == "12th")]
# Group each by school name
ninth_graders_scores = ninth_graders.groupby(["school_name"]).mean()["math_score"]
tenth_graders_scores = tenth_graders.groupby(["school_name"]).mean()["math_score"]
eleventh_graders_scores = eleventh_graders.groupby(["school_name"]).mean()["math_score"]
twelfth_graders_scores = twelfth_graders.groupby(["school_name"]).mean()["math_score"]
# Combine series into single DataFrame
scores_by_grade = pd.DataFrame({"9th": ninth_graders_scores, "10th": tenth_graders_scores,
"11th": eleventh_graders_scores, "12th": twelfth_graders_scores})
# data munging
scores_by_grade = scores_by_grade[["9th", "10th", "11th", "12th"]]
scores_by_grade.index.name = None
# Display the DataFrame
scores_by_grade
###Output
_____no_output_____
###Markdown
Reading Score by Grade * Perform the same operations as above for reading scores
###Code
# Create data series of scores by grade levels using conditionals
ninth_graders = school_data_combined[(school_data_combined["grade"] == "9th")]
tenth_graders = school_data_combined[(school_data_combined["grade"] == "10th")]
eleventh_graders = school_data_combined[(school_data_combined["grade"] == "11th")]
twelfth_graders = school_data_combined[(school_data_combined["grade"] == "12th")]
# Group each by school name
ninth_graders_scores = ninth_graders.groupby(["school_name"]).mean()["reading_score"]
tenth_graders_scores = tenth_graders.groupby(["school_name"]).mean()["reading_score"]
eleventh_graders_scores = eleventh_graders.groupby(["school_name"]).mean()["reading_score"]
twelfth_graders_scores = twelfth_graders.groupby(["school_name"]).mean()["reading_score"]
# Combine series into single DataFrame
scores_by_grade = pd.DataFrame({"9th": ninth_graders_scores, "10th": tenth_graders_scores,
"11th": eleventh_graders_scores, "12th": twelfth_graders_scores})
# Minor data munging
scores_by_grade = scores_by_grade[["9th", "10th", "11th", "12th"]]
scores_by_grade.index.name = None
# Display the DataFrame
scores_by_grade
###Output
_____no_output_____
###Markdown
Scores by School Spending * Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following: * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * Overall Passing Rate (Average of the above two)
###Code
# Establish the bins
spending_bins = [0, 585, 630, 645, 680]
group_names = ["<$585", "$585-630", "$630-645", "$645-680"]
# Create a copy of the school summary since it has the "Per Student Budget"
# This step can be skipped, but it is best to work on an explicit copy.
school_spending_df = per_school_summary.copy()
# Categorize spending based on the bins.
school_spending_df["Spending Ranges (Per Student)"] = pd.cut(per_school_capita, spending_bins, labels=group_names, right=False)
school_spending_df
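# Hedged aside: with right=False each bin is closed on the left and open on the right,
# so a school spending exactly $585 per student falls in "$585-630" rather than "<$585".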
# Calculate averages for the desired columns.
spending_math_scores = school_spending_df.groupby(["Spending Ranges (Per Student)"]).mean()["Average Math Score"]
spending_reading_scores = school_spending_df.groupby(["Spending Ranges (Per Student)"]).mean()["Average Reading Score"]
spending_passing_math = school_spending_df.groupby(["Spending Ranges (Per Student)"]).mean()["% Passing Math"]
spending_passing_reading = school_spending_df.groupby(["Spending Ranges (Per Student)"]).mean()["% Passing Reading"]
overall_passing_spending = school_spending_df.groupby(["Spending Ranges (Per Student)"]).mean()["% Overall Passing"]
# Assemble into DataFrame
spending_summary = pd.DataFrame({"Average Math Score" : spending_math_scores.round(2),
"Average Reading Score": spending_reading_scores.round(2),
"% Passing Math": spending_passing_math.round(2),
"% Passing Reading": spending_passing_reading.round(2),
"% Overall Passing": overall_passing_spending.round(2)})
# Minor data munging
spending_summary = spending_summary[["Average Math Score",
"Average Reading Score",
"% Passing Math", "% Passing Reading",
"% Overall Passing"]]
# Display results
spending_summary
###Output
_____no_output_____
###Markdown
Scores by School Size * Perform the same operations as above, based on school size.
###Code
# Establish the bins.
size_bins = [0, 1000, 2000, 5000]
group_names = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"]
# Categorize the spending based on the bins
per_school_summary["School Size"] = pd.cut(per_school_summary["Total Students"], size_bins, labels=group_names, right=False)
per_school_summary
# Calculate averages for the desired columns.
size_math_scores = per_school_summary.groupby(["School Size"]).mean()["Average Math Score"]
size_reading_scores = per_school_summary.groupby(["School Size"]).mean()["Average Reading Score"]
size_passing_math = per_school_summary.groupby(["School Size"]).mean()["% Passing Math"]
size_passing_reading = per_school_summary.groupby(["School Size"]).mean()["% Passing Reading"]
size_overall_passing = per_school_summary.groupby(["School Size"]).mean()["% Overall Passing"]
# Assemble into DataFrame
size_summary = pd.DataFrame({"Average Math Score" : size_math_scores,
"Average Reading Score": size_reading_scores,
"% Passing Math": size_passing_math,
"% Passing Reading": size_passing_reading,
"% Overall Passing": size_overall_passing})
# Minor data munging
size_summary = size_summary[["Average Math Score",
"Average Reading Score",
"% Passing Math", "% Passing Reading",
"% Overall Passing"]]
# Display results
size_summary
###Output
_____no_output_____
###Markdown
Scores by School Type * Perform the same operations as above, based on school type
###Code
# Create new series using groupby for:
# Type | Average Math Score | Average Reading Score | % Passing Math | % Passing Reading | % Overall Passing
type_math_scores = per_school_summary.groupby(["School Type"]).mean()["Average Math Score"]
type_reading_scores = per_school_summary.groupby(["School Type"]).mean()["Average Reading Score"]
type_passing_math = per_school_summary.groupby(["School Type"]).mean()["% Passing Math"]
type_passing_reading = per_school_summary.groupby(["School Type"]).mean()["% Passing Reading"]
type_overall_passing = per_school_summary.groupby(["School Type"]).mean()["% Overall Passing"]
# Assemble into DataFrame
type_summary = pd.DataFrame({"Average Math Score" : type_math_scores,
"Average Reading Score": type_reading_scores,
"% Passing Math": type_passing_math,
"% Passing Reading": type_passing_reading,
"% Overall Passing": type_overall_passing})
# Minor data munging
type_summary = type_summary[["Average Math Score",
"Average Reading Score",
"% Passing Math",
"% Passing Reading",
"% Overall Passing"]]
# Display results
type_summary
###Output
_____no_output_____
###Markdown
District Summary Total Number of Schools in a District
###Code
#Sets the index to type
District_df = school_data_complete.set_index("type")
#Filters type to look at District Schools only & returns school names without duplicates
Distr_schools = District_df.loc["District", "school_name"].unique()
#Returns the total counts for District only
total_District_schools = len(Distr_schools)
###Output
_____no_output_____
###Markdown
Total Number of Students in a District
###Code
#Filters type to look at District Schools only & returns size (# of students in each school) without duplicates
Distr_students = District_df.loc["District","size"].unique()
#Calculates the total number of students in all District school
total_District_students = Distr_students.sum()
###Output
_____no_output_____
###Markdown
Total District Schools Budget
###Code
#Filters District row and budget column only
Dist_Budget = District_df.loc["District", "budget"].unique()
#Calculates the total budget for District
total_District_budget = Dist_Budget.sum()
###Output
_____no_output_____
###Markdown
District Average Math and Reading Scores
###Code
#Creates a new df that is filtered by District row & Math/Reading scores columns
#Know data is correct if row counts matches total number of District student
District_Scores_df = District_df.loc[[ "District"], ["math_score", "reading_score"]]
#Calculates the average of Math scores only
Math_Avg = District_Scores_df["math_score"].mean()
#Calculates the average of Reading scores only
Reading_Avg = District_Scores_df["reading_score"].mean()
###Output
_____no_output_____
###Markdown
District Students with Passing Math and Reading Scores
###Code
#Defines, filters passing MATH scores (only math scores >= 70 in the df) & creates new filtered df
passing_Mscore = District_Scores_df["math_score"] >= 70
passing_math_df = District_Scores_df.loc[passing_Mscore, ["math_score"]]
#Returns the number of students with passing Math scores
Math_passers = passing_math_df["math_score"].count()
#Defines, filters passing READING scores (only reading scores >= 70 in the df) & creates new filtered df
passing_Rscore = District_Scores_df["reading_score"] >= 70
passing_reading_df = District_Scores_df.loc[passing_Rscore, ["reading_score"]]
#Returns the number of students with passing Reading scores
Reading_passers = passing_reading_df["reading_score"].count()
#Defines and filters number of students passing MATH AND READING (only math & reading scores >= 70 in the df) then creating a new filtered df
pass_math_AND_reading = passing_Mscore & passing_Rscore
studentspass_MandR_df = District_Scores_df.loc[pass_math_AND_reading, :]
#Returns the number of students with passing Math & Reading scores
Math_Reading_passers = studentspass_MandR_df["math_score"].count()
###Output
_____no_output_____
###Markdown
Percentage of District Students Passing Math & Reading
###Code
#Calculates percentage of students with MATH Passing scores
pct_Math_passing = (Math_passers / total_District_students) * 100
#Calculates percentage of students with READING Passing scores
pct_Reading_passing = (Reading_passers / total_District_students) * 100
#Calculates percentage of students with MATH & READING Passing scores
pct_MathReading_passing = (Math_Reading_passers / total_District_students) * 100
#Creates a dictionary of District results
District = {
"Total Schools": [total_District_schools],
"Total Students": [total_District_students],
"Total Budget": [total_District_budget],
"Average Math Score": [Math_Avg],
"Average Reading Score": [Reading_Avg],
"% Math Passing": [pct_Math_passing],
"% Reading Passing": [pct_Reading_passing],
"% Overall Passing": [pct_MathReading_passing]
}
#Creates summary dataframe that includes dictionary of District results
District_Summary_df = pd.DataFrame(District)
#Fixes formatting of total students and total budget values
District_Summary_df['Total Budget'] = District_Summary_df['Total Budget'].map('${:,.2f}'.format)
District_Summary_df['Total Students'] = District_Summary_df['Total Students'].map('{:,.0f}'.format)
District_Summary_df.index = [' ']
District_Summary_df
###Output
_____no_output_____
###Markdown
School Summary
###Code
#Creates a dataframe with selected columns of interest from the merged data
allschools_df = school_data_complete[["Student ID","school_name", "type", "size", "budget", "math_score", "reading_score"]]
###Output
_____no_output_____
###Markdown
Per Student Budget
###Code
#Creates a new column in the df above that includes the calculated amount of budget per student
allschools_df["Per Student Budget"] = allschools_df["budget"] / allschools_df["size"]
###Output
<ipython-input-17-948b583e3e32>:2: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
allschools_df["Per Student Budget"] = allschools_df["budget"] / allschools_df["size"]
###Markdown
School Students with Passing Scores in Math, Reading and Overall (Math & Reading)
###Code
#Total number of students (to be used in calculating % Math, % Reading & Overall Passing)
#Creates a df grouped by school name with total number of students
student_total = allschools_df.groupby(['school_name'])["Student ID"].count()
#Filters df with MATH passers (only include math scores >= 70) from all schools
schoolMath_passing = allschools_df.loc[allschools_df["math_score"] >= 70]
#Creates a df grouped by school name with total number of students, who PASSED MATH ONLY
group_MathPassers = schoolMath_passing.groupby(['school_name'])['Student ID'].count()
#Filters df with READING passers (only include reading scores >= 70) from all schools
schoolReading_passing = allschools_df.loc[allschools_df["reading_score"] >= 70]
#Creates a df grouped by school name with total number of students, who PASSED READING ONLY
group_ReadingPassers = schoolReading_passing.groupby(['school_name'])['Student ID'].count()
#Creates a variable that includes students with MATH and READING passing scores
MathReading_pass_scores = (allschools_df["math_score"] >= 70) & (allschools_df["reading_score"] >= 70)
#Filters df with MATH & READING passers (only include math & reading scores >= 70) from all schools
schoolMathReading_passing = allschools_df.loc[MathReading_pass_scores]
#Creates a df grouped by school name with total number of students, who PASSED MATH & READING
group_MathReading_Passers = schoolMathReading_passing.groupby(['school_name'])['Student ID'].count()
###Output
_____no_output_____
###Markdown
Passing Rates for Math, Reading & Overall
###Code
#Calculate Math, Reading & Overall passing rates
pct_schoolMath = (group_MathPassers / student_total)*100
pct_schoolReading = (group_ReadingPassers / student_total)*100
pct_schoolMathReading = (group_MathReading_Passers / student_total)*100
#Set up School Summary Dataframe & clean up stage
#Uses the base df created set in the very beginning for school summary only (allschools_df)
school_groups_df = allschools_df.groupby(["school_name"]).mean()
#Adds type back (disappeared after the groupby above since non-numeric column)
school_groups_df["School Type"] = allschools_df.groupby(["school_name"])['type'].min()
#Add columns for Passing Rates
school_groups_df["% Math Passing"] = pct_schoolMath
school_groups_df["% Reading Passing"] = pct_schoolReading
school_groups_df["% Overall Passing"] = pct_schoolMathReading
#Drops Student ID (only needed to clarify that we are aggregating students)
school_groups_df = school_groups_df.drop(["Student ID"], axis=1)
#Renames some columns
school_groups_df = school_groups_df.rename(columns={"size":"Total Students",
"budget":"Total School Budget",
"math_score":"Average Math Score",
"reading_score":"Average Reading Score"})
#Removes index title
school_groups_df.index.names = ['']
#Fixes formatting of total budget, total students and per student budget values
school_groups_df['Total School Budget'] = school_groups_df['Total School Budget'].map('${:,.2f}'.format)
school_groups_df['Per Student Budget'] = school_groups_df['Per Student Budget'].map('{:.2f}'.format)
#Rearranged columns for final summary
school_summary_df = school_groups_df[["School Type", "Total Students",
"Total School Budget", "Per Student Budget",
"Average Math Score", "Average Reading Score",
"% Math Passing", "% Reading Passing",
"% Overall Passing"]]
school_summary_df
###Output
_____no_output_____
###Markdown
Top Performing Schools (By % Overall Passing)
###Code
#Sorts and displays the top five performing schools by % overall passing
topfive_Overall_df = school_summary_df.sort_values('% Overall Passing', ascending=False).head()
topfive_Overall_df
###Output
_____no_output_____
###Markdown
Bottom Performing Schools (By % Overall Passing)
###Code
#Sorts and displays the five worst-performing schools by % overall passing
bottomfive_Overall_df = school_summary_df.sort_values('% Overall Passing', ascending=True).head()
bottomfive_Overall_df
###Output
_____no_output_____
###Markdown
Math Scores by Grade
###Code
#Uses merged data as base df
#Narrows down df by selecting columns of interest and set index to the name of school
bygrade_df = school_data_complete[["school_name", "grade", "math_score", "reading_score"]]
indexed_bygrade = bygrade_df.set_index(["school_name"])
#Filters df for 9th grade only
nineth_students = (indexed_bygrade["grade"] == "9th")
nineth_scores = indexed_bygrade.loc[nineth_students]
#Creates series for average math scores for 9th graders
avgMath_ninth = nineth_scores.groupby(["school_name"])['math_score'].mean()
#Filters df for 10th grade only
tenth_students = (indexed_bygrade["grade"] == "10th")
tenth_scores = indexed_bygrade.loc[tenth_students]
#Creates series for average math scores for 10th graders
avgMath_tenth = tenth_scores.groupby(["school_name"])['math_score'].mean()
#Filters df for 11th grade only
eleventh_students = (indexed_bygrade["grade"] == "11th")
eleventh_scores = indexed_bygrade.loc[eleventh_students]
#Creates series for average math scores for 11th graders
avgMath_eleventh = eleventh_scores.groupby(["school_name"])['math_score'].mean()
#Filters df for 12th grade only
twelveth_students = (indexed_bygrade["grade"] == "12th")
twelveth_scores = indexed_bygrade.loc[twelveth_students]
#Creates series for average math scores for 12th graders
avgMath_twelveth = twelveth_scores.groupby(["school_name"])['math_score'].mean()
#Creates final dataframe summary for Math Scores by Grade
Math_byGrade = pd.DataFrame({
"9th": avgMath_ninth,
"10th": avgMath_tenth,
"11th": avgMath_eleventh,
"12th": avgMath_twelveth
})
Math_byGrade.index.names = ['']
Math_byGrade
###Output
_____no_output_____
###Markdown
Reading Score by Grade
###Code
#Creates series for average Reading scores for 9th graders
avgReading_ninth = nineth_scores.groupby(["school_name"])['reading_score'].mean()
#Creates series for average Reading scores for 10th graders
avgReading_tenth = tenth_scores.groupby(["school_name"])['reading_score'].mean()
#Creates series for average Reading scores for 11th graders
avgReading_eleventh = eleventh_scores.groupby(["school_name"])['reading_score'].mean()
#Creates series for average Reading scores for 12th graders
avgReading_twelveth = twelveth_scores.groupby(["school_name"])['reading_score'].mean()
#Creates final dataframe summary for Reading Scores by Grade
Reading_byGrade = pd.DataFrame({
"9th": avgReading_ninth,
"10th": avgReading_tenth,
"11th": avgReading_eleventh,
"12th": avgReading_twelveth
})
Reading_byGrade.index.names = ['']
Reading_byGrade
###Output
_____no_output_____
###Markdown
Scores by School Spending
###Code
#Uses school_summary_df as base df and deletes columns that are not needed
school_spending_df = school_summary_df.drop(["Total Students", "Total School Budget"], axis=1)
#Creates a series that contains the list of bin data (Per Student Budget)
#Convert values from strings to float since values need to be a float to bin
cut_series = school_spending_df["Per Student Budget"].astype(float)
#Creates list of breakpoints & bin labels (Per Student Budget: min=578, max=655)
bin_breakpoints = [0, 584.9, 609.9, 634.9, 659.9]
bin_labels = ["<$585", "$585-610", "$610-635", "$635-660"]
#Adds the new series above (cut_series) to the current df as a new column
school_spending_df["School Spending (per Student)"] = pd.cut(
x=cut_series,
bins=bin_breakpoints,
labels=bin_labels,
include_lowest=True
)
#Creates df grouped by average budget per student
avg_budget_df = school_spending_df.groupby(["School Spending (per Student)"]).mean()
#Fixes formatting
avg_budget_df['Average Math Score'] = avg_budget_df['Average Math Score'].map('{:.2f}'.format)
avg_budget_df['Average Reading Score'] = avg_budget_df['Average Reading Score'].map('{:.2f}'.format)
avg_budget_df['% Math Passing'] = avg_budget_df['% Math Passing'].map('{:.2f}'.format)
avg_budget_df['% Reading Passing'] = avg_budget_df['% Reading Passing'].map('{:.2f}'.format)
avg_budget_df['% Overall Passing'] = avg_budget_df['% Overall Passing'].map('{:.2f}'.format)
avg_budget_df
###Output
_____no_output_____
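###Markdown
 A small, self-contained illustration of how pd.cut assigns the bin labels used in the cell above; the budget values here are made up purely to land one value in each bin.
###Code
import pandas as pd
#Made-up per-student budgets chosen to land in each of the four bins above
sample_budgets = pd.Series([578.0, 585.0, 610.0, 655.0])
pd.cut(sample_budgets,
       bins=[0, 584.9, 609.9, 634.9, 659.9],
       labels=["<$585", "$585-610", "$610-635", "$635-660"],
       include_lowest=True).tolist()
#-> ['<$585', '$585-610', '$610-635', '$635-660']
###Output
_____no_output_____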
###Markdown
Scores by School Size
###Code
#Creates a series that contains the list of bin data (Total Students)
cut_series2 = school_summary_df["Total Students"]
#list of breakpoints or bins to fill & labels
bin_ranges = [0, 999.9, 1999.9, 4999.9]
size_labels = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"]
#Adds the new series above (cut_series2) to the current df as a new column
school_summary_df["School Size"] = pd.cut(
x=cut_series2,
bins=bin_ranges,
labels=size_labels,
include_lowest=True
)
#Creates df grouped by average school size
avg_schoolsize_df = school_summary_df.groupby(["School Size"]).mean()
avg_schoolsize_df.drop(["Total Students"], axis=1)
###Output
_____no_output_____
###Markdown
Scores by School Type
###Code
#Creates df grouped by type of school
school_type = school_summary_df.groupby(["School Type"]).mean()
#Deletes column not needed for summary
school_type.drop(["Total Students"], axis=1)
###Output
_____no_output_____
###Markdown
PyCitySchools Analysis
###Code
# Dependencies and Setup
import pandas as pd
import os
# File to Load (Remember to Change These)
school_data_to_load = os.path.join('Resources', 'schools_complete.csv')
student_data_to_load = os.path.join('Resources', 'students_complete.csv')
# Read School and Student Data File and store into Pandas DataFrames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
# Combine the data into a single dataset.
school_data_complete = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"])
school_data_complete.head()
###Output
_____no_output_____
###Markdown
District Summary
###Code
# Calculate the total number of schools
total_schools = school_data_complete['school_name'].nunique()
# Calculate the total number of students
total_students = school_data_complete['Student ID'].nunique()
# Calculate the total budget
total_budget = school_data['budget'].sum()
# Calculate the average math score
math_score_avg = school_data_complete['math_score'].mean()
# Calculate the average reading score
reading_score_avg = school_data_complete['reading_score'].mean()
# Calculate the percentage of students with a passing math score (70 or greater)
passing_math_pct = (((student_data['math_score'] >= 70).sum())/total_students)*100
# Calculate the percentage of students with a passing reading score (70 or greater)
passing_reading_pct = (((student_data['reading_score'] >= 70).sum())/total_students)*100
# Calculate the percentage of students who passed math and reading (% Overall Passing)
overall_passing = ((((student_data['reading_score'] >= 70) & (student_data['math_score'] >= 70)).sum())/total_students)*100
# Create a dataframe to hold the above results
district_summary_header = ['Total Schools', 'Total Students', 'Total Budget', 'Average Math Score',
'Average Reading Score', '% Passing Math', '% Passing Reading', '% Overall Passing Rate']
district_summary_values = [total_schools, total_students, total_budget, math_score_avg, reading_score_avg,
passing_math_pct, passing_reading_pct, overall_passing]
district_summary_df = pd.DataFrame([district_summary_values], columns=district_summary_header)
# Formatting
district_summary_df = district_summary_df.round(2)
district_summary_df['Total Budget'] = district_summary_df['Total Budget'].map("${:,.0f}".format)
district_summary_df
###Output
_____no_output_____
###Markdown
School Summary
###Code
# Determine the school name and type
school_type = school_data.set_index(['school_name'])['type']
# Determine total students per school
per_school_students = school_data_complete['school_name'].value_counts()
# Determine total school budget
per_school_budget = school_data_complete.groupby(['school_name']).mean()['budget']
# Determine per student budget
per_student_budget = per_school_budget/per_school_students
# Calculate average math score
per_school_math_score = school_data_complete.groupby(['school_name']).mean()['math_score']
# Calculate average reading score
per_school_reading_score = school_data_complete.groupby(['school_name']).mean()['reading_score']
# Calculate % passing math
per_school_passing_math_pct = (school_data_complete[school_data_complete['math_score'] >=70].groupby(['school_name'])['student_name'].count()/per_school_students)*100
# Calculate % passing reading
per_school_passing_reading_pct = (school_data_complete[school_data_complete['reading_score'] >=70].groupby(['school_name'])['student_name'].count()/per_school_students)*100
# Calculate % overall passing (the percentage of student that passed math and reading)
per_school_overall_passing = (school_data_complete[(school_data_complete['math_score'] >=70)&(school_data_complete['reading_score'] >=70)].groupby(['school_name'])['student_name'].count()/per_school_students)*100
# Create a dataframe for school summary
school_summary_df = pd.DataFrame({'School Type': school_type,
'Total Students': per_school_students,
'Total School Budget': per_school_budget,
'Per Student Budget': per_student_budget,
'Average Math Score': per_school_math_score,
'Average Reading Score': per_school_reading_score,
'% Passing Math': per_school_passing_math_pct,
'% Passing Reading': per_school_passing_reading_pct,
'% Overall Passing Rate': per_school_overall_passing})
# Formatting
school_summary_df['Total School Budget'] = school_summary_df['Total School Budget'].map("${:,.2f}".format)
school_summary_df['Per Student Budget'] = school_summary_df['Per Student Budget'].map("${:,.2f}".format)
school_summary_df = school_summary_df.round(2)
school_summary_df
###Output
_____no_output_____
###Markdown
Top Performing Schools (By % Overall Passing)
###Code
top_perf_schools = school_summary_df.sort_values(['% Overall Passing Rate'], ascending=False)
top_perf_schools.head(5)
###Output
_____no_output_____
###Markdown
Bottom Performing Schools (By % Overall Passing)
###Code
top_perf_schools = school_summary_df.sort_values(['% Overall Passing Rate'], ascending=True)
top_perf_schools.head(5)
###Output
_____no_output_____
###Markdown
Math Scores by Grade
###Code
# Create a pandas series for each grade using a conditional statement
ninth_grade = school_data_complete[(school_data_complete['grade'] == '9th')]
tenth_grade = school_data_complete[(school_data_complete['grade'] == '10th')]
eleventh_grade = school_data_complete[(school_data_complete['grade'] == '11th')]
twelfth_grade = school_data_complete[(school_data_complete['grade'] == '12th')]
# Group each series by school
ninth_grade_math = ninth_grade.groupby(['school_name']).mean()['math_score']
tenth_grade_math = tenth_grade.groupby(['school_name']).mean()['math_score']
eleventh_grade_math = eleventh_grade.groupby(['school_name']).mean()['math_score']
twelfth_grade_math = twelfth_grade.groupby(['school_name']).mean()['math_score']
# Combine the series into a dataframe
math_scores_per_grade_df = pd.DataFrame({'9th': ninth_grade_math,
'10th': tenth_grade_math,
'11th': eleventh_grade_math,
'12th': twelfth_grade_math})
# Formatting
math_scores_per_grade_df = math_scores_per_grade_df.round(2)
math_scores_per_grade_df.index.name = None
math_scores_per_grade_df
###Output
_____no_output_____
###Markdown
Reading Score by Grade
###Code
# Group each series by school
ninth_grade_reading = ninth_grade.groupby(['school_name']).mean()['reading_score']
tenth_grade_reading = tenth_grade.groupby(['school_name']).mean()['reading_score']
eleventh_grade_reading = eleventh_grade.groupby(['school_name']).mean()['reading_score']
twelfth_grade_reading = twelfth_grade.groupby(['school_name']).mean()['reading_score']
# Combine the series into a dataframe
reading_scores_per_grade_df = pd.DataFrame({'9th': ninth_grade_reading,
'10th': tenth_grade_reading,
'11th': eleventh_grade_reading,
'12th': twelfth_grade_reading})
# Formatting
reading_scores_per_grade_df = reading_scores_per_grade_df.round(2)
reading_scores_per_grade_df.index.name = None
reading_scores_per_grade_df
###Output
_____no_output_____
###Markdown
Scores by School Spending
###Code
# Establish the bins
bins = [0, 585, 630, 645, 680]
bins_group_names = ['<$585','$585-630', '$630-645', '$645-680']
# Break down school performance based on average spending ranges (per student)
school_summary_df['Spending Ranges (Per Student)'] = pd.cut(per_student_budget, bins, labels=bins_group_names)
spending_avg_math_score = school_summary_df.groupby(['Spending Ranges (Per Student)']).mean()['Average Math Score']
spending_avg_reading_score = school_summary_df.groupby(['Spending Ranges (Per Student)']).mean()['Average Reading Score']
spending_pct_passing_math = school_summary_df.groupby(['Spending Ranges (Per Student)']).mean()['% Passing Math']
spending_pct_passing_reading = school_summary_df.groupby(['Spending Ranges (Per Student)']).mean()['% Passing Reading']
spending_overall_passing_rate = school_summary_df.groupby(['Spending Ranges (Per Student)']).mean()['% Overall Passing Rate']
# Create a dataframe for scores by school spending
scores_school_spending_df = pd.DataFrame({'Average Math Score' : spending_avg_math_score,
'Average Reading Score': spending_avg_reading_score,
'% Passing Math': spending_pct_passing_math,
'% Passing Reading': spending_pct_passing_reading,
'% Overall Passing Rate': spending_overall_passing_rate})
# Formatting
scores_school_spending_df = scores_school_spending_df.round(2)
scores_school_spending_df
###Output
_____no_output_____
###Markdown
Scores by School Size
###Code
# Establish the bins
bins = [0, 1000, 2000, 5000]
bins_group_names = ['Small (<1000)','Medium (1000-2000)', 'Large (2000-5000)']
# Break down school performance based on school size
school_summary_df['School Size'] = pd.cut(school_summary_df['Total Students'], bins, labels=bins_group_names)
school_size_avg_math_score = school_summary_df.groupby(['School Size']).mean()['Average Math Score']
school_size_avg_reading_score = school_summary_df.groupby(['School Size']).mean()['Average Reading Score']
school_size_pct_passing_math = school_summary_df.groupby(['School Size']).mean()['% Passing Math']
school_size_pct_passing_reading = school_summary_df.groupby(['School Size']).mean()['% Passing Reading']
school_size_overall_passing_rate = school_summary_df.groupby(['School Size']).mean()['% Overall Passing Rate']
# Create a dataframe for scores by school size
scores_school_size_df = pd.DataFrame({'Average Math Score' : school_size_avg_math_score,
'Average Reading Score': school_size_avg_reading_score,
'% Passing Math': school_size_pct_passing_math,
'% Passing Reading': school_size_pct_passing_reading,
'% Overall Passing Rate': school_size_overall_passing_rate})
# Formatting
scores_school_size_df = scores_school_size_df.round(2)
scores_school_size_df
###Output
_____no_output_____
###Markdown
Scores by School Type
###Code
# Break down school performance based on school type
school_type_avg_math_score = school_summary_df.groupby(['School Type']).mean()['Average Math Score']
school_type_avg_reading_score = school_summary_df.groupby(['School Type']).mean()['Average Reading Score']
school_type_pct_passing_math = school_summary_df.groupby(['School Type']).mean()['% Passing Math']
school_type_pct_passing_reading = school_summary_df.groupby(['School Type']).mean()['% Passing Reading']
school_type_overall_passing_rate = school_summary_df.groupby(['School Type']).mean()['% Overall Passing Rate']
# Create a dataframe for scores by school type
scores_school_type_df = pd.DataFrame({'Average Math Score': school_type_avg_math_score,
'Average Reading Score': school_type_avg_reading_score,
'% Passing Math': school_type_pct_passing_math,
'% Passing Reading': school_type_pct_passing_reading,
'% Overall Passing Rate': school_type_overall_passing_rate})
# Formatting
scores_school_type_df = scores_school_type_df.round(2)
scores_school_type_df
###Output
_____no_output_____
###Markdown
District Summary Total Number of Schools in a District
###Code
#Sets the index to type
District_df = school_data_complete.set_index("type")
#Filters type to look at District Schools only & returns school names without duplicates
Distr_schools = District_df.loc["District", "school_name"].unique()
#Returns the total counts for District only
total_District_schools = len(Distr_schools)
###Output
_____no_output_____
###Markdown
Total Number of Students in a District
###Code
#Filters type to look at District Schools only & returns size (# of students in each school) without duplicates
Distr_students = District_df.loc["District","size"].unique()
#Calculates the total number of students in all District school
total_District_students = Distr_students.sum()
###Output
_____no_output_____
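###Markdown
 A toy illustration, with made-up numbers, of why the sizes are de-duplicated with unique() before summing above: each student row repeats its school's size, so summing directly would count every school once per student. (Note this relies on no two schools sharing the exact same size value.)
###Code
import pandas as pd
#One row per student; each row repeats its school's size
toy_sizes = pd.Series([500, 500, 500, 300, 300])
toy_sizes.sum()            # 2100 -> counts each school once per student
toy_sizes.unique().sum()   # 800  -> one size value per school
###Output
_____no_output_____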
###Markdown
Total District Schools Budget
###Code
#Filters District row and budget column only
Dist_Budget = District_df.loc["District", "budget"].unique()
#Calculates the total budget for District
total_District_budget = Dist_Budget.sum()
###Output
_____no_output_____
###Markdown
District Average Math and Reading Scores
###Code
#Creates a new df that is filtered by District row & Math/Reading scores columns
#Know data is correct if row counts matches total number of District student
District_Scores_df = District_df.loc[[ "District"], ["math_score", "reading_score"]]
#Calculates the average of Math scores only
Math_Avg = District_Scores_df["math_score"].mean()
#Calculates the average of Reading scores only
Reading_Avg = District_Scores_df["reading_score"].mean()
###Output
_____no_output_____
###Markdown
District Students with Passing Math and Reading Scores
###Code
#Defines, filters passing MATH scores (only math scores >= 70 in the df) & creates new filtered df
passing_Mscore = District_Scores_df["math_score"] >= 70
passing_math_df = District_Scores_df.loc[passing_Mscore, ["math_score"]]
#Returns the number of students with passing Math scores
Math_passers = passing_math_df["math_score"].count()
#Defines, filters passing READING scores (only reading scores >= 70 in the df) & creates new filtered df
passing_Rscore = District_Scores_df["reading_score"] >= 70
passing_reading_df = District_Scores_df.loc[passing_Rscore, ["reading_score"]]
#Returns the number of students with passing Reading scores
Reading_passers = passing_reading_df["reading_score"].count()
#Defines and filters number of students passing MATH AND READING (only math & reading scores >= 70 in the df) then creating a new filtered df
pass_math_AND_reading = passing_Mscore & passing_Rscore
studentspass_MandR_df = District_Scores_df.loc[pass_math_AND_reading, :]
#Returns the number of students with passing Math & Reading scores
Math_Reading_passers = studentspass_MandR_df["math_score"].count()
###Output
_____no_output_____
###Markdown
Percentage of District Students Passing Math & Reading
###Code
#Calculates percentage of students with MATH Passing scores
pct_Math_passing = (Math_passers / total_District_students) * 100
#Calculates percentage of students with READING Passing scores
pct_Reading_passing = (Reading_passers / total_District_students) * 100
#Calculates percentage of students with MATH & READING Passing scores
pct_MathReading_passing = (Math_Reading_passers / total_District_students) * 100
#Creates a dictionary of District results
District = {
"Total Schools": [total_District_schools],
"Total Students": [total_District_students],
"Total Budget": [total_District_budget],
"Average Math Score": [Math_Avg],
"Average Reading Score": [Reading_Avg],
"% Math Passing": [pct_Math_passing],
"% Reading Passing": [pct_Reading_passing],
"% Overall Passing": [pct_MathReading_passing]
}
#Creates summary dataframe that includes dictionary of District results
District_Summary_df = pd.DataFrame(District)
#Fixes formatting of total students and total budget values
District_Summary_df['Total Budget'] = District_Summary_df['Total Budget'].map('${:,.2f}'.format)
District_Summary_df['Total Students'] = District_Summary_df['Total Students'].map('{:,.0f}'.format)
District_Summary_df.index = [' ']
District_Summary_df
###Output
_____no_output_____
###Markdown
School Summary
###Code
#Creates a dataframe with selected columns of interest from the merged data
allschools_df = school_data_complete[["Student ID","school_name", "type", "size", "budget", "math_score", "reading_score"]]
###Output
_____no_output_____
###Markdown
Per Student Budget
###Code
#Creates a new column in the df above that includes the calculated amount of budget per student
allschools_df["Per Student Budget"] = allschools_df["budget"] / allschools_df["size"]
###Output
C:\Users\jabuk\Anaconda3\lib\site-packages\ipykernel_launcher.py:2: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
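###Markdown
 A minimal sketch of one way to avoid the SettingWithCopyWarning shown above, assuming the same school_data_complete frame: select the columns with an explicit .copy() (allschools_copy_df is a hypothetical name) so the later column assignment targets an independent DataFrame rather than a possible view.
###Code
#An explicit copy of the column subset makes the later assignment unambiguous
allschools_copy_df = school_data_complete[["Student ID", "school_name", "type",
                                           "size", "budget", "math_score",
                                           "reading_score"]].copy()
#Adding the derived column on the copy no longer triggers SettingWithCopyWarning
allschools_copy_df["Per Student Budget"] = allschools_copy_df["budget"] / allschools_copy_df["size"]
###Output
_____no_output_____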
###Markdown
School Students with Passing Scores in Math, Reading and Overall (Math & Reading)
###Code
#Total number of students (to be used in calculating % Math, % Reading & Overall Passing)
#Creates a df grouped by school name with total number of students
student_total = allschools_df.groupby(['school_name'])["Student ID"].count()
#Filters df with MATH passers (only include math scores >= 70) from all schools
schoolMath_passing = allschools_df.loc[allschools_df["math_score"] >= 70]
#Creates a df grouped by school name with total number of students, who PASSED MATH ONLY
group_MathPassers = schoolMath_passing.groupby(['school_name'])['Student ID'].count()
#Filters df with READING passers (only include reading scores >= 70) from all schools
schoolReading_passing = allschools_df.loc[allschools_df["reading_score"] >= 70]
#Creates a df grouped by school name with total number of students, who PASSED READING ONLY
group_ReadingPassers = schoolReading_passing.groupby(['school_name'])['Student ID'].count()
#Creates a variable that includes students with MATH and READING passing scores
MathReading_pass_scores = (allschools_df["math_score"] >= 70) & (allschools_df["reading_score"] >= 70)
#Filters df with MATH & READING passers (only include math & reading scores >= 70) from all schools
schoolMathReading_passing = allschools_df.loc[MathReading_pass_scores]
#Creates a df grouped by school name with total number of students, who PASSED MATH & READING
group_MathReading_Passers = schoolMathReading_passing.groupby(['school_name'])['Student ID'].count()
###Output
_____no_output_____
###Markdown
Passing Rates for Math, Reading & Overall
###Code
#Calculate Math, Reading & Overall passing rates
pct_schoolMath = (group_MathPassers / student_total)*100
pct_schoolReading = (group_ReadingPassers / student_total)*100
pct_schoolMathReading = (group_MathReading_Passers / student_total)*100
#Set up School Summary Dataframe & clean up stage
#Uses the base df created set in the very beginning for school summary only (allschools_df)
school_groups_df = allschools_df.groupby(["school_name"]).mean()
#Adds type back (disappeared after the groupby above since non-numeric column)
school_groups_df["School Type"] = allschools_df.groupby(["school_name"])['type'].min()
#Add columns for Passing Rates
school_groups_df["% Math Passing"] = pct_schoolMath
school_groups_df["% Reading Passing"] = pct_schoolReading
school_groups_df["% Overall Passing"] = pct_schoolMathReading
#Drops school ID (only needed to clarify that we are aggregating students)
school_groups_df = school_groups_df.drop(["Student ID"], axis=1)
#Renames some columns
school_groups_df = school_groups_df.rename(columns={"size":"Total Students",
"budget":"Total School Budget",
"math_score":"Average Math Score",
"reading_score":"Average Reading Score"})
#Removes index title
school_groups_df.index.names = ['']
#Fixes formatting of total budget, total students and per student budget values
school_groups_df['Total School Budget'] = school_groups_df['Total School Budget'].map('${:,.2f}'.format)
school_groups_df['Per Student Budget'] = school_groups_df['Per Student Budget'].map('{:.2f}'.format)
#Rearranged columns for final summary
school_summary_df = school_groups_df[["School Type", "Total Students",
"Total School Budget", "Per Student Budget",
"Average Math Score", "Average Reading Score",
"% Math Passing", "% Reading Passing",
"% Overall Passing"]]
school_summary_df
###Output
_____no_output_____
###Markdown
Top Performing Schools (By % Overall Passing)
###Code
#Sorts and displays the top five performing schools by % overall passing
topfive_Overall_df = school_summary_df.sort_values('% Overall Passing', ascending=False).head()
topfive_Overall_df
###Output
_____no_output_____
###Markdown
Bottom Performing Schools (By % Overall Passing)
###Code
#Sorts and displays the five worst-performing schools by % overall passing
bottomfive_Overall_df = school_summary_df.sort_values('% Overall Passing', ascending=True).head()
bottomfive_Overall_df
###Output
_____no_output_____
###Markdown
Math Scores by Grade
###Code
#Uses merged data as base df
#Narrows down df by selecting columns of interest and set index to the name of school
bygrade_df = school_data_complete[["school_name", "grade", "math_score", "reading_score"]]
indexed_bygrade = bygrade_df.set_index(["school_name"])
#Filters df for 9th grade only
nineth_students = (indexed_bygrade["grade"] == "9th")
nineth_scores = indexed_bygrade.loc[nineth_students]
#Creates series for average math scores for 9th graders
avgMath_ninth = nineth_scores.groupby(["school_name"])['math_score'].mean()
#Filters df for 10th grade only
tenth_students = (indexed_bygrade["grade"] == "10th")
tenth_scores = indexed_bygrade.loc[tenth_students]
#Creates series for average math scores for 10th graders
avgMath_tenth = tenth_scores.groupby(["school_name"])['math_score'].mean()
#Filters df for 11th grade only
eleventh_students = (indexed_bygrade["grade"] == "11th")
eleventh_scores = indexed_bygrade.loc[eleventh_students]
#Creates series for average math scores for 11th graders
avgMath_eleventh = eleventh_scores.groupby(["school_name"])['math_score'].mean()
#Filters df for 12th grade only
twelveth_students = (indexed_bygrade["grade"] == "12th")
twelveth_scores = indexed_bygrade.loc[twelveth_students]
#Creates series for average math scores for 12th graders
avgMath_twelveth = twelveth_scores.groupby(["school_name"])['math_score'].mean()
#Creates final dataframe summary for Math Scores by Grade
Math_byGrade = pd.DataFrame({
"9th": avgMath_ninth,
"10th": avgMath_tenth,
"11th": avgMath_eleventh,
"12th": avgMath_twelveth
})
Math_byGrade.index.names = ['']
Math_byGrade
###Output
_____no_output_____
###Markdown
Reading Score by Grade
###Code
#Creates series for average Reading scores for 9th graders
avgReading_ninth = nineth_scores.groupby(["school_name"])['reading_score'].mean()
#Creates series for average Reading scores for 10th graders
avgReading_tenth = tenth_scores.groupby(["school_name"])['reading_score'].mean()
#Creates series for average Reading scores for 11th graders
avgReading_eleventh = eleventh_scores.groupby(["school_name"])['reading_score'].mean()
#Creates series for average Reading scores for 12th graders
avgReading_twelveth = twelveth_scores.groupby(["school_name"])['reading_score'].mean()
#Creates final dataframe summary for Reading Scores by Grade
Reading_byGrade = pd.DataFrame({
"9th": avgReading_ninth,
"10th": avgReading_tenth,
"11th": avgReading_eleventh,
"12th": avgReading_twelveth
})
Reading_byGrade.index.names = ['']
Reading_byGrade
###Output
_____no_output_____
###Markdown
Scores by School Spending
###Code
#Uses school_summary_df as base df and deletes columns that are not needed
school_spending_df = school_summary_df.drop(["Total Students", "Total School Budget"], axis=1)
#Creates a series that contains the list of bin data (Per Student Budget)
#Convert values from strings to float since values need to be a float to bin
cut_series = school_spending_df["Per Student Budget"].astype(float)
#Creates list of breakpoints & bin labels (Per Student Budget: min=578, max=655)
bin_breakpoints = [0, 584.9, 609.9, 634.9, 659.9]
bin_labels = ["<$585", "$585-610", "$610-635", "$635-660"]
#Adds the new series above (cut_series) to the current df as a new column
school_spending_df["School Spending (per Student)"] = pd.cut(
x=cut_series,
bins=bin_breakpoints,
labels=bin_labels,
include_lowest=True
)
#Creates df grouped by average budget per student
avg_budget_df = school_spending_df.groupby(["School Spending (per Student)"]).mean()
#Fixes formatting
avg_budget_df['Average Math Score'] = avg_budget_df['Average Math Score'].map('{:.2f}'.format)
avg_budget_df['Average Reading Score'] = avg_budget_df['Average Reading Score'].map('{:.2f}'.format)
avg_budget_df['% Math Passing'] = avg_budget_df['% Math Passing'].map('{:.2f}'.format)
avg_budget_df['% Reading Passing'] = avg_budget_df['% Reading Passing'].map('{:.2f}'.format)
avg_budget_df['% Overall Passing'] = avg_budget_df['% Overall Passing'].map('{:.2f}'.format)
avg_budget_df
###Output
_____no_output_____
###Markdown
Scores by School Size
###Code
#Creates a series that contains the list of bin data (Total Students)
cut_series2 = school_summary_df["Total Students"]
#list of breakpoints or bins to fill & labels
bin_ranges = [0, 999.9, 1999.9, 4999.9]
size_labels = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"]
#Adds the new series above (cut_series2) to the current df as a new column
school_summary_df["School Size"] = pd.cut(
x=cut_series2,
bins=bin_ranges,
labels=size_labels,
include_lowest=True
)
#Creates df grouped by average school size
avg_schoolsize_df = school_summary_df.groupby(["School Size"]).mean()
avg_schoolsize_df.drop(["Total Students"], axis=1)
###Output
_____no_output_____
###Markdown
Scores by School Type
###Code
#Creates df grouped by type of school
school_type = school_summary_df.groupby(["School Type"]).mean()
#Deletes column not needed for summary
school_type.drop(["Total Students"], axis=1)
###Output
_____no_output_____
###Markdown
Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import pandas as pd
import numpy as np
import random
# File to Load (Remember to Change These)
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"
# Read School and Student Data File and store into Pandas DataFrames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
# Combine the data into a single dataset.
school_data_complete = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"])
school_data_complete.head()
###Output
_____no_output_____
###Markdown
District Summary
* Calculate the total number of schools
* Calculate the total number of students
* Calculate the total budget
* Calculate the average math score
* Calculate the average reading score
* Calculate the percentage of students with a passing math score (70 or greater)
* Calculate the percentage of students with a passing reading score (70 or greater)
* Calculate the percentage of students who passed math **and** reading (% Overall Passing)
* Create a dataframe to hold the above results
* Optional: give the displayed data cleaner formatting
###Code
#Total number of schools and students by doing count
total_schools = school_data["school_name"].count()
total_students = student_data["student_name"].count()
#Total budget by doing sum of the budget
total_budget = school_data["budget"].sum()
#Calculating average math and reading scores by using mean
avg_math = student_data["math_score"].mean()
avg_reading = student_data["reading_score"].mean()
#Calculating math, reading, and total passing and finding the percentage by multiplying it by 100
math_passing = student_data.loc[student_data["math_score"] >= 70].count()['Student ID']
math_passing_per = (math_passing/total_students)*100
reading_passing = student_data.loc[student_data["reading_score"] >= 70].count()['Student ID']
reading_passing_per = (reading_passing/total_students)*100
total_passing = student_data.loc[(student_data["math_score"] >= 70) & (student_data["reading_score"] >= 70)].count()['Student ID']
total_passing_per = (total_passing/total_students)*100
#Putting all of the findings in a DataFrame
district_summary = pd.DataFrame({
"Total Schools": [total_schools],
"Total Students": [total_students],
"Total Budget": [total_budget],
"Average Math Score": [avg_math],
"Average Reading Score": [avg_reading],
"% Passing Math": [math_passing_per],
"% Passing Reading": [reading_passing_per],
"% Overall Passing": [total_passing_per]
})
#Adding all necessary formatting
district_summary['Total Students'] = district_summary['Total Students'].map("{:,}".format)
district_summary['Total Budget'] = district_summary['Total Budget'].map("${:,.2f}".format)
#district_summary['% Passing Math'] = district_summary['% Passing Math'].map("{:,.2f}".format)
district_summary
###Output
_____no_output_____
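###Markdown
 A sketch of the percentage formatting that the commented-out line above hints at, assuming the district_summary frame from this cell; map applies the format string element-wise and turns the columns into strings, so a copy keeps the numeric values available.
###Code
#Non-destructive copy so the unformatted numbers above stay available (district_summary_fmt is a hypothetical name)
district_summary_fmt = district_summary.copy()
district_summary_fmt['% Passing Math'] = district_summary_fmt['% Passing Math'].map("{:.2f}%".format)
district_summary_fmt['% Passing Reading'] = district_summary_fmt['% Passing Reading'].map("{:.2f}%".format)
district_summary_fmt
###Output
_____no_output_____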
###Markdown
School Summary
* Create an overview table that summarizes key metrics about each school, including:
  * School Name
  * School Type
  * Total Students
  * Total School Budget
  * Per Student Budget
  * Average Math Score
  * Average Reading Score
  * % Passing Math
  * % Passing Reading
  * % Overall Passing (The percentage of students that passed math **and** reading.)
* Create a dataframe to hold the above results
###Code
#Finding school name, school type, total students, total budget per school and per student below by school name
by_school = school_data_complete.groupby('school_name')
school_type = by_school['type'].first()
student_per_school = by_school['Student ID'].count()
#print(student_per_school)
school_budget = by_school['budget'].mean()
student_budget = school_budget/student_per_school
#print(school_budget/student_per_school)
#Finding the average math and reading scores by each school using mean
average_math = by_school['math_score'].mean()
average_reading = by_school['reading_score'].mean()
#Finding the percent of passing for math, reading and overall using groupby
math_passing = school_data_complete.loc[school_data_complete["math_score"] >= 70].groupby('school_name')['Student ID'].count()/student_per_school
reading_passing = school_data_complete.loc[school_data_complete["reading_score"] >= 70].groupby('school_name')['Student ID'].count()/student_per_school
overall_passing = school_data_complete.loc[(school_data_complete["math_score"] >= 70) & (school_data_complete["reading_score"] >= 70)].groupby('school_name')['Student ID'].count()/student_per_school
#Multiplying the passing fractions by 100 to express them as percentages
math_passing_total = (math_passing)*100
reading_passing_total = (reading_passing)*100
overall_passing_total = (overall_passing)*100
#Putting all the findings in a DataFrame
school_summary = pd.DataFrame({
"School Type": school_type,
"Total Students": student_per_school,
"Total School Budget": school_budget,
"Budget Per Student": student_budget,
"Average Math Score": average_math,
"Average Reading Score": average_reading,
"% Passing Math": math_passing_total,
"% Passing Reading": reading_passing_total,
"% Overall Passing": overall_passing_total
})
#Adding all necessary formatting
school_summary['Total School Budget'] = school_summary['Total School Budget'].map("${:,.2f}".format)
#school_summary['Budget Per Student'] = school_summary['Budget Per Student'].map("${:}".format)
#school_summary['Average Math Score'] = school_summary['Average Math Score'].map("{:,.2f}".format)
#school_summary['Average Reading Score'] = school_summary['Average Reading Score'].map("{:,.2f}".format)
#school_summary['% Passing Math'] = school_summary['% Passing Math'].map("{:,.2}".format)
#school_summary['% Passing Reading'] = school_summary['% Passing Reading'].map("{:,.2}".format)
#school_summary['% Overall Passing'] = school_summary['% Overall Passing'].map("{:,.2}".format)
school_summary
###Output
_____no_output_____
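###Markdown
 A toy illustration, with made-up school names and scores, of how the two grouped counts in the cell above align on school_name when divided to give a per-school passing percentage.
###Code
import pandas as pd
#Toy frame mirroring the grouped-count division above
toy = pd.DataFrame({'school_name': ['A', 'A', 'B', 'B'],
                    'math_score': [80, 60, 90, 95]})
students_per_school = toy.groupby('school_name')['math_score'].count()
math_passers = toy.loc[toy['math_score'] >= 70].groupby('school_name')['math_score'].count()
#The division aligns on the shared school_name index: A -> 50.0, B -> 100.0
(math_passers / students_per_school) * 100
###Output
_____no_output_____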
###Markdown
Top Performing Schools (By % Overall Passing) * Sort and display the top five performing schools by % overall passing.
###Code
#Using the sort values function to sort the schools and display the top 5 performing schools
top_five_schools = school_summary.sort_values(['% Overall Passing'], ascending=[False])
top_five_schools.head(5)
###Output
_____no_output_____
###Markdown
Bottom Performing Schools (By % Overall Passing) * Sort and display the five worst-performing schools by % overall passing.
###Code
#Using the sort values function to sort the schools and display the bottom 5 performing schools
bottom_five_schools = school_summary.sort_values(['% Overall Passing'], ascending=[True])
bottom_five_schools.head(5)
###Output
_____no_output_____
###Markdown
Math Scores by Grade
* Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school.
* Create a pandas series for each grade. Hint: use a conditional statement.
* Group each series by school
* Combine the series into a dataframe
* Optional: give the displayed data cleaner formatting
###Code
#Calculating the average math score for each grade by school
math_avg_9th = school_data_complete.loc[school_data_complete["grade"] == '9th'].groupby('school_name')['math_score'].mean()
math_avg_10th = school_data_complete.loc[school_data_complete["grade"] == '10th'].groupby('school_name')['math_score'].mean()
math_avg_11th = school_data_complete.loc[school_data_complete["grade"] == '11th'].groupby('school_name')['math_score'].mean()
math_avg_12th = school_data_complete.loc[school_data_complete["grade"] == '12th'].groupby('school_name')['math_score'].mean()
#Putting all findings in a DataFrame
math_by_grade = pd.DataFrame({"9th": math_avg_9th,
"10": math_avg_10th,
"11th": math_avg_11th,
"12th": math_avg_12th
})
math_by_grade
###Output
_____no_output_____
###Markdown
Reading Score by Grade * Perform the same operations as above for reading scores
###Code
#Calculating the average reading score for each grade by school
reading_avg_9th = school_data_complete.loc[school_data_complete["grade"] == '9th'].groupby('school_name')['reading_score'].mean()
reading_avg_10th = school_data_complete.loc[school_data_complete["grade"] == '10th'].groupby('school_name')['reading_score'].mean()
reading_avg_11th = school_data_complete.loc[school_data_complete["grade"] == '11th'].groupby('school_name')['reading_score'].mean()
reading_avg_12th = school_data_complete.loc[school_data_complete["grade"] == '12th'].groupby('school_name')['reading_score'].mean()
#Putting all findings in a DataFrame
reading_by_grade = pd.DataFrame({"9th": reading_avg_9th,
"10": reading_avg_10th,
"11th": reading_avg_11th,
"12th": reading_avg_12th
})
reading_by_grade
###Output
_____no_output_____
###Markdown
Scores by School Spending
* Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following:
  * Average Math Score
  * Average Reading Score
  * % Passing Math
  * % Passing Reading
  * Overall Passing Rate (Average of the above two)
###Code
#creating bins and labels
bins = [0,585, 630, 645, 675]
labels = ['$0-585', '$585-629', '$630-644', '$645-675']
#binning school summary from the column Budget Per Student and adding the new columns
school_summary["Spending Ranges"] = pd.cut(school_summary["Budget Per Student"], bins, labels= labels)
spending_groups = school_summary.loc[:, ["Spending Ranges", "Average Math Score", "Average Reading Score", "% Passing Math", "% Passing Reading", "% Overall Passing"]].groupby("Spending Ranges")
spending_groups.mean()
###Output
_____no_output_____
###Markdown
Scores by School Size * Perform the same operations as above, based on school size.
###Code
#creating bins and labels
bins = [0,1000, 2000, 5000]
labels = ['Small (<1000)', 'Medium (1000-2000)', 'Large (2000-5000)']
#binning school summary from the column Total Students and adding the new columns
school_summary["School Size"] = pd.cut(school_summary["Total Students"], bins, labels= labels)
spending_groups = school_summary.loc[:, ["School Size", "Average Math Score", "Average Reading Score", "% Passing Math", "% Passing Reading", "% Overall Passing"]].groupby("School Size")
spending_groups.mean()
###Output
_____no_output_____
###Markdown
Scores by School Type * Perform the same operations as above, based on school type
###Code
#creating a new variable to group by school type
school_type = school_summary
#converting it to a DataFrame
school_type = pd.DataFrame(school_type)
#grouping it by the school type and determining the average scores and passing scores per school type
school_type = school_summary.groupby(['School Type'])['Average Math Score',
'Average Reading Score',
'% Passing Math',
'% Passing Reading',
'% Overall Passing'
].mean()
school_type
###Output
/Users/mehamarathe/opt/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:8: FutureWarning: Indexing with multiple keys (implicitly converted to a tuple of keys) will be deprecated, use a list instead.
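###Markdown
 A minimal sketch of the list-based column selection that the FutureWarning above recommends, assuming the same school_summary frame; selecting the columns with a list (rather than a tuple of keys) keeps the behaviour on newer pandas versions.
###Code
#Select the score columns with a list before taking the mean by school type
score_cols = ['Average Math Score', 'Average Reading Score',
              '% Passing Math', '% Passing Reading', '% Overall Passing']
school_summary.groupby('School Type')[score_cols].mean()
###Output
_____no_output_____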
###Markdown
District Summary
* Calculate the total number of schools
* Calculate the total number of students
* Calculate the total budget
* Calculate the average math score
* Calculate the average reading score
* Calculate the overall passing rate (overall average score), i.e. (avg. math score + avg. reading score)/2
* Calculate the percentage of students with a passing math score (70 or greater)
* Calculate the percentage of students with a passing reading score (70 or greater)
* Create a dataframe to hold the above results
* Optional: give the displayed data cleaner formatting
###Code
district_true = school_data_complete['type'] == 'District'
district_data = school_data_complete[district_true]
district_data.head()
# Make new dataframe and populate it with corresponding values
district_summary= pd.DataFrame([0])
# Calculate the total number of schools
# Calculate the total number of students
district_summary["Number of Schools"] = len(district_data['School ID'].value_counts())
district_summary["Number of Students"] = district_data['Student ID'].count()
# Calculate the total budget
budget_vals = district_data['budget'].unique()
district_summary["Total Budget"] = budget_vals.sum()
# Calculate the average math score
math_score = district_data["math_score"]
district_summary["Average Math Score"] = math_score.mean()
# Calculate the average reading score
reading_score = district_data["reading_score"]
district_summary["Average Reading Score"] = reading_score.mean()
# Calculate the overall passing rate (overall average score), i.e. (avg. math score + avg. reading score)/2
district_summary["Overall Average Score"] = (reading_score + math_score)/2
# Calculate the percentage of students with a passing math score (70 or greater)
math_score = district_data["math_score"]
district_summary["% Passing Math"] = (math_score >= 70).mean() * 100
# Calculate the percentage of students with a passing reading score (70 or greater)
passing_reading_score = district_data["reading_score"]
district_summary["% Passing Reading"] = (passing_reading_score >= 70).mean() * 100
district_summary = district_summary.drop([0], axis=1)
district_summary
###Output
_____no_output_____
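###Markdown
 A tiny worked example of the boolean-mean trick used above: comparing a Series to 70 yields booleans, and their mean is the fraction of passing values, so multiplying by 100 gives a percentage (the scores here are made up).
###Code
import pandas as pd
#Made-up scores: two of the four are >= 70
scores = pd.Series([65, 70, 88, 54])
(scores >= 70).mean() * 100   # 50.0
###Output
_____no_output_____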
###Markdown
School Summary
* Create an overview table that summarizes key metrics about each school, including:
  * School Name
  * School Type
  * Total Students
  * Total School Budget
  * Per Student Budget
  * Average Math Score
  * Average Reading Score
  * % Passing Math
  * % Passing Reading
  * Overall Passing Rate (Average of the above two)
* Create a dataframe to hold the above results
###Code
# Create an overview table that summarizes key metrics about each school, including:
schools_summary= school_data_complete.drop(columns=['Student ID','student_name', 'gender', 'grade', 'School ID'])
schools_summary = schools_summary.groupby(['school_name', 'type']).mean()
schools_summary = schools_summary.reset_index(drop=False)
schools_summary = schools_summary.set_index('school_name')
# Total Students
# Total School Budget
# Per Student Budget
# Average Reading Score
schools_summary = schools_summary.rename(columns={"type": "School Type", "reading_score" : "Average Reading Score", "math_score"
: "Average Math Score", "size": "Total Students", "budget": "Total School Budget"})
budget = schools_summary['Total School Budget'].values
students = schools_summary['Total Students'].values
schools_summary['Per Student Budget'] = budget/students
# % Passing Math
schools_summary2 = school_data_complete
passing_math = school_data_complete.loc[schools_summary2['math_score']>69,:]
passing_math = passing_math.groupby('school_name').math_score.count().reset_index()
passing_math = passing_math.rename(columns={"math_score":"% Passing Math"})
# Merge the two dataframes
schools_summary = passing_math.merge(schools_summary, on="school_name")
schools_summary['% Passing Math'] = (schools_summary['% Passing Math'] / schools_summary['Total Students']) * 100
# % Passing Reading
schools_summary2 = school_data_complete
passing_reading = school_data_complete.loc[schools_summary2['reading_score']>69,:]
passing_reading = passing_reading.groupby('school_name').reading_score.count().reset_index()
passing_reading = passing_reading.rename(columns={"reading_score":"% Passing Reading"})
schools_summary = passing_reading.merge(schools_summary, on="school_name")
schools_summary['% Passing Reading'] = (schools_summary['% Passing Reading'] / schools_summary['Total Students']) * 100
# Overall Passing Rate (Average of the above two)
schools_summary['% Overall Passing'] = (schools_summary['% Passing Math'] + schools_summary['% Passing Reading']) / 2
schools_summary = schools_summary.set_index('school_name')
schools_summary = schools_summary.rename_axis("")
schools_summary
###Output
_____no_output_____
###Markdown
Top Performing Schools (By Passing Rate)* Sort and display the top five schools in overall passing rate
###Code
top_schools = schools_summary.sort_values(by='% Overall Passing', ascending=False).head()
top_schools = top_schools.rename_axis("")
top_schools
###Output
_____no_output_____
###Markdown
Bottom Performing Schools (By Passing Rate) * Sort and display the five worst-performing schools
###Code
bottom_schools = schools_summary.sort_values(by='% Overall Passing', ascending=True).head()
bottom_schools = bottom_schools.rename_axis("")
bottom_schools
###Output
_____no_output_____
###Markdown
Math Scores By Grade
* Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school.
* Create a pandas series for each grade. Hint: use a conditional statement.
* Group each series by school
* Combine the series into a dataframe
* Optional: give the displayed data cleaner formatting
###Code
# Create a table that displays each school's math grade by grade level
math_scores_by_grade = school_data_complete.drop(columns=['Student ID','student_name', 'gender', 'School ID', 'size', 'budget', 'reading_score'])
# Find averages
math_scores_by_grade = math_scores_by_grade.groupby(['school_name', 'grade']).mean()
# Reset index to make it more clear
math_scores_by_grade = math_scores_by_grade.reset_index(drop=False)
math_scores_by_grade = math_scores_by_grade.set_index('school_name')
# Pivot table to display grade index as columns
math_scores_by_grade = math_scores_by_grade.pivot(columns='grade', values='math_score')
math_scores_by_grade = math_scores_by_grade.rename_axis("", axis=0)
math_scores_by_grade = math_scores_by_grade.rename_axis("", axis=1)
math_scores_by_grade
###Output
_____no_output_____
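###Markdown
 A toy sketch, with made-up values, of what the pivot step above does: each (school, grade) pair becomes one cell, and the grade values move from rows into columns indexed by school.
###Code
import pandas as pd
#Made-up scores for two schools and two grades, mirroring the pivot above
toy = pd.DataFrame({'school_name': ['A', 'A', 'B', 'B'],
                    'grade': ['9th', '10th', '9th', '10th'],
                    'math_score': [80.0, 82.0, 75.0, 78.0]})
toy = toy.set_index('school_name')
#Grades become the columns; the existing school_name index stays as the rows
toy.pivot(columns='grade', values='math_score')
###Output
_____no_output_____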
###Markdown
Reading Score by Grade * Perform the same operations as above for reading scores
###Code
# Create a table that displays each school's reading grade by grade level
reading_scores_by_grade = school_data_complete.drop(columns=['Student ID','student_name', 'gender', 'School ID', 'size', 'budget', 'math_score'])
# Find averages
reading_scores_by_grade = reading_scores_by_grade.groupby(['school_name', 'grade']).mean()
# Reset index to make it more clear
reading_scores_by_grade = reading_scores_by_grade.reset_index(drop=False)
reading_scores_by_grade = reading_scores_by_grade.set_index('school_name')
# Pivot table to display grade index as columns
reading_scores_by_grade = reading_scores_by_grade.pivot(columns='grade', values='reading_score')
reading_scores_by_grade = reading_scores_by_grade.rename_axis("", axis=0)
reading_scores_by_grade = reading_scores_by_grade.rename_axis("", axis=1)
reading_scores_by_grade
###Output
_____no_output_____
###Markdown
Scores by School Spending
* Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following:
  * Average Math Score
  * Average Reading Score
  * % Passing Math
  * % Passing Reading
  * Overall Passing Rate (Average of the above two)
###Code
school_spending = schools_summary[['Average Math Score', 'Average Reading Score', '% Passing Reading', '% Passing Math', '% Overall Passing', 'Per Student Budget']]
# Sample bins. Feel free to create your own bins.
spending_bins = [0, 585, 615, 645, 675]
group_names = ["<$585", "$585-615", "$615-645", "$645-675"]
school_spending["Spending Ranges (Per Student)"] = pd.cut(school_spending["Per Student Budget"], spending_bins, labels=group_names)
school_spending = school_spending.drop(columns=['Per Student Budget'])
school_spending = school_spending.groupby(school_spending["Spending Ranges (Per Student)"], as_index=True)
# school_spending = school_spending.set_index('Spending Ranges (Per Student)').mean()
school_spending.mean()
###Output
C:\Users\megam\Anaconda3\envs\PythonData\lib\site-packages\ipykernel_launcher.py:1: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
"""Entry point for launching an IPython kernel.
###Markdown
Scores by School Size* Perform the same operations as above, based on school size.
###Code
# Sample bins. Feel free to create your own bins.
size_bins = [0, 1000, 2000, 5000]
group_names = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"]
school_size = schools_summary[['Average Math Score', 'Average Reading Score', '% Passing Reading', '% Passing Math', '% Overall Passing', 'Total Students']]
school_size["Size"] = pd.cut(school_size["Total Students"], size_bins, labels=group_names)
school_size = school_size.drop(columns=['Total Students'])
school_size = school_size.groupby(school_size["Size"], as_index=True)
# school_size = school_size.set_index('Total Students').mean()
school_size.mean()
###Output
C:\Users\megam\Anaconda3\envs\PythonData\lib\site-packages\ipykernel_launcher.py:1: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
"""Entry point for launching an IPython kernel.
###Markdown
Scores by School Type* Perform the same operations as above, based on school type.
###Code
schools_summary = schools_summary.rename_axis("school_name", axis=1)
schools_summary = schools_summary.reset_index()
school_type = schools_summary[['Average Math Score', 'Average Reading Score', '% Passing Reading', '% Passing Math', '% Overall Passing', 'school_name']]
df_to_merge = school_data_complete.drop(columns=['Student ID','student_name', 'gender', 'size', 'School ID', 'budget', 'reading_score', 'grade', 'math_score'])
school_type = schools_summary.merge(df_to_merge, on='school_name', how='inner', copy=False)
school_type = school_type.drop(columns=['index', 'Total Students', 'Total School Budget', 'School Type'] )
school_type = school_type.drop_duplicates()
school_type = school_type.reset_index(drop=True)
school_type
# Classification
school_type = school_type.groupby(school_type["type"], as_index=True).mean()
school_type = school_type.rename_axis("", axis=0)
school_type
###Output
_____no_output_____
###Markdown
PyCity Schools Analysis & Observations
* Overall, all schools performed better in reading (100%) than in math (92%), since the average reading score is higher than the average math score at every school in the city.
* Charter schools performed better in both math and reading than district schools.
* Performance did not correlate with the amount spent per student, but with school size and type; the smaller schools, which were all charter schools, performed better.
* District schools scored far lower across the board in math and reading than charter schools, and the lowest scores were also observed among the schools that spend the most per student and among the largest schools.
* A higher per-student budget did not necessarily lead to higher scores. For example, Cabrera High School, which has the highest passing rate, spent less than $600 per student, while the bottom five schools all spend more than $600 per student. In addition, schools spending less than $615 per student scored noticeably (about 15%) higher than those spending more, and the highest spending range ($645-675) had the lowest scores.
* School type matters: the top five schools are all charter schools and the bottom five are all district schools. The gap in overall passing rate between the two types is about 23% in favor of charter schools, which performed especially well in math.
* Larger schools did not perform as well as medium and small schools; however, once school size drops below 2,000 students the differences are not significant.

Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"
# Read School and Student Data File and store into Pandas DataFrames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
# Combine the data into a single dataset.
school_data_complete = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"])
school_data_complete.head()
###Output
_____no_output_____
###Markdown
District Summary
* Calculate the total number of schools
* Calculate the total number of students
* Calculate the total budget
* Calculate the average math score
* Calculate the average reading score
* Calculate the percentage of students with a passing math score (70 or greater)
* Calculate the percentage of students with a passing reading score (70 or greater)
* Calculate the percentage of students who passed math **and** reading (% Overall Passing)
* Create a dataframe to hold the above results
* Optional: give the displayed data cleaner formatting
###Code
#Calculate the total number of schools
num_of_schools = school_data['school_name'].count()
#print(num_of_schools )
#Calculate the total number of students
num_of_students = student_data['Student ID'].count()
#print(num_of_students)
#Calculate the total budget
total_budget = school_data['budget'].sum()
#print(total_budget)
#Calculate the average math score
avg_math_score = school_data_complete['math_score'].mean()
#print(avg_math_score)
#Calculate the average reading score
avg_reading_score = school_data_complete['reading_score'].mean()
#print(avg_reading_score)
#Calculate the percentage of students with a passing math score (70 or greater)
pass_math = school_data_complete[(school_data_complete['math_score'] >= 70)].count() ['student_name']
#print(pass_math)
math_percent = (pass_math / float(num_of_students))*100
#print(math_percent)
#Calculate the percentage of students with a passing reading score (70 or greater)
pass_reading = school_data_complete[(school_data_complete['reading_score'] >= 70)].count() ['student_name']
#print(pass_reading)
reading_percent = (pass_reading / float(num_of_students))*100
#print(reading_percent)
#Calculate the percentage of students who passed math **and** reading (% Overall Passing)
pass_math_reading = school_data_complete[(school_data_complete['math_score'] >= 70) & (school_data_complete['reading_score'] >= 70) ].count() ['student_name']
#print(pass_math_reading)
math_reading_percent = (pass_math_reading / float(num_of_students))*100
#print(math_reading_percent)
###Output
_____no_output_____
###Markdown
Note: the brackets around the values in the `district_summary` dictionary are not strictly necessary, but if every value is a plain scalar pandas raises `ValueError: If using all scalar values, you must pass an index`, which is why `index=[0]` is supplied in the cell below.
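A tiny sketch (hypothetical values) of the two constructor forms behind that error:
###Code
# With list values pandas can infer the row index; with all-scalar values it cannot,
# so an explicit index must be passed.
import pandas as pd
pd.DataFrame({"a": [1], "b": [2]})           # works: index inferred from the lists
pd.DataFrame({"a": 1, "b": 2}, index=[0])    # works: index supplied explicitly
# pd.DataFrame({"a": 1, "b": 2})             # raises: If using all scalar values, you must pass an index
###Output
_____no_output_____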
###Code
#Create a dataframe to hold the above results
#Optional: give the displayed data cleaner formatting
# district_summary = pd.DataFrame ({'total_schools': [num_of_schools],'total_students': [num_of_students],
# 'total_budget': [total_budget], 'avg_math_score': [avg_math_score],
# 'avg_reading_score': [avg_reading_score],'percentage_pass_math': [math_percent],
# 'percentage_pass_reading': [reading_percent], 'overall pass percent': [math_reading_percent]
# })
district_summary = pd.DataFrame({'total_schools': num_of_schools,'total_students': num_of_students,
'total_budget': total_budget, 'avg_math_score': avg_math_score,
'avg_reading_score': avg_reading_score,'percentage_pass_math': math_percent,
'percentage_pass_reading': reading_percent, 'overall pass percent': math_reading_percent}
,index =[0] )
district_summary['total_students'] = district_summary['total_students'].map("{:,}".format)
district_summary['total_budget'] = district_summary['total_budget'].map("${:,.2f}".format)
district_summary
###Output
_____no_output_____
###Markdown
School Summary * Create an overview table that summarizes key metrics about each school, including: * School Name * School Type * Total Students * Total School Budget * Per Student Budget * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * % Overall Passing (The percentage of students that passed math **and** reading.) * Create a dataframe to hold the above results. Note on `school_type`: building it as `school_data['type']` leaves the series on the default 0-14 integer index, which does not align with the school-name index used by the other summary series, so the combined rows come out as NaN; setting the index to `school_name` first keeps everything aligned.
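A small sketch (hypothetical values) of the alignment issue described above:
###Code
# A Series taken straight from the schools table keeps the default integer index,
# which does not line up with school-name-indexed series, so the frame fills with NaN.
import pandas as pd
schools = pd.DataFrame({"school_name": ["A High", "B High"], "type": ["District", "Charter"]})
avg_score = pd.Series([83.0, 94.0], index=["A High", "B High"])
pd.DataFrame({"type": schools["type"], "avg": avg_score})                           # misaligned -> NaN rows
pd.DataFrame({"type": schools.set_index("school_name")["type"], "avg": avg_score})  # aligned
###Output
_____no_output_____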
###Code
#School Summary - School name
school_summary = school_data_complete.groupby("school_name")
#print(school_summary["school_name"].unique())
#school Type
school_type = school_data.set_index(["school_name"])['type']
#school_type = school_data['type']
#print(school_type)
#Total number of students per school
total_students = school_data_complete.groupby(["school_name"]).count()['Student ID']
#print(total_students)
#Total School Budget
total_school_budget = school_data_complete.groupby(["school_name"]).mean()['budget']
#print(total_school_budget)
#Per Student Budget
per_student_budget = total_school_budget/total_students
#print(per_student_budget)
#Average Math score and Passing Percecntage
avg_math_score_per_student = school_summary['math_score'].mean()
#print(avg_math_score_per_student)
passing_math = school_data_complete[(school_data_complete['math_score'] >= 70)]
#print(passing_math)
percent_passing_math = (passing_math.groupby(["school_name"]).count()['Student ID'] / total_students)*100
#print(percent_passing_math)
#Average Reading score and Passing Percentage
avg_reading_score_per_student = school_summary['reading_score'].mean()
#print(avg_reading_score_per_student)
passing_reading = school_data_complete[(school_data_complete['reading_score'] >= 70)]
#print(passing_reading)
percent_passing_reading = (passing_reading.groupby(["school_name"]).count()['Student ID'] / total_students)*100
#print(percent_passing_reading)
#Overall Passing Percentage
overall_passing = school_data_complete[(school_data_complete['math_score'] >= 70) & (school_data_complete['reading_score'] >= 70)]
#print(overall_passing)
overall_passing_percent = (overall_passing.groupby(["school_name"]).count()['Student ID'] / total_students)*100
#print(overall_passing_percent)
schools_summary = pd.DataFrame ({'School Type': school_type,'Total students': total_students,
'Total School Budget': total_school_budget,
'Per Student Budget': per_student_budget,
'Average Math Score': avg_math_score_per_student,
'Average Reading Score': avg_reading_score_per_student,
'% Passing Math': percent_passing_math,
'% Passing Reading': percent_passing_reading,
'% Overall Passing': overall_passing_percent
})
schools_summary['Total School Budget'] = schools_summary['Total School Budget'].map("${:,.2f}".format)
schools_summary['Per Student Budget'] = schools_summary['Per Student Budget'].map("${:.2f}".format)
schools_summary
###Output
_____no_output_____
###Markdown
Top Performing Schools (By % Overall Passing) * Sort and display the top five performing schools by % overall passing.
###Code
top_performing = schools_summary.sort_values("% Overall Passing", ascending = False)
top_performing.head()
###Output
_____no_output_____
###Markdown
Bottom Performing Schools (By % Overall Passing) * Sort and display the five worst-performing schools by % overall passing.
###Code
bottom_performing = schools_summary.sort_values("% Overall Passing")
bottom_performing.head()
###Output
_____no_output_____
###Markdown
Math Scores by Grade * Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school. * Create a pandas series for each grade. Hint: use a conditional statement. * Group each series by school * Combine the series into a dataframe * Optional: give the displayed data cleaner formatting
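An equivalent one-step sketch using `pivot_table` (assuming `student_data` from the cells above); the per-grade loop below follows the hinted steps instead.
###Code
# pivot_table does the grade split, the per-school grouping, and the mean in one call.
math_by_grade_sketch = student_data.pivot_table(index="school_name", columns="grade",
                                                values="math_score", aggfunc="mean")
math_by_grade_sketch = math_by_grade_sketch[["9th", "10th", "11th", "12th"]]  # natural grade order
math_by_grade_sketch
###Output
_____no_output_____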
###Code
ninth_grade_math = student_data.loc[student_data['grade'] == '9th'].groupby('school_name')["math_score"].mean()
tenth_grade_math = student_data.loc[student_data['grade'] == '10th'].groupby('school_name')["math_score"].mean()
eleventh_grade_math = student_data.loc[student_data['grade'] == '11th'].groupby('school_name')["math_score"].mean()
twelvth_grade_math = student_data.loc[student_data['grade'] == '12th'].groupby('school_name')["math_score"].mean()
math_scores_grade = pd.DataFrame({
"9th": ninth_grade_math,
"10th": tenth_grade_math,
"11th": eleventh_grade_math,
"12th": twelvth_grade_math
})
math_scores_grade.head(15)
###Output
_____no_output_____
###Markdown
Reading Score by Grade * Perform the same operations as above for reading scores
###Code
ninth_grade_reading = student_data.loc[student_data['grade'] == '9th'].groupby('school_name')["reading_score"].mean()
tenth_grade_reading = student_data.loc[student_data['grade'] == '10th'].groupby('school_name')["reading_score"].mean()
eleventh_grade_reading = student_data.loc[student_data['grade'] == '11th'].groupby('school_name')["reading_score"].mean()
twelvth_grade_reading = student_data.loc[student_data['grade'] == '12th'].groupby('school_name')["reading_score"].mean()
reading_scores_grade = pd.DataFrame({
"9th": ninth_grade_reading,
"10th": tenth_grade_reading,
"11th": eleventh_grade_reading,
"12th": twelvth_grade_reading
})
reading_scores_grade.head(15)
###Output
_____no_output_____
###Markdown
Scores by School Spending * Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following: * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * Overall Passing Rate (Average of the above two)
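One thing worth noting before the cell below: `pd.cut` bins are right-inclusive by default, so a per-student budget of exactly 585 lands in the lowest bin; a tiny sketch with hypothetical budgets:
###Code
# Hypothetical per-student budgets showing the right-inclusive (lo, hi] behaviour of pd.cut.
import pandas as pd
sample_budgets = pd.Series([580, 585, 600, 645, 650])
pd.cut(sample_budgets, [0, 585, 630, 645, 675],
       labels=["< $585", "$585 - $629", "$630 - $644", "$645 - $675"])
###Output
_____no_output_____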
###Code
bins = [0,585,630,645,675]
group_names = ["< $585","$585 - $629","$630 - $644","$645 - $675"]
school_data_complete['Spending Ranges (Per Student)'] = pd.cut(school_data_complete['budget']/school_data_complete['size'], bins, labels = group_names)
score_by_budget = school_data_complete.groupby('Spending Ranges (Per Student)')
avg_math = score_by_budget['math_score'].mean()
avg_read = score_by_budget['reading_score'].mean()
pass_math = school_data_complete[school_data_complete['math_score'] >= 70].groupby('Spending Ranges (Per Student)')['Student ID'].count()/score_by_budget['Student ID'].count() * 100
pass_read = school_data_complete[school_data_complete['reading_score'] >= 70].groupby('Spending Ranges (Per Student)')['Student ID'].count()/score_by_budget['Student ID'].count() * 100
overall = school_data_complete[(school_data_complete['math_score'] >= 70) & (school_data_complete['reading_score'] >= 70)].groupby('Spending Ranges (Per Student)')['Student ID'].count()/score_by_budget['Student ID'].count() * 100
scores_by_budget = pd.DataFrame({
"Average Math Score": avg_math,
"Average Reading Score": avg_read,
"% Passing Math": pass_math,
"% Passing Reading": pass_read,
"% Overall Passing": overall
})
scores_by_budget['Average Math Score'] = scores_by_budget['Average Math Score'].map("{:,.2f}".format)
scores_by_budget['Average Reading Score'] = scores_by_budget['Average Reading Score'].map("{:,.2f}".format)
scores_by_budget['% Passing Math'] = scores_by_budget['% Passing Math'].map("{:,.2f}".format)
scores_by_budget['% Passing Reading'] = scores_by_budget['% Passing Reading'].map("{:,.2f}".format)
scores_by_budget['% Overall Passing'] = scores_by_budget['% Overall Passing'].map("{:,.2f}".format)
scores_by_budget
###Output
_____no_output_____
###Markdown
Scores by School Size * Perform the same operations as above, based on school size.
###Code
bins = [0, 1000, 2000, 5000]
group_names = ["Small(<1000)", "Medium (1000 - 2000)" , "Large (2000 - 5000)"]
school_data_complete['School Size'] = pd.cut(school_data_complete['size'], bins, labels = group_names)
score_by_size = school_data_complete.groupby('School Size')
avg_math = score_by_size['math_score'].mean()
avg_read = score_by_size['reading_score'].mean()
pass_math = school_data_complete[school_data_complete['math_score'] >= 70].groupby('School Size')['Student ID'].count()/score_by_size['Student ID'].count() * 100
pass_read = school_data_complete[school_data_complete['reading_score'] >= 70].groupby('School Size')['Student ID'].count()/score_by_size['Student ID'].count() * 100
overall = school_data_complete[(school_data_complete['math_score'] >= 70) & (school_data_complete['reading_score'] >= 70)].groupby('School Size')['Student ID'].count()/score_by_size['Student ID'].count() * 100
scores_by_size = pd.DataFrame({
"Average Math Score": avg_math,
"Average Reading Score": avg_read,
"% Passing Math": pass_math,
"% Passing Reading": pass_read,
"% Overall Passing ": overall
})
scores_by_size
###Output
_____no_output_____
###Markdown
Scores by School Type * Perform the same operations as above, based on school type
###Code
score_by_type = school_data_complete.groupby('type')
avg_math = score_by_type['math_score'].mean()
avg_read = score_by_type['reading_score'].mean()
pass_math = school_data_complete[school_data_complete['math_score'] >= 70].groupby('type')['Student ID'].count()/score_by_type['Student ID'].count() * 100
pass_read = school_data_complete[school_data_complete['reading_score'] >= 70].groupby('type')['Student ID'].count()/score_by_type['Student ID'].count() * 100
overall = school_data_complete[(school_data_complete['math_score'] >= 70) & (school_data_complete['reading_score'] >= 70)].groupby('type')['Student ID'].count()/score_by_type['Student ID'].count() * 100
scores_by_type = pd.DataFrame({
"Average Math Score": avg_math,
"Average Reading Score": avg_read,
"% Passing Math": pass_math,
"% Passing Reading": pass_read,
"% Overall Passing": overall})
scores_by_type.index.names = ['School Type']
scores_by_type
###Output
_____no_output_____
###Markdown
Option 2: Academy of Py District Summary* Create a high level snapshot (in table form) of the district's key metrics, including: * Total Schools * Total Students * Total Budget * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * Overall Passing Rate (Average of the above two)
###Code
# import dependencies
import pandas as pd
# create path for file
schools_file = "./Resources/schools_complete.csv"
students_file = "./Resources/students_complete.csv"
# read csv and store in Pandas DataFrame
schools_df = pd.read_csv(schools_file)
students_df = pd.read_csv(students_file)
schools_df.head()
students_df.head()
# Calculate the total of schools in the Schools DataFrame
schools_total = len(schools_df["school_name"].unique())
# Calculate the total of students in the Schools DataFrame
students_total = len(students_df["Student ID"].unique())
# Calculate the Total Budget
budget_total = schools_df["budget"].sum()
# Calculate the Average Math Score
avg_math_score = round(students_df["math_score"].mean())
#Calculate the Average Math Score
avg_reading_score = round(students_df["reading_score"].mean())
#Find total of Students Passing Math (>69)
passing_math_ttl = students_df.loc[students_df["math_score"]>69].count()["student_name"]
# Calculate % of Students Passing Math
passing_math_pct = (passing_math_ttl/students_total)
#Find total of Students Passing Reading (>69)
passing_reading_ttl = students_df.loc[students_df["reading_score"]>69].count()["student_name"]
# Calculate % of Students Passing Reading
passing_reading_pct =(passing_reading_ttl/students_total)
# Calculate Overall Passing Rate (Average of the above two)
overall_passing = (passing_math_pct + passing_reading_pct)/2
# Create District Summary DataFrame using calculation to create a table showing a "high level snapshot"
district_summary = pd.DataFrame ({"Total Schools":[schools_total],
"Total Student":[students_total],
"Total Budget":[budget_total],
"Average Math Score":[avg_math_score],
"Average Reading Score":[avg_reading_score],
"% Passing Math":[passing_math_pct],
"% Passing Reading":[passing_reading_pct],
"Overall Passing Rate":[overall_passing]})
# Format Results in Table accordingly
district_summary = district_summary.style.format({
'Total Student': '{:,.0f}'.format,
'Total Budget':'${:,.0f}'.format,
'% Passing Math': '{:,.0%}'.format,
'% Passing Reading': '{:,.0%}'.format,
'Overall Passing Rate': '{:,.0%}'.format})
district_summary
###Output
_____no_output_____
###Markdown
School Summary* Create an overview table that summarizes key metrics about each school, including: * School Name * School Type * Total Students * Total School Budget * Per Student Budget * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * Overall Passing Rate (Average of the above two)
###Code
# Merge the two dataframes using full outer join
merged_data = pd.merge(students_df, schools_df, how = "left" , on= ["school_name"])
merged_data.head()
# Find Type using set_index
school_types = schools_df.set_index(["school_name"])["type"]
# Find Total Of Students per School using merged data value_counts
students_per_sch = (merged_data["school_name"].value_counts()).map("{:,.0f}".format)
# Find Budget using groupby mean of budget column
school_budget = merged_data.groupby(["school_name"]).mean()["budget"].map("${:,.0f}".format)
# Find Budget_Per_Student using school budget divided by # students per school
student_budget = ((merged_data.groupby(["school_name"]).mean()["budget"])
/(merged_data["school_name"].value_counts())).map("${:,.0f}".format)
# Find the Average Math Score for each school using groupby mean of math score column
avg_math = merged_data.groupby(["school_name"]).mean()["math_score"].map("{:,.0f}".format)
# Find the Average Reading Score for each school using groupby mean of reading score column
#avg_read = school_groupby["reading_score"].mean()
avg_read = merged_data.groupby(["school_name"]).mean()["reading_score"].map("{:,.0f}".format)
#Find total of Students Passing Math (>69)
passing_math = (merged_data[merged_data["math_score"]>69].groupby("school_name")["Student ID"].count()
/ (merged_data["school_name"].value_counts())).map("{:,.0%}".format)
# Took format off for overall passing calculation
math_unformatted = (merged_data[merged_data["math_score"]>69].groupby("school_name")["Student ID"].count()
/ (merged_data["school_name"].value_counts()))
#Find total of Students Passing Reading (>69)
passing_read = (merged_data[merged_data["reading_score"]>69].groupby("school_name")["Student ID"].count()
/ (merged_data["school_name"].value_counts())).map("{:,.0%}".format)
# Took format off for overall passing calculation
read_unformatted = (merged_data[merged_data["reading_score"]>69].groupby("school_name")["Student ID"].count()
/ (merged_data["school_name"].value_counts()))
# Calculate Overall Passing Rate (Average of the above two)
overall_pass = ((math_unformatted + read_unformatted) / 2).map("{:,.2%}".format)
# Create School Summary DataFrame using above calculations to create an overview table
school_summary = pd.DataFrame({"School Type": school_types,
"Total Students": students_per_sch,
"School Budget": school_budget,
"Per Student Budget" :student_budget,
"Avg Math Score": avg_math,
"Avg Reading Score": avg_read,
"% Passing Math": passing_math,
"% Passing Reading": passing_read,
"Overall Passing": overall_pass})
school_summary
###Output
_____no_output_____
###Markdown
Top Performing Schools (By Passing Rate)* Create a table that highlights the top 5 performing schools based on Overall Passing Rate. Include: * School Name * School Type * Total Students * Total School Budget * Per Student Budget * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * Overall Passing Rate (Average of the above two)
###Code
# Sort Overall Passing Column values in descending order
top_five = school_summary.sort_values("Overall Passing", ascending = False)
top_five.head(5)
###Output
_____no_output_____
###Markdown
Bottom Performing Schools (By Passing Rate)* Create a table that highlights the bottom 5 performing schools based on Overall Passing Rate. Include all of the same metrics as above.
###Code
# Sort Overall Passing Column values in ascending order
bottom_five = school_summary.sort_values("Overall Passing", ascending = True)
bottom_five.head(5)
###Output
_____no_output_____
###Markdown
Math Scores by Grade* Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school.
###Code
# Find Average Math Scores for each grade level at each school
# Use .loc to find grade, use .groupby to group by each school, use .mean to find the Average Math Score
math_9th = merged_data.loc[merged_data["grade"] == "9th"].groupby("school_name")["math_score"].mean()
# Repeat above for each grade
math_10th = merged_data.loc[merged_data["grade"] == "10th"].groupby("school_name")["math_score"].mean()
math_11th = merged_data.loc[merged_data["grade"] == "11th"].groupby("school_name")["math_score"].mean()
math_12th = merged_data.loc[merged_data["grade"] == "12th"].groupby("school_name")["math_score"].mean()
# Create Summary DataFrame using above calculations to create a table
math_scores_by_grade = pd.DataFrame ({"9th Grade": math_9th,
"10th Grade": math_10th,
"11th Grade": math_11th,
"12th Grade": math_12th})
# Format Results to limit decimal places
math_scores_by_grade = math_scores_by_grade.style.format({
"9th Grade": '{:,.2f}'.format,
"10th Grade":'{:,.2f}'.format,
"11th Grade": '{:,.2f}'.format,
"12th Grade": '{:,.2f}'.format,})
# StackOverflow trick to get titles on the same line
math_scores_by_grade.index.name = "Math Scores by Grade"
math_scores_by_grade.columns.name = math_scores_by_grade.index.name
math_scores_by_grade.index.name = None
math_scores_by_grade
###Output
_____no_output_____
###Markdown
Reading Scores by Grade* Create a table that lists the average Reading Score for students of each grade level (9th, 10th, 11th, 12th) at each school.
###Code
# Use .loc to find grade, use .groupby to group by each school, use .mean to find the Average Reading Score
# Repeat for each grade
reading_9th = merged_data.loc[merged_data["grade"] == "9th"].groupby("school_name")["reading_score"].mean()
reading_10th = merged_data.loc[merged_data["grade"] == "10th"].groupby("school_name")["reading_score"].mean()
reading_11th = merged_data.loc[merged_data["grade"] == "11th"].groupby("school_name")["reading_score"].mean()
reading_12th = merged_data.loc[merged_data["grade"] == "12th"].groupby("school_name")["reading_score"].mean()
# Create Summary DataFrame using above calculations to create a table
reading_scores_by_grade = pd.DataFrame ({"9th Grade": reading_9th,
"10th Grade": reading_10th,
"11th Grade": reading_11th,
"12th Grade": reading_12th})
# Format Results to limit decimal places
reading_scores_by_grade = reading_scores_by_grade.style.format({
"9th Grade": '{:,.2f}'.format,
"10th Grade":'{:,.2f}'.format,
"11th Grade": '{:,.2f}'.format,
"12th Grade": '{:,.2f}'.format,})
# StackOverflow trick to get titles on the same line
reading_scores_by_grade.index.name = "Reading Scores by Grades"
reading_scores_by_grade.columns.name = reading_scores_by_grade.index.name
reading_scores_by_grade.index.name = None
reading_scores_by_grade
###Output
_____no_output_____
###Markdown
Scores by School Spending* Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following: * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * Overall Passing Rate (Average of the above two)
###Code
# Create the bins in which Data will be held
bins = [0,580,620,650,1000]
# Create the names for the four bins
bin_labels = ["<580","580-619","620-650",">650"]
# Place the data series into a new column inside of the DataFrame - divide the budget column by the size column to get the budget per student
merged_data["spending_ranges"] = pd.cut(merged_data["budget"]/merged_data["size"], bins, labels=bin_labels)
# Find the Average (.mean) Math and Reading score Grouped by Spending Ranges
avg_math = merged_data.groupby(["spending_ranges"]).mean()["math_score"].map("{:,.0f}".format)
avg_read = merged_data.groupby(["spending_ranges"]).mean()["reading_score"].map("{:,.0f}".format)
#Find total of Students Passing Math (>69)
passing_math = (merged_data[merged_data["math_score"]>69].groupby("spending_ranges")['Student ID'].count()
/ (merged_data["spending_ranges"].value_counts())).map("{:,.0%}".format)
# Took format off for overall passing calculation
math_unformatted = (merged_data[merged_data["math_score"]>69].groupby("spending_ranges")['Student ID'].count()
/ (merged_data["spending_ranges"].value_counts()))
#Find total of Students Passing Reading (>69)
passing_read = (merged_data[merged_data["reading_score"]>69].groupby("spending_ranges")["Student ID"].count()
/ (merged_data["spending_ranges"].value_counts())).map("{:,.0%}".format)
# Took format off for overall passing calculation
read_unformatted = (merged_data[merged_data["reading_score"]>69].groupby("spending_ranges")["Student ID"].count()
/ (merged_data["spending_ranges"].value_counts()))
# Calculate Overall Passing Rate (Average of the above two)
overall_pass = ((math_unformatted + read_unformatted) / 2).map("{:,.2%}".format)
# Create School Summary DataFrame using above calculations to create an overview table
spending_summary = pd.DataFrame({"Avg Math Score": avg_math,
"Avg Reading Score": avg_read,
"% Passing Math": passing_math,
"% Passing Reading": passing_read,
"Overall Passing": overall_pass})
# StackOverflow trick to get titles on the same line
spending_summary.index.name = "Spending Ranges"
spending_summary.columns.name = spending_summary.index.name
spending_summary.index.name = None
spending_summary
###Output
_____no_output_____
###Markdown
Scores by School Size* Repeat the above breakdown, but this time group schools based on a reasonable approximation of school size (Small, Medium, Large).
###Code
# Create the bins in which Data will be held
bins = [0,1000,4000,9999]
# Create the names for the three bins
bin_labels = ["Small","Medium","Large"]
# Place the data series into a new column inside of the DataFrame
merged_data["School Size"] = pd.cut(merged_data["size"], bins, labels=bin_labels)
# Group the DataFrame by School Size
size_groupby = merged_data.groupby("School Size")
# Find the Average (.mean) Math and Reading score Grouped by School Size
avg_math = merged_data.groupby(["School Size"]).mean()["math_score"].map("{:,.0f}".format)
avg_read = merged_data.groupby(["School Size"]).mean()["reading_score"].map("{:,.0f}".format)
#Find total of Students Passing Math (>69)
passing_math = (merged_data[merged_data["math_score"]>69].groupby("School Size")["Student ID"].count()
/ (merged_data["School Size"].value_counts())).map("{:,.0%}".format)
# Took format off for overall passing calculation
math_unformatted = (merged_data[merged_data["math_score"]>69].groupby("School Size")["Student ID"].count()
/ (merged_data["School Size"].value_counts()))
#Find total of Students Passing Reading (>69)
passing_read = (merged_data[merged_data["reading_score"]>69].groupby("School Size")["Student ID"].count()
/ (merged_data["School Size"].value_counts())).map("{:,.0%}".format)
# Took format off for overall passing calculation
read_unformatted = (merged_data[merged_data["reading_score"]>69].groupby("School Size")["Student ID"].count()
/ (merged_data["School Size"].value_counts()))
# Calculate Overall Passing Rate (Average of the above two)
overall_pass = ((math_unformatted + read_unformatted) / 2).map("{:,.2%}".format)
# Create School Summary DataFrame using above calculations to create an overview table
size_summary = pd.DataFrame({"Avg Math Score": avg_math,
"Avg Reading Score": avg_read,
"% Passing Math": passing_math,
"% Passing Reading": passing_read,
"Overall Passing": overall_pass})
# StackOverflow trick to get titles on the same line
size_summary.index.name = "School Size"
size_summary.columns.name = size_summary.index.name
size_summary.index.name = None
size_summary
###Output
_____no_output_____
###Markdown
Scores by School Type* Repeat the above breakdown, but this time group schools based on school type (Charter vs. District).
###Code
# Find the Average (.mean) Math and Reading score Grouped by "type"
avg_math = merged_data.groupby(["type"]).mean()["math_score"].map("{:,.0f}".format)
avg_read = merged_data.groupby(["type"]).mean()["reading_score"].map("{:,.0f}".format)
#Find total of Students Passing Math (>69)
passing_math = (merged_data[merged_data["math_score"]>69].groupby("type")["Student ID"].count()
/ (merged_data["type"].value_counts())).map("{:,.0%}".format)
# Took format off for overall passing calculation
math_unformatted = (merged_data[merged_data["math_score"]>69].groupby("type")["Student ID"].count()
/ (merged_data["type"].value_counts()))
#Find total of Students Passing Reading (>69)
passing_read = (merged_data[merged_data["reading_score"]>69].groupby("type")["Student ID"].count()
/ (merged_data["type"].value_counts())).map("{:,.0%}".format)
# Took format off for overall passing calculation
read_unformatted = (merged_data[merged_data["reading_score"]>69].groupby("type")["Student ID"].count()
/ (merged_data["type"].value_counts()))
# Calculate Overall Passing Rate (Average of the above two)
overall_pass = ((math_unformatted + read_unformatted) / 2).map("{:,.2%}".format)
# Create School Summary DataFrame using above calculations to create an overview table
type_summary = pd.DataFrame({"Avg Math Score": avg_math,
"Avg Reading Score": avg_read,
"% Passing Math": passing_math,
"% Passing Reading": passing_read,
"Overall Passing": overall_pass})
# StackOverflow trick to get titles on the same line
type_summary.index.name = "School Type"
type_summary.columns.name = type_summary.index.name
type_summary.index.name = None
type_summary
###Output
_____no_output_____
###Markdown
PyCitySchools
###Code
# Dependencies and Setup
import pandas as pd
import numpy as np
# File to Load (Remember to Change These)
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"
# Read School and Student Data File and store into Pandas DataFrames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
# Combine the data into a single dataset.
school_data_complete = pd.merge(student_data, school_data, how="left", on="school_name")
school_data_complete.head()
###Output
_____no_output_____
###Markdown
District Summary
###Code
# Calculations for District Summary
# Total number of schools
total_number_schools = len(school_data_complete["school_name"].unique())
print(total_number_schools)
# Total number of students
total_number_students = school_data_complete["student_name"].count()
print(total_number_students)
# Total budget
total_budget = school_data["budget"].sum()  # sum over the schools table (unique() on the merged column would break if two schools shared a budget)
print(total_budget)
# Average math score
average_math_score = school_data_complete["math_score"].mean()
print(average_math_score)
# Average reading score
average_reading_score = school_data_complete["reading_score"].mean()
print(average_reading_score)
# Percentage of students passing math
passing_math = school_data_complete["math_score"] >= 70
percentage_passing_math= school_data_complete[passing_math]["math_score"].count()/total_number_students*100
print(percentage_passing_math)
# Percentage of students passing reading
passing_reading = school_data_complete["reading_score"] >= 70
percentage_passing_reading= school_data_complete[passing_reading]["reading_score"].count()/total_number_students*100
print(percentage_passing_reading)
# Percentage of students passing
passing = school_data_complete[passing_math & passing_reading]["student_name"].count()
percentage_passing = passing/total_number_students*100
print(percentage_passing)
# Create Summary Data Frame
summary_df = pd.DataFrame({"Total Schools": [total_number_schools],
"Total Students": [total_number_students],
"Total Budget": [total_budget],
"Average Math Score": [average_math_score],
"Average Reading Score": [average_reading_score],
"% Passing Math": [percentage_passing_math],
"% Passing Reading": [percentage_passing_reading],
"% Overall Passing": [percentage_passing]})
summary_df.head()
#Checking data types of summary table
summary_df.dtypes
# Converting integer columns to floats
summary_df.loc[:, "Total Schools"] = summary_df["Total Schools"].astype("float")
summary_df.loc[:, "Total Students"] = summary_df["Total Students"].astype("float")
summary_df.loc[:, "Total Budget"] = summary_df["Total Budget"].astype("float")
summary_df.dtypes
# Use Map to format columns correctly
summary_df["Total Schools"] = summary_df["Total Schools"].map("{:.0f}".format)
summary_df["Total Students"] = summary_df["Total Students"].map("{:,.0f}".format)
summary_df["Total Budget"] = summary_df["Total Budget"].map("${:,.2f}".format)
summary_df.head()
###Output
_____no_output_____
###Markdown
School Summary
###Code
#Calculate for School summary
# Grouped CSV data frame by School Name
grouped_school_df = school_data_complete.groupby(['school_name'])
grouped_school_df.count().head()
# School type
school_type = grouped_school_df['type'].first()
print(school_type)
# Total Students per school
total_students = grouped_school_df.size()
print(total_students)
# School Total Budget
school_budget = grouped_school_df['budget'].first()
print(school_budget)
# School Budget per Student
school_budget_per_student = school_budget/total_students
print(school_budget_per_student)
# Average math score
average_math_score = grouped_school_df['math_score'].mean()
print(average_math_score)
# Average reading score
average_reading_score = grouped_school_df['reading_score'].mean()
print(average_reading_score)
# Percentange of Student Passing Math
grouped_passing_math = school_data_complete[passing_math].groupby(['school_name']).size()
school_percent_passing_math = (grouped_passing_math/total_students)*100
print(school_percent_passing_math)
# Percentange of Student Passing Reading
grouped_passing_reading = school_data_complete[passing_reading].groupby(['school_name']).size()
school_percent_passing_reading = (grouped_passing_reading/total_students)*100
print(school_percent_passing_reading)
# Percentange of Student Passing both Math & Reading
grouped_passing = school_data_complete[passing_math & passing_reading].groupby(['school_name']).size()
school_percent_passing = (grouped_passing/total_students)*100
print(school_percent_passing)
# Create Summary Data Frame
school_summary_df = pd.DataFrame({'School Type': school_type, 'Total Students':total_students,
'Total School Budget': school_budget, 'Per Student Budget': school_budget_per_student,
'Average Math Score': average_math_score, 'Average Reading Score': average_reading_score,
'% Passing Math': school_percent_passing_math, '% Passing Reading': school_percent_passing_reading,
'% Overall Passing Rate': school_percent_passing})
school_summary_df
#Checking data types of table
school_summary_df.dtypes
# Converting integer columns to floats
school_summary_df.loc[:, "Total Students"] = school_summary_df["Total Students"].astype("float")
school_summary_df.loc[:, "Total School Budget"] = school_summary_df["Total School Budget"].astype("float")
school_summary_df.dtypes
# Use Map to format columns correctly
school_summary_df["Total Students"] = school_summary_df["Total Students"].map("{:,.0f}".format)
school_summary_df["Total School Budget"] = school_summary_df["Total School Budget"].map("${:,.2f}".format)
school_summary_df["Per Student Budget"] = school_summary_df["Per Student Budget"].map("${:.2f}".format)
school_summary_df
# Delete first column header
school_summary_df.index.name = None
school_summary_df
###Output
_____no_output_____
###Markdown
Top Performing Schools (By % Overall Passing)
###Code
# Top 5 Performing schools by Percent Passing
top_schools_df = school_summary_df.sort_values("% Overall Passing Rate", ascending = False)
top_schools_df.head(5)
###Output
_____no_output_____
###Markdown
Bottom Performing Schools (By % Overall Passing)
###Code
# Bottom 5 Performing schools by Percent Passing
bottom_schools_df = school_summary_df.sort_values("% Overall Passing Rate", ascending = True)
bottom_schools_df.head(5)
###Output
_____no_output_____
###Markdown
Math Scores by Grade
###Code
# Calculate 9th Grade Math Scores
freshman = school_data_complete.loc[school_data_complete["grade"]=="9th"].groupby("school_name")
freshman_math_score = freshman["math_score"].mean()
print(freshman_math_score)
# Calculate 10th Grade Math Scores
sophmore = school_data_complete.loc[school_data_complete["grade"]=="10th"].groupby("school_name")
sophmore_math_score = sophmore["math_score"].mean()
print(sophmore_math_score)
# Calculate 11th Grade Math Scores
junior = school_data_complete.loc[school_data_complete["grade"]=="11th"].groupby("school_name")
junior_math_score = junior["math_score"].mean()
print(junior_math_score)
# Calculate 12th Grade Math Scores
senior = school_data_complete.loc[school_data_complete["grade"]=="12th"].groupby("school_name")
senior_math_score = senior["math_score"].mean()
print(senior_math_score)
# Create Summary Data Frame
math_score_grade_df = pd.DataFrame({"9th": freshman_math_score,
"10th": sophmore_math_score,
"11th": junior_math_score,
"12th": senior_math_score})
math_score_grade_df
# Delete first column header
math_score_grade_df.index.name = None
math_score_grade_df
###Output
_____no_output_____
###Markdown
Reading Score by Grade
###Code
# Calculate 9th Grade Reading Scores
freshman_reading_score = freshman["reading_score"].mean()
print(freshman_reading_score)
# Calculate 10th Grade Reading Scores
sophmore_reading_score = sophmore["reading_score"].mean()
print(sophmore_reading_score)
# Calculate 11th Grade Reading Scores
junior_reading_score = junior["reading_score"].mean()
print(junior_reading_score)
# Calculate 12th Grade Reading Scores
senior_reading_score = senior["reading_score"].mean()
print(senior_reading_score)
# Create Summary Data Frame
reading_score_grade_df = pd.DataFrame({"9th": freshman_reading_score,
"10th": sophmore_reading_score,
"11th": junior_reading_score,
"12th": senior_reading_score})
reading_score_grade_df
# Delete first column header
reading_score_grade_df.index.name = None
reading_score_grade_df
###Output
_____no_output_____
###Markdown
Scores by School Spending
###Code
# Create the bins in which Data will be held
# Create Bins based on school spending per student <585, 585-630, 630-645, 645-680
bins = [0, 585, 630, 645, 680]
# Create the name for the Bins
group_labels = ["<$585", "$585-630", "$630-645", "$645-680"]
# Copy School Summary Data Frame to create new data frame sorted by bins
per_student_spending_df = school_summary_df.copy()
per_student_spending_df
# Place the data series into a new column inside of the DataFrame
per_student_spending_df["Spending Ranges (Per Student)"] = pd.cut(school_budget_per_student, bins, labels=group_labels)
per_student_spending_df
# Group by Spending Range
groupped_spending_df = per_student_spending_df.groupby("Spending Ranges (Per Student)")
groupped_spending_df.count().head()
# Calculations for Per Student Spending Summary
# Average math score
average_math_score_spending = groupped_spending_df["Average Math Score"].mean()
print(average_math_score_spending)
# Average reading score
average_reading_score_spending = groupped_spending_df["Average Reading Score"].mean()
print(average_reading_score_spending)
# Percentage of students passing math
percentage_passing_math_spending = groupped_spending_df["% Passing Math"].mean()
print(percentage_passing_math_spending)
# Percentage of students passing reading
percentage_passing_reading_spending = groupped_spending_df["% Passing Reading"].mean()
print(percentage_passing_reading_spending)
# Overall passing percentage
percentage_passing_spending = groupped_spending_df["% Overall Passing Rate"].mean()
print(percentage_passing_spending)
# Create Summary Data Frame
spending_df = pd.DataFrame({"Average Math Score": average_math_score_spending, "Average Reading Score": average_reading_score_spending,
"% Passing Math": percentage_passing_math_spending, "% Passing Reading": percentage_passing_reading_spending,
"% Overall Passing": percentage_passing_spending})
spending_df
# Use Map to format columns correctly
spending_df["Average Math Score"] = spending_df["Average Math Score"].map("{:.2f}".format)
spending_df["Average Reading Score"] = spending_df["Average Reading Score"].map("{:.2f}".format)
spending_df["% Passing Math"] = spending_df["% Passing Math"].map("{:.2f}".format)
spending_df["% Passing Reading"] = spending_df["% Passing Reading"].map("{:.2f}".format)
spending_df["% Overall Passing"] = spending_df["% Overall Passing"].map("{:.2f}".format)
spending_df
###Output
_____no_output_____
###Markdown
Scores by School Size
###Code
# Create the bins in which Data will be held
# Create Bins based on School Size <1000, 1000-2000, 2000-5000
bins_school_size = [0, 1000, 2000, 5000]
# Create the name for the Bins
group_school_size = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"]
# Copy School Summary Data Frame to create new data frame sorted by bins
school_size_df = school_summary_df.copy()
school_size_df
# Place the data series into a new column inside of the DataFrame
school_size_df["School Size"] = pd.cut(total_students, bins_school_size, labels=group_school_size)
school_size_df
# Group by School Size
groupped_school_size_df = school_size_df.groupby("School Size")
groupped_school_size_df.count().head()
# Calculations for School Size Summary
# Average math score
average_math_school_size = groupped_school_size_df["Average Math Score"].mean()
print(average_math_school_size)
# Average reading score
average_reading_school_size = groupped_school_size_df["Average Reading Score"].mean()
print(average_reading_school_size)
# Percentage of students passing math
percentage_passing_math_school_size = groupped_school_size_df["% Passing Math"].mean()
print(percentage_passing_math_school_size)
# Percentage of students passing reading
percentage_passing_reading_school_size = groupped_school_size_df["% Passing Reading"].mean()
print(percentage_passing_reading_school_size)
# Overall passing percentage
percentage_passing_school_size = groupped_school_size_df["% Overall Passing Rate"].mean()
print(percentage_passing_school_size)
# Create Summary Data Frame
size_df = pd.DataFrame({"Average Math Score": average_math_school_size, "Average Reading Score": average_reading_school_size,
"% Passing Math": percentage_passing_math_school_size, "% Passing Reading": percentage_passing_reading_school_size,
"% Overall Passing": percentage_passing_school_size})
size_df
# Use Map to format columns correctly
size_df["Average Math Score"] = size_df["Average Math Score"].map("{:.2f}".format)
size_df["Average Reading Score"] = size_df["Average Reading Score"].map("{:.2f}".format)
size_df["% Passing Math"] = size_df["% Passing Math"].map("{:.2f}".format)
size_df["% Passing Reading"] = size_df["% Passing Reading"].map("{:.2f}".format)
size_df["% Overall Passing"] = size_df["% Overall Passing"].map("{:.2f}".format)
size_df
###Output
_____no_output_____
###Markdown
Scores by School Type
###Code
# Group by School Type
groupped_school_type_df = school_summary_df.groupby("School Type")
groupped_school_type_df.count().head()
# Calculations for School Type Summary
# Average math score
average_math_school_type = groupped_school_type_df["Average Math Score"].mean()
print(average_math_school_type)
# Average reading score
average_reading_school_type = groupped_school_type_df["Average Reading Score"].mean()
print(average_reading_school_type)
# Percentage of students passing math
percentage_passing_math_school_type = groupped_school_type_df["% Passing Math"].mean()
print(percentage_passing_math_school_type)
# Percentage of students passing reading
percentage_passing_reading_school_type = groupped_school_type_df["% Passing Reading"].mean()
print(percentage_passing_reading_school_type)
# Overall passing percentage
percentage_passing_school_type = groupped_school_type_df["% Overall Passing Rate"].mean()
print(percentage_passing_school_type)
# Create Summary Data Frame
type_df = pd.DataFrame({"Average Math Score": average_math_school_type, "Average Reading Score": average_reading_school_type,
"% Passing Math": percentage_passing_math_school_type, "% Passing Reading": percentage_passing_reading_school_type,
"% Overall Passing": percentage_passing_school_type})
type_df
# Use Map to format columns correctly
type_df["Average Math Score"] = type_df["Average Math Score"].map("{:.2f}".format)
type_df["Average Reading Score"] = type_df["Average Reading Score"].map("{:.2f}".format)
type_df["% Passing Math"] = type_df["% Passing Math"].map("{:.2f}".format)
type_df["% Passing Reading"] = type_df["% Passing Reading"].map("{:.2f}".format)
type_df["% Overall Passing"] = type_df["% Overall Passing"].map("{:.2f}".format)
type_df
###Output
_____no_output_____
###Markdown
District Summary* Calculate the total number of schools* Calculate the total number of students* Calculate the total budget* Calculate the average math score * Calculate the average reading score* Calculate the overall passing rate (overall average score), i.e. (avg. math score + avg. reading score)/2* Calculate the percentage of students with a passing math score (70 or greater)* Calculate the percentage of students with a passing reading score (70 or greater)* Create a dataframe to hold the above results* Optional: give the displayed data cleaner formatting
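The cell below calls `type_statistics` with `district_group` and `charter_group`, which are not defined in this excerpt; a plausible (hypothetical) reconstruction, assuming `school_data_complete` from the earlier merge, might be:
###Code
# Hypothetical reconstruction only - the original definitions are not shown here.
district_group = school_data_complete[school_data_complete["type"] == "District"]
charter_group = school_data_complete[school_data_complete["type"] == "Charter"]
###Output
_____no_output_____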
###Code
def type_statistics(type_df, name, df):
# Calculate the total number of schools
num_schools = df['school_name'].nunique()
# Calculate the total number of students
num_students = df['student_name'].size
# Calculate the total budget
budget_df = type_df['budget'].sum()
budget = budget_df[name]
# Calculate the average math score
math_score = df['math_score'].mean()
# Calculate the average reading score
reading_score = df['reading_score'].mean()
# Calculate the overall passing rate (overall average score)
passing_score = (math_score + reading_score)/2
# Calculate the percentage of students with a passing math score (70 or greater)
passing_math_num = (df['math_score'] >= 70).sum()
passing_math_avg = passing_math_num / num_students * 100
# Calculate the percentage of students with a passing reading score (70 or greater)
passing_read_num = (df['reading_score'] >= 70).sum()
passing_read_avg = passing_read_num / num_students * 100
# TODO:
# Create a dataframe to hold the above results
results = pd.DataFrame([{'Type' : name,
'Number of Schools' : num_schools,
'Number of Students' : num_students,
'Budget' : budget,
'Average Math Score' : math_score,
'Average Reading Score' : reading_score,
'Overall Passing Average' : passing_score,
'Passing Math Average' : passing_math_avg,
'Passing Reading Average' : passing_read_avg}])
# order the columns
results = results[['Type',
'Number of Schools',
'Number of Students',
'Budget',
'Average Math Score',
'Average Reading Score',
'Overall Passing Average',
'Passing Math Average',
'Passing Reading Average']]
return results
# District
type_df = school_data.groupby(['type'])
dist_stats_df = type_statistics(type_df, 'District', district_group)
ch_stats_df = type_statistics(type_df, 'Charter', charter_group)
stats_df = pd.concat([dist_stats_df, ch_stats_df], ignore_index=True)
# Use Map to format all the columns
stats_df["Budget"] = stats_df["Budget"].map("${:,.0f}".format)
stats_df["Average Math Score"] = stats_df["Average Math Score"].map("{:.2f}%".format)
stats_df["Average Reading Score"] = stats_df["Average Reading Score"].map("{:.2f}%".format)
stats_df["Overall Passing Average"] = stats_df["Overall Passing Average"].map("{:.2f}%".format)
stats_df["Passing Math Average"] = stats_df["Passing Math Average"].map("{:.2f}%".format)
stats_df["Passing Reading Average"] = stats_df["Passing Reading Average"].map("{:.2f}%".format)
stats_df
###Output
_____no_output_____
###Markdown
School Summary * Create an overview table that summarizes key metrics about each school, including: * School Name * School Type * Total Students * Total School Budget * Per Student Budget * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * Overall Passing Rate (Average of the above two) * Create a dataframe to hold the above results Top Performing Schools (By Passing Rate) * Sort and display the top five schools in overall passing rate
###Code
def school_stats(school_list):
# School Name
# School Type
# Total Students
# Total School Budget
# Per Student Budget
# Average Math Score
# Average Reading Score
# % Passing Math
# % Passing Reading
# Overall Passing Rate (Average of the above two)
# Create a dataframe to hold the above results
school_name = school_list[0]
school_data = school_list[1]
school_type = school_data.iloc[0]['type']
total_students = school_data['Student ID'].count()
school_budget = school_data.iloc[0]['budget']
per_student_budget = school_budget/total_students
average_math_score = school_data['math_score'].mean()
average_reading_score = school_data['reading_score'].mean()
num_passing_math = (school_data['math_score'] >= 70).sum()
percent_passing_math = num_passing_math/total_students*100
num_passing_reading = (school_data['reading_score'] >= 70).sum()
percent_passing_reading = num_passing_reading/total_students*100
overall_passing_rate = (num_passing_math + num_passing_reading) / 2 / total_students * 100
results = pd.DataFrame([{"School Name" : school_name,
"School Type" : school_type,
"Total Students" : total_students,
"Total School Budget" : school_budget,
"Per Student Budget" : per_student_budget,
"Average Math Score" : average_math_score,
"Average Reading Score" : average_reading_score,
"% Passing Math" : percent_passing_math,
"% Passing Reading" : percent_passing_reading,
"Overall Passing Rate" : overall_passing_rate
}])
return results
# Create an overview table that summarizes key metrics about each school, including:
school_df = school_data_complete.groupby(['school_name'])
num_students = school_df['school_name'].count()
num_schools = school_df['school_name'].nunique().size
# print(num_schools)
school_stats_df = pd.DataFrame()
one_school_stats_df = pd.DataFrame()
for i in range(0, num_schools):
one_school_stats_df = school_stats(list(school_df)[i])
school_stats_df = pd.concat([school_stats_df, one_school_stats_df], ignore_index=True)
# order the columns
school_stats_df = school_stats_df[['School Name',
'School Type',
'Total Students',
'Total School Budget',
'Per Student Budget',
'Average Math Score',
'Average Reading Score',
'% Passing Math',
'% Passing Reading',
'Overall Passing Rate']]
# Use Map to format all the columns
school_stats_df["Total School Budget"] = school_stats_df["Total School Budget"].map("${:,.0f}".format)
school_stats_df["Per Student Budget"] = school_stats_df["Per Student Budget"].map("${:,.0f}".format)
school_stats_df["Average Math Score"] = school_stats_df["Average Math Score"].map("{:.2f}%".format)
school_stats_df["Average Reading Score"] = school_stats_df["Average Reading Score"].map("{:.2f}%".format)
school_stats_df["% Passing Math"] = school_stats_df["% Passing Math"].map("{:.2f}%".format)
school_stats_df["% Passing Reading"] = school_stats_df["% Passing Reading"].map("{:.2f}%".format)
school_stats_df["Overall Passing Rate"] = school_stats_df["Overall Passing Rate"].map("{:.2f}%".format)
print("School Summary")
school_stats_df
###Output
School Summary
###Markdown
Top Performing Schools (By Passing Rate) * Sort and display the five best-performing schools
###Code
print("Best Schools")
school_stats_df.sort_values(by='Overall Passing Rate', ascending=False).head()
###Output
Best Schools
###Markdown
Bottom Performing Schools (By Passing Rate) * Sort and display the five worst-performing schools
###Code
print("Worst Schools")
school_stats_df.sort_values(by='Overall Passing Rate', ascending=True).head()
###Output
Worst Schools
###Markdown
Math Scores by Grade * Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school. * Create a pandas series for each grade. Hint: use a conditional statement. * Group each series by school * Combine the series into a dataframe * Optional: give the displayed data cleaner formatting Reading Score by Grade * Perform the same operations as above for reading scores
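A more compact sketch of the same two tables (assuming `school_data_complete` from the merge above), which also avoids the row-by-row `append` noted in the TODO below:
###Code
# Group once by school and grade, take the means, and pivot the grade level into columns.
math_by_grade_sketch = (school_data_complete
    .groupby(["school_name", "grade"])["math_score"]
    .mean()
    .unstack("grade")[["9th", "10th", "11th", "12th"]])
reading_by_grade_sketch = (school_data_complete
    .groupby(["school_name", "grade"])["reading_score"]
    .mean()
    .unstack("grade")[["9th", "10th", "11th", "12th"]])
###Output
_____no_output_____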
###Code
# Group the data by school and grade
school_grades_df = school_data_complete.groupby(['school_name', 'grade'])
# Accumulate the data
# TODO: for efficiency, accumulate the rows in a list and build the DataFrame once at the end
# (DataFrame.append is deprecated in newer pandas; pd.concat on a list of frames is preferred)
grades_df = pd.DataFrame({'School' : [], 'Grade' : [], 'Math Score' : [], 'Reading Score': []})
for name_of_the_group, group in school_grades_df:
math_avg = group['math_score'].mean()
reading_avg = group['reading_score'].mean()
grades_df = grades_df.append({'School' : name_of_the_group[0],
'Grade' : name_of_the_group[1],
'Math Score' : math_avg,
'Reading Score' : reading_avg}, ignore_index=True)
# Format the percents
grades_df["Math Score"] = grades_df["Math Score"].map("{:.2f}%".format)
grades_df["Reading Score"] = grades_df["Reading Score"].map("{:.2f}%".format)
grades_df
###Output
_____no_output_____
###Markdown
Scores by School Spending * Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following: * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * Overall Passing Rate (Average of the above two)
###Code
# bins.
spending_bins = [0, 585, 615, 645, 675]
group_names = ["0-585", "585-615", "615-645", "645-675"]
# remove all the dollar signs
colstocheck = school_stats_df.columns
school_stats_df[colstocheck] = school_stats_df[colstocheck].replace({r'\$': ''}, regex=True)
# remove all the percent signs
school_stats_df[colstocheck] = school_stats_df[colstocheck].replace({r'\%': ''}, regex=True)
# turn the budget column into an int
school_stats_df['Per Student Budget'] = pd.to_numeric(school_stats_df['Per Student Budget'])
# turn the % Passing Math column into a float
school_stats_df['% Passing Math'] = pd.to_numeric(school_stats_df['% Passing Math'])
# turn the % Passing Reading column into a float
school_stats_df['% Passing Reading'] = pd.to_numeric(school_stats_df['% Passing Reading'])
# Save school_stats_df
orig_school_stats_df = school_stats_df.copy()
# cut into bins and add a column
school_stats_df["Per Student Budget Summary"] = pd.cut(school_stats_df["Per Student Budget"], spending_bins, labels=group_names)
# groupby the bins
# Group the data by school and total students
spending_summary_df = pd.DataFrame()
school_spending = school_stats_df.groupby(['School Name', 'Total Students'])
for spending, spending_data in school_spending:
avg_math_score = spending_data.get('Average Math Score').item()
avg_reading_score = spending_data.get('Average Reading Score').item()
passing_math = spending_data.get('% Passing Math').item()
passing_reading = spending_data.get('% Passing Reading').item()
overall_passing = (passing_math + passing_reading) / 2
spending_summary_df = spending_summary_df.append({'School' : spending[0],
'Spending' : spending[1],
'Average Math Score' : avg_math_score,
'Average Reading Score' : avg_reading_score,
'% Passing Math' : passing_math,
'% Passing Reading' : passing_reading,
'Overall Passing Rate' : overall_passing,
}, ignore_index=True)
spending_summary_df = spending_summary_df.sort_values(by=['Spending', 'Overall Passing Rate'], ascending=False)
spending_summary_df
###Output
_____no_output_____
###Markdown
Scores by School Size * Perform the same operations as above, based on school size.
###Code
# bins.
size_bins = [0, 1000, 2000, 5000]
size_names = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"]
size_school_stats_df = orig_school_stats_df.copy()
#print(size_school_stats_df)
# cut into bins and add a column
size_school_stats_df["Student Size Summary"] = pd.cut(size_school_stats_df["Total Students"]
, size_bins, labels=size_names)
#print(size_school_stats_df)
# groupby the bins
# Group the data by school and student size
size_summary_df = pd.DataFrame()
school_size = size_school_stats_df.groupby(['Student Size Summary', 'Total Students'])
#print(school_size)
for size, size_data in school_size:
avg_math_score = size_data.get('Average Math Score').item()
avg_reading_score = size_data.get('Average Reading Score').item()
passing_math = size_data.get('% Passing Math').item()
passing_reading = size_data.get('% Passing Reading').item()
overall_passing = (passing_math + passing_reading) / 2
size_summary_df = size_summary_df.append({'Student Size Summary' : size[0],
'Total Students' : size[1],
'Average Math Score' : avg_math_score,
'Average Reading Score' : avg_reading_score,
'% Passing Math' : passing_math,
'% Passing Reading' : passing_reading,
'Overall Passing Rate' : overall_passing,
}, ignore_index=True)
#print(size_summary_df.columns)
size_summary_df = size_summary_df.sort_values(by=['Student Size Summary', 'Overall Passing Rate'], ascending=False)
size_summary_df
###Output
_____no_output_____
###Markdown
Scores by School Type * Perform the same operations as above, based on school type.
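The cell below only re-sorts the per-school table; a minimal sketch of an actual by-type aggregation (assuming `orig_school_stats_df` from the cells above, with the score columns converted back to numbers) could look like this:
###Code
# Convert the score columns back to numbers, then average them per school type.
numeric_cols = ["Average Math Score", "Average Reading Score",
                "% Passing Math", "% Passing Reading", "Overall Passing Rate"]
by_type = orig_school_stats_df.copy()
by_type[numeric_cols] = by_type[numeric_cols].apply(pd.to_numeric)
type_summary_sketch = by_type.groupby("School Type")[numeric_cols].mean()
type_summary_sketch
###Output
_____no_output_____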
###Code
orig_school_stats_df.set_index(["School Name"])["School Type"]
school_type_stats_df = orig_school_stats_df.sort_values(by='Overall Passing Rate', ascending=False)
school_type_stats_df
###Output
_____no_output_____
###Markdown
Main PyCitySchools Script* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"
# Read School and Student Data File and store into Pandas DataFrames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
# Combine the data into a single dataset.
school_data_complete = pd.merge(student_data, school_data, how="left", on="school_name")
school_data_complete.head()
###Output
_____no_output_____
###Markdown
District Summary* Calculate the total number of schools* Calculate the total number of students* Calculate the total budget* Calculate the average math score * Calculate the average reading score* Calculate the percentage of students with a passing math score (70 or greater)* Calculate the percentage of students with a passing reading score (70 or greater)* Calculate the percentage of students who passed math **and** reading (% Overall Passing)* Create a dataframe to hold the above results* Optional: give the displayed data cleaner formatting
###Code
# Calculate the total number of schools
total_schools = len(school_data_complete['school_name'].unique())
# total_schools
# school_data_complete['school_name'].value_counts()
# school_data_complete.describe()
# Calculate the total number of students
#### NOTE: counting unique student names gives ~6,000 fewer entries than counting unique Student IDs - different students can share the same name, so Student ID is used as the unique key below.
# total_names = len(school_data_complete['student_name'].unique())
# total_names
total_students = len(school_data_complete['Student ID'].unique())
# total_students
# Calculate the total budget
total_budget = school_data_complete['budget'].unique()
total_budget = total_budget.sum()
# total_budget
# Calculate the average math score
avg_math = school_data_complete['math_score'].mean()
# avg_math
# Calculate the average reading score
avg_reading = school_data_complete['reading_score'].mean()
# avg_reading
# Calculate the percentage of students with a passing math score (70 or greater)
pass_math = (len(school_data_complete.loc[school_data_complete["math_score"] >= 70])/total_students)*100
# # pass_math
# Calculate the percentage of students with a passing reading score (70 or greater)
pass_reading = (len(school_data_complete.loc[school_data_complete["reading_score"] >= 70])/total_students)*100
# pass_reading
# Calculate the percentage of students who passed math and reading (% Overall Passing)
readers = school_data_complete.loc[school_data_complete['reading_score'] >= 70]
# pass_overall = readers.loc[readers['math_score'] >= 70]
pass_overall = (len(readers.loc[readers['math_score'] >= 70])/total_students)*100
# pass_overall
# Create a District Summary Table - if needed, fix with ", index=[0]"
district_summary_df = pd.DataFrame({'Total Schools': total_schools,
'Total Students': total_students,
'Total Budget': total_budget,
'Average Math Score': avg_math,
'Average Reading Score': avg_reading,
'% Passing Math': pass_math,
'% Passing Reading': pass_reading,
'% Overall Passing':pass_overall},
index=[0])
district_summary_df
###Output
_____no_output_____
###Markdown
School Summary * Create an overview table that summarizes key metrics about each school, including: * School Name * School Type * Total Students * Total School Budget * Per Student Budget * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * % Overall Passing (The percentage of students that passed math **and** reading.) * Create a dataframe to hold the above results
###Code
school_data_complete.head()
# Grouping the school_data_complete by school_name & creating passing math & reading columns
school_data_complete['pass_math'] = school_data_complete["math_score"] >= 70
school_data_complete['pass_reading'] = school_data_complete["reading_score"] >= 70
school_group = school_data_complete.groupby(['school_name'])
# School Name
school_name = school_group['school_name'].unique()
# school_name
# School Type
school_type = school_group['type'].unique()
# school_type
# Total Students
total_students = school_group['Student ID'].count()
# total_students
# Total School Budget
school_budget = school_group['budget'].mean()
# school_budget
# Per Student Budget
per_stu_budget = school_budget/total_students
# per_stu_budget
# Average Math Score
avg_math = school_group['math_score'].mean()
# avg_math
# Average Reading Score
avg_reading = school_group['reading_score'].mean()
# avg_reading
# % Passing Math (mean of the boolean column, converted to a percentage)
pass_math = school_group["pass_math"].mean() * 100
# pass_math
# % Passing Reading
pass_reading = school_group["pass_reading"].mean() * 100
# pass_reading
# % Overall Passing (The percentage of students that passed math and reading.)
# Note: "pass_math" and "pass_reading" short-circuits to just "pass_reading", so combine the booleans explicitly
pass_overall = (school_data_complete["pass_math"] & school_data_complete["pass_reading"]) \
    .groupby(school_data_complete["school_name"]).mean() * 100
# pass_overall
# Create a District Summary Table - if needed, fix with ", index=[0]"
school_summary_df = pd.DataFrame({'School Type':school_type,
'Total Students': total_students,
'Total School Budget':school_budget,
'Per Student Budget':per_stu_budget,
'Average Math Score':avg_math,
'Average Reading Score':avg_reading,
'% Passing Math':pass_math,
'% Passing Reading':pass_reading,
'% Overall Passing':pass_overall,
})
school_summary_df
###Output
_____no_output_____
###Markdown
Top Performing Schools (By % Overall Passing) * Sort and display the top five performing schools by % overall passing.
###Code
# Sort the school summary by overall passing rate and show the five top-performing schools
top_schools = school_summary_df.sort_values('% Overall Passing', ascending=False)
top_schools.head(5)
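# Hedged alternative sketch: nlargest picks the top five in one call (assumes '% Overall Passing' is numeric)
# top_schools = school_summary_df.nlargest(5, '% Overall Passing')
# top_schools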
###Output
_____no_output_____
###Markdown
Academy of Py Problem Statement Well done! Having spent years analyzing financial records for big banks, you've finally scratched your idealistic itch and joined the education sector. In your latest role, you've become the Chief Data Scientist for your city's school district. In this capacity, you'll be helping the school board and mayor make strategic decisions regarding future school budgets and priorities. As a first task, you've been asked to analyze the district-wide standardized test results. You'll be given access to every student's math and reading scores, as well as various information on the schools they attend. Your responsibility is to aggregate the data and showcase obvious trends in school performance. Analysis Report 1. Across the 15 schools at the district level, the percentage of students passing reading (about 86%) is greater than the percentage of students passing math (about 75%). 2. The top five performing schools are all Charter schools, with an overall passing rate of about 95%. 3. The five worst performing schools are all District schools, with an overall passing rate of about 73%. 4. A higher school budget does not translate into higher student scores: schools spending comparatively little per student (less than $615) have higher math and reading scores than schools spending more than $615 per student. 5. Considering school size, schools with fewer than 2000 students perform better, with higher average and overall scores in math and reading, than schools with more than 2000 students. 6. Regarding school type, Charter schools perform far better, especially in math, with 94% of students passing math compared to 67% at District schools. Reading the input files and creating the dataframes
###Code
# Dependencies and Setup
import pandas as pd
import os
import matplotlib
# Files to Load
school_data_to_load = os.path.join('Resources' , 'schools_complete.csv')
student_data_to_load = os.path.join('Resources', 'students_complete.csv')
# Read School and Student Data File and store into Pandas Data Frames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
# Combine the data into a single dataset
school_data_complete = pd.merge(student_data, school_data, how='left', on=['school_name', 'school_name'])
school_data.head(3)
student_data.head(3)
school_data_complete.head(3)
# Checking the data for null values
school_data_complete.isnull().sum()
###Output
_____no_output_____
###Markdown
District Summary* Calculate the total number of schools* Calculate the total number of students* Calculate the total budget* Calculate the average math score * Calculate the average reading score* Calculate the overall passing rate (overall average score), i.e. (avg. math score + avg. reading score)/2* Calculate the percentage of students with a passing math score (70 or greater)* Calculate the percentage of students with a passing reading score (70 or greater)* Create a dataframe to hold the above results* Optional: give the displayed data cleaner formatting Calculations for district summary dataframe
###Code
# calculating total number of schools
total_school_count = school_data_complete['school_name'].nunique()
# calculating total number of students (combined in all schools)
total_student_count = school_data_complete['student_name'].count()
# calculating total budget (for all schools)
total_budget = school_data_complete['budget'].unique().sum()
# calculating average math score at district level
avg_math_score = school_data_complete['math_score'].mean()
# calculating average reading score at district level
avg_reading_score = school_data_complete['reading_score'].mean()
# Calculating the overall passing rate (overall average score)
overall_passing_rate = (avg_math_score + avg_reading_score) / 2
# calculating percentage of students passing in math at district level
student_passing_math = len(school_data_complete[school_data_complete['math_score'] >= 70])
percent_passing_math = (student_passing_math / total_student_count) * 100
# calculating percentage of students passing in reading at district level
student_passing_reading = len(school_data_complete[school_data_complete['reading_score'] >= 70])
percent_passing_reading = (student_passing_reading / total_student_count) * 100
###Output
_____no_output_____
###Markdown
Creating the district summary datafarme
###Code
district_summary_df = pd.DataFrame({'Total Schools': total_school_count,
                                    'Total Students': f"{total_student_count:,}",
                                    'Total Budget': f"${total_budget:,.2f}",
                                    'Average Math Score': avg_math_score,
                                    'Average Reading Score': avg_reading_score,
                                    '% Passing Math': percent_passing_math,
                                    '% Passing Reading': percent_passing_reading,
                                    '% Overall Passing Rate': overall_passing_rate}, index = [0])
district_summary_df
###Output
_____no_output_____
###Markdown
School Summary * Create an overview table that summarizes key metrics about each school, including: * School Name * School Type * Total Students * Total School Budget * Per Student Budget * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * Overall Passing Rate (Average of the above two) * Create a dataframe to hold the above results Calculations for school summary dataframe
###Code
# grouping the data by school
school_group = school_data_complete.groupby('school_name')
# finding the school type for each school (charter or district)
school_type = school_group['type'].first()
# calculating total students in each school
students_per_school = school_group['student_name'].count()
# retrieving budget per school
budget_per_school = school_group['budget'].first()
# calculating per student budget per school
per_student_budget = budget_per_school / students_per_school
# calculating average math score per school
avg_math_score_per_school = school_group['math_score'].mean()
# calculating average reading score per school
avg_reading_score_per_school = school_group['reading_score'].mean()
# calculating percentage of students passing in math per school
total_passing_math_per_school = school_data_complete[school_data_complete['math_score'] >= 70]['school_name'].value_counts()
percent_passing_math_per_school = (total_passing_math_per_school / students_per_school) * 100
# calculating percentage of students passing in reading per school
total_passing_reading_per_school = school_data_complete[school_data_complete['reading_score'] >= 70]['school_name'].value_counts()
percent_passing_reading_per_school = (total_passing_reading_per_school / students_per_school) * 100
# calculating overall passing rate per school
overall_passing_rate_per_school = (percent_passing_math_per_school + percent_passing_reading_per_school) / 2
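# Hedged sketch of an alternative "overall passing" definition used by other notebooks in this document:
# the share of students who passed BOTH subjects, rather than the average of the two pass rates.
passed_both_per_school = school_data_complete[(school_data_complete['math_score'] >= 70) &
                                              (school_data_complete['reading_score'] >= 70)] \
                             .groupby('school_name')['student_name'].count()
percent_passing_both_per_school = (passed_both_per_school / students_per_school) * 100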
###Output
_____no_output_____
###Markdown
Creating the School summary dataframe
###Code
school_summary_df = pd.DataFrame({'School Type': school_type,
'Total Students': students_per_school,
'Total School Budget': budget_per_school,
'Per Student Budget': per_student_budget,
'Average Math Score': avg_math_score_per_school,
'Average Reading Score': avg_reading_score_per_school,
'% Passing Math': percent_passing_math_per_school,
'% Passing Reading': percent_passing_reading_per_school,
'% Overall Passing Rate': overall_passing_rate_per_school
})
school_summary_df.index.name = None
# Didn't use style.format because head() does not work with dataframe formatted using styler object
school_summary_df['Total School Budget'] = school_summary_df['Total School Budget'].map("${:,.2f}".format)
# commented the formatting for 'per student budget' as this column is used for calculations in 'scores by school spending df'
# school_summary_df['Per Student Budget'] = school_summary_df['Per Student Budget'].map("${:,.2f}".format)
school_summary_df.head()
###Output
_____no_output_____
###Markdown
Top Performing Schools (By Passing Rate) * Sort and display the top five schools in overall passing rate
###Code
# Finding the top 5 schools based on the overall passing rate
top_performing_school = school_summary_df.sort_values('% Overall Passing Rate', ascending=False).head()
top_performing_school
###Output
_____no_output_____
###Markdown
Bottom Performing Schools (By Passing Rate) * Sort and display the five worst-performing schools
###Code
# Finding the bottom 5 schools based on the overall passing rate
bottom_performing_school = school_summary_df.sort_values('% Overall Passing Rate', ascending=True).head()
bottom_performing_school
###Output
_____no_output_____
###Markdown
Math Scores by Grade * Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school. * Create a pandas series for each grade. Hint: use a conditional statement. * Group each series by school * Combine the series into a dataframe * Optional: give the displayed data cleaner formatting Calculations for grade-wise math score per school
###Code
# One way of calculating the grade-wise math scores (based on the instructions provided)
# ninth_grade_math = school_data_complete[school_data_complete['grade'] == '9th'].groupby('school_name')['math_score'].mean()
# tenth_grade_math = school_data_complete[school_data_complete['grade'] == '10th'].groupby('school_name')['math_score'].mean()
# eleventh_grade_math = school_data_complete[school_data_complete['grade'] == '11th'].groupby('school_name')['math_score'].mean()
# twelth_grade_math = school_data_complete[school_data_complete['grade'] == '12th'].groupby('school_name')['math_score'].mean()
# grade_mscore_df = pd.DataFrame({'9th': ninth_grade_math,
# '10th': tenth_grade_math,
# '11th': eleventh_grade_math,
# '12th': twelth_grade_math})
# grade_mscore_df
# My preferred way of calculating the grade-wise math scores
cols = ['9th', '10th', '11th', '12th']
math_score_by_grade = pd.DataFrame(school_data_complete.groupby(["school_name", "grade"])["math_score"].mean()) \
.reset_index() \
.pivot('school_name', columns='grade', values='math_score')
math_score_by_grade = math_score_by_grade[cols]
math_score_by_grade.index.name = None
math_score_by_grade
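# Hedged alternative sketch: pivot_table collapses the groupby + pivot above into a single call
# math_score_by_grade_alt = school_data_complete.pivot_table(index='school_name', columns='grade',
#                                                            values='math_score', aggfunc='mean')[cols]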
###Output
_____no_output_____
###Markdown
Reading Score by Grade * Perform the same operations as above for reading scores
###Code
# One way of calculating the grade-wise reading scores (based on the instructions provided)
# ninth_grade_reading = school_data_complete[school_data_complete['grade'] == '9th'].groupby('school_name')['reading_score'].mean()
# tenth_grade_reading = school_data_complete[school_data_complete['grade'] == '10th'].groupby('school_name')['reading_score'].mean()
# eleventh_grade_reading = school_data_complete[school_data_complete['grade'] == '11th'].groupby('school_name')['reading_score'].mean()
# twelth_grade_reading = school_data_complete[school_data_complete['grade'] == '12th'].groupby('school_name')['reading_score'].mean()
# grade_rscore_df = pd.DataFrame({'9th': ninth_grade_reading, '10th': tenth_grade_reading, '11th': eleventh_grade_reading, '12th': twelth_grade_reading})
# grade_rscore_df
# My preferred way of calculating the grade-wise reading scores
reading_score_by_grade = pd.DataFrame(school_data_complete.groupby(["school_name", "grade"])["reading_score"].mean()) \
.reset_index() \
.pivot('school_name', columns='grade', values='reading_score')
reading_score_by_grade = reading_score_by_grade[cols]
reading_score_by_grade.index.name = None
reading_score_by_grade
###Output
_____no_output_____
###Markdown
Scores by School Spending * Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following: * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * Overall Passing Rate (Average of the above two)
###Code
spending_bins = [0, 585, 615, 645, 675]
spending_labels = ["<$585", "$585-615", "$615-645", "$645-675"]
summary_cols = ['Average Math Score', 'Average Reading Score', '% Passing Math', '% Passing Reading', '% Overall Passing Rate']
scores_by_school_spending_df = school_summary_df[summary_cols] \
.assign(spending_ranges = pd.cut(school_summary_df['Per Student Budget'], spending_bins, labels=spending_labels)) \
.groupby('spending_ranges').mean() \
.rename_axis('Spending Ranges (Per Student)')
scores_by_school_spending_df
###Output
_____no_output_____
###Markdown
Scores by School Size * Perform the same operations as above, based on school size.
###Code
size_bins = [0, 1000, 2000, 5000]
size_labels = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"]
scores_by_school_size_df = school_summary_df[summary_cols] \
                           .assign(school_size = pd.cut(school_summary_df['Total Students'], size_bins, labels=size_labels)) \
                           .groupby('school_size').mean() \
                           .rename_axis('School Size')
scores_by_school_size_df
###Output
_____no_output_____
###Markdown
Scores by School Type * Perform the same operations as above, based on school type.
###Code
school_summary_df['School Type'].unique()
type_bins = [0, 1, 2]
type_labels = ['Charter', 'District']
scores_by_school_type_df = school_summary_df[['School Type', 'Average Math Score', 'Average Reading Score',
'% Passing Math', '% Passing Reading', '% Overall Passing Rate']] \
.replace({'School Type': {'Charter': 1, 'District': 2}})
scores_by_school_type_df = scores_by_school_type_df.assign(school_type = pd.cut(scores_by_school_type_df['School Type'], type_bins, labels=type_labels)) \
.groupby('school_type').mean() \
.drop('School Type', axis=1) \
.rename_axis('School Type')
scores_by_school_type_df
###Output
_____no_output_____
###Markdown
Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"
# Read School and Student Data File and store into Pandas DataFrames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
# Combine the data into a single dataset.
school_data_complete = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"])
###Output
_____no_output_____
###Markdown
District Summary* Calculate the total number of schools* Calculate the total number of students* Calculate the total budget* Calculate the average math score * Calculate the average reading score* Calculate the percentage of students with a passing math score (70 or greater)* Calculate the percentage of students with a passing reading score (70 or greater)* Calculate the percentage of students who passed math **and** reading (% Overall Passing)* Create a dataframe to hold the above results* Optional: give the displayed data cleaner formatting
###Code
school_data_complete.head()
Total_Schools = len(school_data_complete["school_name"].unique())
#Total_Schools
Total_Students = school_data_complete["student_name"].count()
#Total_Students
Unique_budget = school_data_complete["budget"].unique()
Total_budget = Unique_budget.sum()
Total_budget_format = Total_budget.copy()
Total_budget_format
#print(f"${Total_budget:0,.2f}")
Math_Average = school_data_complete["math_score"].mean()
#Math_Average
Reading_Average = school_data_complete["reading_score"].mean()
#Reading_Average
math_pass_df = school_data_complete.loc[(school_data_complete["math_score"] >= 70),:]
#display(math_pass_df)
math_pass_pecent = ((math_pass_df["student_name"].count())/Total_Students)*100
#math_pass_pecent
reading_pass_df = school_data_complete.loc[(school_data_complete["reading_score"] >= 70),:]
#display(reading_pass_df)
reading_pass_pecent = ((reading_pass_df["student_name"].count())/Total_Students)*100
#reading_pass_pecent
pass_df = school_data_complete.loc[(school_data_complete["math_score"] >= 70) & (school_data_complete["reading_score"] >= 70),:]
#display(pass_df)
pass_pecent = ((pass_df["student_name"].count())/Total_Students)*100
#pass_pecent
raw_data = {"Total_Schools":[Total_Schools],
"Total_Students":[f"{Total_Students:,}"],
"Total_budget":[f"${Total_budget:0,.2f}"],
"Average Math Score":[Math_Average],
"Average Reading Score":[Reading_Average],
"% Passing Math":[math_pass_pecent],
"% Passing Reading":[reading_pass_pecent],
"pass_pecent":[pass_pecent]}
#raw_data
District_Summary = pd.DataFrame(raw_data)
District_Summary
###Output
_____no_output_____
###Markdown
School Summary * Create an overview table that summarizes key metrics about each school, including: * School Name * School Type * Total Students * Total School Budget * Per Student Budget * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * % Overall Passing (The percentage of students that passed math **and** reading.) * Create a dataframe to hold the above results
###Code
import pandas as pd
# File to Load (Remember to Change These)
school_data_to_load = "Resources/schools_complete.csv"
# Read School and Student Data File and store into Pandas DataFrames
school_data = pd.read_csv(school_data_to_load)
#display(school_data)
#display(school_data_complete)
school_name = school_data.loc[:,["school_name","type","size","budget"]]
school_df = school_name.sort_values(["school_name"])
#school_sdf
per_student_data = (school_df["budget"]/school_df["size"]).unique()
#school_summary_df.insert(4, 'Per Student Budget', per_student_data)
school_df["Per Student Budget"] = per_student_data
per_math_data = school_data_complete.groupby("school_name")["math_score"].mean()
per_math_data = pd.DataFrame(per_math_data)
per_math_data
per_reading_data = school_data_complete.groupby("school_name")["reading_score"].mean()
per_reading_data = pd.DataFrame(per_reading_data)
per_reading_data
score_df = pd.merge(per_math_data,per_reading_data,on="school_name" )
score_df
per_math_pass = math_pass_df.groupby("school_name")["math_score"].count()
per_math_pass_df= per_math_pass.reset_index(name = 'math_pass')
school_unique = math_pass_df.groupby("school_name")
school_unique_grouped = school_unique["size"].value_counts()
school_unique_df =school_unique_grouped.reset_index(name = 'school_unique')
per_reading_pass = reading_pass_df.groupby("school_name")["reading_score"].count()
per_reading_pass= per_reading_pass.reset_index(name = 'reading_pass')
df_math=pd.merge(per_math_pass_df,school_unique_df,on="school_name")
df_math["% Passing Math"] =(df_math['math_pass']/df_math['size'])*100
df1=pd.merge(df_math,per_reading_pass,on="school_name")
df1["% Passing Reading"] =(df1['reading_pass']/df_math['size'])*100
df_read=df1.drop(['math_pass','school_unique','reading_pass'], axis = 1)
df_read
per_overall_pass = pass_df.groupby("school_name")["math_score"].count()
per_overall_pass_df= per_overall_pass.reset_index(name = 'overall_pass')
df_overall = pd.merge(df_read,per_overall_pass_df,on="school_name")
df_overall['% Overall Passing']=(df_overall['overall_pass']/df_overall['size'])*100
df_overall
school_summary_df = pd.merge(school_df,score_df,on = "school_name")
school_summary_df_final = pd.merge(school_summary_df,df_overall,on = "school_name")
school_summary_df_finaldf=school_summary_df_final.drop(['size_y','overall_pass',], axis = 1)
school_summary_df_finaldf
school_summary_df = school_summary_df_finaldf.rename(columns = {
"school_name" :"",
"type" : "School Type",
"size_x" : "Total Students",
"budget" : "Total School Budget",
"Per Student Budget" : "Per Student Budget",
"math_score" : "Average Math Score",
"reading_score" : "Average Reading Score"
})
summary_df = school_summary_df.set_index("")
summary_df_final = summary_df.copy()
summary_df_final["Total School Budget"] = summary_df["Total School Budget"].map("${:,}".format)
summary_df_final["Per Student Budget"] = summary_df["Per Student Budget"].map("${:,}".format)
summary_df_final
###Output
_____no_output_____
###Markdown
Top Performing Schools (By % Overall Passing) * Sort and display the top five performing schools by % overall passing.
###Code
top_school_df = summary_df.copy()
top_school = top_school_df.sort_values(["% Overall Passing"],ascending=False)
top_school["Total School Budget"] = top_school["Total School Budget"].map("${:,.2f}".format)
top_school["Per Student Budget"] = top_school["Per Student Budget"].map("${:,.2f}".format)
top_school.head(5)
###Output
_____no_output_____
###Markdown
Bottom Performing Schools (By % Overall Passing) * Sort and display the five worst-performing schools by % overall passing.
###Code
bottom_school_df = summary_df.copy()
bottom_school = bottom_school_df.sort_values(["% Overall Passing"])
bottom_school["Total School Budget"] = bottom_school["Total School Budget"].map("${:,.2f}".format)
bottom_school["Per Student Budget"] = bottom_school["Per Student Budget"].map("${:,.2f}".format)
bottom_school.head(5)
###Output
_____no_output_____
###Markdown
Math Scores by Grade * Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school. * Create a pandas series for each grade. Hint: use a conditional statement. * Group each series by school * Combine the series into a dataframe * Optional: give the displayed data cleaner formatting
###Code
data_copy = (school_data_complete).copy()
data_copy = data_copy.drop(columns = ["Student ID","student_name","gender","School ID","size","budget","type","reading_score"])
#data_copy
math_9_df = data_copy[data_copy["grade"] == "9th"].groupby(["school_name"]).mean()
math_10_df = data_copy[data_copy["grade"] == "10th"].groupby(["school_name"]).mean()
math_11_df = data_copy[data_copy["grade"] == "11th"].groupby(["school_name"]).mean()
math_12_df = data_copy[data_copy["grade"] == "12th"].groupby(["school_name"]).mean()
math_score1_df = pd.merge(math_9_df,math_10_df,on = "school_name",suffixes = ('_9th','_10th'))
#math_score1_df
math_score2_df = pd.merge(math_11_df,math_12_df,on = "school_name",suffixes = ('_11th','_12th'))
#math_score2_df
grade_math_df1 = pd.merge(math_score1_df,math_score2_df,on = "school_name")
grade_math_df1.reset_index(inplace=True)
grade_math_df = grade_math_df1.rename(columns = {
"school_name": "",
"math_score_9th" : "9th",
"math_score_10th": "10th",
"math_score_11th": "11th",
"math_score_12th": "12th"
})
grade_math_df = grade_math_df.set_index([""])
grade_math_df
###Output
_____no_output_____
###Markdown
Reading Score by Grade * Perform the same operations as above for reading scores
###Code
data_copy2 = (school_data_complete).copy()
data_copy2
#data_copy2 = data_copy2.drop(columns = ["Student ID","student_name","gender","School ID","size","budget","type","math_score"])
#data_copy2
reading_9_df = data_copy2[data_copy2["grade"] == "9th"].groupby(["school_name"]).mean()
reading_10_df = data_copy2[data_copy2["grade"] == "10th"].groupby(["school_name"]).mean()
reading_11_df = data_copy2[data_copy2["grade"] == "11th"].groupby(["school_name"]).mean()
reading_12_df = data_copy2[data_copy2["grade"] == "12th"].groupby(["school_name"]).mean()
reading_score1_df = pd.merge(reading_9_df,reading_10_df,on = "school_name",suffixes = ('_9th','_10th'))
reading_score2_df = pd.merge(reading_11_df,reading_12_df,on = "school_name",suffixes = ('_11th','_12th'))
grade_reading_df1 = pd.merge(reading_score1_df,reading_score2_df,on = "school_name")
grade_reading_df1.reset_index(inplace=True)
#grade_reading_df1
grade_reading_df1 = grade_reading_df1.rename(columns = {
"school_name": "",
"reading_score_9th" : "9th",
"reading_score_10th": "10th",
"reading_score_11th": "11th",
"reading_score_12th": "12th"
})
grade_reading_df1 = grade_reading_df1.set_index([""])
grade_reading_df_final =grade_reading_df1[['9th','10th',"11th","12th"]]
grade_reading_df_final
###Output
_____no_output_____
###Markdown
Scores by School Spending * Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following: * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * Overall Passing Rate (Average of the above two)
###Code
df = summary_df.copy()
df.sort_values(["Per Student Budget"],ascending=False)
df["Spending Ranges (Per Student)"] = pd.cut(df["Per Student Budget"],bins = [1,585,630,645,680],labels = ["<$585","$585-630","$630-645","$645-680"],include_lowest = True)
df1 = df.groupby(["Spending Ranges (Per Student)"]).mean()
df_school_spend =df1.drop(['Total Students','Total School Budget','Per Student Budget'],axis=1)
df_school_spend.round(2)
###Output
_____no_output_____
###Markdown
Scores by School Size * Perform the same operations as above, based on school size.
###Code
df["School Size"] = pd.cut(df["Total Students"],bins = [0,1000,2000,5000],labels = ["Small (<1000)","Medium (1000-2000)","Large (2000-5000)"],include_lowest = True)
df1 = df.groupby(["School Size"]).mean()
df_school_size =df1.drop(['Total Students','Total School Budget','Per Student Budget'],axis=1)
df_school_size
###Output
_____no_output_____
###Markdown
Scores by School Type * Perform the same operations as above, based on school type
###Code
df_school_type = df.groupby(["School Type"]).mean()
df_school_type_final =df_school_type.drop(['Total Students','Total School Budget','Per Student Budget'],axis=1)
df_school_type_final
###Output
_____no_output_____
###Markdown
**District Summary*** Calculate the total number of schools* Calculate the total number of students* Calculate the total budget* Calculate the average math score * Calculate the average reading score* Calculate the percentage of students with a passing math score (70 or greater)* Calculate the percentage of students with a passing reading score (70 or greater)* Calculate the percentage of students who passed math and reading (% Overall Passing)* Create a dataframe to hold the above results* Optional: give the displayed data cleaner formatting
###Code
#Convert size, scores, and budgets into integers
schools_df["size"]=pd.to_numeric(schools_df["size"])
schools_df["budget"]=pd.to_numeric(schools_df["budget"])
merged_df["size"]=pd.to_numeric(merged_df["size"])
merged_df["budget"]=pd.to_numeric(merged_df["budget"])
merged_df["math_score"]=pd.to_numeric(merged_df["math_score"])
merged_df["reading_score"]=pd.to_numeric(merged_df["reading_score"])
#Calculate the total number of schools
school_count = len(merged_df["school_name"].unique())
#Calculate total number of students
student_count = len(merged_df["Student ID"].unique())
#Calculate the total budget
total_budget = schools_df["budget"].sum()
#Calculate average math score
avg_math = merged_df["math_score"].mean()
#Calculate average reading score
avg_reading = merged_df["reading_score"].mean()
#Calculate the percentage of students with a passing math score (70 or greater)
percent_passing_math = merged_df.loc[merged_df["math_score"]>=70]["math_score"].count()/student_count*100
#Calculate the percentage of students with a passing reading score (70 or greater)
percent_passing_reading = merged_df.loc[merged_df["reading_score"]>=70]["reading_score"].count()/student_count*100
#Calculate overall passing percentage
percent_passing_overall = merged_df[(merged_df["math_score"]>=70)& (merged_df["reading_score"]>=70)]["student_name"].count()/student_count*100
#Create district summary dataframe
#Store values in a dictionary
district_summary = pd.DataFrame({
"Total Schools": [school_count],
"Total Students": [student_count],
"Total Budget": [total_budget],
"Average Math Score": [avg_math],
"Average Reading Score": [avg_reading],
"% Passing Math": [percent_passing_math],
"% Passing Reading": [percent_passing_reading],
"% Passing Overall": [percent_passing_overall]})
#create new dataframe to change format
district_summary = district_summary[["Total Schools",
"Total Students",
"Total Budget",
"Average Math Score",
"Average Reading Score",
"% Passing Math",
"% Passing Reading",
"% Passing Overall"]]
#format dataframe
district_summary["Total Students"] = district_summary["Total Students"].map("{:,}".format)
district_summary["Total Budget"] = district_summary["Total Budget"].map("${:,}".format)
district_summary["Average Math Score"] = district_summary["Average Math Score"].map("{:,.2f}".format)
district_summary["Average Reading Score"] = district_summary["Average Reading Score"].map("{:,.2f}".format)
district_summary["% Passing Math"] = district_summary["% Passing Math"].map("{:,.2f}".format)
district_summary["% Passing Reading"] = district_summary["% Passing Reading"].map("{:,.2f}".format)
district_summary["% Passing Overall"] = district_summary["% Passing Overall"].map("{:,.2f}".format)
district_summary
###Output
_____no_output_____
###Markdown
**School Summary** * Create an overview table that summarizes key metrics about each school, including: * School Name * School Type * Total Students * Total School Budget * Per Student Budget * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * % Overall Passing (The percentage of students that passed math and reading.)* Create a dataframe to hold the above results
###Code
#Group by school
school = merged_df.set_index("school_name").groupby(["school_name"])
#School types
school_type = schools_df.set_index("school_name")["type"]
#Total students per school
students_per_school = school["Student ID"].count()
#Total school budget
school_budget = schools_df.set_index("school_name")["budget"]
#school budget per student
school_size = schools_df.set_index("school_name")["size"]
per_student_budget = school_budget/students_per_school
#Average math score by school
avg_math_by_school = school["math_score"].mean()
#Average reading score by school
avg_reading_by_school = school["reading_score"].mean()
#Percent passing math by school
perc_pass_math_by_school = merged_df.loc[merged_df["math_score"]>=70].groupby("school_name")["Student ID"].count()/school_size*100
#Percent passing reading by school
perc_pass_reading_by_school = merged_df.loc[merged_df["reading_score"]>=70].groupby("school_name")["Student ID"].count()/school_size*100
#Percent passing ovewrall by school
perc_pass_overall_by_school = merged_df[(merged_df["math_score"]>=70)&(merged_df["reading_score"]>=70)].groupby("school_name")["Student ID"].count()/school_size*100
#Create a data frame for the above information
#Store Values in Dictionary
by_school_summary = pd.DataFrame({
"School Type": school_type,
"Total Students": students_per_school,
"Total School Budget": school_budget,
"Per Student Budget": per_student_budget,
"Average Math Score": avg_math_by_school,
"Average Reading Score": avg_reading_by_school,
"% Passing Math": perc_pass_math_by_school,
"% Passing Reading": perc_pass_reading_by_school,
"% Passing Overall": perc_pass_overall_by_school
})
#Dataframe for data munging
by_school_summary = by_school_summary[["School Type",
"Total Students",
"Total School Budget",
"Per Student Budget",
"Average Math Score",
"Average Reading Score",
"% Passing Math",
"% Passing Reading",
"% Passing Overall"]]
#format the dataframe
by_school_summary["Total Students"] = by_school_summary["Total Students"].map("{:,}".format)
by_school_summary["Total School Budget"] = by_school_summary["Total School Budget"].map("${:,.2f}".format)
by_school_summary["Per Student Budget"] = by_school_summary["Per Student Budget"].map("{:,.2f}".format)
by_school_summary["Average Math Score"] = by_school_summary["Average Math Score"].map("{:,.2f}".format)
by_school_summary["Average Reading Score"] = by_school_summary["Average Reading Score"].map("{:,.2f}".format)
by_school_summary["% Passing Math"] = by_school_summary["% Passing Math"].map("{:,.2f}".format)
by_school_summary["% Passing Reading"] = by_school_summary["% Passing Reading"].map("{:,.2f}".format)
by_school_summary["% Passing Overall"] = by_school_summary["% Passing Overall"].map("{:,.2f}".format)
by_school_summary
###Output
_____no_output_____
###Markdown
Top Performing Schools (By % Overall Passing) * Sort and display the five top-performing schools by % overall passing.
###Code
#Sort by_school_sumamry dataframe
sorting_by_school=by_school_summary.sort_values(["% Passing Overall"], ascending=[False])
#Display top five schools
sorting_by_school.head()
###Output
_____no_output_____
###Markdown
Bottom Performing Schools (By % Overall Passing)* Sort and display the five worst-performing schools by % overall passing.
###Code
#Pull the bottom 5 from the above sort
bottom_schools=sorting_by_school.tail()
#Sort the bottom 5 so the worst is at the top
bottom_schools=bottom_schools.sort_values(["% Passing Overall"], ascending=True)
bottom_schools
###Output
_____no_output_____
###Markdown
Math Scores by Grade* Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school. * Create a pandas series for each grade. Hint: use a conditional statement. * Group each series by school * Combine the series into a dataframe * Optional: give the displayed data cleaner formatting
###Code
#Create a table to store average math score for each grade grouped by school
avg_math_9 = merged_df.loc[merged_df["grade"]== "9th"].groupby("school_name")["math_score"].mean()
avg_math_10 = merged_df.loc[merged_df["grade"]== "10th"].groupby("school_name")["math_score"].mean()
avg_math_11 = merged_df.loc[merged_df["grade"]== "11th"].groupby("school_name")["math_score"].mean()
avg_math_12 = merged_df.loc[merged_df["grade"]== "12th"].groupby("school_name")["math_score"].mean()
#Create dataframe for the math averages by grade
avg_math_by_grade_df= pd.DataFrame({"9th": avg_math_9,
"10th": avg_math_10,
"11th": avg_math_11,
"12th": avg_math_12
})
avg_math_by_grade_df = avg_math_by_grade_df[["9th",
"10th",
"11th",
"12th"]]
#format avg_math_by_grade_df
avg_math_by_grade_df["9th"] = avg_math_by_grade_df["9th"].map("{:,.2f}".format)
avg_math_by_grade_df["10th"] = avg_math_by_grade_df["10th"].map("{:,.2f}".format)
avg_math_by_grade_df["11th"] = avg_math_by_grade_df["11th"].map("{:,.2f}".format)
avg_math_by_grade_df["12th"] = avg_math_by_grade_df["12th"].map("{:,.2f}".format)
avg_math_by_grade_df
###Output
_____no_output_____
###Markdown
Reading Score by Grade* Perform the same operations as above for reading
###Code
#Create a table to store average reading score for each grade grouped by school
avg_reading_9 = merged_df.loc[merged_df["grade"]== "9th"].groupby("school_name")["reading_score"].mean()
avg_reading_10 = merged_df.loc[merged_df["grade"]== "10th"].groupby("school_name")["reading_score"].mean()
avg_reading_11 = merged_df.loc[merged_df["grade"]== "11th"].groupby("school_name")["reading_score"].mean()
avg_reading_12 = merged_df.loc[merged_df["grade"]== "12th"].groupby("school_name")["reading_score"].mean()
#Create dataframe for the reading averages by grade
avg_reading_by_grade_df= pd.DataFrame({"9th": avg_reading_9,
"10th": avg_reading_10,
"11th": avg_reading_11,
"12th": avg_reading_12
})
avg_reading_by_grade_df = avg_reading_by_grade_df[["9th",
"10th",
"11th",
"12th"]]
#format avg_reading_by_grade_df
avg_reading_by_grade_df["9th"] = avg_reading_by_grade_df["9th"].map("{:,.2f}".format)
avg_reading_by_grade_df["10th"] = avg_reading_by_grade_df["10th"].map("{:,.2f}".format)
avg_reading_by_grade_df["11th"] = avg_reading_by_grade_df["11th"].map("{:,.2f}".format)
avg_reading_by_grade_df["12th"] = avg_reading_by_grade_df["12th"].map("{:,.2f}".format)
avg_reading_by_grade_df.index.name = "School"
avg_reading_by_grade_df
###Output
_____no_output_____
###Markdown
Scores by School Spending * Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following: * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * Overall Passing Rate (Average of the above two)
###Code
#create bins
bins = [0, 584.99, 629.99, 644.99, 675]
bin_names= ["<$585", "$585-629", "$630-644", "$645-675"]
#The summary columns were formatted as display strings above, so convert them back to numbers before binning/averaging
numeric_cols = ["Total Students", "Per Student Budget", "Average Math Score", "Average Reading Score",
                "% Passing Math", "% Passing Reading", "% Passing Overall"]
by_school_summary[numeric_cols] = by_school_summary[numeric_cols].replace(r'[\$,]', '', regex=True).astype(float)
#Put schools into bins
spending_bins=pd.cut(by_school_summary["Per Student Budget"], bins, labels=bin_names)
by_school_summary["Spending Per Student"]=spending_bins
bin_groups=by_school_summary.groupby("Spending Per Student")
#Calculations by per student spending bins
average_math= bin_groups["Average Math Score"].mean()
average_reading= bin_groups["Average Reading Score"].mean()
math_pass_perc=bin_groups["% Passing Math"].mean()
reading_pass_perc= bin_groups["% Passing Reading"].mean()
overall_pass_perc = (math_pass_perc + reading_pass_perc)/2
#Create dataframe
scores_by_spending = pd.DataFrame({"Average Math Score": average_math,
"Average Reading Score": average_reading,
"% Passing Math": math_pass_perc,
"% Passing Reading": reading_pass_perc,
"% Passing Overall": overall_pass_perc})
scores_by_spending = scores_by_spending[["Average Math Score",
"Average Reading Score",
"% Passing Math",
"% Passing Reading",
"% Passing Overall"]]
scores_by_spending
###Output
_____no_output_____
###Markdown
Scores By School Size * Perform the same operations as above, based on school size.
###Code
#Create bins for school size
bins = [0,999,1999,999999999]
bin_names = ["Small (<1000)", "Medium (1000-2000)" , "Large (>2000)"]
by_school_summary["School Size"] = pd.cut(by_school_summary["Total Students"], bins, labels = bin_names)
#group by school size
bin_groups=by_school_summary.groupby("School Size")
#calculations
average_math= bin_groups["Average Math Score"].mean()
average_reading= bin_groups["Average Reading Score"].mean()
math_pass_perc=bin_groups["% Passing Math"].mean()
reading_pass_perc= bin_groups["% Passing Reading"].mean()
overall_pass_perc = (math_pass_perc + reading_pass_perc)/2
#Create DF
score_by_school_size = pd.DataFrame({
    "Average Math Score": average_math,
    "Average Reading Score": average_reading,
    '% Passing Math': math_pass_perc,
    '% Passing Reading': reading_pass_perc,
    "% Passing Overall": overall_pass_perc})
score_by_school_size=score_by_school_size[[
"Average Math Score",
"Average Reading Score",
'% Passing Math',
'% Passing Reading',
"% Passing Overall"
]]
score_by_school_size
###Output
_____no_output_____
###Markdown
Scores by School Type* Perform the same operations as above, based on school type
###Code
#Group by Type
scores_by_type=by_school_summary.groupby("School Type")
#calculations
mean_math = scores_by_type["Average Math Score"].mean()
mean_read = scores_by_type["Average Reading Score"].mean()
pass_math_perc = scores_by_type["% Passing Math"].mean()
pass_read_perc = scores_by_type["% Passing Reading"].mean()
overall_pass_perc = (pass_math_perc + pass_read_perc)/2
# df build
scores_by_type = pd.DataFrame({
"Average Math Score": mean_math,
"Average Reading Score": mean_read,
'% Passing Math': pass_math_perc,
'% Passing Reading': pass_read_perc,
"% Passing Overall": overall_pass_perc})
#reorder columns
scores_by_type = scores_by_type[[
"Average Math Score",
"Average Reading Score",
'% Passing Math',
'% Passing Reading',
"% Passing Overall"
]]
scores_by_type.index.name = "Type of School"
scores_by_type
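# Hedged alternative sketch: since the score columns were converted back to numbers above, a single
# groupby().mean() on by_school_summary would give the same per-type averages directly
# scores_by_type_alt = by_school_summary.groupby("School Type")[["Average Math Score", "Average Reading Score",
#                                                                "% Passing Math", "% Passing Reading"]].mean()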
###Output
_____no_output_____
###Markdown
Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"
# Read School and Student Data File and store into Pandas Data Frames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
# Combine the data into a single dataset
school_data_complete = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"])
school_data_complete
###Output
_____no_output_____
###Markdown
District Summary* Calculate the total number of schools* Calculate the total number of students* Calculate the total budget* Calculate the average math score * Calculate the average reading score* Calculate the overall passing rate (overall average score), i.e. (avg. math score + avg. reading score)/2* Calculate the percentage of students with a passing math score (70 or greater)* Calculate the percentage of students with a passing reading score (70 or greater)* Create a dataframe to hold the above results* Optional: give the displayed data cleaner formatting
###Code
# Calculate the Totals (School and Students)
school_count = len(school_data_complete["school_name"].unique())
student_count = school_data_complete["Student ID"].count()
# Total Budget
total_budget = school_data["budget"].sum()
# Average Scores
ave_math_score = school_data_complete["math_score"].mean()
ave_reading_score = school_data_complete["reading_score"].mean()
overall_passing_rate = (ave_math_score + ave_reading_score) / 2
# Percentage Pass Rate
passing_math_count = school_data_complete[(school_data_complete["math_score"] >= 70)].count()["student_name"]
passing_math_percentage = passing_math_count / float(student_count) * 100
passing_reading_count = school_data_complete[(school_data_complete["reading_score"] >= 70)].count()["student_name"]
passing_reading_percentage = passing_reading_count / float(student_count) * 100
# Data Cleanup
district_summary = pd.DataFrame({"Total Schools": [school_count],
"Total Students": [student_count],
"Total Budget": [total_budget],
"Average Math Score": [ave_math_score],
"Average Reading Score": [ave_reading_score],
"% Passing Math": [passing_math_percentage],
"% Passing Reading": [passing_reading_percentage],
"% Overall Passing Rate": [overall_passing_rate]})
district_summary = district_summary[["Total Schools", "Total Students", "Total Budget",
"Average Math Score",
"Average Reading Score",
"% Passing Math",
"% Passing Reading",
"% Overall Passing Rate"]]
district_summary["Total Students"] = district_summary["Total Students"].map("{:,}".format)
district_summary["Total Budget"] = district_summary["Total Budget"].map("${:,.2f}".format)
# Display the data frame
district_summary
###Output
_____no_output_____
###Markdown
School Summary * Create an overview table that summarizes key metrics about each school, including: * School Name * School Type * Total Students * Total School Budget * Per Student Budget * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * Overall Passing Rate (Average of the above two) * Create a dataframe to hold the above results
###Code
# School Type
school_types = school_data.set_index(["school_name"])["type"]
# Calculate the total student count
per_school_counts = school_data_complete["school_name"].value_counts()
# Calculate the total school budget and per capita spending
per_school_budget = school_data_complete.groupby(["school_name"]).mean()["budget"]
per_school_capita = per_school_budget / per_school_counts
# Calculate the average test scores
per_school_math = school_data_complete.groupby(["school_name"]).mean()["math_score"]
per_school_reading = school_data_complete.groupby(["school_name"]).mean()["reading_score"]
# Calculate the passing scores by creating a filtered data frame
school_passing_math = school_data_complete[(school_data_complete["math_score"] >= 70)]
school_passing_reading = school_data_complete[(school_data_complete["reading_score"] >= 70)]
per_school_passing_math = school_passing_math.groupby(["school_name"]).count()["student_name"] / per_school_counts * 100
per_school_passing_reading = school_passing_reading.groupby(["school_name"]).count()["student_name"] / per_school_counts * 100
overall_passing_rate = (per_school_passing_math + per_school_passing_reading) / 2
# Convert to data frame
per_school_summary = pd.DataFrame({"School Type": school_types,
"Total Students": per_school_counts,
"Total School Budget": per_school_budget,
"Per Student Budget": per_school_capita,
"Average Math Score": per_school_math,
"Average Reading Score": per_school_reading,
"% Passing Math": per_school_passing_math,
"% Passing Reading": per_school_passing_reading,
"% Overall Passing Rate": overall_passing_rate})
# Minor data munging
per_school_summary = per_school_summary[["School Type", "Total Students", "Total School Budget", "Per Student Budget",
"Average Math Score", "Average Reading Score",
"% Passing Math", "% Passing Reading",
"% Overall Passing Rate"]]
per_school_summary["Total School Budget"] = per_school_summary["Total School Budget"].map("${:,.2f}".format)
per_school_summary["Per Student Budget"] = per_school_summary["Per Student Budget"].map("${:,.2f}".format)
# Display the data frame
per_school_summary
###Output
_____no_output_____
###Markdown
Top Performing Schools (By Passing Rate) * Sort and display the top five schools in overall passing rate
###Code
# Sort and show top five schools
top_schools = per_school_summary.sort_values(["% Overall Passing Rate"], ascending=False)
top_schools.head(5)
###Output
_____no_output_____
###Markdown
Bottom Performing Schools (By Passing Rate) * Sort and display the five worst-performing schools
###Code
# Sort and show bottom five schools
bottom_schools = per_school_summary.sort_values(["% Overall Passing Rate"], ascending=True)
bottom_schools.head(5)
###Output
_____no_output_____
###Markdown
Math Scores by Grade * Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school. * Create a pandas series for each grade. Hint: use a conditional statement. * Group each series by school * Combine the series into a dataframe * Optional: give the displayed data cleaner formatting
###Code
# Create data series of scores by grade levels using conditionals
ninth_graders = school_data_complete[(school_data_complete["grade"] == "9th")]
tenth_graders = school_data_complete[(school_data_complete["grade"] == "10th")]
eleventh_graders = school_data_complete[(school_data_complete["grade"] == "11th")]
twelfth_graders = school_data_complete[(school_data_complete["grade"] == "12th")]
# Group each by school name
ninth_graders_scores = ninth_graders.groupby(["school_name"]).mean()["math_score"]
tenth_graders_scores = tenth_graders.groupby(["school_name"]).mean()["math_score"]
eleventh_graders_scores = eleventh_graders.groupby(["school_name"]).mean()["math_score"]
twelfth_graders_scores = twelfth_graders.groupby(["school_name"]).mean()["math_score"]
# Combine series into single data frame
scores_by_grade = pd.DataFrame({"9th": ninth_graders_scores, "10th": tenth_graders_scores,
"11th": eleventh_graders_scores, "12th": twelfth_graders_scores})
# Minor data munging
scores_by_grade = scores_by_grade[["9th", "10th", "11th", "12th"]]
scores_by_grade.index.name = None
# Display the data frame
scores_by_grade
###Output
_____no_output_____
###Markdown
Reading Score by Grade * Perform the same operations as above for reading scores
###Code
# Create data series of scores by grade levels using conditionals
ninth_graders = school_data_complete[(school_data_complete["grade"] == "9th")]
tenth_graders = school_data_complete[(school_data_complete["grade"] == "10th")]
eleventh_graders = school_data_complete[(school_data_complete["grade"] == "11th")]
twelfth_graders = school_data_complete[(school_data_complete["grade"] == "12th")]
# Group each by school name
ninth_graders_scores = ninth_graders.groupby(["school_name"]).mean()["reading_score"]
tenth_graders_scores = tenth_graders.groupby(["school_name"]).mean()["reading_score"]
eleventh_graders_scores = eleventh_graders.groupby(["school_name"]).mean()["reading_score"]
twelfth_graders_scores = twelfth_graders.groupby(["school_name"]).mean()["reading_score"]
# Combine series into single data frame
scores_by_grade = pd.DataFrame({"9th": ninth_graders_scores, "10th": tenth_graders_scores,
"11th": eleventh_graders_scores, "12th": twelfth_graders_scores})
# Minor data munging
scores_by_grade = scores_by_grade[["9th", "10th", "11th", "12th"]]
scores_by_grade.index.name = None
# Display the data frame
scores_by_grade
###Output
_____no_output_____
###Markdown
Scores by School Spending * Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following: * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * Overall Passing Rate (Average of the above two)
###Code
# Sample bins. Feel free to create your own bins.
spending_bins = [0, 585, 615, 645, 675]
group_names = ["<$585", "$585-615", "$615-645", "$645-675"]
# Categorize the spending based on the bins
per_school_summary["Spending Ranges (Per Student)"] = pd.cut(per_school_capita, spending_bins, labels=group_names)
spending_math_scores = per_school_summary.groupby(["Spending Ranges (Per Student)"]).mean()["Average Math Score"]
spending_reading_scores = per_school_summary.groupby(["Spending Ranges (Per Student)"]).mean()["Average Reading Score"]
spending_passing_math = per_school_summary.groupby(["Spending Ranges (Per Student)"]).mean()["% Passing Math"]
spending_passing_reading = per_school_summary.groupby(["Spending Ranges (Per Student)"]).mean()["% Passing Reading"]
overall_passing_rate = (spending_passing_math + spending_passing_reading) / 2
# Assemble into data frame
spending_summary = pd.DataFrame({"Average Math Score" : spending_math_scores,
"Average Reading Score": spending_reading_scores,
"% Passing Math": spending_passing_math,
"% Passing Reading": spending_passing_reading,
"% Overall Passing Rate": overall_passing_rate})
# Minor data munging
spending_summary = spending_summary[["Average Math Score",
"Average Reading Score",
"% Passing Math", "% Passing Reading",
"% Overall Passing Rate"]]
# Display results
spending_summary
###Output
_____no_output_____
###Markdown
Scores by School Size * Perform the same operations as above, based on school size.
###Code
# Sample bins. Feel free to create your own bins.
size_bins = [0, 1000, 2000, 5000]
group_names = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"]
# Categorize the spending based on the bins
per_school_summary["School Size"] = pd.cut(per_school_summary["Total Students"], size_bins, labels=group_names)
# Calculate the scores based on bins
size_math_scores = per_school_summary.groupby(["School Size"]).mean()["Average Math Score"]
size_reading_scores = per_school_summary.groupby(["School Size"]).mean()["Average Reading Score"]
size_passing_math = per_school_summary.groupby(["School Size"]).mean()["% Passing Math"]
size_passing_reading = per_school_summary.groupby(["School Size"]).mean()["% Passing Reading"]
overall_passing_rate = (size_passing_math + size_passing_reading) / 2
# Assemble into data frame
size_summary = pd.DataFrame({"Average Math Score" : size_math_scores,
"Average Reading Score": size_reading_scores,
"% Passing Math": size_passing_math,
"% Passing Reading": size_passing_reading,
"% Overall Passing Rate": overall_passing_rate})
# Minor data munging
size_summary = size_summary[["Average Math Score",
"Average Reading Score",
"% Passing Math", "% Passing Reading",
"% Overall Passing Rate"]]
# Display results
size_summary
###Output
_____no_output_____
###Markdown
Scores by School Type * Perform the same operations as above, based on school type.
###Code
# Type | Average Math Score | Average Reading Score | % Passing Math | % Passing Reading | % Overall Passing Rate
type_math_scores = per_school_summary.groupby(["School Type"]).mean()["Average Math Score"]
type_reading_scores = per_school_summary.groupby(["School Type"]).mean()["Average Reading Score"]
type_passing_math = per_school_summary.groupby(["School Type"]).mean()["% Passing Math"]
type_passing_reading = per_school_summary.groupby(["School Type"]).mean()["% Passing Reading"]
overall_passing_rate = (type_passing_math + type_passing_reading) / 2
# Assemble into data frame
type_summary = pd.DataFrame({"Average Math Score" : type_math_scores,
"Average Reading Score": type_reading_scores,
"% Passing Math": type_passing_math,
"% Passing Reading": type_passing_reading,
"% Overall Passing Rate": overall_passing_rate})
# Minor data munging
type_summary = type_summary[["Average Math Score",
"Average Reading Score",
"% Passing Math",
"% Passing Reading",
"% Overall Passing Rate"]]
# Display results
type_summary
###Output
_____no_output_____
###Markdown
PyCity Schools Analysis* As a whole, schools with higher budgets, did not yield better test results. By contrast, schools with higher spending per student actually ($645-$675) underperformed compared to schools with smaller budgets (<$585 per student).* As a whole, smaller and medium sized schools dramatically out-performed large sized schools on passing math performances (89-91% passing vs 67%).* As a whole, charter schools out-performed the public district schools across all metrics. However, more analysis will be required to glean if the effect is due to school practices or the fact that charter schools tend to serve smaller student populations per school. ---
###Code
# Dependencies and Setup
import pandas as pd
import numpy as np
# File to Load
school_data = "Resources/schools_complete.csv"
student_data = "Resources/students_complete.csv"
# Read School and Student Data File and store into Pandas Data Frames
school_data = pd.read_csv(school_data)
student_data = pd.read_csv(student_data)
# Combine the data into a single dataset
schl_data = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"])
schl_data.head()
###Output
_____no_output_____
###Markdown
District Summary* Calculate the total number of schools* Calculate the total number of students* Calculate the total budget* Calculate the average math score * Calculate the average reading score* Calculate the overall passing rate (overall average score), i.e. (avg. math score + avg. reading score)/2* Calculate the percentage of students with a passing math score (70 or greater)* Calculate the percentage of students with a passing reading score (70 or greater)* Create a dataframe to hold the above results* Optional: give the displayed data cleaner formatting
###Code
# total number of schools
total_dist_schools = schl_data['School ID'].nunique()
# total number of students
total_dist_students = schl_data['Student ID'].nunique()
# total budget
total_dist_budget = (schl_data['budget']/schl_data['size']).sum()
# average math score
avg_dist_math = schl_data.math_score.mean()
# average reading score
avg_dist_reading = schl_data.reading_score.mean()
# % passing math
dist_pass_math = ((schl_data['math_score'] >= 70).sum() / total_dist_students) * 100
# % passing reading
dist_pass_reading = ((schl_data['reading_score'] >= 70).sum() / total_dist_students) * 100
# overall passing rate
dist_overall_pass = (dist_pass_math + dist_pass_reading) / 2
dist_summary = pd.DataFrame({'Total Schools': [total_dist_schools],
'Total Students': [total_dist_students],
'Total Budget': [total_dist_budget],
'Average Math Score': [avg_dist_math],
'Average Reading Score': [avg_dist_reading],
'% Passing Math': [dist_pass_math],
'% Passing Reading': [dist_pass_reading],
'% Overall Passing Rate': [dist_overall_pass]})
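# (Added sketch) the optional "cleaner formatting" step from the instructions above;
# note this converts the formatted columns to strings, so do it only for display
dist_summary["Total Students"] = dist_summary["Total Students"].map("{:,}".format)
dist_summary["Total Budget"] = dist_summary["Total Budget"].map("${:,.2f}".format)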
dist_summary
###Output
_____no_output_____
###Markdown
School Summary * Create an overview table that summarizes key metrics about each school, including: * School Name * School Type * Total Students * Total School Budget * Per Student Budget * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * Overall Passing Rate (Average of the above two) * Create a dataframe to hold the above results
###Code
# set up
schl_summary = schl_data
unique_schl = schl_summary.drop_duplicates(subset = 'School ID', keep = 'first')
index_schl = unique_schl.set_index(['school_name'])
# school type
index_schl['School Type'] = index_schl['type']
# total students
index_schl['Total Students'] = index_schl['size']
# total school budget
index_schl['School Budget'] = index_schl['budget']
# per student budget
index_schl['Budget Per Student'] = (index_schl['budget'])/(index_schl['size'])
# avg math score
index_schl['Average Math Score'] = schl_summary.groupby(['school_name']).math_score.mean()
# avg reading score
index_schl['Average Reading Score'] = schl_summary.groupby(['school_name']).reading_score.mean()
# % passing math
num_math = schl_summary[schl_summary['math_score'] >= 70]
math_schl = num_math.groupby(['school_name']).count()['Student ID']
index_schl['Percent Passing Math'] = (math_schl/(index_schl['size'])) * 100
# % passing reading
num_reading = schl_summary[schl_summary['reading_score'] >= 70]
reading_schl = num_reading.groupby(['school_name']).count()['Student ID']
index_schl['Percent Passing Reading'] = (reading_schl/(index_schl['size'])) * 100
# overall passing
index_schl['Overall Passing Rate'] = (((math_schl/(index_schl['size'])) * 100)+((reading_schl/(index_schl['size'])) * 100)) / 2
schl_summ = index_schl[['School Type', 'Total Students', 'School Budget', 'Budget Per Student', 'Average Math Score', 'Average Reading Score', 'Percent Passing Math', 'Percent Passing Reading', 'Overall Passing Rate']]
schl_summ
###Output
_____no_output_____
###Markdown
Top Performing Schools (By Passing Rate) * Sort and display the top five schools in overall passing rate
###Code
schl_summ.sort_values(by = ['Overall Passing Rate'], ascending = False).head()
###Output
_____no_output_____
###Markdown
Bottom Performing Schools (By Passing Rate) * Sort and display the five worst-performing schools
###Code
schl_summ.sort_values(by = ['Overall Passing Rate']).head()
###Output
_____no_output_____
###Markdown
Math Scores by Grade * Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school. * Create a pandas series for each grade. Hint: use a conditional statement. * Group each series by school * Combine the series into a dataframe * Optional: give the displayed data cleaner formatting
###Code
# ninth
nine = schl_data.loc[schl_data['grade'] == '9th']
math9 = nine.groupby('school_name')['math_score'].mean()
# tenth
ten = schl_data.loc[schl_data['grade'] == '10th']
math10 = ten.groupby('school_name')['math_score'].mean()
# eleventh
eleven = schl_data.loc[schl_data['grade'] == '11th']
math11 = eleven.groupby('school_name')['math_score'].mean()
# twelfth
twelve = schl_data.loc[schl_data['grade'] == '12th']
math12 = twelve.groupby('school_name')['math_score'].mean()
math_grades = pd.DataFrame({'9th':math9,
'10th':math10,
'11th':math11,
'12th':math12})
math_grades
###Output
_____no_output_____
###Markdown
Reading Score by Grade * Perform the same operations as above for reading scores
###Code
# ninth
nine = schl_data.loc[schl_data['grade'] == '9th']
read9 = nine.groupby('school_name')['reading_score'].mean()
# tenth
ten = schl_data.loc[schl_data['grade'] == '10th']
read10 = ten.groupby('school_name')['reading_score'].mean()
# eleventh
eleven = schl_data.loc[schl_data['grade'] == '11th']
read11 = eleven.groupby('school_name')['reading_score'].mean()
# twelfth
twelve = schl_data.loc[schl_data['grade'] == '12th']
read12 = twelve.groupby('school_name')['reading_score'].mean()
reading_grades = pd.DataFrame({'9th':read9,
'10th':read10,
'11th':read11,
'12th':read12})
reading_grades
###Output
_____no_output_____
###Markdown
Scores by School Spending * Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following: * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * Overall Passing Rate (Average of the above two)
###Code
# Sample bins. Feel free to create your own bins.
spending_bins = [0, 585, 615, 645, 675]
group_names = ["<$585", "$586-615", "$616-645", "$645-675"]
score_spending = schl_summ[['Average Math Score', 'Average Reading Score', "Percent Passing Math", "Percent Passing Reading", 'Overall Passing Rate']].groupby(pd.cut(schl_summ["Budget Per Student"], bins = spending_bins, labels = group_names)).mean()
score_spending
###Output
_____no_output_____
###Markdown
Scores by School Size * Perform the same operations as above, based on school size.
###Code
# Sample bins. Feel free to create your own bins.
size_bins = [0, 1000, 2000, 5000]
size_names = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"]
score_size = schl_summ[['Average Math Score', 'Average Reading Score', 'Percent Passing Math', 'Percent Passing Reading', 'Overall Passing Rate']].groupby(pd.cut(schl_summ['Total Students'], bins = size_bins, labels = size_names)).mean()
score_size
###Output
_____no_output_____
###Markdown
Scores by School Type * Perform the same operations as above, based on school type.
###Code
type_bins = [0, 1, 2]
# note: the labels must follow the numeric mapping below (Charter -> 1, District -> 2)
type_names = ['Charter', 'District']
schl_typ = schl_summ
schl_typ['School Type'] = schl_summ['School Type'].replace({'Charter': 1, 'District':2})
score_type = schl_typ[['Average Math Score', 'Average Reading Score', 'Percent Passing Math', 'Percent Passing Reading', 'Overall Passing Rate']].groupby(pd.cut(schl_typ['School Type'], bins = type_bins, labels = type_names)).mean()
score_type
###Output
_____no_output_____ |
.ipynb_checkpoints/explore_embeddings-checkpoint.ipynb | ###Markdown
loading node2vec embeddings
###Code
import numpy as np

with open('gae/data/saved/node2vec/disease_weighted.emb') as f:
embeddings = f.readlines()[1:]
embeddings.sort(key=lambda l: int(l.replace('\n', '').split(' ')[0]))
embeddings = [[float(n) for n in l.split(' ')] for l in embeddings]
node_list = [e[0] for e in embeddings]
embeddings = [e[1:] for e in embeddings]
embedding_matrix = np.stack(embeddings)
embedding_matrix.shape
###Output
_____no_output_____
###Markdown
loading gmvae embeddings
###Code
z = np.load('gae/data/saved/disease_network_z.npy')
z.shape
with open('gae/data/diseasome/node_list.txt') as f:
node_order = f.readlines()
node_order = [int(n.replace('\n', '')) for n in node_order]
# sorted z
z = np.stack(sorted(zip(node_order, z.tolist()), key=lambda x: x[0]))
z = np.stack(list(zip(*z))[1])
###Output
_____no_output_____
###Markdown
Dataframe
###Code
import pandas as pd
df = pd.DataFrame()
df['node2vec'] = embedding_matrix.tolist()
df['gmvae'] = z.tolist()
df = df.set_index(pd.Series(node_list))
df.head()
###Output
_____no_output_____
###Markdown
tsne-plot
###Code
from sklearn.manifold import TSNE
embeddings_tsne = TSNE(n_components=2).fit_transform(embedding_matrix)
embeddings_tsne.shape
import matplotlib.pyplot as plt
import seaborn as sns

plt.figure(figsize=(16, 10))
sns.scatterplot(
x=embeddings_tsne[:, 0], y=embeddings_tsne[:, 1],
palette=sns.color_palette("hls", 10),
legend='full',
alpha=0.5,
color='red'
)
###Output
_____no_output_____ |
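###Markdown
(Added sketch, not in the original notebook) The same 2-D view can be made for the GMVAE embeddings `z` loaded above, so the two embedding spaces can be compared side by side.
###Code
z_tsne = TSNE(n_components=2).fit_transform(z)
plt.figure(figsize=(16, 10))
sns.scatterplot(
    x=z_tsne[:, 0], y=z_tsne[:, 1],
    alpha=0.5,
    color='blue'
)
###Output
_____no_output_____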
Jupyter_Intro.ipynb | ###Markdown
Jupyter Notebook* Possible to write Markdown. I can also write chemical formulas in $L^{A}T_{E}X$, e.g. $Al_2O_3$. Formatting in __bold__ and *italic*.
###Code
# Since this is running on a python 3 kernel, we can comment our code.
a=2
b=3
a+b
name=input("Enter your name.")
print("Hello", name)
import numpy as numpy
import matplotlib.pyplot as plt
import matplotlib as mpl
%matplotlib inline
#plot setup---------------------------------
fig=plt.figure(figsize=(5,3), dpi=200)
plt.style.use('default')
mpl.rcParams.update({'font.size': 10})
#-------------------------------------------
x=numpy.linspace(0,10,1000)
y=numpy.sin(x)
plt.plot(x,y,'r-', label="sine")
y_prime=numpy.gradient(y)
plt.plot(x,y_prime*100, 'b-', label="1st derivative of sine") #scale factor of 100
plt.plot(x-numpy.pi/2,y,'k--', label="cosine")
plt.xlabel("Time, s")
plt.ylabel("Intensity, a.u.")
plt.legend(frameon=False)
plt.show()
###Output
_____no_output_____
###Markdown
Challenge: Vectors from two devices (e.g. a mass spectrometer and a temperature controller) are recorded at different times; how do you synchronize the two? One possible approach is sketched at the end of the code cell below.
###Code
from scipy.interpolate import griddata
time_T=[0,10,20,30,40,50,60]
Temperature=numpy.array([25,25,50,75,100,125,150])
time_MS=numpy.linspace(0,60,200)
MassSpec=numpy.linspace(1e-6,2e-6,200)+numpy.random.rand(200)*1e-7
#plt.plot(time_T,Temperature)
plt.plot(time_MS, MassSpec)
plt.show()
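# --- Added sketch (one possible answer to the challenge above, assuming linear
# interpolation is acceptable): resample the sparse temperature readings onto the
# dense MS time axis so both signals share a single time vector ---
Temperature_on_MS = numpy.interp(time_MS, time_T, Temperature)
plt.figure()
plt.plot(time_MS, Temperature_on_MS, 'r-', label="Temperature (interpolated)")
plt.plot(time_MS, MassSpec*1e7, 'b-', label="MS signal (scaled x1e7 for display)")
plt.xlabel("Time, s")
plt.legend(frameon=False)
plt.show()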
###Output
_____no_output_____ |
ml_workflow_notebook_template_v01.ipynb | ###Markdown
1. Goals. State your goals. y-choice goals: - What is your y, what are you trying to predict?- Where are you? What are you looking at?- Where do you want to be? What are you looking for?- How will you get there (to where you want to be)?- vs. "solving a user problem"...it could be translated into this, but often is not. What are your end-goals, even if there are also intermediate goals along the way? Model choice goals: Also, do you have any goals related to model size, model 'explainability', or model flexibility, etc.? Are there constraints on what kind of model you will, can, or want to use?
###Code
###Output
_____no_output_____
###Markdown
*E.g. The goal is to predict the species of iris.* 2. Standards, Best Practice, and Terminology 3. Hardware & Software: Pick hardware and software on which to run. *E.g. the solution should not require special hardware, or should run in a colab notebook. Standard desktop operating systems (Windows, Linux, macOS) should suffice; in AWS, no specific (EC2) instance is needed.* 4. Environment(s): Set up your virtual environments, kernels, etc. *The python environment will be a venv environment (in a docker container).* 5. Software Libraries: Import your (main, initial) libraries, noting versions: - keep track of what versions you are using and what versions you need of packages - best practice: document what all of your software package and library versions are (os, python, conda, pandas, etc.) - maybe create a bundle of that software to be used later by other people. *E.g. the solution will use linux, python, venv, sklearn, docker, AWS, and notebooks.* 6. Get Data: Get files/data sets, etc. - obtaining data: source, scraping - raw data: issues, file formats - loaded data: dataframe, arrays, database 7. EDA: Exploratory Data Analysis. Initial Exploration 1: first observations. 8. Cleaning & Wrangling Data (90% of time will be spent cleaning data) 9. Formatting, "feature engineering," etc. 10. Features & Focus - What is y (what you will predict)? 11. Hypothesis: Articulate your hypothesis. What is your null hypothesis? 12. Baseline Prediction: Establish a baseline prediction. 13. Split your Dataset: Make your sets: train, val, & test sets - issue: random or time-based split - issue: split ratio 14. "Wrangle" Using a Function: make sure the data processing steps that you did are standardized for easy repetition and application to your split datasets (for all: train, val, test) 15. Family of Variables: (Make) your family of variables 16. Try and Compare a Suite of Models: Pick what models you will try running. Try and compare multiple types of models if possible. Try to 'argue' why you have picked and not picked types of models based on your situation; a cross-validation comparison is sketched in the next code cell. https://www.datasciencecentral.com/profiles/blogs/40-techniques-used-by-data-scientists
###Code
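# Illustrative sketch only: this template cell was left blank in the original.
# Compare a few candidate model families with cross-validation; the iris data
# and the specific models are placeholders, swap in your own X, y and candidates.
from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
X, y = load_iris(return_X_y=True)
candidates = {
    "logistic_regression": LogisticRegression(max_iter=1000),
    "decision_tree": DecisionTreeClassifier(random_state=0),
    "random_forest": RandomForestClassifier(n_estimators=100, random_state=0),
}
for name, model in candidates.items():
    scores = cross_val_score(model, X, y, cv=5)
    print(f"{name}: mean accuracy = {scores.mean():.3f} (+/- {scores.std():.3f})")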
###Output
_____no_output_____
###Markdown
17. Pre-Pipeline Phase: Some say that before building a pipeline, you should first run the model with each step done separately, which makes debugging easier; a sketch of this is in the next code cell.
###Code
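# Illustrative sketch only: this template cell was left blank in the original.
# First run each step separately (easy to inspect and debug), then wrap the same
# steps in a pipeline once they look right. Iris is just a placeholder dataset.
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
X, y = load_iris(return_X_y=True)
X_train, X_val, y_train, y_val = train_test_split(X, y, random_state=0)
# step-by-step version
scaler = StandardScaler().fit(X_train)
clf = LogisticRegression(max_iter=1000).fit(scaler.transform(X_train), y_train)
print("step-by-step accuracy:", clf.score(scaler.transform(X_val), y_val))
# equivalent pipeline version
pipe = make_pipeline(StandardScaler(), LogisticRegression(max_iter=1000)).fit(X_train, y_train)
print("pipeline accuracy:", pipe.score(X_val, y_val))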
###Output
_____no_output_____ |
gp_from_scratch.ipynb | ###Markdown
Gaussian Process Algorithm from Scratch
###Code
import numpy as np
from scipy.optimize import minimize
from scipy.linalg import cholesky, cho_solve
import sys
sys.path.insert(0, '/home/emmanuel/Drives/erc/code/kernellib')
sys.path.insert(0,'/Users/eman/Documents/code_projects/kernellib/')
from kernellib.kernels import ard_kernel
import matplotlib.pyplot as plt
%matplotlib inline
%load_ext autoreload
%autoreload 2
# Training data is 11 points in [0,1] inclusive regularly spaced# Traini
x_train = np.linspace(0, 1, 11).reshape(-1, 1)
# True function is sin(2*pi*x) with Gaussian noise
y_train = np.sin(x_train * (2 * np.pi)) + np.random.randn(x_train.shape[0], 1) * 0.2
y_train = np.squeeze(y_train)
x_test = np.linspace(0, 1, 51).reshape(-1, 1)
print(x_train.shape, y_train.shape, x_test.shape)
kernel = 'rbf'
jitter = 1e-9
random_state = 123
init_signal_variance = 1.0
init_length_scale = 1.0
init_likelihood_variance = 0.1
theta0 = np.array([init_signal_variance,
init_likelihood_variance,
                   init_length_scale])
bounds = ((1e-7, 1e7), (1e-7, 1e7), (1e-7, 1e7))
from sklearn.gaussian_process.kernels import _check_length_scale
from scipy.spatial.distance import pdist, cdist, squareform
def ard_kernel(X, Y=None, length_scale=None, eval_gradient=False):
# Determine if the kernel is isotropic or not
anisotropic = np.iterable(length_scale) and len(length_scale) > 1
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, length_scale)
if Y is None:
dists = pdist(X / length_scale, metric='sqeuclidean')
K = np.exp(-.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / length_scale, Y / length_scale,
metric='sqeuclidean')
K = np.exp(-.5 * dists)
if eval_gradient:
if not anisotropic or length_scale.shape[0] == 1:
K_gradient = \
(K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \
/ (length_scale ** 2)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
return K
from sklearn.base import BaseEstimator, RegressorMixin
from scipy.optimize import fmin_l_bfgs_b
from scipy.linalg import cholesky, cho_solve, solve_triangular
from sklearn.utils.validation import check_X_y, check_array
from sklearn.gaussian_process.kernels import (_check_length_scale,
RBF, WhiteKernel, ConstantKernel)
from scipy.spatial.distance import pdist, cdist, squareform
class GaussianProcessRegressor(BaseEstimator, RegressorMixin):
def __init__(self, kernel='rbf', jitter=1e-10, random_state=None):
self.kernel = kernel
self.jitter = jitter
self.random_state = random_state
# Initialize heuristics
self.signal_variance = 1.0
self.likelihood_variance = 0.1
def fit(self, X, y):
# Check inputs
X, y = check_X_y(X, y)
self.X_train_ = X
self.y_train_ = y
# Determine Length_scale type
if self.kernel == 'rbf':
init_length_scale = 1.0
elif self.kernel == 'ard':
init_length_scale = np.ones(X.shape[1])
else:
raise ValueError('Unrecognised kernel.')
# Initial HyperParameters
theta0 = np.array([self.signal_variance,
self.likelihood_variance,
init_length_scale])
bounds = ((1e-5, 1e5), (1e-5, 1e5), (1e-5, 1e5))
# Gradient Descent (Negative Log Marginal Likelihood)
best_params = minimize(self.neg_log_marginal_likelihood,
x0=theta0, args=(), method='L-BFGS-B',
bounds=bounds, jac=True)
print(best_params)
# Get the best parameters
self.signal_variance, self.length_scale, self.likelihood_variance = \
self._get_hyperparams(best_params.x)
self.best_neg_log_likelihood = best_params.fun
self.marginal_likelihood = np.exp(-best_params.fun)
# Precompute Prediction quantities
print(self.length_scale, self.signal_variance, self.likelihood_variance)
self.kernel_ = self.set_kernel(length_scale=self.length_scale,
signal_variance=self.signal_variance,
likelihood_variance=self.likelihood_variance)
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.jitter
self.L_ = cholesky(K, lower=True)
if self.y_train_.ndim == 1:
y_train = self.y_train_[:, np.newaxis]
self.weights_ = cho_solve((self.L_, True), y_train.squeeze())
return self
def set_kernel(self, length_scale=None,
signal_variance=None,
likelihood_variance=None):
# Determine Kernel Type
if hasattr(self, 'length_scale'):
length_scale = self.length_scale
elif length_scale is None:
length_scale = 1.0
if hasattr(self, 'signal_variance'):
signal_variance = self.signal_variance
elif signal_variance is None:
signal_variance = 1.0
if hasattr(self, 'likelihood_variance'):
likelihood_variance = self.likelihood_variance
elif likelihood_variance is None:
likelihood_variance = 0.1
kernel = ConstantKernel(constant_value=signal_variance) \
* RBF(length_scale=length_scale) \
+ WhiteKernel(noise_level=likelihood_variance)
return kernel
def K(self, X, Y=None, length_scale=1.0, signal_variance=1.0,
likelihood_variance=0.01, eval_gradient=False):
"""Standard Kernel
K(x, x) = nu * exp(||x-y||^2) + noise * delta
Parameters
----------
X : array, (n_samples x d_dimensions)
Y : array, (n_samples x 1)
length_scale : float, array (1) or (d_dimensions)
signal_variance : float
likelihood_variance : float
eval_gradient : bool
Returns
-------
"""
kernel = ConstantKernel(constant_value=signal_variance) \
* RBF(length_scale=length_scale) \
+ WhiteKernel(noise_level=likelihood_variance)
if eval_gradient:
return kernel(X, Y, eval_gradient=True)
else:
return kernel(X, Y)
def predict(self, X, return_std=False, return_cov=False):
X = check_array(X)
if not hasattr(self, "X_train_"):
raise ValueError('Not fitted yet...')
K_trans = self.kernel_(X, self.X_train_)
y_mean = np.dot(K_trans, self.weights_)
if return_std:
return y_mean, np.sqrt(self.predictive_variance(X, K_trans))
elif return_cov:
return y_mean, self.predictive_covariance(X, K_trans)
else:
return y_mean
def predictive_covariance(self, X, K_trans=None):
X = check_array(X)
if K_trans is None:
K_trans = self.kernel_(X, self.X_train_)
v = cho_solve((self.L_, True), K_trans.T)
covariance = self.kernel_(X) - np.dot(K_trans, v)
return covariance
def predictive_variance(self, X, K_trans=None):
X = check_array(X)
if K_trans is None:
K_trans = self.kernel_(X, self.X_train_)
# Compute K_inv of K from it's cholesky decomposition
L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
K_inv = np.dot(L_inv, L_inv.T)
# Compute variance of predictive distribution
variance = self.kernel_.diag(X)
variance -= np.einsum("ij,ij->i", np.dot(K_trans, K_inv), K_trans)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = variance < 0
if np.any(y_var_negative):
warnings.warn("Predicted variances smaller than 0. "
"Setting those variances to 0.")
variance[y_var_negative] = 0.0
return variance
def _get_hyperparams(self, theta):
signal_variance = theta[0]
likelihood_variance = theta[1]
length_scale = theta[2:]
return signal_variance, length_scale, likelihood_variance
def neg_log_marginal_likelihood(self, theta):
# Unpack theta parameters
signal_variance, length_scale, likelihood_variance = \
self._get_hyperparams(theta)
# Get kernel
        K, K_gradient = self.K(self.X_train_,
length_scale=length_scale,
signal_variance=signal_variance,
likelihood_variance=likelihood_variance,
eval_gradient=True)
# Add the jitter term
K[np.diag_indices_from(K)] += self.jitter
# Solve
try:
L = cholesky(K, lower=True)
except np.linalg.LinAlgError:
return -np.inf, np.zeros_like(theta)
# multi-dimensional output of y_train
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
weights = cho_solve((L, True), y_train)
# -----------------------------
# Log likelihood
# -----------------------------
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, weights)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= (K.shape[0] / 2) * np.log(2 * np.pi)
log_likelihood = log_likelihood_dims.sum(-1)
# ----------------------------
# Log Likelihood Gradient
# ----------------------------
prefactor = np.einsum("ik,jk->ijk", weights, weights)
prefactor -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
log_likelihood_gradient_dims = \
0.5 * np.einsum("ijl,ijk->kl", prefactor, K_gradient)
log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)
return -log_likelihood, -log_likelihood_gradient
gp_model = GaussianProcessRegressor(kernel='rbf')
gp_model.fit(x_train, y_train);
y_pred = gp_model.predict(x_test)
y_pred, y_err = gp_model.predict(x_test, return_std=True)
def ax_plot_sklearn(ax, y_pred, title):
# get the condifence intervals
lower, upper = y_pred - y_err, y_pred + y_err
# plot the training data
ax.plot(x_train, y_train, 'r*')
# plot the predictive mean
ax.plot(x_test, y_pred, 'b')
# plot the confidence bounds
ax.fill_between(x_test.squeeze(), lower.squeeze(), upper.squeeze(), alpha=0.5, color='orange')
ax.set_ylim([-3, 3])
ax.legend(['Observed Data', 'Mean', 'Confidence'])
ax.set_title(title)
return None
f, ax = plt.subplots(1, 1, figsize=(7,5))
ax_plot_sklearn(ax, y_pred, 'Classical')
###Output
_____no_output_____ |
theory/GridSearch.ipynb | ###Markdown
Tuning Parameters with GridSearchCV. Full text [here](https://github.com/justmarkham/scikit-learn-videos/blob/master/08_grid_search.ipynb)
###Code
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cross_validation import cross_val_score
import matplotlib.pyplot as plt
%matplotlib inline
iris = load_iris()
X = iris.data
y = iris.target
# search for an optimal value of K for KNN
k_range = list(range(1, 31))
k_scores = []
for k in k_range:
knn = KNeighborsClassifier(n_neighbors=k)
scores = cross_val_score(knn, X, y, cv=10, scoring='accuracy')
k_scores.append(scores.mean())
print(k_scores)
# plot the value of K for KNN (x-axis) versus the cross-validated accuracy (y-axis)
plt.plot(k_range, k_scores)
plt.xlabel('Value of K for KNN')
plt.ylabel('Cross-Validated Accuracy')
###Output
_____no_output_____
###Markdown
Parameter Tuning with GridSearchCV: a more efficient way to perform the cross-validated parameter search above
###Code
from sklearn.grid_search import GridSearchCV
# define the parameter values that should be searched
k_range = list(range(1, 31))
param_grid = {"n_neighbors": k_range}
grid = GridSearchCV(knn, param_grid, cv=10, scoring='accuracy')
grid.fit(X, y)
# view the complete results (list of named tuples)
grid.grid_scores_
# examine the first tuple
print(grid.grid_scores_[0].parameters)
print(grid.grid_scores_[0].cv_validation_scores)
print(grid.grid_scores_[0].mean_validation_score)
# create a list of the mean scores only
grid_mean_scores = [result.mean_validation_score for result in grid.grid_scores_]
print(grid_mean_scores)
# plot the results
plt.plot(k_range, grid_mean_scores)
plt.xlabel('Value of K for KNN')
plt.ylabel('Cross-Validated Accuracy')
# examine the best model
print(grid.best_score_)
print(grid.best_params_)
print(grid.best_estimator_)
###Output
0.98
{'n_neighbors': 13}
KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',
metric_params=None, n_jobs=1, n_neighbors=13, p=2,
weights='uniform')
###Markdown
Searching multiple parameters simultaneously
###Code
# define the tuning parameter values
k_range = list(range(1, 31))
weight_options = ['uniform', 'distance']
# create a parameter grid
param_grid = dict(n_neighbors=k_range, weights=weight_options)
print(param_grid)
# instantiate and fit the grid
grid = GridSearchCV(knn, param_grid, cv=10, scoring='accuracy')
grid.fit(X, y)
# examine the best model
print(grid.best_score_)
print(grid.best_params_)
###Output
0.98
{'n_neighbors': 13, 'weights': 'uniform'}
###Markdown
Now that you know the best parameters, retrain your model! RandomizedSearchCV* Searches a subset of the parameters, and you control the computational "budget"* Less computationally demanding; basically it randomly tries out different values in a range
###Code
from sklearn.grid_search import RandomizedSearchCV
# specify "parameter distributions" rather than a "parameter grid"
param_dist = dict(n_neighbors=k_range, weights=weight_options)
###Output
_____no_output_____
###Markdown
**Important:** Specify a continuous distribution (rather than a list of values) for any continuous parameter (an example is sketched at the top of the next code cell)
###Code
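# (Added illustration) For a truly continuous hyperparameter you would pass a
# scipy.stats distribution instead of a list, e.g. for an SVM's C parameter:
from scipy.stats import uniform
example_continuous_dist = dict(C=uniform(loc=0, scale=4))  # not used below; KNN has no continuous params here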
# n_iter controls the number of searches
rand = RandomizedSearchCV(knn, param_dist, cv=10, scoring='accuracy', n_iter=10, random_state=5)
rand.fit(X, y)
rand.grid_scores_
# examine the best model
print(rand.best_score_)
print(rand.best_params_)
# "_" is a general purpose "throwaway" variable name to indicate that part of a
# function result is being deliberately ignored, as in code like:
#label, has_label, _ = text.partition(':')
# run RandomizedSearchCV 20 times (with n_iter=10) and record the best score
best_scores = []
for _ in range(20):
rand = RandomizedSearchCV(knn, param_dist, cv=10, scoring='accuracy', n_iter=10)
rand.fit(X, y)
best_scores.append(round(rand.best_score_, 3))
print(best_scores)
###Output
[0.98, 0.98, 0.98, 0.98, 0.98, 0.98, 0.98, 0.98, 0.98, 0.98, 0.98, 0.98, 0.98, 0.98, 0.973, 0.98, 0.98, 0.973, 0.98, 0.973]
|
GPR_scikitlearn_practice.ipynb | ###Markdown
**Import necessary libraries:**
###Code
import matplotlib.pyplot as plt
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.metrics import r2_score
###Output
_____no_output_____
###Markdown
**Define a function:**
###Code
def f(x):
return 200+x*x+x*np.sin(x)-5*(x**1.4)
###Output
_____no_output_____
###Markdown
**Plot the function:**
###Code
xPlot = np.arange(10,30.1,0.1, dtype=float)
yPlot = f(xPlot)
plt.plot(xPlot, yPlot)
plt.show()
###Output
_____no_output_____
###Markdown
**Produce data using above defined function to practice GPR and generate a benchmark:**
###Code
np.random.seed(1)
xData = 10+20*np.random.rand(100)
yData = f(xData)
plt.scatter(xData, yData)
plt.show()
###Output
_____no_output_____
###Markdown
**Prepare data for scikit-learn functions:** The input array should be a 2d array, with rows representing samples and columns representing feature dimensions. *The atleast_2d function converts a 1d array to 2d, and ravel does the reverse by adding/removing the outer brackets.*
###Code
xGPR = np.atleast_2d(xData).T
yGPR = f(xGPR).ravel()
###Output
_____no_output_____
###Markdown
**GPR model definitions:**- Kernels are usually defined as a multiplication of a constant kernel with RBF kernel. White kernels are also used for automatic noise level generation.- First input is length scale and second input is lower and upper bounds of length scale.- If length scale is an array, it defines length scale for each input feature dimension. *Example kernel with 2 input features: kernel = C(1.0, (1e-3, 1e3)) * RBF([5,5], (1e-2, 1e2))*
###Code
kernel = C(1.0, (1e-3,1e3))*RBF(10, (1e-3, 1e4))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=5, normalize_y=True, alpha=1e-3).fit(xGPR,yGPR)
###Output
_____no_output_____
###Markdown
**Check the final (optimized) kernel hyperparameters:**
###Code
print(gp.kernel_)
###Output
31.6**2 * RBF(length_scale=2.34)
###Markdown
**Check the predictions with some test inputs:**
###Code
xTest = np.atleast_2d(xPlot).T
yTest, MSE = gp.predict(xTest, return_std=True)
print('R2 =',r2_score(yPlot, yTest))
plt.plot(xPlot, yPlot, label='Test data')
plt.plot(xPlot, yTest.ravel(), label='Predictions')
plt.legend()
plt.show()
###Output
R2 = 0.9999999617621265
|
dev/Cs/.ipynb_checkpoints/Cs-checkpoint.ipynb | ###Markdown
Get SOs: Transform Psi4-SOs to Cfour-SOs
###Code
Ls=wf.aotoso()
print(Ls.shape)
# Psi4 MOs in SO basis
C_SO=wf.Ca()
#Cb=np.array(wf.Cb())
C_SO.shape
###Output
((20, 13), (20, 7))
###Markdown
* Each AO can contribute only once, if at all.* The first AO is relevant; the following are on symmetry-equivalent atoms.* The mapping of the SOs is the arg-sorted Cfour-mapped first-AO list. * Create the first-AO list in Psi4 AO-order. * Create the first-AO list in Cfour AO-order. * Use `np.argsort` to find the so_c2p mapping. * Invert to find the so_p2c mapping of the Psi4-MO vectors.
###Code
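# --- Small added demo (not part of the original workflow) ---
# The SO reordering below is permutation bookkeeping: np.argsort of the
# Cfour-ordered key list gives the mapping that sorts it, and applying a
# permutation's argsort back to the permutation recovers the identity order.
_demo_perm = np.array([2, 0, 3, 1])  # a hypothetical SO ordering
assert np.array_equal(_demo_perm[np.argsort(_demo_perm)], np.arange(4))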
irrep_lst = []
for isym in range(ptgr.order()):
print(f'\nSymmetry {isym}')
SOs=SymOrbs(Ls.nph[isym], order=wf.nirrep())
SOs.print()
print('Psi4 AO-order:', SOs.first_AOs())
cfour_first_AOs = p2c_map[SOs.first_AOs()]
print('Cfour AO-order:', cfour_first_AOs)
so_c2p = np.argsort(cfour_first_AOs)
print('Cfour argsorted', so_c2p)
so_p2c=so_c2p[so_c2p]
print('And inverted ', so_p2c)
scale=SOs.inv_coef()
print('scale', np.round(scale,3))
C=psi4_to_c4(C_SO.nph[isym], so_p2c, scale, use_scale=True)
irrep_lst.append(C)
C_SOr = psi4.core.Matrix.from_array(irrep_lst)
C_SOr.shape
#BASIS='PVDZ'
C4_cs = read_oldmos('../Cfour/SYM/Cs/OLDMOS.'+BASIS, n_mo_pi)
sym=0
Corg=C_SO.nph[sym]
Creo=C_SOr.nph[sym]
Cc4=C4_cs[sym]
naos=n_mo_pi[sym]
mo=6
print(' Psi4 reordered Cfour')
for k in range(naos):
print(f'{k:3d} {Corg[k,mo]:10.6f} {Creo[k,mo]:10.6f} {Cc4[k,mo]:10.6f}')
print(np.max(Creo[:,mo]-Cc4[:,mo]))
#
# comparison Psi4-MOs and Cfour-MOs in their SO representation
#
for i in range(wf.nirrep()):
print(np.max(abs(C_SOr.nph[i])-abs(C4_cs[i])))
write_oldmos('PSIMOS', Ca_C4, Cbs=Cb_C4)
###Output
_____no_output_____ |
train_rnet/train_RNet.ipynb | ###Markdown
Train the model
###Code
def train_rnet(imdb=None):
if imdb == None:
imagedb = ImageDB(annotation_file)
imdb = imagedb.load_imdb()
#print(imdb.num_images)
imdb = imagedb.append_flipped_images(imdb)
for run in RunBuilder.get_runs(params):
use_cuda=use_cuda= True if run.device == 'cuda' else False
#create model path
if not os.path.exists(model_store_path):
os.makedirs(model_store_path)
lossfn = LossFn()
network = RNet(is_train=True, use_cuda=use_cuda)
if use_cuda:
network.cuda()
optimizer = torch.optim.Adam(network.parameters(), lr=run.lr)
train_data=TrainImageReader(imdb,24,run.batch_size,shuffle=True)
comment = f'-{run}'
for epoch in range(end_epoch):
train_data.reset() # shuffle
epoch_acc = 0.0
for batch_idx,(image,(gt_label,gt_bbox,gt_landmark))in enumerate(train_data):
im_tensor = [ image_tools.convert_image_to_tensor(image[i,:,:,:]) for i in range(image.shape[0]) ]
im_tensor = torch.stack(im_tensor)
im_tensor = Variable(im_tensor)
gt_label = Variable(torch.from_numpy(gt_label).float())
gt_bbox = Variable(torch.from_numpy(gt_bbox).float())
#gt_landmark = Variable(torch.from_numpy(gt_landmark).float())
cls_pred, box_offset_pred = network(im_tensor)
cls_loss = lossfn.cls_loss(gt_label,cls_pred)
box_offset_loss = lossfn.box_loss(gt_label,gt_bbox,box_offset_pred)
all_loss = cls_loss*1.0+box_offset_loss*0.5
if batch_idx%frequent==0:
accuracy=compute_accuracy(cls_pred,gt_label)
show1 = accuracy.data.cpu().numpy()
show2 = cls_loss.data.cpu().numpy()
show3 = box_offset_loss.data.cpu().numpy()
# show4 = landmark_loss.data.cpu().numpy()
show5 = all_loss.data.cpu().numpy()
print("%s : Epoch: %d, Step: %d, accuracy: %s, det loss: %s, bbox loss: %s, all_loss: %s, lr:%s "%
(datetime.datetime.now(),epoch,batch_idx, show1,show2,show3,show5,run.lr))
epoch_acc = show1
                # compute gradients and update the model weights
optimizer.zero_grad()
all_loss.backward()
optimizer.step()
pass
pass
print('save modle acc:', epoch_acc)
torch.save(network.state_dict(), os.path.join(model_store_path,"rnet_epoch_%d.pt" % epoch))
torch.save(network, os.path.join(model_store_path,"rnet_epoch_model_%d.pkl" % epoch))
pass
pass
pass
pass
if __name__ == '__main__':
print('train Rnet Process:...')
    # load the image files
#imagedb = ImageDB(annotation_file,'./image/train')
#gt_imdb = imagedb.load_imdb()
#gt_imdb = imagedb.append_flipped_images(gt_imdb)
    train_rnet()
print('finish....')
#print(gt_imdb[2])
###Output
_____no_output_____ |
_pages/projects/RecipeScraper/LS_AnnaExtension.ipynb | ###Markdown
V1: Take 5 recipes from the same blog. Aggregate the ingredients (but do not combine them).
###Code
# df = pd.read_csv('minimalistbaker_links.csv')
# df = pd.read_csv('skinnytaste_links.csv')
# df = pd.read_csv('halfbakedharvest_links.csv')
###Output
_____no_output_____
###Markdown
Unfortunately, the below doesn't work for HBH, so here is a workaround
###Code
# ===============================================
# HBH WORKAROUND
# Also doesn't work for pinch of yum
# NOPE url = "https://pinchofyum.com/30-minute-vegetarian-meatballs"
# ===============================================
import json
import re
import requests
from bs4 import BeautifulSoup
url = "https://www.halfbakedharvest.com/one-pan-four-cheese-sun-dried-tomato-and-spinach-drunken-pasta-bake/"
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
searched_word = 'wprmpuc_recipe_'
results = soup.body.find_all(string=re.compile('.*{0}.*'.format(searched_word)), recursive=True)
print('Found the word "{0}" {1} times'.format(searched_word, len(results)))
clean_result = results[0].split('=')[1].split(';')[0].strip()
info_dict = json.loads(clean_result)
# info_dict
# url = "https://www.gimmesomeoven.com/poblano-white-chicken-chili/"
# url = "https://www.skinnytaste.com/lentil-soup-with-butternut-and-kale/"
# url = "https://minimalistbaker.com/orange-cranberry-crisp-gluten-free-easy/"
# url = "https://www.twopeasandtheirpod.com/magic-cookie-bars/"
# url = "https://thedefineddish.com/miso-roasted-chicken/"
# url = "https://www.ambitiouskitchen.com/coconut-curried-brown-rice/"
# url = "https://whatsgabycooking.com/chicken-larb-bowls/"
url = "https://paleomg.com/paleo-blueberry-chai-muffins/"
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
searched_word = 'Print'
results = soup.body.find_all(string=re.compile('.*{0}.*'.format(searched_word)), recursive=True)
print('Found the word "{0}" {1} times'.format(searched_word, len(results)))
results[0].parent['href']
###Output
Found the word "Print" 3 times
|
examples/Piecewise Exponential Models and Creating Custom Models.ipynb | ###Markdown
Piecewise Exponential models and creating custom models. This section will be easier if we recall our three mathematical "creatures" and the relationships between them. First is the survival function, $S(t)$, that represents the probability of living past some time, $t$. Next is the _always non-negative and non-decreasing_ cumulative hazard function, $H(t)$. Its relation to $S(t)$ is:$$ S(t) = \exp\left(-H(t)\right)$$Finally, the hazard function, $h(t)$, is the derivative of the cumulative hazard: $$h(t) = \frac{dH(t)}{dt}$$which has the immediate relation to the survival function:$$S(t) = \exp\left(-\int_{0}^t h(s) ds\right)$$Notice that any of the three absolutely defines the other two. Some situations make it easier to define one vs the others. For example, in the Cox model, it's easiest to work with the hazard, $h(t)$. In this section on parametric univariate models, it'll be easiest to work with the cumulative hazard. This is because of an asymmetry in math: derivatives are much easier to compute than integrals. So, if we define the cumulative hazard, both the hazard and survival function are much easier to reason about versus if we define the hazard and ask questions about the other two. At first, it may be easier to think about the hazard, and that's fine, but so long as we are clever enough to also determine the cumulative hazard, then we can ride the computational train. This will be clear by the end of the tutorial. First, let's revisit some simpler parametric models. The Exponential model. Recall that the Exponential model has a constant hazard, that is:$$ h(t) = \frac{1}{\lambda} $$which implies that the cumulative hazard, $H(t)$, has a pretty simple form: $H(t) = \frac{t}{\lambda}$. Below we fit this model to some survival data.
###Code
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from lifelines.datasets import load_waltons
waltons = load_waltons()
T, E = waltons['T'], waltons['E']
from lifelines import ExponentialFitter
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
epf = ExponentialFitter().fit(T, E)
epf.plot_hazard(ax=ax[0])
epf.plot_cumulative_hazard(ax=ax[1])
ax[0].set_title("hazard"); ax[1].set_title("cumulative_hazard")
epf.print_summary(3)
###Output
<lifelines.ExponentialFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -771.913
hypothesis = lambda_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_ 51.840 12.490 27.360 76.320 <0.0005 14.379
###Markdown
This model does a poor job of fitting to our data. If I fit a _non-parametric_ model, like the Nelson-Aalen model, to this data, the lack of fit is very obvious.
###Code
from lifelines import NelsonAalenFitter
ax = epf.plot(figsize=(8,5))
naf = NelsonAalenFitter().fit(T, E)
ax = naf.plot(ax=ax)
plt.legend()
###Output
_____no_output_____
###Markdown
It should be clear that the single parameter model is just averaging the hazards over the entire time period. In reality though, the true hazard rate exhibits some complex non-linear behaviour. Piecewise Exponential models. What if we could break our model into different time periods, and fit an exponential model to each of those? For example, we define the hazard as:$$ h(t) = \begin{cases} \lambda_0, & \text{if $t \le \tau_0$} \\ \lambda_1 & \text{if $\tau_0 < t \le \tau_1$} \\ \lambda_2 & \text{if $\tau_1 < t \le \tau_2$} \\ ... \end{cases}$$This model should be flexible enough to fit better to our dataset. The cumulative hazard is only slightly more complicated, but not too much and can still be defined in Python. In _lifelines_, univariate models are constructed such that one _only_ needs to define the cumulative hazard model with the parameters of interest, and all the hard work of fitting, creating confidence intervals, plotting, etc. is taken care of. For example, _lifelines_ has implemented the `PiecewiseExponentialFitter` model. Internally, the code is a single function that defines the cumulative hazard. The user specifies where they believe the "breaks" are, and _lifelines_ estimates the best $\lambda_i$.
###Code
from lifelines import PiecewiseExponentialFitter
# looking at the above plot, I think there may be breaks at t=40 and t=60.
pf = PiecewiseExponentialFitter(breakpoints=[40, 60]).fit(T, E)
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
ax = pf.plot(ax=axs[1])
pf.plot_hazard(ax=axs[0])
ax = naf.plot(ax=ax, ci_show=False)
axs[0].set_title("hazard"); axs[1].set_title("cumulative_hazard")
pf.print_summary(3)
###Output
<lifelines.PiecewiseExponentialFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -647.118
hypothesis = lambda_0_ != 1, lambda_1_ != 1, lambda_2_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_0_ 162.989 27.144 109.787 216.191 <0.0005 28.630
lambda_1_ 31.366 4.043 23.442 39.290 <0.0005 43.957
lambda_2_ 4.686 0.624 3.462 5.910 <0.0005 28.055
###Markdown
We can see a much better fit in this model. A quantitative measure of better fit is to compare the log-likelihood between the exponential model and the piecewise exponential model (higher is better). The log-likelihood went from -772 to -647, respectively. We could keep going and add more and more breakpoints, but that would end up overfitting to the data. Univariate models in _lifelines_. I mentioned that the `PiecewiseExponentialFitter` was implemented using only its cumulative hazard function. This is not a lie. _lifelines_ has very general semantics for univariate fitters. For example, this is how the entire `ExponentialFitter` is implemented:```pythonclass ExponentialFitter(ParametericUnivariateFitter): _fitted_parameter_names = ["lambda_"] def _cumulative_hazard(self, params, times): lambda_ = params[0] return times / lambda_```We only need to specify the cumulative hazard function because of the 1-1 relationship between the cumulative hazard function and the survival function and the hazard rate. From there, _lifelines_ handles the rest. Defining our own survival models. To show off the flexibility of _lifelines_ univariate models, we'll create a brand new, never before seen, survival model. Looking at the Nelson-Aalen fit, the cumulative hazard looks like there might be an asymptote at $t=80$. This may correspond to an absolute upper limit of subjects' lives. Let's start with that functional form.$$ H_1(t; \alpha) = \frac{\alpha}{(80 - t)} $$We subscript $1$ because we'll investigate other models. In a _lifelines_ univariate model, this is defined in the following code. **Important**: in order to compute derivatives, you must use the numpy imported from the `autograd` library. This is a thin wrapper around the original numpy. See `import` below.
###Code
from lifelines.fitters import ParametericUnivariateFitter
import autograd.numpy as np
class InverseTimeHazardFitter(ParametericUnivariateFitter):
# we tell the model what we want the names of the unknown parameters to be
_fitted_parameter_names = ['alpha_']
# this is the only function we need to define. It always takes two arguments:
# params: an iterable that unpacks the parameters you'll need in the order of _fitted_parameter_names
# times: avector of times that will be passed in.
def _cumulative_hazard(self, params, times):
alpha = params[0]
return alpha /(80 - times)
itf = InverseTimeHazardFitter()
itf.fit(T, E)
itf.print_summary()
ax = itf.plot(figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
plt.legend()
###Output
<lifelines.InverseTimeHazardFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -697.840
hypothesis = alpha_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 21.51 1.72 18.13 24.88 <0.005 106.22
###Markdown
The best fit of the model to the data is:$$H_1(t) = \frac{21.51}{80-t}$$Our choice of 80 as an asymptote was maybe mistaken, so let's allow the asymptote to be another parameter:$$ H_2(t; \alpha, \beta) = \frac{\alpha}{\beta-t} $$If we define the model this way, we need to add a bound to the values that $\beta$ can take. Obviously it can't be smaller than or equal to the maximum observed duration. Generally, the cumulative hazard _must be positive and non-decreasing_. Otherwise the model fit will hit convergence problems.
###Code
class TwoParamInverseTimeHazardFitter(ParametericUnivariateFitter):
_fitted_parameter_names = ['alpha_', 'beta_']
# Sequence of (min, max) pairs for each element in x. None is used to specify no bound
_bounds = [(0, None), (75.0001, None)]
def _cumulative_hazard(self, params, times):
a, b = params
return a / (b - times)
two_f = TwoParamInverseTimeHazardFitter()
two_f.fit(T, E)
two_f.print_summary()
ax = itf.plot(ci_show=False, figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
two_f.plot(ax=ax)
plt.legend()
###Output
<lifelines.TwoParamInverseTimeHazardFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -685.572
hypothesis = alpha_ != 1, beta_ != 76
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 16.50 1.51 13.55 19.46 <0.005 79.98
beta_ 76.55 0.38 75.80 77.30 0.15 2.73
###Markdown
From the output, we see that the value of 76.55 is the suggested asymptote, that is:$$H_2(t) = \frac{16.50} {76.55 - t}$$The curve also appears to track the Nelson-Aalen model better. Let's try one additional parameter, $\gamma$, some sort of measure of decay. $$H_3(t; \alpha, \beta, \gamma) = \frac{\alpha}{(\beta-t)^\gamma} $$
###Code
from lifelines.fitters import ParametericUnivariateFitter
class ThreeParamInverseTimeHazardFitter(ParametericUnivariateFitter):
_fitted_parameter_names = ['alpha_', 'beta_', 'gamma_']
_bounds = [(0, None), (75.0001, None), (0, None)]
# this is the only function we need to define. It always takes two arguments:
# params: an iterable that unpacks the parameters you'll need in the order of _fitted_parameter_names
# times: a numpy vector of times that will be passed in by the optimizer
def _cumulative_hazard(self, params, times):
a, b, c = params
return a / (b - times) ** c
three_f = ThreeParamInverseTimeHazardFitter()
three_f.fit(T, E)
three_f.print_summary()
ax = itf.plot(ci_show=False, figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
ax = two_f.plot(ax=ax, ci_show=False)
ax = three_f.plot(ax=ax)
plt.legend()
###Output
<lifelines.ThreeParamInverseTimeHazardFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -649.378
hypothesis = alpha_ != 1, beta_ != 76, gamma_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 1588776.28 3775137.44 -5810357.13 8987909.70 0.67 0.57
beta_ 100.88 5.88 89.35 112.41 <0.005 15.38
gamma_ 3.83 0.50 2.85 4.81 <0.005 25.82
###Markdown
Our new asymptote is at $t\approx 100, \text{c.i.}=(87, 112)$. The model appears to fit the early times better than the previous models as well; however, our $\alpha$ parameter has more uncertainty now. Continuing to add parameters isn't advisable, as we will overfit to the data. Why fit parametric models anyway? Taking a step back, we are fitting parametric models and comparing them to the non-parametric Nelson-Aalen. Why not just always use the Nelson-Aalen model? 1) Sometimes we have scientific motivations to use a parametric model. That is, using domain knowledge, we may know the system has a parametric model and we wish to fit to that model. 2) In a parametric model, we are borrowing information from _all_ observations to determine the best parameters. To make this more clear, imagine taking a single observation and changing its value wildly. The fitted parameters would change as well. On the other hand, imagine doing the same for a non-parametric model. In this case, only the local survival function or hazard function would change. Because parametric models can borrow information from all observations, and there are far _fewer_ unknowns than in a non-parametric model, parametric models are said to be more _statistically efficient_. 3) Extrapolation: non-parametric models are not easily extended to values outside the observed data. On the other hand, parametric models have no problem with this. However, extrapolation outside observed values is a very dangerous activity.
###Code
fig, axs = plt.subplots(3, figsize=(7, 8), sharex=True)
new_timeline = np.arange(0, 85)
three_f = ThreeParamInverseTimeHazardFitter().fit(T, E, timeline=new_timeline)
three_f.plot_hazard(label='hazard', ax=axs[0]).legend()
three_f.plot_cumulative_hazard(label='cumulative hazard', ax=axs[1]).legend()
three_f.plot_survival_function(label='survival function', ax=axs[2]).legend()
fig.subplots_adjust(hspace=0)
# Hide x labels and tick labels for all but bottom plot.
for ax in axs:
ax.label_outer()
###Output
_____no_output_____
###Markdown
Piecewise exponential models and creating custom models. This section will be easier if we recall our three mathematical "creatures" and the relationships between them. First is the survival function, $S(t)$, that represents the probability of living past some time, $t$. Next is the _always non-negative and non-decreasing_ cumulative hazard function, $H(t)$. Its relation to $S(t)$ is:$$ S(t) = \exp\left(-H(t)\right)$$Finally, the hazard function, $h(t)$, is the derivative of the cumulative hazard: $$h(t) = \frac{dH(t)}{dt}$$which has the immediate relation to the survival function:$$S(t) = \exp\left(-\int_{0}^t h(s) ds\right)$$Notice that any of the three absolutely defines the other two. Some situations make it easier to define one vs the others. For example, in the Cox model, it's easiest to work with the hazard, $h(t)$. In this section on parametric univariate models, it'll be easiest to work with the cumulative hazard. This is because of an asymmetry in math: derivatives are much easier to compute than integrals. So, if we define the cumulative hazard, both the hazard and survival function are much easier to reason about versus if we define the hazard and ask questions about the other two. First, let's revisit some simpler parametric models. The Exponential model. Recall that the Exponential model has a constant hazard, that is:$$ h(t) = \frac{1}{\lambda} $$which implies that the cumulative hazard, $H(t)$, has a pretty simple form: $H(t) = \frac{t}{\lambda}$. Below we fit this model to some survival data.
###Code
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from lifelines.datasets import load_waltons
waltons = load_waltons()
T, E = waltons['T'], waltons['E']
from lifelines import ExponentialFitter
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
epf = ExponentialFitter().fit(T, E)
epf.plot_hazard(ax=ax[0])
epf.plot_cumulative_hazard(ax=ax[1])
ax[0].set_title("hazard"); ax[1].set_title("cumulative_hazard")
epf.print_summary(3)
###Output
<lifelines.ExponentialFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = 418612.094
hypothesis = lambda_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_ 51.840 12.490 27.360 76.320 <0.0005 14.379
###Markdown
This model does a poor job of fitting to our data. If I fit a _non-parametric_ model, like the Nelson-Aalen model, to this data, the Exponential's lack of fit is very obvious.
###Code
from lifelines import NelsonAalenFitter
ax = epf.plot(figsize=(8,5))
naf = NelsonAalenFitter().fit(T, E)
ax = naf.plot(ax=ax)
plt.legend()
###Output
_____no_output_____
###Markdown
It should be clear that the single parameter model is just averaging the hazards over the entire time period. In reality though, the true hazard rate exhibits some complex non-linear behaviour. Piecewise Exponential models. What if we could break our model into different time periods, and fit an exponential model to each of those? For example, we define the hazard as:$$ h(t) = \begin{cases} \lambda_0, & \text{if $t \le \tau_0$} \\ \lambda_1 & \text{if $\tau_0 < t \le \tau_1$} \\ \lambda_2 & \text{if $\tau_1 < t \le \tau_2$} \\ ... \end{cases}$$This model should be flexible enough to fit better to our dataset. The cumulative hazard is only slightly more complicated, but not too much and can still be defined in Python. In _lifelines_, univariate models are constructed such that one _only_ needs to define the cumulative hazard model with the parameters of interest, and all the hard work of fitting, creating confidence intervals, plotting, etc. is taken care of. For example, _lifelines_ has implemented the `PiecewiseExponentialFitter` model. Internally, the code is a single function that defines the cumulative hazard. The user specifies where they believe the "breaks" are, and _lifelines_ estimates the best $\lambda_i$.
###Code
from lifelines import PiecewiseExponentialFitter
# looking at the above plot, I think there may be breaks at t=40 and t=60.
pf = PiecewiseExponentialFitter(breakpoints=[40, 60]).fit(T, E)
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
ax = pf.plot(ax=axs[1])
pf.plot_hazard(ax=axs[0])
ax = naf.plot(ax=ax, ci_show=False)
axs[0].set_title("hazard"); axs[1].set_title("cumulative_hazard")
pf.print_summary(3)
###Output
<lifelines.PiecewiseExponentialFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -647.118
hypothesis = lambda_0_ != 1, lambda_1_ != 1, lambda_2_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_0_ 162.989 27.144 109.787 216.191 <0.0005 28.630
lambda_1_ 31.366 4.043 23.442 39.290 <0.0005 43.957
lambda_2_ 4.686 0.624 3.462 5.910 <0.0005 28.055
###Markdown
We can see a much better fit in this model. A quantitative measure of fit is to compare the log-likelihood between the exponential model and the piecewise exponential model (higher is better). The log-likelihood went from -772 to -647, respectively. We could keep going and add more and more breakpoints, but that would end up overfitting to the data. Univariate models in _lifelines_. I mentioned that the `PiecewiseExponentialFitter` was implemented using only its cumulative hazard function. This is not a lie. _lifelines_ has very general semantics for univariate fitters. For example, this is how the entire `ExponentialFitter` is implemented:```pythonclass ExponentialFitter(ParametricUnivariateFitter): _fitted_parameter_names = ["lambda_"] def _cumulative_hazard(self, params, times): lambda_ = params[0] return times / lambda_```We only need to specify the cumulative hazard function because of the 1:1:1 relationship between the cumulative hazard function and the survival function and the hazard rate. From there, _lifelines_ handles the rest. Defining our own survival models. To show off the flexibility of _lifelines_ univariate models, we'll create a brand new, never before seen, survival model. Looking at the Nelson-Aalen fit, the cumulative hazard looks like there might be an asymptote at $t=80$. This may correspond to an absolute upper limit of subjects' lives. Let's start with that functional form.$$ H_1(t; \alpha) = \frac{\alpha}{(80 - t)} $$We subscript $1$ because we'll investigate other models. In a _lifelines_ univariate model, this is defined in the following code. **Important**: in order to compute derivatives, you must use the numpy imported from the `autograd` library. This is a thin wrapper around the original numpy. Note the `import autograd.numpy as np` below.
###Code
from lifelines.fitters import ParametricUnivariateFitter
import autograd.numpy as np
class InverseTimeHazardFitter(ParametricUnivariateFitter):
# we tell the model what we want the names of the unknown parameters to be
_fitted_parameter_names = ['alpha_']
# this is the only function we need to define. It always takes two arguments:
# params: an iterable that unpacks the parameters you'll need in the order of _fitted_parameter_names
# times: a vector of times that will be passed in.
def _cumulative_hazard(self, params, times):
alpha = params[0]
return alpha /(80 - times)
itf = InverseTimeHazardFitter()
itf.fit(T, E)
itf.print_summary()
ax = itf.plot(figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
plt.legend()
###Output
<lifelines.InverseTimeHazardFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -697.840
hypothesis = alpha_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 21.51 1.72 18.13 24.88 <0.005 106.22
###Markdown
The best fit of the model to the data is:$$H_1(t) = \frac{21.51}{80-t}$$Our choice of 80 as an asymptote was maybe mistaken, so let's allow the asymptote to be another parameter:$$ H_2(t; \alpha, \beta) = \frac{\alpha}{\beta-t} $$If we define the model this way, we need to add a bound to the values that $\beta$ can take. Obviously it can't be smaller than or equal to the maximum observed duration. Generally, the cumulative hazard _must be positive and non-decreasing_. Otherwise the model fit will hit convergence problems.
###Code
class TwoParamInverseTimeHazardFitter(ParametricUnivariateFitter):
_fitted_parameter_names = ['alpha_', 'beta_']
# Sequence of (min, max) pairs for each element in x. None is used to specify no bound
_bounds = [(0, None), (75.0001, None)]
def _cumulative_hazard(self, params, times):
alpha, beta = params
return alpha / (beta - times)
two_f = TwoParamInverseTimeHazardFitter()
two_f.fit(T, E)
two_f.print_summary()
ax = itf.plot(ci_show=False, figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
two_f.plot(ax=ax)
plt.legend()
###Output
<lifelines.TwoParamInverseTimeHazardFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -685.572
hypothesis = alpha_ != 1, beta_ != 76
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 16.50 1.51 13.55 19.46 <0.005 79.98
beta_ 76.55 0.38 75.80 77.30 0.15 2.73
###Markdown
From the output, we see that the value of 76.55 is the suggested asymptote, that is:$$H_2(t) = \frac{16.50} {76.55 - t}$$The curve also appears to track against the Nelson-Aalen model better too. Let's try one additional parameter, $\gamma$, some sort of measure of decay. $$H_3(t; \alpha, \beta, \gamma) = \frac{\alpha}{(\beta-t)^\gamma} $$
###Code
from lifelines.fitters import ParametricUnivariateFitter
class ThreeParamInverseTimeHazardFitter(ParametricUnivariateFitter):
_fitted_parameter_names = ['alpha_', 'beta_', 'gamma_']
_bounds = [(0, None), (75.0001, None), (0, None)]
# this is the only function we need to define. It always takes two arguments:
# params: an iterable that unpacks the parameters you'll need in the order of _fitted_parameter_names
# times: a numpy vector of times that will be passed in by the optimizer
def _cumulative_hazard(self, params, times):
a, b, c = params
return a / (b - times) ** c
three_f = ThreeParamInverseTimeHazardFitter()
three_f.fit(T, E)
three_f.print_summary()
ax = itf.plot(ci_show=False, figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
ax = two_f.plot(ax=ax, ci_show=False)
ax = three_f.plot(ax=ax)
plt.legend()
###Output
<lifelines.ThreeParamInverseTimeHazardFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -649.378
hypothesis = alpha_ != 1, beta_ != 76, gamma_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 1588776.28 3775137.44 -5810357.13 8987909.70 0.67 0.57
beta_ 100.88 5.88 89.35 112.41 <0.005 15.38
gamma_ 3.83 0.50 2.85 4.81 <0.005 25.82
###Markdown
Our new asymptote is at $t\approx 100, \text{c.i.}=(87, 112)$. The model appears to fit the early times better than the previous models as well, however our $\alpha$ parameter has more uncertainty now. Continuing to add parameters isn't advisable, as we will overfit to the data. Why fit parametric models anyways? Taking a step back, we are fitting parametric models and comparing them to the non-parametric Nelson-Aalen. Why not just always use the Nelson-Aalen model? 1) Sometimes we have scientific motivations to use a parametric model. That is, using domain knowledge, we may know the system has a parametric model and we wish to fit to that model. 2) In a parametric model, we are borrowing information from _all_ observations to determine the best parameters. To make this more clear, imagine taking a single observation and changing its value wildly. The fitted parameters would change as well. On the other hand, imagine doing the same for a non-parametric model. In this case, only the local survival function or hazard function would change. Because parametric models can borrow information from all observations, and there are far _fewer_ unknowns than in a non-parametric model, parametric models are said to be more _statistically efficient._ 3) Extrapolation: non-parametric models are not easily extended to values outside the observed data. On the other hand, parametric models have no problem with this. However, extrapolation outside observed values is a very dangerous activity.
###Code
fig, axs = plt.subplots(3, figsize=(7, 8), sharex=True)
new_timeline = np.arange(0, 85)
three_f = ThreeParamInverseTimeHazardFitter().fit(T, E, timeline=new_timeline)
three_f.plot_hazard(label='hazard', ax=axs[0]).legend()
three_f.plot_cumulative_hazard(label='cumulative hazard', ax=axs[1]).legend()
three_f.plot_survival_function(label='survival function', ax=axs[2]).legend()
fig.subplots_adjust(hspace=0)
# Hide x labels and tick labels for all but bottom plot.
for ax in axs:
ax.label_outer()
###Output
_____no_output_____
###Markdown
3-parameter Weibull distributionWe can easily extend the built-in Weibull model (`lifelines.WeibullFitter`) to include a new _location_ parameter:$$ H(t) = \left(\frac{t - \theta}{\lambda}\right)^\rho $$(When $\theta = 0$, this is just the 2-parameter case again). In *lifelines* custom models, this looks like:
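For reference, differentiating this cumulative hazard gives the corresponding hazard (a routine calculus step; lifelines derives it for us automatically via autograd):

$$ h(t) = \frac{dH(t)}{dt} = \frac{\rho}{\lambda}\left(\frac{t - \theta}{\lambda}\right)^{\rho - 1}, \qquad t > \theta $$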
###Code
import autograd.numpy as np
from autograd.scipy.stats import norm
# I'm shifting this to exaggerate the effect
T = T + 10
class ThreeParameterWeibullFitter(ParametricUnivariateFitter):
_fitted_parameter_names = ["lambda_", "rho_", "theta_"]
_bounds = [(0, None), (0, None), (0, T.min()-0.001)]
def _cumulative_hazard(self, params, times):
lambda_, rho_, theta_ = params
return ((times - theta_) / lambda_) ** rho_
tpw = ThreeParameterWeibullFitter()
tpw.fit(T, E)
tpw.print_summary()
ax = tpw.plot_cumulative_hazard(figsize=(8,5))
ax = NelsonAalenFitter().fit(T, E).plot(ax=ax, ci_show=False)
###Output
<lifelines.ThreeParameterWeibullFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -666.715
hypothesis = lambda_ != 1, rho_ != 1, theta_ != 7
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_ 63.92 5.38 53.38 74.47 <0.005 102.58
rho_ 4.20 0.56 3.11 5.29 <0.005 26.67
theta_ 2.55 5.05 -7.35 12.45 0.28 1.83
###Markdown
Inverse Gaussian distributionThe inverse Gaussian distribution is another popular model for survival analysis. Unlike other models, its hazard does not asymptotically converge to 0, allowing for a long tail of survival. Let's model this, using the same parameterization from [Wikipedia](https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution)
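Because this distribution is most naturally written through its CDF, $F(t)$, the implementation below defines a `_cumulative_density` helper and converts it into the cumulative hazard with the standard identity

$$ H(t) = -\log S(t) = -\log\left(1 - F(t)\right). $$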
###Code
from autograd.scipy.stats import norm
class InverseGaussianFitter(ParametricUnivariateFitter):
_fitted_parameter_names = ['lambda_', 'mu_']
def _cumulative_density(self, params, times):
mu_, lambda_ = params
v = norm.cdf(np.sqrt(lambda_ / times) * (times / mu_ - 1), loc=0, scale=1) + \
np.exp(2 * lambda_ / mu_) * norm.cdf(-np.sqrt(lambda_ / times) * (times / mu_ + 1), loc=0, scale=1)
return v
def _cumulative_hazard(self, params, times):
return -np.log(1-self._cumulative_density(params, times))
from lifelines.datasets import load_rossi
rossi = load_rossi()
igf = InverseGaussianFitter()
igf.fit(rossi['week'], rossi['arrest'], timeline=np.arange(1, 500))
igf.print_summary()
igf.plot_hazard()
###Output
<lifelines.InverseGaussianFitter: fitted with 432 observations, 318 censored>
number of subjects = 432
number of events = 114
log-likelihood = -729.797
hypothesis = lambda_ != 1, mu_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_ 7441.43 9296.67 -10779.69 25662.56 0.42 1.24
mu_ 47.86 3.31 41.38 54.35 <0.005 148.83
###Markdown
Bounded lifetimes using the beta distributionMaybe your data is bounded between 0 and some (unknown) upper bound M? That is, lifetimes can't be more than M. Maybe you know M, maybe you don't.
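The model below effectively treats the lifetime as $m \cdot X$ with $X \sim \mathrm{Beta}(\alpha, \beta)$, so its CDF is the regularized incomplete beta function evaluated at $t/m$ (which is what the `betainc` call computes here):

$$ F(t) = I_{t/m}(\alpha, \beta), \qquad 0 \le t \le m. $$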
###Code
n = 100
T = 5 * np.random.random(n)**2
T_censor = 10 * np.random.random(n)**2
E = T < T_censor
T_obs = np.minimum(T, T_censor)
from autograd_gamma import betainc
class BetaFitter(ParametricUnivariateFitter):
_fitted_parameter_names = ['alpha_', 'beta_', "m_"]
_bounds = [(0, None), (0, None), (T.max(), None)]
def _cumulative_density(self, params, times):
alpha_, beta_, m_ = params
return betainc(alpha_, beta_, times / m_)
def _cumulative_hazard(self, params, times):
return -np.log(1-self._cumulative_density(params, times))
beta_fitter = BetaFitter().fit(T_obs, E)
beta_fitter.plot()
beta_fitter.print_summary()
###Output
<lifelines.BetaFitter: fitted with 100 observations, 32 censored>
number of subjects = 100
number of events = 68
log-likelihood = -93.912
hypothesis = alpha_ != 1, beta_ != 1, m_ != 5
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 0.52 0.07 0.39 0.65 <0.005 42.21
beta_ 0.92 0.08 0.76 1.09 0.36 1.48
m_ 4.86 nan nan nan nan nan
###Markdown
Gompertz
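With the parameterization used in the code below, the cumulative hazard and hazard are

$$ H(t) = \nu\left(e^{t/b} - 1\right), \qquad h(t) = \frac{\nu}{b}\,e^{t/b}. $$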
###Code
class GompertzFitter(ParametricUnivariateFitter):
# this parameterization is slightly different than wikipedia.
_fitted_parameter_names = ['nu_', 'b_']
def _cumulative_hazard(self, params, times):
nu_, b_ = params
return nu_ * (np.expm1(times / b_))
ggf = GompertzFitter()
ggf.fit(rossi['week'], rossi['arrest'], timeline=np.arange(1, 120))
ggf.print_summary()
ggf.plot_survival_function()
###Output
<lifelines.GompertzFitter: fitted with 432 observations, 318 censored>
number of subjects = 432
number of events = 114
log-likelihood = -697.815
hypothesis = nu_ != 1, b_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
nu_ 0.20 0.11 -0.01 0.40 <0.005 45.19
b_ 55.19 19.26 17.45 92.93 <0.005 7.68
###Markdown
Piecewise exponential models and creating custom modelsThis section will be easier if we recall our three mathematical "creatures" and the relationships between them. First is the survival function, $S(t)$, that represents the probability of living past some time, $t$. Next is the _always non-negative and non-decreasing_ cumulative hazard function, $H(t)$. Its relation to $S(t)$ is:$$ S(t) = \exp\left(-H(t)\right)$$Finally, the hazard function, $h(t)$, is the derivative of the cumulative hazard: $$h(t) = \frac{dH(t)}{dt}$$which has the immediate relation to the survival function:$$S(t) = \exp\left(-\int_{0}^t h(s) ds\right)$$Notice that any of the three absolutely defines the other two. Some situations make it easier to define one versus the others. For example, in the Cox model, it's easiest to work with the hazard, $h(t)$. In this section on parametric univariate models, it'll be easiest to work with the cumulative hazard. This is because of an asymmetry in math: derivatives are much easier to compute than integrals. So, if we define the cumulative hazard, both the hazard and survival function are much easier to reason about versus if we define the hazard and ask questions about the other two.First, let's revisit some simpler parametric models. The Exponential modelRecall that the Exponential model has a constant hazard, that is:$$ h(t) = \frac{1}{\lambda} $$which implies that the cumulative hazard, $H(t)$, has a pretty simple form: $H(t) = \frac{t}{\lambda}$. Below we fit this model to some survival data.
###Code
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from lifelines.datasets import load_waltons
waltons = load_waltons()
T, E = waltons['T'], waltons['E']
from lifelines import ExponentialFitter
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
epf = ExponentialFitter().fit(T, E)
epf.plot_hazard(ax=ax[0])
epf.plot_cumulative_hazard(ax=ax[1])
ax[0].set_title("hazard"); ax[1].set_title("cumulative_hazard")
epf.print_summary(3)
###Output
<lifelines.ExponentialFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = 418612.094
hypothesis = lambda_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_ 51.840 12.490 27.360 76.320 <0.0005 14.379
###Markdown
This model does a poor job of fitting to our data. If I fit a _non-parametric_ model, like the Nelson-Aalen model, to this data, the Exponential's lack of fit is very obvious.
###Code
from lifelines import NelsonAalenFitter
ax = epf.plot(figsize=(8,5))
naf = NelsonAalenFitter().fit(T, E)
ax = naf.plot(ax=ax)
plt.legend()
###Output
_____no_output_____
###Markdown
It should be clear that the single parameter model is just averaging the hazards over the entire time period. In reality though, the true hazard rate exhibits some complex non-linear behaviour. Piecewise Exponential modelsWhat if we could break our model into different time periods, and fit an exponential model to each of those? For example, we define the hazard as:$$ h(t) = \begin{cases} \lambda_0, & \text{if $t \le \tau_0$} \\ \lambda_1 & \text{if $\tau_0 < t \le \tau_1$} \\ \lambda_2 & \text{if $\tau_1 < t \le \tau_2$} \\ ... \end{cases}$$This model should be flexible enough to fit better to our dataset. The cumulative hazard is only slightly more complicated, but not too much and can still be defined in Python. In _lifelines_, univariate models are constructed such that one _only_ needs to define the cumulative hazard model with the parameters of interest, and all the hard work of fitting, creating confidence intervals, plotting, etc. is taken care of. For example, _lifelines_ has implemented the `PiecewiseExponentialFitter` model. Internally, the code is a single function that defines the cumulative hazard. The user specifies where they believe the "breaks" are, and _lifelines_ estimates the best $\lambda_i$.
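Concretely, integrating the step-function hazard written above gives, for the two-breakpoint case,

$$ H(t) = \int_0^t h(s)\,ds = \begin{cases} \lambda_0 t, & t \le \tau_0 \\ \lambda_0 \tau_0 + \lambda_1 (t - \tau_0), & \tau_0 < t \le \tau_1 \\ \lambda_0 \tau_0 + \lambda_1 (\tau_1 - \tau_0) + \lambda_2 (t - \tau_1), & \tau_1 < t \le \tau_2 \end{cases} $$

(the $\lambda_i$ here follow the hazard as written above; `PiecewiseExponentialFitter` itself may parameterize the pieces differently, e.g. on the scale rather than the rate).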
###Code
from lifelines import PiecewiseExponentialFitter
# looking at the above plot, I think there may be breaks at t=40 and t=60.
pf = PiecewiseExponentialFitter(breakpoints=[40, 60]).fit(T, E)
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
ax = pf.plot(ax=axs[1])
pf.plot_hazard(ax=axs[0])
ax = naf.plot(ax=ax, ci_show=False)
axs[0].set_title("hazard"); axs[1].set_title("cumulative_hazard")
pf.print_summary(3)
###Output
<lifelines.PiecewiseExponentialFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -647.118
hypothesis = lambda_0_ != 1, lambda_1_ != 1, lambda_2_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_0_ 162.989 27.144 109.787 216.191 <0.0005 28.630
lambda_1_ 31.366 4.043 23.442 39.290 <0.0005 43.957
lambda_2_ 4.686 0.624 3.462 5.910 <0.0005 28.055
###Markdown
We can see a much better fit in this model. A quantitative measure of fit is to compare the log-likelihood between the exponential model and the piecewise exponential model (higher is better). The log-likelihood went from -772 to -647, respectively. We could keep going and add more and more breakpoints, but that would end up overfitting to the data. Univariate models in _lifelines_I mentioned that the `PiecewiseExponentialFitter` was implemented using only its cumulative hazard function. This is not a lie. _lifelines_ has very general semantics for univariate fitters. For example, this is how the entire `ExponentialFitter` is implemented:```pythonclass ExponentialFitter(ParametricUnivariateFitter): _fitted_parameter_names = ["lambda_"] def _cumulative_hazard(self, params, times): lambda_ = params[0] return times / lambda_```We only need to specify the cumulative hazard function because of the 1:1:1 relationship between the cumulative hazard function and the survival function and the hazard rate. From there, _lifelines_ handles the rest. Defining our own survival modelsTo show off the flexibility of _lifelines_ univariate models, we'll create a brand new, never before seen, survival model. Looking at the Nelson-Aalen fit, the cumulative hazard looks like there might be an asymptote at $t=80$. This may correspond to an absolute upper limit of subjects' lives. Let's start with that functional form.$$ H_1(t; \alpha) = \frac{\alpha}{(80 - t)} $$We subscript $1$ because we'll investigate other models. In a _lifelines_ univariate model, this is defined in the following code. **Important**: in order to compute derivatives, you must use the numpy imported from the `autograd` library. This is a thin wrapper around the original numpy. Note the `import autograd.numpy as np` below.
###Code
from lifelines.fitters import ParametricUnivariateFitter
import autograd.numpy as np
class InverseTimeHazardFitter(ParametricUnivariateFitter):
# we tell the model what we want the names of the unknown parameters to be
_fitted_parameter_names = ['alpha_']
# this is the only function we need to define. It always takes two arguments:
# params: an iterable that unpacks the parameters you'll need in the order of _fitted_parameter_names
# times: a vector of times that will be passed in.
def _cumulative_hazard(self, params, times):
alpha = params[0]
return alpha /(80 - times)
itf = InverseTimeHazardFitter()
itf.fit(T, E)
itf.print_summary()
ax = itf.plot(figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
plt.legend()
###Output
<lifelines.InverseTimeHazardFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -697.840
hypothesis = alpha_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 21.51 1.72 18.13 24.88 <0.005 106.22
###Markdown
The best fit of the model to the data is:$$H_1(t) = \frac{21.51}{80-t}$$Our choice of 80 as an asymptote was maybe mistaken, so let's allow the asymptote to be another parameter:$$ H_2(t; \alpha, \beta) = \frac{\alpha}{\beta-t} $$If we define the model this way, we need to add a bound to the values that $\beta$ can take. Obviously it can't be smaller than or equal to the maximum observed duration. Generally, the cumulative hazard _must be positive and non-decreasing_. Otherwise the model fit will hit convergence problems.
###Code
class TwoParamInverseTimeHazardFitter(ParametricUnivariateFitter):
_fitted_parameter_names = ['alpha_', 'beta_']
# Sequence of (min, max) pairs for each element in x. None is used to specify no bound
_bounds = [(0, None), (75.0001, None)]
def _cumulative_hazard(self, params, times):
alpha, beta = params
return alpha / (beta - times)
two_f = TwoParamInverseTimeHazardFitter()
two_f.fit(T, E)
two_f.print_summary()
ax = itf.plot(ci_show=False, figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
two_f.plot(ax=ax)
plt.legend()
###Output
<lifelines.TwoParamInverseTimeHazardFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -685.572
hypothesis = alpha_ != 1, beta_ != 76
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 16.50 1.51 13.55 19.46 <0.005 79.98
beta_ 76.55 0.38 75.80 77.30 0.15 2.73
###Markdown
From the output, we see that the value of 76.55 is the suggested asymptote, that is:$$H_2(t) = \frac{16.50} {76.55 - t}$$The curve also appears to track against the Nelson-Aalen model better too. Let's try one additional parameter, $\gamma$, some sort of measure of decay. $$H_3(t; \alpha, \beta, \gamma) = \frac{\alpha}{(\beta-t)^\gamma} $$
###Code
from lifelines.fitters import ParametricUnivariateFitter
class ThreeParamInverseTimeHazardFitter(ParametricUnivariateFitter):
_fitted_parameter_names = ['alpha_', 'beta_', 'gamma_']
_bounds = [(0, None), (75.0001, None), (0, None)]
# this is the only function we need to define. It always takes two arguments:
# params: an iterable that unpacks the parameters you'll need in the order of _fitted_parameter_names
# times: a numpy vector of times that will be passed in by the optimizer
def _cumulative_hazard(self, params, times):
a, b, c = params
return a / (b - times) ** c
three_f = ThreeParamInverseTimeHazardFitter()
three_f.fit(T, E)
three_f.print_summary()
ax = itf.plot(ci_show=False, figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
ax = two_f.plot(ax=ax, ci_show=False)
ax = three_f.plot(ax=ax)
plt.legend()
###Output
<lifelines.ThreeParamInverseTimeHazardFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -649.378
hypothesis = alpha_ != 1, beta_ != 76, gamma_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 1588776.28 3775137.44 -5810357.13 8987909.70 0.67 0.57
beta_ 100.88 5.88 89.35 112.41 <0.005 15.38
gamma_ 3.83 0.50 2.85 4.81 <0.005 25.82
###Markdown
Our new asymptote is at $t\approx 100, \text{c.i.}=(87, 112)$. The model appears to fit the early times better than the previous models as well, however our $\alpha$ parameter has more uncertainty now. Continuing to add parameters isn't advisable, as we will overfit to the data. Why fit parametric models anyways? Taking a step back, we are fitting parametric models and comparing them to the non-parametric Nelson-Aalen. Why not just always use the Nelson-Aalen model? 1) Sometimes we have scientific motivations to use a parametric model. That is, using domain knowledge, we may know the system has a parametric model and we wish to fit to that model. 2) In a parametric model, we are borrowing information from _all_ observations to determine the best parameters. To make this more clear, imagine taking a single observation and changing its value wildly. The fitted parameters would change as well. On the other hand, imagine doing the same for a non-parametric model. In this case, only the local survival function or hazard function would change. Because parametric models can borrow information from all observations, and there are far _fewer_ unknowns than in a non-parametric model, parametric models are said to be more _statistically efficient._ 3) Extrapolation: non-parametric models are not easily extended to values outside the observed data. On the other hand, parametric models have no problem with this. However, extrapolation outside observed values is a very dangerous activity.
###Code
fig, axs = plt.subplots(3, figsize=(7, 8), sharex=True)
new_timeline = np.arange(0, 85)
three_f = ThreeParamInverseTimeHazardFitter().fit(T, E, timeline=new_timeline)
three_f.plot_hazard(label='hazard', ax=axs[0]).legend()
three_f.plot_cumulative_hazard(label='cumulative hazard', ax=axs[1]).legend()
three_f.plot_survival_function(label='survival function', ax=axs[2]).legend()
fig.subplots_adjust(hspace=0)
# Hide x labels and tick labels for all but bottom plot.
for ax in axs:
ax.label_outer()
###Output
_____no_output_____
###Markdown
3-parameter Weibull distributionWe can easily extend the built-in Weibull model (`lifelines.WeibullFitter`) to include a new _location_ parameter:$$ H(t) = \left(\frac{t - \theta}{\lambda}\right)^\rho $$(When $\theta = 0$, this is just the 2-parameter case again). In *lifelines* custom models, this looks like:
###Code
import autograd.numpy as np
from autograd.scipy.stats import norm
# I'm shifting this to exaggerate the effect
T = T + 10
class ThreeParameterWeibullFitter(ParametricUnivariateFitter):
_fitted_parameter_names = ["lambda_", "rho_", "theta_"]
_bounds = [(0, None), (0, None), (0, T.min()-0.001)]
def _cumulative_hazard(self, params, times):
lambda_, rho_, theta_ = params
return ((times - theta_) / lambda_) ** rho_
tpw = ThreeParameterWeibullFitter()
tpw.fit(T, E)
tpw.print_summary()
ax = tpw.plot_cumulative_hazard(figsize=(8,5))
ax = NelsonAalenFitter().fit(T, E).plot(ax=ax, ci_show=False)
###Output
<lifelines.ThreeParameterWeibullFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -666.715
hypothesis = lambda_ != 1, rho_ != 1, theta_ != 7
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_ 63.92 5.38 53.38 74.47 <0.005 102.58
rho_ 4.20 0.56 3.11 5.29 <0.005 26.67
theta_ 2.55 5.05 -7.35 12.45 0.28 1.83
###Markdown
Inverse Gaussian distributionThe inverse Gaussian distribution is another popular model for survival analysis. Unlike other models, its hazard does not asymptotically converge to 0, allowing for a long tail of survival. Let's model this, using the same parameterization from [Wikipedia](https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution)
###Code
from autograd.scipy.stats import norm
class InverseGaussianFitter(ParametricUnivariateFitter):
_fitted_parameter_names = ['lambda_', 'mu_']
def _cumulative_density(self, params, times):
mu_, lambda_ = params
v = norm.cdf(np.sqrt(lambda_ / times) * (times / mu_ - 1), loc=0, scale=1) + \
np.exp(2 * lambda_ / mu_) * norm.cdf(-np.sqrt(lambda_ / times) * (times / mu_ + 1), loc=0, scale=1)
return v
def _cumulative_hazard(self, params, times):
return -np.log(1-self._cumulative_density(params, times))
from lifelines.datasets import load_rossi
rossi = load_rossi()
igf = InverseGaussianFitter()
igf.fit(rossi['week'], rossi['arrest'], timeline=np.arange(1, 500))
igf.print_summary()
igf.plot_hazard()
###Output
<lifelines.InverseGaussianFitter: fitted with 432 observations, 318 censored>
number of subjects = 432
number of events = 114
log-likelihood = -729.797
hypothesis = lambda_ != 1, mu_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_ 7441.43 9296.67 -10779.69 25662.56 0.42 1.24
mu_ 47.86 3.31 41.38 54.35 <0.005 148.83
###Markdown
Bounded lifetimes using the beta distributionMaybe your data is bounded between 0 and some (unknown) upper bound M? That is, lifetimes can't be more than M. Maybe you know M, maybe you don't.
###Code
n = 100
T = 5 * np.random.random(n)**2
T_censor = 10 * np.random.random(n)**2
E = T < T_censor
T_obs = np.minimum(T, T_censor)
from autograd_gamma import betainc
class BetaFitter(ParametricUnivariateFitter):
_fitted_parameter_names = ['alpha_', 'beta_', "m_"]
_bounds = [(0, None), (0, None), (T.max(), None)]
def _cumulative_density(self, params, times):
alpha_, beta_, m_ = params
return betainc(alpha_, beta_, times / m_)
def _cumulative_hazard(self, params, times):
return -np.log(1-self._cumulative_density(params, times))
beta_fitter = BetaFitter().fit(T_obs, E)
beta_fitter.plot()
beta_fitter.print_summary()
###Output
<lifelines.BetaFitter: fitted with 100 observations, 32 censored>
number of subjects = 100
number of events = 68
log-likelihood = -93.912
hypothesis = alpha_ != 1, beta_ != 1, m_ != 5
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 0.52 0.07 0.39 0.65 <0.005 42.21
beta_ 0.92 0.08 0.76 1.09 0.36 1.48
m_ 4.86 nan nan nan nan nan
###Markdown
Gompertz
###Code
class GompertzFitter(ParametricUnivariateFitter):
# this parameterization is slightly different than wikipedia.
_fitted_parameter_names = ['nu_', 'b_']
def _cumulative_hazard(self, params, times):
nu_, b_ = params
return nu_ * (np.expm1(times / b_))
ggf = GompertzFitter()
ggf.fit(rossi['week'], rossi['arrest'], timeline=np.arange(1, 120))
ggf.print_summary()
ggf.plot_survival_function()
###Output
<lifelines.GompertzFitter: fitted with 432 observations, 318 censored>
number of subjects = 432
number of events = 114
log-likelihood = -697.815
hypothesis = nu_ != 1, b_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
nu_ 0.20 0.11 -0.01 0.40 <0.005 45.19
b_ 55.19 19.26 17.45 92.93 <0.005 7.68
###Markdown
Piecewise exponential models and creating custom modelsThis section will be easier if we recall our three mathematical "creatures" and the relationships between them. First is the survival function, $S(t)$, that represents the probability of living past some time, $t$. Next is the _always non-negative and non-decreasing_ cumulative hazard function, $H(t)$. Its relation to $S(t)$ is:$$ S(t) = \exp\left(-H(t)\right)$$Finally, the hazard function, $h(t)$, is the derivative of the cumulative hazard: $$h(t) = \frac{dH(t)}{dt}$$which has the immediate relation to the survival function:$$S(t) = \exp\left(-\int_{0}^t h(s) ds\right)$$Notice that any of the three absolutely defines the other two. Some situations make it easier to define one versus the others. For example, in the Cox model, it's easiest to work with the hazard, $h(t)$. In this section on parametric univariate models, it'll be easiest to work with the cumulative hazard. This is because of an asymmetry in math: derivatives are much easier to compute than integrals. So, if we define the cumulative hazard, both the hazard and survival function are much easier to reason about versus if we define the hazard and ask questions about the other two.First, let's revisit some simpler parametric models. The Exponential modelRecall that the Exponential model has a constant hazard, that is:$$ h(t) = \frac{1}{\lambda} $$which implies that the cumulative hazard, $H(t)$, has a pretty simple form: $H(t) = \frac{t}{\lambda}$. Below we fit this model to some survival data.
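As a quick numerical sanity check of these identities (a self-contained sketch, independent of lifelines; $\lambda = 50$ is an arbitrary choice), integrating a constant hazard with the trapezoid rule recovers the same survival function as exponentiating $-H(t)$ directly:

```python
import numpy as np

lam = 50.0
t = np.linspace(0, 75, 500)

h = np.full_like(t, 1.0 / lam)          # constant hazard h(t) = 1/lambda
H = t / lam                             # closed-form cumulative hazard H(t) = t/lambda
# numerical integral of the hazard, accumulated with the trapezoid rule
H_numeric = np.concatenate([[0.0], np.cumsum((h[1:] + h[:-1]) / 2 * np.diff(t))])

print(np.allclose(H, H_numeric))                      # True
print(np.allclose(np.exp(-H), np.exp(-H_numeric)))    # True: S(t) agrees either way
```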
###Code
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from lifelines.datasets import load_waltons
waltons = load_waltons()
T, E = waltons['T'], waltons['E']
from lifelines import ExponentialFitter
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
epf = ExponentialFitter().fit(T, E)
epf.plot_hazard(ax=ax[0])
epf.plot_cumulative_hazard(ax=ax[1])
ax[0].set_title("hazard"); ax[1].set_title("cumulative_hazard")
epf.print_summary(3)
###Output
_____no_output_____
###Markdown
This model does a poor job of fitting to our data. If I fit a _non-parametric_ model, like the Nelson-Aalen model, to this data, the Exponential's lack of fit is very obvious.
###Code
from lifelines import NelsonAalenFitter
ax = epf.plot(figsize=(8,5))
naf = NelsonAalenFitter().fit(T, E)
ax = naf.plot(ax=ax)
plt.legend()
###Output
_____no_output_____
###Markdown
It should be clear that the single parameter model is just averaging the hazards over the entire time period. In reality though, the true hazard rate exhibits some complex non-linear behaviour. Piecewise Exponential modelsWhat if we could break our model into different time periods, and fit an exponential model to each of those? For example, we define the hazard as:$$ h(t) = \begin{cases} \lambda_0, & \text{if $t \le \tau_0$} \\ \lambda_1 & \text{if $\tau_0 < t \le \tau_1$} \\ \lambda_2 & \text{if $\tau_1 < t \le \tau_2$} \\ ... \end{cases}$$This model should be flexible enough to fit better to our dataset. The cumulative hazard is only slightly more complicated, but not too much and can still be defined in Python. In _lifelines_, univariate models are constructed such that one _only_ needs to define the cumulative hazard model with the parameters of interest, and all the hard work of fitting, creating confidence intervals, plotting, etc. is taken care of. For example, _lifelines_ has implemented the `PiecewiseExponentialFitter` model. Internally, the code is a single function that defines the cumulative hazard. The user specifies where they believe the "breaks" are, and _lifelines_ estimates the best $\lambda_i$.
###Code
from lifelines import PiecewiseExponentialFitter
# looking at the above plot, I think there may be breaks at t=40 and t=60.
pf = PiecewiseExponentialFitter(breakpoints=[40, 60]).fit(T, E)
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
ax = pf.plot(ax=axs[1])
pf.plot_hazard(ax=axs[0])
ax = naf.plot(ax=ax, ci_show=False)
axs[0].set_title("hazard"); axs[1].set_title("cumulative_hazard")
pf.print_summary(3)
###Output
_____no_output_____
###Markdown
We can see a much better fit in this model. A quantitative measure of fit is to compare the log-likelihood between the exponential model and the piecewise exponential model (higher is better). The log-likelihood went from -772 to -647, respectively. We could keep going and add more and more breakpoints, but that would end up overfitting to the data. Univariate models in _lifelines_I mentioned that the `PiecewiseExponentialFitter` was implemented using only its cumulative hazard function. This is not a lie. _lifelines_ has very general semantics for univariate fitters. For example, this is how the entire `ExponentialFitter` is implemented:```pythonclass ExponentialFitter(ParametricUnivariateFitter): _fitted_parameter_names = ["lambda_"] def _cumulative_hazard(self, params, times): lambda_ = params[0] return times / lambda_```We only need to specify the cumulative hazard function because of the 1:1:1 relationship between the cumulative hazard function and the survival function and the hazard rate. From there, _lifelines_ handles the rest. Defining our own survival modelsTo show off the flexibility of _lifelines_ univariate models, we'll create a brand new, never before seen, survival model. Looking at the Nelson-Aalen fit, the cumulative hazard looks like there might be an asymptote at $t=80$. This may correspond to an absolute upper limit of subjects' lives. Let's start with that functional form.$$ H_1(t; \alpha) = \frac{\alpha}{(80 - t)} $$We subscript $1$ because we'll investigate other models. In a _lifelines_ univariate model, this is defined in the following code. **Important**: in order to compute derivatives, you must use the numpy imported from the `autograd` library. This is a thin wrapper around the original numpy. Note the `import autograd.numpy as np` below.
###Code
from lifelines.fitters import ParametricUnivariateFitter
import autograd.numpy as np
class InverseTimeHazardFitter(ParametricUnivariateFitter):
# we tell the model what we want the names of the unknown parameters to be
_fitted_parameter_names = ['alpha_']
# this is the only function we need to define. It always takes two arguments:
# params: an iterable that unpacks the parameters you'll need in the order of _fitted_parameter_names
# times: a vector of times that will be passed in.
def _cumulative_hazard(self, params, times):
alpha = params[0]
return alpha /(80 - times)
itf = InverseTimeHazardFitter()
itf.fit(T, E)
itf.print_summary()
ax = itf.plot(figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
plt.legend()
###Output
_____no_output_____
###Markdown
The best fit of the model to the data is:$$H_1(t) = \frac{21.51}{80-t}$$Our choice of 80 as an asymptote was maybe mistaken, so let's allow the asymptote to be another parameter:$$ H_2(t; \alpha, \beta) = \frac{\alpha}{\beta-t} $$If we define the model this way, we need to add a bound to the values that $\beta$ can take. Obviously it can't be smaller than or equal to the maximum observed duration. Generally, the cumulative hazard _must be positive and non-decreasing_. Otherwise the model fit will hit convergence problems.
###Code
class TwoParamInverseTimeHazardFitter(ParametricUnivariateFitter):
_fitted_parameter_names = ['alpha_', 'beta_']
# Sequence of (min, max) pairs for each element in x. None is used to specify no bound
_bounds = [(0, None), (75.0001, None)]
def _cumulative_hazard(self, params, times):
alpha, beta = params
return alpha / (beta - times)
two_f = TwoParamInverseTimeHazardFitter()
two_f.fit(T, E)
two_f.print_summary()
ax = itf.plot(ci_show=False, figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
two_f.plot(ax=ax)
plt.legend()
###Output
_____no_output_____
###Markdown
From the output, we see that the value of 76.55 is the suggested asymptote, that is:$$H_2(t) = \frac{16.50} {76.55 - t}$$The curve also appears to track against the Nelson-Aalen model better too. Let's try one additional parameter, $\gamma$, some sort of measure of decay. $$H_3(t; \alpha, \beta, \gamma) = \frac{\alpha}{(\beta-t)^\gamma} $$
###Code
from lifelines.fitters import ParametricUnivariateFitter
class ThreeParamInverseTimeHazardFitter(ParametricUnivariateFitter):
_fitted_parameter_names = ['alpha_', 'beta_', 'gamma_']
_bounds = [(0, None), (75.0001, None), (0, None)]
# this is the only function we need to define. It always takes two arguments:
# params: an iterable that unpacks the parameters you'll need in the order of _fitted_parameter_names
# times: a numpy vector of times that will be passed in by the optimizer
def _cumulative_hazard(self, params, times):
a, b, c = params
return a / (b - times) ** c
three_f = ThreeParamInverseTimeHazardFitter()
three_f.fit(T, E)
three_f.print_summary()
ax = itf.plot(ci_show=False, figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
ax = two_f.plot(ax=ax, ci_show=False)
ax = three_f.plot(ax=ax)
plt.legend()
###Output
_____no_output_____
###Markdown
Our new asymptote is at $t\approx 100, \text{c.i.}=(87, 112)$. The model appears to fit the early times better than the previous models as well, however our $\alpha$ parameter has more uncertainty now. Continuing to add parameters isn't advisable, as we will overfit to the data. Why fit parametric models anyways? Taking a step back, we are fitting parametric models and comparing them to the non-parametric Nelson-Aalen. Why not just always use the Nelson-Aalen model? 1) Sometimes we have scientific motivations to use a parametric model. That is, using domain knowledge, we may know the system has a parametric model and we wish to fit to that model. 2) In a parametric model, we are borrowing information from _all_ observations to determine the best parameters. To make this more clear, imagine taking a single observation and changing its value wildly. The fitted parameters would change as well. On the other hand, imagine doing the same for a non-parametric model. In this case, only the local survival function or hazard function would change. Because parametric models can borrow information from all observations, and there are far _fewer_ unknowns than in a non-parametric model, parametric models are said to be more _statistically efficient._ 3) Extrapolation: non-parametric models are not easily extended to values outside the observed data. On the other hand, parametric models have no problem with this. However, extrapolation outside observed values is a very dangerous activity.
###Code
fig, axs = plt.subplots(3, figsize=(7, 8), sharex=True)
new_timeline = np.arange(0, 85)
three_f = ThreeParamInverseTimeHazardFitter().fit(T, E, timeline=new_timeline)
three_f.plot_hazard(label='hazard', ax=axs[0]).legend()
three_f.plot_cumulative_hazard(label='cumulative hazard', ax=axs[1]).legend()
three_f.plot_survival_function(label='survival function', ax=axs[2]).legend()
fig.subplots_adjust(hspace=0)
# Hide x labels and tick labels for all but bottom plot.
for ax in axs:
ax.label_outer()
###Output
_____no_output_____
###Markdown
3-parameter Weibull distributionWe can easily extend the built-in Weibull model (`lifelines.WeibullFitter`) to include a new _location_ parameter:$$ H(t) = \left(\frac{t - \theta}{\lambda}\right)^\rho $$(When $\theta = 0$, this is just the 2-parameter case again). In *lifelines* custom models, this looks like:
###Code
import autograd.numpy as np
from autograd.scipy.stats import norm
# I'm shifting this to exaggerate the effect
T_ = T + 10
class ThreeParameterWeibullFitter(ParametricUnivariateFitter):
_fitted_parameter_names = ["lambda_", "rho_", "theta_"]
_bounds = [(0, None), (0, None), (0, T_.min()-0.001)]  # bound theta by the minimum of the shifted durations actually being fit
def _cumulative_hazard(self, params, times):
lambda_, rho_, theta_ = params
return ((times - theta_) / lambda_) ** rho_
tpw = ThreeParameterWeibullFitter()
tpw.fit(T_, E)
tpw.print_summary()
ax = tpw.plot_cumulative_hazard(figsize=(8,5))
ax = NelsonAalenFitter().fit(T_, E).plot(ax=ax, ci_show=False)
###Output
_____no_output_____
###Markdown
Inverse Gaussian distributionThe inverse Gaussian distribution is another popular model for survival analysis. Unlike other models, its hazard does not asymptotically converge to 0, allowing for a long tail of survival. Let's model this, using the same parameterization from [Wikipedia](https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution)
###Code
from autograd.scipy.stats import norm
class InverseGaussianFitter(ParametricUnivariateFitter):
_fitted_parameter_names = ['lambda_', 'mu_']
def _cumulative_density(self, params, times):
mu_, lambda_ = params
v = norm.cdf(np.sqrt(lambda_ / times) * (times / mu_ - 1), loc=0, scale=1) + \
np.exp(2 * lambda_ / mu_) * norm.cdf(-np.sqrt(lambda_ / times) * (times / mu_ + 1), loc=0, scale=1)
return v
def _cumulative_hazard(self, params, times):
return -np.log(1-np.clip(self._cumulative_density(params, times), 1e-15, 1-1e-15))
igf = InverseGaussianFitter()
igf.fit(T, E)
igf.print_summary()
ax = igf.plot_cumulative_hazard(figsize=(8,5))
ax = NelsonAalenFitter().fit(T, E).plot(ax=ax, ci_show=False)
###Output
_____no_output_____
###Markdown
Gompertz
###Code
class GompertzFitter(ParametricUnivariateFitter):
# this parameterization is slightly different than wikipedia.
_fitted_parameter_names = ['nu_', 'b_']
def _cumulative_hazard(self, params, times):
nu_, b_ = params
return nu_ * (np.expm1(times * b_))
T, E = waltons['T'], waltons['E']
ggf = GompertzFitter()
ggf.fit(T, E)
ggf.print_summary()
ax = ggf.plot_cumulative_hazard(figsize=(8,5))
ax = NelsonAalenFitter().fit(T, E).plot(ax=ax, ci_show=False)
###Output
_____no_output_____
###Markdown
APGWFrom the paper, "A Flexible Parametric Modelling Framework for Survival Analysis", https://arxiv.org/pdf/1901.03212.pdf
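Matching the `_cumulative_hazard` in the code below, the APGW cumulative hazard used here is

$$ H(t) = \frac{\kappa + 1}{\kappa}\left[\left(1 + \frac{(\phi t)^{\gamma}}{\kappa + 1}\right)^{\kappa} - 1\right]. $$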
###Code
class APGWFitter(ParametricUnivariateFitter):
# this parameterization is slightly different than wikipedia.
_fitted_parameter_names = ['kappa_', 'gamma_', 'phi_']
def _cumulative_hazard(self, params, t):
kappa_, gamma_, phi_ = params  # unpack in the same order as _fitted_parameter_names
return (kappa_ + 1) / kappa_ * ((1 + ((phi_ * t) ** gamma_) /(kappa_ + 1)) ** kappa_ -1)
apg = APGWFitter()
apg.fit(T, E)
apg.print_summary(2)
ax = apg.plot_cumulative_hazard(figsize=(8,5))
ax = NelsonAalenFitter().fit(T, E).plot(ax=ax, ci_show=False)
###Output
_____no_output_____
###Markdown
Bounded lifetimes using the beta distributionMaybe your data is bounded between 0 and some (unknown) upper bound M? That is, lifetimes can't be more than M. Maybe you know M, maybe you don't.
###Code
n = 100
T = 5 * np.random.random(n)**2
T_censor = 10 * np.random.random(n)**2
E = T < T_censor
T_obs = np.minimum(T, T_censor)
from autograd_gamma import betainc
class BetaFitter(ParametricUnivariateFitter):
_fitted_parameter_names = ['alpha_', 'beta_', "m_"]
_bounds = [(0, None), (0, None), (T.max(), None)]
def _cumulative_density(self, params, times):
alpha_, beta_, m_ = params
return betainc(alpha_, beta_, times / m_)
def _cumulative_hazard(self, params, times):
return -np.log(1-self._cumulative_density(params, times))
beta_fitter = BetaFitter().fit(T_obs, E)
beta_fitter.plot()
beta_fitter.print_summary()
###Output
/Users/camerondavidson-pilon/code/lifelines/lifelines/fitters/__init__.py:936: StatisticalWarning: The diagonal of the variance_matrix_ has negative values. This could be a problem with BetaFitter's fit to the data.
It's advisable to not trust the variances reported, and to be suspicious of the
fitted parameters too. Perform plots of the cumulative hazard to help understand
the latter's bias.
To fix this, try specifying an `initial_point` kwarg in `fit`.
warnings.warn(warning_text, utils.StatisticalWarning)
/Users/camerondavidson-pilon/code/lifelines/lifelines/fitters/__init__.py:460: RuntimeWarning: invalid value encountered in sqrt
np.einsum("nj,jk,nk->n", gradient_at_times.T, self.variance_matrix_, gradient_at_times.T)
###Markdown
Piecewise Exponential models and creating custom modelsThis section will be easier if we recall our three mathematical "creatures" and the relationships between them. First is the survival function, $S(t)$, that represents the probability of living past some time, $t$. Next is the _always non-negative and non-decreasing_ cumulative hazard function, $H(t)$. Its relation to $S(t)$ is:$$ S(t) = \exp\left(-H(t)\right)$$Finally, the hazard function, $h(t)$, is the derivative of the cumulative hazard: $$h(t) = \frac{dH(t)}{dt}$$which has the immediate relation to the survival function:$$S(t) = \exp\left(-\int_{0}^t h(s) ds\right)$$Notice that any of the three absolutely defines the other two. Some situations make it easier to define one versus the others. For example, in the Cox model, it's easiest to work with the hazard, $h(t)$. In this section on parametric univariate models, it'll be easiest to work with the cumulative hazard. This is because of an asymmetry in math: derivatives are much easier to compute than integrals. So, if we define the cumulative hazard, both the hazard and survival function are much easier to reason about versus if we define the hazard and ask questions about the other two. At first, it may be easier to think about the hazard, and that's fine, but so long as we are clever enough to also determine the cumulative hazard, then we can ride the computational train. This will be clear by the end of the tutorial. First, let's revisit some simpler parametric models. The Exponential modelRecall that the Exponential model has a constant hazard, that is:$$ h(t) = \frac{1}{\lambda} $$which implies that the cumulative hazard, $H(t)$, has a pretty simple form: $H(t) = \frac{t}{\lambda}$. Below we fit this model to some survival data.
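As a one-line illustration of riding that computational train: write down only $H(t) = t/\lambda$ for the exponential model and the other two creatures follow immediately,

$$ h(t) = \frac{dH(t)}{dt} = \frac{1}{\lambda}, \qquad S(t) = \exp\left(-H(t)\right) = e^{-t/\lambda}. $$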
###Code
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from lifelines.datasets import load_waltons
waltons = load_waltons()
T, E = waltons['T'], waltons['E']
from lifelines import ExponentialFitter
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
epf = ExponentialFitter().fit(T, E)
epf.plot_hazard(ax=ax[0])
epf.plot_cumulative_hazard(ax=ax[1])
ax[0].set_title("hazard"); ax[1].set_title("cumulative_hazard")
epf.print_summary(3)
###Output
<lifelines.ExponentialFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -771.913
hypothesis = lambda_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_ 51.831 4.149 43.699 59.963 <0.0005 112.218
###Markdown
This model does a poor job of fitting to our data. If I fit a _non-parametric_ model, like the Nelson-Aalen model, to this data, the lack of fit is very obvious.
###Code
from lifelines import NelsonAalenFitter
ax = epf.plot(figsize=(8,5))
naf = NelsonAalenFitter().fit(T, E)
ax = naf.plot(ax=ax)
plt.legend()
###Output
_____no_output_____
###Markdown
It should be clear that the single parameter model is just averaging the hazards over the entire time period. In reality though, the true hazard rate exhibits some complex non-linear behaviour. Piecewise Exponential modelsWhat if we could break our model into different time periods, and fit an exponential model to each of those? For example, we define the hazard as:$$ h(t) = \begin{cases} \lambda_0, & \text{if $t \le \tau_0$} \\ \lambda_1 & \text{if $\tau_0 < t \le \tau_1$} \\ \lambda_2 & \text{if $\tau_1 < t \le \tau_2$} \\ ... \end{cases}$$This model should be flexible enough to fit better to our dataset. The cumulative hazard is only slightly more complicated, but not too much and can still be defined in Python. In _lifelines_, univariate models are constructed such that one _only_ needs to define the cumulative hazard model with the parameters of interest, and all the hard work of fitting, creating confidence intervals, plotting, etc. is taken care of. For example, _lifelines_ has implemented the `PiecewiseExponentialFitter` model. Internally, the code is a single function that defines the cumulative hazard. The user specifies where they believe the "breaks" are, and _lifelines_ estimates the best $\lambda_i$.
###Code
from lifelines import PiecewiseExponentialFitter
# looking at the above plot, I think there may be breaks at t=40 and t=60.
pf = PiecewiseExponentialFitter(breakpoints=[40, 60]).fit(T, E)
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
ax = pf.plot(ax=axs[1])
pf.plot_hazard(ax=axs[0])
ax = naf.plot(ax=ax, ci_show=False)
axs[0].set_title("hazard"); axs[1].set_title("cumulative_hazard")
pf.print_summary(3)
###Output
<lifelines.PiecewiseExponentialFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -647.118
hypothesis = lambda_0_ != 1, lambda_1_ != 1, lambda_2_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_0_ 162.989 27.144 109.787 216.191 <0.0005 28.630
lambda_1_ 31.366 4.043 23.442 39.290 <0.0005 43.957
lambda_2_ 4.686 0.624 3.462 5.910 <0.0005 28.055
###Markdown
We can see a much better fit in this model. A quantitative measure of fit is to compare the log-likelihood between the exponential model and the piecewise exponential model (higher is better). The log-likelihood went from -772 to -647, respectively. We could keep going and add more and more breakpoints, but that would end up overfitting to the data. Univariate models in _lifelines_I mentioned that the `PiecewiseExponentialFitter` was implemented using only its cumulative hazard function. This is not a lie. _lifelines_ has very general semantics for univariate fitters. For example, this is how the entire `ExponentialFitter` is implemented:```pythonclass ExponentialFitter(ParametricUnivariateFitter): _fitted_parameter_names = ["lambda_"] def _cumulative_hazard(self, params, times): lambda_ = params[0] return times / lambda_```We only need to specify the cumulative hazard function because of the 1:1:1 relationship between the cumulative hazard function and the survival function and the hazard rate. From there, _lifelines_ handles the rest. Defining our own survival modelsTo show off the flexibility of _lifelines_ univariate models, we'll create a brand new, never before seen, survival model. Looking at the Nelson-Aalen fit, the cumulative hazard looks like there might be an asymptote at $t=80$. This may correspond to an absolute upper limit of subjects' lives. Let's start with that functional form.$$ H_1(t; \alpha) = \frac{\alpha}{(80 - t)} $$We subscript $1$ because we'll investigate other models. In a _lifelines_ univariate model, this is defined in the following code. **Important**: in order to compute derivatives, you must use the numpy imported from the `autograd` library. This is a thin wrapper around the original numpy. Note the `import autograd.numpy as np` below.
###Code
from lifelines.fitters import ParametricUnivariateFitter
import autograd.numpy as np
class InverseTimeHazardFitter(ParametricUnivariateFitter):
# we tell the model what we want the names of the unknown parameters to be
_fitted_parameter_names = ['alpha_']
# this is the only function we need to define. It always takes two arguments:
# params: an iterable that unpacks the parameters you'll need in the order of _fitted_parameter_names
# times: a vector of times that will be passed in.
def _cumulative_hazard(self, params, times):
alpha = params[0]
return alpha /(80 - times)
itf = InverseTimeHazardFitter()
itf.fit(T, E)
itf.print_summary()
ax = itf.plot(figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
plt.legend()
###Output
<lifelines.InverseTimeHazardFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -697.84
hypothesis = alpha_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 21.51 1.72 18.13 24.88 <0.005 106.22
###Markdown
The best fit of the model to the data is:$$H_1(t) = \frac{21.51}{80-t}$$Our choice of 80 as an asymptote was maybe mistaken, so let's allow the asymptote to be another parameter:$$ H_2(t; \alpha, \beta) = \frac{\alpha}{\beta-t} $$If we define the model this way, we need to add a bound to the values that $\beta$ can take. Obviously it can't be smaller than or equal to the maximum observed duration. Generally, the cumulative hazard _must be positive and non-decreasing_. Otherwise the model fit will hit convergence problems.
###Code
class TwoParamInverseTimeHazardFitter(ParametricUnivariateFitter):
_fitted_parameter_names = ['alpha_', 'beta_']
# Sequence of (min, max) pairs for each element in x. None is used to specify no bound
_bounds = [(0, None), (75.0001, None)]
def _cumulative_hazard(self, params, times):
alpha, beta = params
return alpha / (beta - times)
two_f = TwoParamInverseTimeHazardFitter()
two_f.fit(T, E)
two_f.print_summary()
ax = itf.plot(ci_show=False, figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
two_f.plot(ax=ax)
plt.legend()
###Output
<lifelines.TwoParamInverseTimeHazardFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -685.57
hypothesis = alpha_ != 1, beta_ != 76.0001
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 16.50 1.51 13.55 19.46 <0.005 79.98
beta_ 76.55 0.38 75.80 77.30 0.15 2.73
###Markdown
From the output, we see that the value of 76.55 is the suggested asymptote, that is:$$H_2(t) = \frac{16.50} {76.55 - t}$$The curve also appears to track against the Nelson-Aalen model better too. Let's try one additional parameter, $\gamma$, some sort of measure of decay. $$H_3(t; \alpha, \beta, \gamma) = \frac{\alpha}{(\beta-t)^\gamma} $$
###Code
from lifelines.fitters import ParametricUnivariateFitter
class ThreeParamInverseTimeHazardFitter(ParametricUnivariateFitter):
_fitted_parameter_names = ['alpha_', 'beta_', 'gamma_']
_bounds = [(0, None), (75.0001, None), (0, None)]
# this is the only function we need to define. It always takes two arguments:
# params: an iterable that unpacks the parameters you'll need in the order of _fitted_parameter_names
# times: a numpy vector of times that will be passed in by the optimizer
def _cumulative_hazard(self, params, times):
a, b, c = params
return a / (b - times) ** c
three_f = ThreeParamInverseTimeHazardFitter()
three_f.fit(T, E)
three_f.print_summary()
ax = itf.plot(ci_show=False, figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
ax = two_f.plot(ax=ax, ci_show=False)
ax = three_f.plot(ax=ax)
plt.legend()
###Output
<lifelines.ThreeParamInverseTimeHazardFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -649.38
hypothesis = alpha_ != 1, beta_ != 76.0001, gamma_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 1588776.28 3775137.44 -5810357.13 8987909.70 0.67 0.57
beta_ 100.88 5.88 89.35 112.41 <0.005 15.38
gamma_ 3.83 0.50 2.85 4.81 <0.005 25.82
###Markdown
Our new asymptote is at $t\approx 100, \text{c.i.}=(87, 112)$. The model appears to fit the early times better than the previous models as well, however our $\alpha$ parameter has more uncertainty now. Continuing to add parameters isn't advisable, as we will overfit to the data. Why fit parametric models anyways? Taking a step back, we are fitting parametric models and comparing them to the non-parametric Nelson-Aalen. Why not just always use the Nelson-Aalen model? 1) Sometimes we have scientific motivations to use a parametric model. That is, using domain knowledge, we may know the system has a parametric model and we wish to fit to that model. 2) In a parametric model, we are borrowing information from _all_ observations to determine the best parameters. To make this more clear, imagine taking a single observation and changing its value wildly. The fitted parameters would change as well. On the other hand, imagine doing the same for a non-parametric model. In this case, only the local survival function or hazard function would change. Because parametric models can borrow information from all observations, and there are far _fewer_ unknowns than in a non-parametric model, parametric models are said to be more _statistically efficient._ 3) Extrapolation: non-parametric models are not easily extended to values outside the observed data. On the other hand, parametric models have no problem with this. However, extrapolation outside observed values is a very dangerous activity.
###Code
fig, axs = plt.subplots(3, figsize=(7, 8), sharex=True)
new_timeline = np.arange(0, 85)
three_f = ThreeParamInverseTimeHazardFitter().fit(T, E, timeline=new_timeline)
three_f.plot_hazard(label='hazard', ax=axs[0]).legend()
three_f.plot_cumulative_hazard(label='cumulative hazard', ax=axs[1]).legend()
three_f.plot_survival_function(label='survival function', ax=axs[2]).legend()
fig.subplots_adjust(hspace=0)
# Hide x labels and tick labels for all but bottom plot.
for ax in axs:
ax.label_outer()
###Output
_____no_output_____
###Markdown
3-parameter Weibull distributionWe can easily extend the built-in Weibull model (`lifelines.WeibullFitter`) to include a new _location_ parameter:$$ H(t) = \left(\frac{t - \theta}{\lambda}\right)^\rho $$(When $\theta = 0$, this is just the 2-parameter case again). In *lifelines* custom models, this looks like:
###Code
import autograd.numpy as np
from autograd.scipy.stats import norm
# I'm shifting this to exaggerate the effect
T = T + 10
class ThreeParameterWeibullFitter(ParametricUnivariateFitter):
_fitted_parameter_names = ["lambda_", "rho_", "theta_"]
_bounds = [(0, None), (0, None), (0, T.min()-0.001)]
def _cumulative_hazard(self, params, times):
lambda_, rho_, theta_ = params
return ((times - theta_) / lambda_) ** rho_
tpw = ThreeParameterWeibullFitter()
tpw.fit(T, E)
tpw.print_summary()
ax = tpw.plot_cumulative_hazard(figsize=(8,5))
ax = NelsonAalenFitter().fit(T, E).plot(ax=ax, ci_show=False)
###Output
<lifelines.ThreeParameterWeibullFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -666.71
hypothesis = lambda_ != 1, rho_ != 1, theta_ != 7.9995
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_ 63.92 5.38 53.38 74.47 <0.005 102.58
rho_ 4.20 0.56 3.11 5.29 <0.005 26.67
theta_ 2.55 5.05 -7.35 12.45 0.28 1.83
###Markdown
Inverse Gaussian distributionThe inverse Gaussian distribution is another popular model for survival analysis. Unlike other models, its hazard does not asymptotically converge to 0, allowing for a long tail of survival. Let's model this, using the same parameterization from [Wikipedia](https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution)
###Code
from autograd.scipy.stats import norm
class InverseGaussianFitter(ParametericUnivariateFitter):
_fitted_parameter_names = ['lambda_', 'mu_']
def _cumulative_density(self, params, times):
mu_, lambda_ = params
v = norm.cdf(np.sqrt(lambda_ / times) * (times / mu_ - 1), loc=0, scale=1) + \
np.exp(2 * lambda_ / mu_) * norm.cdf(-np.sqrt(lambda_ / times) * (times / mu_ + 1), loc=0, scale=1)
return v
def _cumulative_hazard(self, params, times):
return -np.log(1-self._cumulative_density(params, times))
from lifelines.datasets import load_rossi
rossi = load_rossi()
igf = InverseGaussianFitter()
igf.fit(rossi['week'], rossi['arrest'], timeline=np.arange(1, 500))
igf.print_summary()
igf.plot_hazard()
###Output
<lifelines.InverseGaussianFitter: fitted with 432 observations, 318 censored>
number of subjects = 432
number of events = 114
log-likelihood = -729.80
hypothesis = lambda_ != 1, mu_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_ 7441.43 9296.67 -10779.69 25662.56 0.42 1.24
mu_ 47.86 3.31 41.38 54.35 <0.005 148.83
###Markdown
Bounded lifetimes using the beta distributionMaybe your data is bounded between 0 and some (unknown) upper bound M? That is, lifetimes can't be more than M. Maybe you know M, maybe you don't.
###Code
n = 100
T = 5 * np.random.random(n)**2
T_censor = 10 * np.random.random(n)**2
E = T < T_censor
T_obs = np.minimum(T, T_censor)
from autograd_gamma import betainc
class BetaFitter(ParametericUnivariateFitter):
_fitted_parameter_names = ['alpha_', 'beta_', "m_"]
_bounds = [(0, None), (0, None), (T.max(), None)]
def _cumulative_density(self, params, times):
alpha_, beta_, m_ = params
return betainc(alpha_, beta_, times / m_)
def _cumulative_hazard(self, params, times):
return -np.log(1-self._cumulative_density(params, times))
beta_fitter = BetaFitter().fit(T_obs, E)
beta_fitter.plot()
beta_fitter.print_summary()
###Output
<lifelines.BetaFitter: fitted with 100 observations, 35 censored>
number of subjects = 100
number of events = 65
log-likelihood = -93.20
hypothesis = alpha_ != 1, beta_ != 1, m_ != 5.99827
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 0.59 0.10 0.40 0.78 <0.005 15.06
beta_ 1.27 0.45 0.38 2.15 0.56 0.85
m_ 5.09 0.33 4.45 5.74 0.01 7.37
###Markdown
Gompertz
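For reference, the parameterization used in the code below corresponds to the cumulative hazard $$ H(t; \nu, b) = \nu \left( e^{t/b} - 1 \right) $$ so the hazard $h(t) = \frac{\nu}{b} e^{t/b}$ grows exponentially in $t$, which is the defining feature of the Gompertz model.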
###Code
class GompertzFitter(ParametericUnivariateFitter):
# this parameterization is slightly different than wikipedia.
_fitted_parameter_names = ['nu_', 'b_']
def _cumulative_hazard(self, params, times):
nu_, b_ = params
return nu_ * (np.expm1(times / b_))
ggf = GompertzFitter()
ggf.fit(rossi['week'], rossi['arrest'], timeline=np.arange(1, 120))
ggf.print_summary()
ggf.plot_survival_function()
###Output
<lifelines.GompertzFitter: fitted with 432 observations, 318 censored>
number of subjects = 432
number of events = 114
log-likelihood = -697.81
hypothesis = nu_ != 1, b_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
nu_ 0.20 0.11 -0.01 0.40 <0.005 45.19
b_ 55.19 19.26 17.45 92.93 <0.005 7.68
###Markdown
Piecewise exponential models and creating custom modelsThis section will be easier if we recall our three mathematical "creatures" and the relationships between them. First is the survival function, $S(t)$, that represents the probability of living past some time, $t$. Next is the _always non-negative and non-decreasing_ cumulative hazard function, $H(t)$. Its relation to $S(t)$ is:$$ S(t) = \exp\left(-H(t)\right)$$Finally, the hazard function, $h(t)$, is the derivative of the cumulative hazard: $$h(t) = \frac{dH(t)}{dt}$$which has the immediate relation to the survival function:$$S(t) = \exp\left(-\int_{0}^t h(s) ds\right)$$Notice that any of the three absolutely defines the other two. Some situations make it easier to define one versus the others. For example, in the Cox model, it's easiest to work with the hazard, $h(t)$. In this section on parametric univariate models, it'll be easiest to work with the cumulative hazard. This is because of an asymmetry in math: derivatives are much easier to compute than integrals. So, if we define the cumulative hazard, both the hazard and survival function are much easier to reason about versus if we define the hazard and ask questions about the other two.First, let's revisit some simpler parametric models. The Exponential modelRecall that the Exponential model has a constant hazard, that is:$$ h(t) = \frac{1}{\lambda} $$which implies that the cumulative hazard, $H(t)$, has a pretty simple form: $H(t) = \frac{t}{\lambda}$. Below we fit this model to some survival data.
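To make these relationships concrete before fitting anything, here is a small standalone NumPy sketch using the Exponential cumulative hazard with an assumed $\lambda = 50$:

```python
import numpy as np

lam = 50.0
t = np.linspace(0.1, 80, 200)

H = t / lam            # cumulative hazard of the Exponential model
S = np.exp(-H)         # survival function, via S(t) = exp(-H(t))
h = np.gradient(H, t)  # hazard as the (numerical) derivative of H(t)

print(np.allclose(h, 1 / lam))  # True: the hazard is the constant 1/lambda
print(S[0], S[-1])              # survival decays from ~1 down to exp(-80/50) ~ 0.2
```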
###Code
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from lifelines.datasets import load_waltons
waltons = load_waltons()
T, E = waltons['T'], waltons['E']
from lifelines import ExponentialFitter
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
epf = ExponentialFitter().fit(T, E)
epf.plot_hazard(ax=ax[0])
epf.plot_cumulative_hazard(ax=ax[1])
ax[0].set_title("hazard"); ax[1].set_title("cumulative_hazard")
epf.print_summary(3)
###Output
<lifelines.ExponentialFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -771.913
hypothesis = lambda_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_ 51.831 4.149 43.699 59.963 <0.0005 112.218
###Markdown
This model does a poor job of fitting to our data. If I fit a _non-parametric_ model, like the Nelson-Aalen model, to this data, the Exponential's lack of fit is very obvious.
###Code
from lifelines import NelsonAalenFitter
ax = epf.plot(figsize=(8,5))
naf = NelsonAalenFitter().fit(T, E)
ax = naf.plot(ax=ax)
plt.legend()
###Output
_____no_output_____
###Markdown
It should be clear that the single parameter model is just averaging the hazards over the entire time period. In reality though, the true hazard rate exhibits some complex non-linear behaviour. Piecewise Exponential modelsWhat if we could break our model into different time periods, and fit an exponential model to each of those? For example, we define the hazard as:$$ h(t) = \begin{cases} \lambda_0, & \text{if $t \le \tau_0$} \\ \lambda_1 & \text{if $\tau_0 < t \le \tau_1$} \\ \lambda_2 & \text{if $\tau_1 < t \le \tau_2$} \\ ... \end{cases}$$This model should be flexible enough to fit better to our dataset. The cumulative hazard is only slightly more complicated, but not too much and can still be defined in Python. In _lifelines_, univariate models are constructed such that one _only_ needs to define the cumulative hazard model with the parameters of interest, and all the hard work of fitting, creating confidence intervals, plotting, etc. is taken care of. For example, _lifelines_ has implemented the `PiecewiseExponentialFitter` model. Internally, the code is a single function that defines the cumulative hazard. The user specifies where they believe the "breaks" are, and _lifelines_ estimates the best $\lambda_i$.
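Before handing this to the built-in fitter, it can help to see the cumulative hazard of a piecewise-constant hazard written out directly; a small standalone sketch with assumed breakpoints and hazard levels (not fitted values; note that the fitter below reports each $\lambda_i$ as a scale, i.e. the hazard on that interval is $1/\lambda_i$):

```python
import numpy as np

def piecewise_cumulative_hazard(t, breakpoints, hazards):
    """H(t) = integral of a piecewise-constant hazard: hazards[i] on the i-th interval."""
    t = np.atleast_1d(t).astype(float)
    edges = np.concatenate([[0.0], np.asarray(breakpoints, dtype=float), [np.inf]])
    H = np.zeros_like(t)
    for lo, hi, h in zip(edges[:-1], edges[1:], hazards):
        # each interval contributes (time spent inside it) * (its constant hazard)
        H += np.clip(np.minimum(t, hi) - lo, 0.0, None) * h
    return H

# assumed breakpoints at t=40 and t=60 and assumed hazard levels, for illustration only
print(piecewise_cumulative_hazard([20, 50, 70], breakpoints=[40, 60], hazards=[0.006, 0.03, 0.2]))
```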
###Code
from lifelines import PiecewiseExponentialFitter
# looking at the above plot, I think there may be breaks at t=40 and t=60.
pf = PiecewiseExponentialFitter(breakpoints=[40, 60]).fit(T, E)
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
ax = pf.plot(ax=axs[1])
pf.plot_hazard(ax=axs[0])
ax = naf.plot(ax=ax, ci_show=False)
axs[0].set_title("hazard"); axs[1].set_title("cumulative_hazard")
pf.print_summary(3)
###Output
<lifelines.PiecewiseExponentialFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -647.118
hypothesis = lambda_0_ != 1, lambda_1_ != 1, lambda_2_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_0_ 162.989 27.144 109.787 216.191 <0.0005 28.630
lambda_1_ 31.366 4.043 23.442 39.290 <0.0005 43.957
lambda_2_ 4.686 0.624 3.462 5.910 <0.0005 28.055
###Markdown
We can see a much better fit in this model. A quantitative measure of fit is to compare the log-likelihood between the exponential model and the piecewise exponential model (higher is better). The log-likelihood went from -772 to -647, respectively. We could keep going and add more and more breakpoints, but that would end up overfitting to the data. Univariate models in _lifelines_I mentioned that the `PiecewiseExponentialFitter` was implemented using only its cumulative hazard function. This is not a lie. _lifelines_ has very general semantics for univariate fitters. For example, this is how the entire `ExponentialFitter` is implemented:```pythonclass ExponentialFitter(ParametericUnivariateFitter): _fitted_parameter_names = ["lambda_"] def _cumulative_hazard(self, params, times): lambda_ = params[0] return times / lambda_```We only need to specify the cumulative hazard function because of the 1:1:1 relationship between the cumulative hazard function and the survival function and the hazard rate. From there, _lifelines_ handles the rest. Defining our own survival modelsTo show off the flexibility of _lifelines_ univariate models, we'll create a brand new, never before seen, survival model. Looking at the Nelson-Aalen fit, the cumulative hazard looks like there might be an asymptote at $t=80$. This may correspond to an absolute upper limit of subjects' lives. Let's start with that functional form.$$ H_1(t; \alpha) = \frac{\alpha}{(80 - t)} $$We subscript $1$ because we'll investigate other models. In a _lifelines_ univariate model, this is defined in the following code. **Important**: in order to compute derivatives, you must use the numpy imported from the `autograd` library. This is a thin wrapper around the original numpy. Note the `import autograd.numpy as np` below.
###Code
from lifelines.fitters import ParametericUnivariateFitter
import autograd.numpy as np
class InverseTimeHazardFitter(ParametericUnivariateFitter):
# we tell the model what we want the names of the unknown parameters to be
_fitted_parameter_names = ['alpha_']
# this is the only function we need to define. It always takes two arguments:
# params: an iterable that unpacks the parameters you'll need in the order of _fitted_parameter_names
# times: a vector of times that will be passed in.
def _cumulative_hazard(self, params, times):
alpha = params[0]
return alpha /(80 - times)
itf = InverseTimeHazardFitter()
itf.fit(T, E)
itf.print_summary()
ax = itf.plot(figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
plt.legend()
###Output
<lifelines.InverseTimeHazardFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -697.84
hypothesis = alpha_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 21.51 1.72 18.13 24.88 <0.005 106.22
###Markdown
The best fit of the model to the data is:$$H_1(t) = \frac{21.51}{80-t}$$Our choice of 80 as an asymptote was maybe mistaken, so let's allow the asymptote to be another parameter:$$ H_2(t; \alpha, \beta) = \frac{\alpha}{\beta-t} $$If we define the model this way, we need to add a bound to the values that $\beta$ can take. Obviously it can't be smaller than or equal to the maximum observed duration. Generally, the cumulative hazard _must be positive and non-decreasing_. Otherwise the model fit will hit convergence problems.
###Code
class TwoParamInverseTimeHazardFitter(ParametericUnivariateFitter):
_fitted_parameter_names = ['alpha_', 'beta_']
# Sequence of (min, max) pairs for each element in x. None is used to specify no bound
_bounds = [(0, None), (75.0001, None)]
def _cumulative_hazard(self, params, times):
alpha, beta = params
return alpha / (beta - times)
two_f = TwoParamInverseTimeHazardFitter()
two_f.fit(T, E)
two_f.print_summary()
ax = itf.plot(ci_show=False, figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
two_f.plot(ax=ax)
plt.legend()
###Output
<lifelines.TwoParamInverseTimeHazardFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -685.57
hypothesis = alpha_ != 1, beta_ != 76.0001
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 16.50 1.51 13.55 19.46 <0.005 79.98
beta_ 76.55 0.38 75.80 77.30 0.15 2.73
###Markdown
From the output, we see that the value of 76.55 is the suggested asymptote, that is:$$H_2(t) = \frac{16.50} {76.55 - t}$$The curve also appears to track the Nelson-Aalen estimate better. Let's try one additional parameter, $\gamma$, some sort of measure of decay. $$H_3(t; \alpha, \beta, \gamma) = \frac{\alpha}{(\beta-t)^\gamma} $$
###Code
from lifelines.fitters import ParametericUnivariateFitter
class ThreeParamInverseTimeHazardFitter(ParametericUnivariateFitter):
_fitted_parameter_names = ['alpha_', 'beta_', 'gamma_']
_bounds = [(0, None), (75.0001, None), (0, None)]
# this is the only function we need to define. It always takes two arguments:
# params: an iterable that unpacks the parameters you'll need in the order of _fitted_parameter_names
# times: a numpy vector of times that will be passed in by the optimizer
def _cumulative_hazard(self, params, times):
a, b, c = params
return a / (b - times) ** c
three_f = ThreeParamInverseTimeHazardFitter()
three_f.fit(T, E)
three_f.print_summary()
ax = itf.plot(ci_show=False, figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
ax = two_f.plot(ax=ax, ci_show=False)
ax = three_f.plot(ax=ax)
plt.legend()
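# Compare the three nested models quantitatively rather than by eye. Assumption:
# this lifelines version exposes the log-likelihood as `log_likelihood_` (the
# attribute name has varied between releases). AIC = 2k - 2*log-likelihood
# penalizes the extra parameters; lower is better.
for name, m in [("1-param", itf), ("2-param", two_f), ("3-param", three_f)]:
    k = len(m._fitted_parameter_names)
    print(name, "log-lik:", round(m.log_likelihood_, 2), "AIC:", round(2 * k - 2 * m.log_likelihood_, 2))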
###Output
<lifelines.ThreeParamInverseTimeHazardFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -649.38
hypothesis = alpha_ != 1, beta_ != 76.0001, gamma_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 1588776.28 3775137.44 -5810357.13 8987909.70 0.67 0.57
beta_ 100.88 5.88 89.35 112.41 <0.005 15.38
gamma_ 3.83 0.50 2.85 4.81 <0.005 25.82
###Markdown
Our new asymptote is at $t\approx 100, \text{c.i.}=(87, 112)$. The model appears to fit the early times better than the previous models as well, however our $\alpha$ parameter has more uncertainty now. Continuing to add parameters isn't advisable, as we will overfit to the data. Why fit parametric models anyways? Taking a step back, we are fitting parametric models and comparing them to the non-parametric Nelson-Aalen. Why not just always use the Nelson-Aalen model? 1) Sometimes we have scientific motivations to use a parametric model. That is, using domain knowledge, we may know the system has a parametric form and we wish to fit to that model. 2) In a parametric model, we are borrowing information from _all_ observations to determine the best parameters. To make this more clear, imagine taking a single observation and changing its value wildly. The fitted parameters would change as well. On the other hand, imagine doing the same for a non-parametric model. In this case, only the local survival function or hazard function would change. Because parametric models can borrow information from all observations, and there are far _fewer_ unknowns than in a non-parametric model, parametric models are said to be more _statistically efficient_. 3) Extrapolation: non-parametric models are not easily extended to values outside the observed data. On the other hand, parametric models have no problem with this. However, extrapolation outside observed values is a very dangerous activity.
###Code
fig, axs = plt.subplots(3, figsize=(7, 8), sharex=True)
new_timeline = np.arange(0, 85)
three_f = ThreeParamInverseTimeHazardFitter().fit(T, E, timeline=new_timeline)
three_f.plot_hazard(label='hazard', ax=axs[0]).legend()
three_f.plot_cumulative_hazard(label='cumulative hazard', ax=axs[1]).legend()
three_f.plot_survival_function(label='survival function', ax=axs[2]).legend()
fig.subplots_adjust(hspace=0)
# Hide x labels and tick labels for all but bottom plot.
for ax in axs:
ax.label_outer()
###Output
_____no_output_____
###Markdown
3-parameter Weibull distributionWe can easily extend the built-in Weibull model (`lifelines.WeibullFitter`) to include a new _location_ parameter:$$ H(t) = \left(\frac{t - \theta}{\lambda}\right)^\rho $$(When $\theta = 0$, this is just the 2-parameter case again). In *lifelines* custom models, this looks like:
###Code
import autograd.numpy as np
from autograd.scipy.stats import norm
# I'm shifting this to exaggerate the effect
T = T + 10
class ThreeParameterWeibullFitter(ParametericUnivariateFitter):
_fitted_parameter_names = ["lambda_", "rho_", "theta_"]
_bounds = [(0, None), (0, None), (0, T.min()-0.001)]
def _cumulative_hazard(self, params, times):
lambda_, rho_, theta_ = params
return ((times - theta_) / lambda_) ** rho_
tpw = ThreeParameterWeibullFitter()
tpw.fit(T, E)
tpw.print_summary()
ax = tpw.plot_cumulative_hazard(figsize=(8,5))
ax = NelsonAalenFitter().fit(T, E).plot(ax=ax, ci_show=False)
###Output
<lifelines.ThreeParameterWeibullFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -666.71
hypothesis = lambda_ != 1, rho_ != 1, theta_ != 7.9995
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_ 63.92 5.38 53.38 74.47 <0.005 102.58
rho_ 4.20 0.56 3.11 5.29 <0.005 26.67
theta_ 2.55 5.05 -7.35 12.45 0.28 1.83
###Markdown
Inverse Gaussian distributionThe inverse Gaussian distribution is another popular model for survival analysis. Unlike other models, its hazard does not asymptotically converge to 0, allowing for a long tail of survival. Let's model this, using the same parameterization from [Wikipedia](https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution).
###Code
from autograd.scipy.stats import norm
class InverseGaussianFitter(ParametericUnivariateFitter):
_fitted_parameter_names = ['lambda_', 'mu_']
def _cumulative_density(self, params, times):
mu_, lambda_ = params
v = norm.cdf(np.sqrt(lambda_ / times) * (times / mu_ - 1), loc=0, scale=1) + \
np.exp(2 * lambda_ / mu_) * norm.cdf(-np.sqrt(lambda_ / times) * (times / mu_ + 1), loc=0, scale=1)
return v
def _cumulative_hazard(self, params, times):
return -np.log(1-self._cumulative_density(params, times))
from lifelines.datasets import load_rossi
rossi = load_rossi()
igf = InverseGaussianFitter()
igf.fit(rossi['week'], rossi['arrest'], timeline=np.arange(1, 500))
igf.print_summary()
igf.plot_hazard()
###Output
<lifelines.InverseGaussianFitter: fitted with 432 observations, 318 censored>
number of subjects = 432
number of events = 114
log-likelihood = -729.80
hypothesis = lambda_ != 1, mu_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_ 7441.43 9296.67 -10779.69 25662.56 0.42 1.24
mu_ 47.86 3.31 41.38 54.35 <0.005 148.83
###Markdown
Bounded lifetimes using the beta distributionMaybe your data is bounded between 0 and some (unknown) upper bound M? That is, lifetimes can't be more than M. Maybe you know M, maybe you don't.
###Code
n = 100
T = 5 * np.random.random(n)**2
T_censor = 10 * np.random.random(n)**2
E = T < T_censor
T_obs = np.minimum(T, T_censor)
from autograd_gamma import betainc
class BetaFitter(ParametericUnivariateFitter):
_fitted_parameter_names = ['alpha_', 'beta_', "m_"]
_bounds = [(0, None), (0, None), (T.max(), None)]
def _cumulative_density(self, params, times):
alpha_, beta_, m_ = params
return betainc(alpha_, beta_, times / m_)
def _cumulative_hazard(self, params, times):
return -np.log(1-self._cumulative_density(params, times))
beta_fitter = BetaFitter().fit(T_obs, E)
beta_fitter.plot()
beta_fitter.print_summary()
###Output
<lifelines.BetaFitter: fitted with 100 observations, 35 censored>
number of subjects = 100
number of events = 65
log-likelihood = -93.20
hypothesis = alpha_ != 1, beta_ != 1, m_ != 5.99827
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 0.59 0.10 0.40 0.78 <0.005 15.06
beta_ 1.27 0.45 0.38 2.15 0.56 0.85
m_ 5.09 0.33 4.45 5.74 0.01 7.37
###Markdown
Gompertz
###Code
class GompertzFitter(ParametericUnivariateFitter):
# this parameterization is slightly different than wikipedia.
_fitted_parameter_names = ['nu_', 'b_']
def _cumulative_hazard(self, params, times):
nu_, b_ = params
return nu_ * (np.expm1(times / b_))
ggf = GompertzFitter()
ggf.fit(rossi['week'], rossi['arrest'], timeline=np.arange(1, 120))
ggf.print_summary()
ggf.plot_survival_function()
###Output
<lifelines.GompertzFitter: fitted with 432 observations, 318 censored>
number of subjects = 432
number of events = 114
log-likelihood = -697.81
hypothesis = nu_ != 1, b_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
nu_ 0.20 0.11 -0.01 0.40 <0.005 45.19
b_ 55.19 19.26 17.45 92.93 <0.005 7.68
###Markdown
Piecewise Exponential models and creating custom modelsThis section will be easier if we recall our three mathematical "creatures" and the relationships between them. First is the survival function, $S(t)$, that represents the probability of living past some time, $t$. Next is the _always non-negative and non-decreasing_ cumulative hazard function, $H(t)$. Its relation to $S(t)$ is:$$ S(t) = \exp\left(-H(t)\right)$$Finally, the hazard function, $h(t)$, is the derivative of the cumulative hazard: $$h(t) = \frac{dH(t)}{dt}$$which has the immediate relation to the survival function:$$S(t) = \exp\left(-\int_{0}^t h(s) ds\right)$$Notice that any of the three absolutely defines the other two. Some situations make it easier to define one versus the others. For example, in the Cox model, it's easiest to work with the hazard, $h(t)$. In this section on parametric univariate models, it'll be easiest to work with the cumulative hazard. This is because of an asymmetry in math: derivatives are much easier to compute than integrals. So, if we define the cumulative hazard, both the hazard and survival function are much easier to reason about versus if we define the hazard and ask questions about the other two. At first, it may be easier to think about the hazard, and that's fine, but so long as we are clever enough to also determine the cumulative hazard, then we can ride the computational train. This will be clear by the end of the tutorial. First, let's revisit some simpler parametric models. The Exponential modelRecall that the Exponential model has a constant hazard, that is:$$ h(t) = \frac{1}{\lambda} $$which implies that the cumulative hazard, $H(t)$, has a pretty simple form: $H(t) = \frac{t}{\lambda}$. Below we fit this model to some survival data.
###Code
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from lifelines.datasets import load_waltons
waltons = load_waltons()
T, E = waltons['T'], waltons['E']
from lifelines import ExponentialFitter
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
epf = ExponentialFitter().fit(T, E)
epf.plot_hazard(ax=ax[0])
epf.plot_cumulative_hazard(ax=ax[1])
ax[0].set_title("hazard"); ax[1].set_title("cumulative_hazard")
epf.print_summary(3)
###Output
<lifelines.ExponentialFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -771.913
hypothesis = lambda_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_ 51.831 4.149 43.699 59.963 <0.0005 112.218
###Markdown
This model does a poor job of fitting to our data. If I fit a _non-parametric_ model, like the Nelson-Aalen model, to this data, the lack of fit is very obvious.
###Code
from lifelines import NelsonAalenFitter
ax = epf.plot(figsize=(8,5))
naf = NelsonAalenFitter().fit(T, E)
ax = naf.plot(ax=ax)
plt.legend()
###Output
_____no_output_____
###Markdown
It should be clear that the single parameter model is just averaging the hazards over the entire time period. In reality though, the true hazard rate exhibits some complex non-linear behaviour. Piecewise Exponential modelsWhat if we could break our model into different time periods, and fit an exponential model to each of those? For example, we define the hazard as:$$ h(t) = \begin{cases} \lambda_0, & \text{if $t \le \tau_0$} \\ \lambda_1 & \text{if $\tau_0 < t \le \tau_1$} \\ \lambda_2 & \text{if $\tau_1 < t \le \tau_2$} \\ ... \end{cases}$$This model should be flexible enough to fit better to our dataset. The cumulative hazard is only slightly more complicated, but not too much and can still be defined in Python. In _lifelines_, univariate models are constructed such that one _only_ needs to define the cumulative hazard model with the parameters of interest, and all the hard work of fitting, creating confidence intervals, plotting, etc. is taken care of. For example, _lifelines_ has implemented the `PiecewiseExponentialFitter` model. Internally, the code is a single function that defines the cumulative hazard. The user specifies where they believe the "breaks" are, and _lifelines_ estimates the best $\lambda_i$.
###Code
from lifelines import PiecewiseExponentialFitter
# looking at the above plot, I think there may be breaks at t=40 and t=60.
pf = PiecewiseExponentialFitter(breakpoints=[40, 60]).fit(T, E)
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
ax = pf.plot(ax=axs[1])
pf.plot_hazard(ax=axs[0])
ax = naf.plot(ax=ax, ci_show=False)
axs[0].set_title("hazard"); axs[1].set_title("cumulative_hazard")
pf.print_summary(3)
###Output
<lifelines.PiecewiseExponentialFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -647.118
hypothesis = lambda_0_ != 1, lambda_1_ != 1, lambda_2_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_0_ 162.989 27.144 109.787 216.191 <0.0005 28.630
lambda_1_ 31.366 4.043 23.442 39.290 <0.0005 43.957
lambda_2_ 4.686 0.624 3.462 5.910 <0.0005 28.055
###Markdown
We can see a much better fit in this model. A quantitative measure of fit is to compare the log-likelihood between the exponential model and the piecewise exponential model (higher is better). The log-likelihood went from -772 to -647, respectively. We could keep going and add more and more breakpoints, but that would end up overfitting to the data. Univariate models in _lifelines_I mentioned that the `PiecewiseExponentialFitter` was implemented using only its cumulative hazard function. This is not a lie. _lifelines_ has very general semantics for univariate fitters. For example, this is how the entire `ExponentialFitter` is implemented:```pythonclass ExponentialFitter(ParametericUnivariateFitter): _fitted_parameter_names = ["lambda_"] def _cumulative_hazard(self, params, times): lambda_ = params[0] return times / lambda_```We only need to specify the cumulative hazard function because of the 1:1:1 relationship between the cumulative hazard function, the survival function and the hazard rate. From there, _lifelines_ handles the rest. Defining our own survival modelsTo show off the flexibility of _lifelines_ univariate models, we'll create a brand new, never before seen, survival model. Looking at the Nelson-Aalen fit, the cumulative hazard looks like there might be an asymptote at $t=80$. This may correspond to an absolute upper limit of subjects' lives. Let's start with that functional form.$$ H_1(t; \alpha) = \frac{\alpha}{(80 - t)} $$We subscript $1$ because we'll investigate other models. In a _lifelines_ univariate model, this is defined in the following code. **Important**: in order to compute derivatives, you must use the numpy imported from the `autograd` library. This is a thin wrapper around the original numpy. Note the `import autograd.numpy as np` below.
###Code
from lifelines.fitters import ParametericUnivariateFitter
import autograd.numpy as np
class InverseTimeHazardFitter(ParametericUnivariateFitter):
# we tell the model what we want the names of the unknown parameters to be
_fitted_parameter_names = ['alpha_']
# this is the only function we need to define. It always takes two arguments:
# params: an iterable that unpacks the parameters you'll need in the order of _fitted_parameter_names
# times: a vector of times that will be passed in.
def _cumulative_hazard(self, params, times):
alpha = params[0]
return alpha /(80 - times)
itf = InverseTimeHazardFitter()
itf.fit(T, E)
itf.print_summary()
ax = itf.plot(figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
plt.legend()
###Output
<lifelines.InverseTimeHazardFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -697.84
hypothesis = alpha_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 21.51 1.72 18.13 24.88 <0.005 106.22
###Markdown
The best fit of the model to the data is:$$H_1(t) = \frac{21.51}{80-t}$$Our choice of 80 as an asymptote was maybe mistaken, so let's allow the asymptote to be another parameter:$$ H_2(t; \alpha, \beta) = \frac{\alpha}{\beta-t} $$If we define the model this way, we need to add a bound to the values that $\beta$ can take. Obviously it can't be smaller than or equal to the maximum observed duration. Generally, the cumulative hazard _must be positive and non-decreasing_. Otherwise the model fit will hit convergence problems.
###Code
class TwoParamInverseTimeHazardFitter(ParametericUnivariateFitter):
_fitted_parameter_names = ['alpha_', 'beta_']
# Sequence of (min, max) pairs for each element in x. None is used to specify no bound
_bounds = [(0, None), (75.0001, None)]
def _cumulative_hazard(self, params, times):
alpha, beta = params
return alpha / (beta - times)
two_f = TwoParamInverseTimeHazardFitter()
two_f.fit(T, E)
two_f.print_summary()
ax = itf.plot(ci_show=False, figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
two_f.plot(ax=ax)
plt.legend()
###Output
<lifelines.TwoParamInverseTimeHazardFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -685.57
hypothesis = alpha_ != 1, beta_ != 76
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 16.50 1.51 13.55 19.46 <0.005 79.98
beta_ 76.55 0.38 75.80 77.30 0.15 2.73
###Markdown
From the output, we see that the value of 76.55 is the suggested asymptote, that is:$$H_2(t) = \frac{16.50} {76.55 - t}$$The curve also appears to track the Nelson-Aalen estimate better. Let's try one additional parameter, $\gamma$, some sort of measure of decay. $$H_3(t; \alpha, \beta, \gamma) = \frac{\alpha}{(\beta-t)^\gamma} $$
###Code
from lifelines.fitters import ParametericUnivariateFitter
class ThreeParamInverseTimeHazardFitter(ParametericUnivariateFitter):
_fitted_parameter_names = ['alpha_', 'beta_', 'gamma_']
_bounds = [(0, None), (75.0001, None), (0, None)]
# this is the only function we need to define. It always takes two arguments:
# params: an iterable that unpacks the parameters you'll need in the order of _fitted_parameter_names
# times: a numpy vector of times that will be passed in by the optimizer
def _cumulative_hazard(self, params, times):
a, b, c = params
return a / (b - times) ** c
three_f = ThreeParamInverseTimeHazardFitter()
three_f.fit(T, E)
three_f.print_summary()
ax = itf.plot(ci_show=False, figsize=(8,5))
ax = naf.plot(ax=ax, ci_show=False)
ax = two_f.plot(ax=ax, ci_show=False)
ax = three_f.plot(ax=ax)
plt.legend()
###Output
<lifelines.ThreeParamInverseTimeHazardFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -649.38
hypothesis = alpha_ != 1, beta_ != 76, gamma_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
alpha_ 1588776.28 3775137.44 -5810357.13 8987909.70 0.67 0.57
beta_ 100.88 5.88 89.35 112.41 <0.005 15.38
gamma_ 3.83 0.50 2.85 4.81 <0.005 25.82
###Markdown
Our new asymptote is at $t\approx 100, \text{c.i.}=(87, 112)$. The model appears to fit the early times better than the previous models as well, however our $\alpha$ parameter has more uncertainty now. Continuing to add parameters isn't advisable, as we will overfit to the data. Why fit parametric models anyways? Taking a step back, we are fitting parametric models and comparing them to the non-parametric Nelson-Aalen. Why not just always use the Nelson-Aalen model? 1) Sometimes we have scientific motivations to use a parametric model. That is, using domain knowledge, we may know the system has a parametric form and we wish to fit to that model. 2) In a parametric model, we are borrowing information from _all_ observations to determine the best parameters. To make this more clear, imagine taking a single observation and changing its value wildly. The fitted parameters would change as well. On the other hand, imagine doing the same for a non-parametric model. In this case, only the local survival function or hazard function would change. Because parametric models can borrow information from all observations, and there are far _fewer_ unknowns than in a non-parametric model, parametric models are said to be more _statistically efficient_. 3) Extrapolation: non-parametric models are not easily extended to values outside the observed data. On the other hand, parametric models have no problem with this. However, extrapolation outside observed values is a very dangerous activity.
###Code
fig, axs = plt.subplots(3, figsize=(7, 8), sharex=True)
new_timeline = np.arange(0, 85)
three_f = ThreeParamInverseTimeHazardFitter().fit(T, E, timeline=new_timeline)
three_f.plot_hazard(label='hazard', ax=axs[0]).legend()
three_f.plot_cumulative_hazard(label='cumulative hazard', ax=axs[1]).legend()
three_f.plot_survival_function(label='survival function', ax=axs[2]).legend()
fig.subplots_adjust(hspace=0)
# Hide x labels and tick labels for all but bottom plot.
for ax in axs:
ax.label_outer()
###Output
_____no_output_____
###Markdown
3-parameter Weibull distributionWe can easily extend the built-in Weibull model (`lifelines.WeibullFitter`) to include a new _location_ parameter:$$ H(t) = \left(\frac{t - \theta}{\lambda}\right)^\rho $$(When $\theta = 0$, this is just the 2-parameter case again). In *lifelines* custom models, this looks like:
###Code
import autograd.numpy as np
from autograd.scipy.stats import norm
# I'm shifting this to exaggerate the effect
T = T + 10
class ThreeParameterWeibullFitter(ParametericUnivariateFitter):
_fitted_parameter_names = ["lambda_", "rho_", "theta_"]
_bounds = [(0, None), (0, None), (0, T.min()-0.001)]
def _cumulative_hazard(self, params, times):
lambda_, rho_, theta_ = params
return ((times - theta_) / lambda_) ** rho_
tpw = ThreeParameterWeibullFitter()
tpw.fit(T, E)
tpw.print_summary()
ax = tpw.plot_cumulative_hazard(figsize=(8,5))
ax = NelsonAalenFitter().fit(T, E).plot(ax=ax, ci_show=False)
###Output
<lifelines.ThreeParameterWeibullFitter: fitted with 163 observations, 7 censored>
number of subjects = 163
number of events = 156
log-likelihood = -666.71
hypothesis = lambda_ != 1, rho_ != 1, theta_ != 7
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_ 63.92 5.38 53.38 74.47 <0.005 102.58
rho_ 4.20 0.56 3.11 5.29 <0.005 26.67
theta_ 2.55 5.05 -7.35 12.45 0.28 1.83
###Markdown
Inverse Gaussian distributionThe inverse Gaussian distribution is another popular model for survival analysis. Unlike other models, its hazard does not asymptotically converge to 0, allowing for a long tail of survival. Let's model this, using the same parameterization from [Wikipedia](https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution).
###Code
from autograd.scipy.stats import norm
class InverseGaussianFitter(ParametericUnivariateFitter):
_fitted_parameter_names = ['lambda_', 'mu_']
def _cumulative_density(self, params, times):
mu_, lambda_ = params
v = norm.cdf(np.sqrt(lambda_ / times) * (times / mu_ - 1), loc=0, scale=1) + \
np.exp(2 * lambda_ / mu_) * norm.cdf(-np.sqrt(lambda_ / times) * (times / mu_ + 1), loc=0, scale=1)
return v
def _cumulative_hazard(self, params, times):
return -np.log(1-self._cumulative_density(params, times))
from lifelines.datasets import load_rossi
rossi = load_rossi()
igf = InverseGaussianFitter()
igf.fit(rossi['week'], rossi['arrest'], timeline=np.arange(1, 500))
igf.print_summary()
igf.plot_hazard()
###Output
<lifelines.InverseGaussianFitter: fitted with 432 observations, 318 censored>
number of subjects = 432
number of events = 114
log-likelihood = -729.80
hypothesis = lambda_ != 1, mu_ != 1
---
coef se(coef) lower 0.95 upper 0.95 p -log2(p)
lambda_ 7441.43 9296.67 -10779.69 25662.56 0.42 1.24
mu_ 47.86 3.31 41.38 54.35 <0.005 148.83
|
Lec 11 Logistic regression Question/Logistics Regression EX1 .ipynb | ###Markdown
Logistic Regression [Diabetes]
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('Dataset/diabetes.csv')
x = data.iloc[:,0:8]
y = data.iloc[:,8]
data
data.info()
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split (x , y , test_size = .20, random_state = 0)
print(x_train)
print(x_test)
print(y_train)
print(y_test)
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression ()
lr.fit (x_train, y_train)
predict = lr.predict (x_test)
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test, predict))
predict
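# A single accuracy number hides which errors are being made; a quick fuller
# evaluation on the same test split (uses the `predict` array from above):
from sklearn.metrics import confusion_matrix, classification_report
print(confusion_matrix(y_test, predict))
print(classification_report(y_test, predict))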
###Output
0.8246753246753247
|
module3-permutation-boosting/Ned_Przezdziecki_LS_DS_233_assignment.ipynb | ###Markdown
Lambda School Data Science*Unit 2, Sprint 3, Module 3*--- Permutation & BoostingYou will use your portfolio project dataset for all assignments this sprint. AssignmentComplete these tasks for your project, and document your work.- [ ] If you haven't completed assignment 1, please do so first.- [ ] Continue to clean and explore your data. Make exploratory visualizations.- [ ] Fit a model. Does it beat your baseline? - [ ] Try xgboost.- [ ] Get your model's permutation importances.You should try to complete an initial model today, because the rest of the week, we're making model interpretation visualizations.But, if you aren't ready to try xgboost and permutation importances with your dataset today, that's okay. You can practice with another dataset instead. You may choose any dataset you've worked with previously.The data subdirectory includes the Titanic dataset for classification and the NYC apartments dataset for regression. You may want to choose one of these datasets, because example solutions will be available for each. ReadingTop recommendations in _**bold italic:**_ Permutation Importances- _**[Kaggle / Dan Becker: Machine Learning Explainability](https://www.kaggle.com/dansbecker/permutation-importance)**_- [Christoph Molnar: Interpretable Machine Learning](https://christophm.github.io/interpretable-ml-book/feature-importance.html) (Default) Feature Importances - [Ando Saabas: Selecting good features, Part 3, Random Forests](https://blog.datadive.net/selecting-good-features-part-iii-random-forests/) - [Terence Parr, et al: Beware Default Random Forest Importances](https://explained.ai/rf-importance/index.html) Gradient Boosting - [A Gentle Introduction to the Gradient Boosting Algorithm for Machine Learning](https://machinelearningmastery.com/gentle-introduction-gradient-boosting-algorithm-machine-learning/) - [An Introduction to Statistical Learning](http://www-bcf.usc.edu/~gareth/ISL/ISLR%20Seventh%20Printing.pdf), Chapter 8 - _**[Gradient Boosting Explained](https://www.gormanalysis.com/blog/gradient-boosting-explained/)**_ — Ben Gorman - [Gradient Boosting Explained](http://arogozhnikov.github.io/2016/06/24/gradient_boosting_explained.html) — Alex Rogozhnikov - [How to explain gradient boosting](https://explained.ai/gradient-boosting/) — Terence Parr & Jeremy Howard
###Code
from google.colab import drive
drive.mount('/content/drive')
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
pd.options.display.max_columns = None
feb20 = pd.read_csv('drive/My Drive/2020_02.csv')
march20 = pd.read_csv('drive/My Drive/2020_03.csv')
march19 = pd.read_csv('drive/My Drive/201903-citibike-tripdata.csv')
march19
march20
feb20
march20['starttime']= pd.to_datetime(march20['starttime'])
march20['stoptime']= pd.to_datetime(march20['stoptime'])
march19['starttime']= pd.to_datetime(march19['starttime'])
march19['stoptime']= pd.to_datetime(march19['stoptime'])
march20.dtypes
march20['covid']='yes'
march19['covid']='no'
march19and20 = pd.concat([march19, march20], ignore_index=True)
march19and20
# dropping NaNs column-by-column and assigning back does not remove any rows
# (the index re-aligns), so drop rows with missing station ids instead:
march19and20 = march19and20.dropna(subset=['start station id', 'end station id'])
march19and20.dtypes
train, test = train_test_split(march19and20, test_size=0.30, random_state=42)
train.shape , test.shape
#train = train.drop(target, axis=1)
train
train.dtypes
target = 'covid'
features = ['starttime', 'birthyear']  # note: 'starttime' is a datetime column and must be converted to a numeric feature before sklearn can use it
x_train = train[features]
y_train = train[target]
x_test = test[features]
y_test = test[target]
y_train.value_counts(normalize=True)
x_train.describe()
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression(solver='lbfgs')
log_reg.fit(x_train, y_train)
import eli5
from eli5.sklearn import PermutationImportance
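# A minimal sketch of how the import above is typically used (assumption: the
# logistic regression fit succeeds, which requires converting the datetime
# 'starttime' column to a numeric feature and handling missing values first).
# PermutationImportance shuffles one feature at a time on the held-out set and
# records how much the score drops; eli5 then displays the ranked weights.
perm = PermutationImportance(log_reg, random_state=42).fit(x_test, y_test)
eli5.show_weights(perm, feature_names=x_test.columns.tolist())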
###Output
_____no_output_____ |
NLP_word_embeddings.ipynb | ###Markdown
Natural Language Processing: Word EmbeddingsWord Vectors Introduction: Word vectors represent a significant leap forward in advancing our ability to analyse relationships across words, sentences and documents. In doing so, they advance technology by providing machines much more information about words than has previously been possible using traditional representations of words. It is word vectors that make technologies such as speech recognition and machine translation possible. There are many excellent explanations of word vectors, but in this one I want to make the concept accessible to data and research people who aren't very familiar with natural language processing (NLP). This blog post is divided into a few parts: 1) Word vectors and their implementation in spaCy. 2) Word2vec and its implementation in gensim. 3) Word2vec: the skip-gram model and its implementation in TensorFlow. 4) GloVe and its implementation in gensim. What are word vectors? Word vectors are simply vectors of numbers that represent the meaning of a word. 1. Traditional approaches to NLP, such as one-hot encoding and bag-of-words models (i.e. using dummy variables to represent the presence or absence of a word in an observation (e.g. a sentence)), while useful for some machine learning (ML) tasks, do not capture information about a word's meaning or context. 2. This means that potential relationships, such as contextual closeness, are not captured across collections of words. 3. For example, a one-hot encoding (read my blog post [here](https://soumyadip1995.blogspot.com/2018/11/softmax-cross-entropy-and-logits.html)) cannot capture simple relationships, such as determining that the words "dog" and "cat" both refer to animals that are often discussed in the context of household pets. Such encodings often provide sufficient baselines for simple NLP tasks (for example, email spam classifiers), but lack the sophistication for more complex tasks such as translation and speech recognition. 4. In essence, traditional approaches to NLP, such as one-hot encodings, do not capture syntactic (structure) and semantic (meaning) relationships across collections of words and, therefore, represent language in a very naive way. So what do word vectors represent? 1. In contrast, word vectors represent words as multidimensional continuous floating point numbers where semantically similar words are mapped to proximate points in geometric space. In simpler terms, a word vector is a row of real valued numbers (as opposed to dummy numbers) where each point captures a dimension of the word's meaning and where semantically similar words have similar vectors. 2. This means that words such as wheel and engine should have similar word vectors to the word car (because of the similarity of their meanings), whereas the word banana should be quite distant. Put differently, words that are used in a similar context will be mapped to a proximate vector space (we will get to how these word vectors are created below). The beauty of representing words as vectors is that they lend themselves to mathematical operators. For example, we can add and subtract vectors; the example here shows that by using word vectors we can determine that: **king - man + woman = queen** In other words, we can subtract one meaning from the word vector for king **(i.e.
maleness)**, add another meaning **(femaleness)**, and show that this new word vector **(king - man + woman)** maps most closely to the word vector for queen. The numbers in the word vector represent the word's distributed weight across dimensions. In a simplified sense each dimension represents a meaning and the word's numerical weight on that dimension captures the closeness of its association with and to that meaning. Thus, the semantics of the word are embedded across the dimensions of the vector. *In the figure we are imagining that each dimension captures a clearly defined meaning. For example, if you imagine that the first dimension represents the meaning or concept of "animal", then each word's weight on that dimension represents how closely it relates to that concept.* Let's take a look at some code! Here we simply extract vectors for different animals and words that might be used to describe some of them. 1. !pip install spacy 2. !pip install gensim
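To see this vector arithmetic on a real model before the toy examples below, a small sketch using gensim's downloader; this is only an illustration, it downloads a pretrained GloVe model (~66 MB) on first use, and the exact neighbours depend on which model you load:

```python
import gensim.downloader as api

vectors = api.load("glove-wiki-gigaword-100")  # pretrained word vectors
# king - man + woman: 'queen' is expected to appear near the top of this list
print(vectors.most_similar(positive=["king", "woman"], negative=["man"], topn=3))
```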
###Code
import numpy as np
import spacy
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
nlp = spacy.load("en")
animals = "dog cat hamster lion tiger elephant cheetah monkey gorilla antelope rabbit mouse rat zoo home pet fluffy wild domesticated"
animal_tokens = nlp(animals)
animal_vectors = np.vstack([word.vector for word in animal_tokens if word.has_vector])
pca = PCA(n_components=2)
animal_vecs_transformed = pca.fit_transform(animal_vectors)
animal_vecs_transformed = np.c_[animals.split(), animal_vecs_transformed]
print(animal_vecs_transformed)
###Output
_____no_output_____
###Markdown
Introduction to Word2Vec1. Word2vec is a two-layer neural net that processes text. Its input is a text corpus and its output is a set of vectors: feature vectors for words in that corpus. While Word2vec is not a deep neural network, it turns text into a numerical form that deep nets can understand. Deeplearning4j implements a distributed form of Word2vec for Java and Scala, which works on Spark with GPUs. 2. Word2vec's applications extend beyond parsing sentences in the wild. It can be applied just as well to genes, code, likes, playlists, social media graphs and other verbal or symbolic series in which patterns may be discerned. 3. Why? Because words are simply discrete states, and we are simply looking for the transitional probabilities between those states: the likelihood that they will co-occur. 4. The purpose and usefulness of Word2vec is to group the vectors of similar words together in vector space. That is, it detects similarities mathematically. Word2vec creates vectors that are distributed numerical representations of word features, features such as the context of individual words. 5. Given enough data, usage and contexts, Word2vec can make highly accurate guesses about a word's meaning based on past appearances. Those guesses can be used to establish a word's association with other words (e.g. "man" is to "boy" what "woman" is to "girl"), or cluster documents and classify them by topic. Those clusters can form the basis of search, sentiment analysis and recommendations in such diverse fields as scientific research, legal discovery, e-commerce and customer relationship management. Let's look at some more code! There are two main training algorithms that can be used to learn the embedding from text; they are continuous bag of words (CBOW) and skip-gram. Rather than loading a large text document or corpus from file, we will work with a small, in-memory list of pre-tokenized sentences. The model is trained and the minimum count for words is set to 1 so that no words are ignored. After the model is learned, we summarize, print the vocabulary, then print a single vector for the word 'sentence'. Finally, the model is saved to a file in binary format, loaded, and then summarized. Visualize Word EmbeddingAfter you learn a word embedding for your text data, it can be nice to explore it with visualization. You can use classical projection methods to reduce the high-dimensional word vectors to two-dimensional plots and plot them on a graph. The visualizations can provide a qualitative diagnostic for your learned model. We can retrieve all of the vectors from a trained model. We can then train a projection method on the vectors, such as those methods offered in scikit-learn, then use matplotlib to plot the projection as a scatter plot. Let's look at an example with Principal Component Analysis or PCA. Plot Word Vectors Using PCAWe can create a 2-dimensional PCA model of the word vectors using the scikit-learn PCA class. The resulting projection can be plotted using matplotlib as follows, pulling out the two dimensions as x and y coordinates. Putting this all together with the model from the previous section, the complete code is listed below.
###Code
from gensim.models import Word2Vec
from sklearn.decomposition import PCA
from matplotlib import pyplot
from gensim.models import Word2Vec
# define training data
sentences = [['this', 'is', 'the', 'first', 'sentence', 'for', 'word2vec'],
['this', 'is', 'the', 'second', 'sentence'],
['yet', 'another', 'sentence'],
['one', 'more', 'sentence'],
['and', 'the', 'final', 'sentence']]
# train model
model = Word2Vec(sentences, min_count=1)
# summarize the loaded model
print(model)
# summarize vocabulary
words = list(model.wv.vocab)
print(words)
# access vector for one word
print(model['sentence'])
# save model
model.save('model.bin')
# load model
new_model = Word2Vec.load('model.bin')
print(new_model)
X = model[model.wv.vocab]
pca = PCA(n_components=2)
result = pca.fit_transform(X)
pyplot.scatter(result[:, 0], result[:, 1])
words = list(model.wv.vocab)
for i, word in enumerate(words):
pyplot.annotate(word, xy=(result[i, 0], result[i, 1]))
# define training data
sentences = [['this', 'is', 'the', 'first', 'sentence', 'for', 'word2vec'],
['this', 'is', 'the', 'second', 'sentence'],
['yet', 'another', 'sentence'],
['one', 'more', 'sentence'],
['and', 'the', 'final', 'sentence']]
# train model
model = Word2Vec(sentences, min_count=1)
# fit a 2d PCA model to the vectors
X = model[model.wv.vocab]
pca = PCA(n_components=2)
result = pca.fit_transform(X)
# create a scatter plot of the projection
pyplot.scatter(result[:, 0], result[:, 1])
words = list(model.wv.vocab)
for i, word in enumerate(words):
pyplot.annotate(word, xy=(result[i, 0], result[i, 1]))
pyplot.show()
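# The trained embedding can also be queried directly; with such a tiny toy corpus
# the similarity scores are essentially noise, but the API is the same on real data.
print(model.wv.most_similar('sentence', topn=3))
print(model.wv.similarity('first', 'second'))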
###Output
_____no_output_____
###Markdown
Running the example creates a scatter plot with the dots annotated with the words. It is hard to pull much meaning out of the graph given such a tiny corpus was used to fit the model. Word2Vec (skip-gram model)The algorithm exists in two flavors, CBOW and Skip-Gram. Given a set of sentences (also called a corpus) the model loops on the words of each sentence and either tries to use the current word to predict its neighbors (its context), in which case the method is called "Skip-Gram", or it uses each of these contexts to predict the current word, in which case the method is called "Continuous Bag Of Words" (CBOW). The limit on the number of words in each context is determined by a parameter called "window size". IntuitionThe skip-gram neural network model is actually surprisingly simple in its most basic form. We train a simple neural network with a single hidden layer to perform a certain task, but then we're not actually going to use that neural network for the task we trained it on! Instead, the goal is actually just to learn the weights of the hidden layer; we'll see that these weights are actually the "word vectors" that we're trying to learn. As an example, let's consider the dataset **the quick brown fox jumped over the lazy dog** We first form a dataset of words and the contexts in which they appear. We could define 'context' in any way that makes sense, and in fact people have looked at syntactic contexts (i.e. the syntactic dependents of the current target word, see e.g. Levy et al.), words-to-the-left of the target, words-to-the-right of the target, etc. For now, let's stick to the vanilla definition and define 'context' as the window of words to the left and to the right of a target word. Using a window size of 1, we then have the dataset **([the, brown], quick), ([quick, fox], brown), ([brown, jumped], fox)**, ... of (context, target) pairs. Recall that skip-gram inverts contexts and targets, and tries to predict each context word from its target word, so the task becomes to predict 'the' and 'brown' from 'quick', 'quick' and 'fox' from 'brown', etc. Therefore our dataset becomes **(quick, the), (quick, brown), (brown, quick), (brown, fox)**, ... of **(input, output)** pairs. The objective function is defined over the entire dataset, but we typically optimize this with stochastic gradient descent (SGD) using one example at a time (or a 'minibatch' of batch_size examples, where typically 16 <= batch_size <= 512). Visualization using TensorFlowWe can visualize the learned vectors by projecting them down to 2 dimensions using something like the [t-SNE](https://lvdmaaten.github.io/tsne/) dimensionality reduction technique. When we inspect these visualizations it becomes apparent that the vectors capture some general, and in fact quite useful, semantic information about words and their relationships to one another. It was very interesting when we first discovered that certain directions in the induced vector space specialize towards certain semantic relationships, e.g. male-female, verb tense and even country-capital relationships between words, as illustrated in the figure below. The CodeVisualizing the Learned EmbeddingsAfter training has finished we can visualize the learned embeddings using t-SNE.
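The windowing logic described above is easy to reproduce on its own; a short pure-Python sketch that generates the skip-gram (input, output) pairs for the example sentence with a window size of 1:

```python
sentence = "the quick brown fox jumped over the lazy dog".split()
window = 1

pairs = []  # (input word, context word to predict)
for i, target in enumerate(sentence):
    for j in range(max(0, i - window), min(len(sentence), i + window + 1)):
        if j != i:
            pairs.append((target, sentence[j]))

print(pairs[:6])
# [('the', 'quick'), ('quick', 'the'), ('quick', 'brown'),
#  ('brown', 'quick'), ('brown', 'fox'), ('fox', 'brown')]
```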
###Code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import math
import os
import random
import sys
from tempfile import gettempdir
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
data_index = 0
def word2vec_basic(log_dir):
"""Example of building, training and visualizing a word2vec model."""
# Create the directory for TensorBoard variables if there is not.
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
# pylint: disable=redefined-outer-name
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
local_filename = os.path.join(gettempdir(), filename)
if not os.path.exists(local_filename):
local_filename, _ = urllib.request.urlretrieve(url + filename,
local_filename)
statinfo = os.stat(local_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception('Failed to verify ' + local_filename +
'. Can you get to it with a browser?')
return local_filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = {}
for word, _ in count:
dictionary[word] = len(dictionary)
data = []
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0: # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
data, count, unused_dictionary, reverse_dictionary = build_dataset(
vocabulary, vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span) # pylint: disable=redefined-builtin
if data_index + span > len(data):
data_index = 0
buffer.extend(data[data_index:data_index + span])
data_index += span
for i in range(batch_size // num_skips):
context_words = [w for w in range(span) if w != skip_window]
words_to_use = random.sample(context_words, num_skips)
for j, context_word in enumerate(words_to_use):
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[context_word]
if data_index == len(data):
buffer.extend(data[0:span])
data_index = span
else:
buffer.append(data[data_index])
data_index += 1
# Backtrack a little bit to avoid skipping words in the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0],
reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
num_sampled = 64 # Number of negative examples to sample.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
graph = tf.Graph()
with graph.as_default():
# Input data.
with tf.name_scope('inputs'):
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
with tf.name_scope('embeddings'):
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
with tf.name_scope('weights'):
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
with tf.name_scope('biases'):
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
with tf.name_scope('loss'):
loss = tf.reduce_mean(
tf.nn.nce_loss(
weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Add the loss value as a scalar to summary.
tf.summary.scalar('loss', loss)
# Construct the SGD optimizer using a learning rate of 1.0.
with tf.name_scope('optimizer'):
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all
# embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings,
valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Merge all summaries.
merged = tf.summary.merge_all()
# Add variable initializer.
init = tf.global_variables_initializer()
# Create a saver.
saver = tf.train.Saver()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# Open a writer to write summaries.
writer = tf.summary.FileWriter(log_dir, session.graph)
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(batch_size, num_skips,
skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# Define metadata variable.
run_metadata = tf.RunMetadata()
_, summary, loss_val = session.run([optimizer, merged, loss],
feed_dict=feed_dict,
run_metadata=run_metadata)
average_loss += loss_val
# Add returned summaries to writer in each step.
writer.add_summary(summary, step)
# Add metadata to visualize the graph for the last run.
if step == (num_steps - 1):
writer.add_run_metadata(run_metadata, 'step%d' % step)
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000
# batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Write corresponding labels for the embeddings.
with open(log_dir + '/metadata.tsv', 'w') as f:
for i in xrange(vocabulary_size):
f.write(reverse_dictionary[i] + '\n')
# Save the model for checkpoints.
saver.save(session, os.path.join(log_dir, 'model.ckpt'))
# Create a configuration for visualizing embeddings with the labels in
# TensorBoard.
config = projector.ProjectorConfig()
embedding_conf = config.embeddings.add()
embedding_conf.tensor_name = embeddings.name
embedding_conf.metadata_path = os.path.join(log_dir, 'metadata.tsv')
projector.visualize_embeddings(writer, config)
writer.close()
# Step 6: Visualize the embeddings.
# pylint: disable=missing-docstring
# Function to draw visualization of distance between embeddings.
def plot_with_labels(low_dim_embs, labels, filename):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(
label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(
perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(),
'tsne.png'))
except ImportError as ex:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
print(ex)
# All functionality is run after tf.compat.v1.app.run() (b/122547914). This
# could be split up but the methods are laid sequentially with their usage for
# clarity.
def main(unused_argv):
# Give a folder path as an argument with '--log_dir' to save
# TensorBoard summaries. Default is a log folder in current directory.
current_path = os.path.dirname(os.path.realpath(sys.argv[0]))
parser = argparse.ArgumentParser()
parser.add_argument(
'--log_dir',
type=str,
default=os.path.join(current_path, 'log'),
help='The log directory for TensorBoard summaries.')
flags, unused_flags = parser.parse_known_args()
word2vec_basic(flags.log_dir)
if __name__ == '__main__':
tf.app.run()
###Output
_____no_output_____
###Markdown
GloVe(Global Vectors)Another well-known model that learns vectors for words from their co-occurrence information, i.e. how frequently they appear together in large text corpora, is Global Vectors (GloVe). While word2vec is a predictive model (a feed-forward neural network that learns vectors to improve its predictive ability), GloVe is a count-based model. What is a count-based model?Generally speaking, count-based models learn vectors by doing dimensionality reduction on a co-occurrence counts matrix. First they construct a large matrix of co-occurrence information, which contains the information on how frequently each “word” (stored in rows) is seen in some “context” (the columns). The number of “contexts” needs to be large, since it is essentially combinatorial in size. Afterwards they factorize this matrix to yield a lower-dimensional matrix of words and features, where each row yields a vector representation for each word. This is achieved by minimizing a “reconstruction loss” which looks for lower-dimensional representations that can explain the variance in the high-dimensional data. In the case of GloVe, the counts matrix is preprocessed by normalizing the counts and log-smoothing them. Compared to word2vec, GloVe allows for a parallel implementation, which means that it’s easier to train over more data. GloVe is believed to combine the benefits of the word2vec skip-gram model in word analogy tasks with those of matrix factorization methods exploiting global statistical information. GloVe at a Glance GloVe is essentially a log-bilinear model with a weighted least-squares objective. The model rests on the rather simple idea that ratios of word-word co-occurrence probabilities have the potential for encoding some form of meaning, which can be captured as vector differences. Therefore, the training objective is to learn word vectors such that their dot product equals the logarithm of the words’ probability of co-occurrence. As the logarithm of a ratio equals the difference of logarithms, this objective associates the ratios of co-occurrence probabilities with vector differences in the word vector space. The resulting word vectors perform well on word analogy, word similarity, and named entity recognition tasks. Load Stanford’s GloVe EmbeddingStanford researchers also have their own word embedding algorithm, like word2vec, called Global Vectors for Word Representation, or GloVe for short. I won’t get into the details of the differences between word2vec and GloVe here, but generally, NLP practitioners seem to prefer GloVe at the moment based on results. Like word2vec, the GloVe researchers also provide pre-trained word vectors, in this case, a great selection to choose from. You can download the GloVe pre-trained word vectors and load them easily with gensim.
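To make the idea of a count-based model concrete, here is a small sketch of building a word-word co-occurrence count table with a symmetric window. This is an illustration only, not the actual GloVe implementation, and the helper name is made up:
###Code
from collections import defaultdict
# Illustrative co-occurrence counting (not the real GloVe code)
def cooccurrence_counts(sentences, window_size=2):
    counts = defaultdict(float)
    for sentence in sentences:
        words = sentence.split()
        for i, word in enumerate(words):
            lo = max(0, i - window_size)
            hi = min(len(words), i + window_size + 1)
            for j in range(lo, hi):
                if j != i:
                    counts[(word, words[j])] += 1.0
    return counts
print(cooccurrence_counts(["the quick brown fox", "the lazy dog"]))
###Output
_____no_output_____
###Markdown
GloVe then learns vectors whose dot products approximate the logarithms of such co-occurrence counts. In practice we simply reuse the pre-trained vectors released by the Stanford group. The first step is to convert the GloVe file format to the word2vec file format. The only difference is the addition of a small header line. This can be done by calling the *glove2word2vec()* function. For example: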
###Code
from gensim.scripts.glove2word2vec import glove2word2vec
glove_input_file = 'glove.txt'
word2vec_output_file = 'word2vec.txt'
glove2word2vec(glove_input_file, word2vec_output_file)
###Output
_____no_output_____
###Markdown
Once converted, the file can be loaded just like the word2vec file above. Let’s make this concrete with an example. You can download the smallest GloVe pre-trained model from the GloVe website. It is an 822-megabyte zip file with 4 different models (50, 100, 200 and 300-dimensional vectors) trained on Wikipedia data with 6 billion tokens and a 400,000 word vocabulary. The direct download link is here: [glove.6B.zip](http://nlp.stanford.edu/data/glove.6B.zip). Working with the 100-dimensional version of the model, we can convert the file to word2vec format as follows:
###Code
from gensim.scripts.glove2word2vec import glove2word2vec
glove_input_file = 'glove.6B.100d.txt'
word2vec_output_file = 'glove.6B.100d.txt.word2vec'
glove2word2vec(glove_input_file, word2vec_output_file)
###Output
_____no_output_____
###Markdown
You now have a copy of the GloVe model in word2vec format with the filename glove.6B.100d.txt.word2vec. Now we can load it and perform the same (king – man) + woman = ? test (as in the Instagram post). The complete code listing is provided below. Note that the converted file is in ASCII format, not binary, so we set binary=False when loading.
###Code
from gensim.models import KeyedVectors
# load the Stanford GloVe model
filename = 'glove.6B.100d.txt.word2vec'
model = KeyedVectors.load_word2vec_format(filename, binary=False)
# calculate: (king - man) + woman = ?
result = model.most_similar(positive=['woman', 'king'], negative=['man'], topn=1)
print(result)
###Output
_____no_output_____ |
notebooks/gdal/OGRexamples.ipynb | ###Markdown
OGR examples Source: https://pcjericks.github.io/py-gdalogr-cookbook/geometry.html Create a point
###Code
from osgeo import ogr
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(1198054.34, 648493.09)
print point.ExportToWkt()
point.GetGeometryName()
###Output
_____no_output_____
###Markdown
Buffer a point geometry
###Code
bufferDistance = 500
bufpoint = point.Buffer(bufferDistance)
print "%s buffered by %d is %s" % (point.ExportToWkt(), bufferDistance, bufpoint.ExportToWkt())
centroid = bufpoint.Centroid()
print centroid
# This must be equal to the input point for the buffer
###Output
POINT (1198054.34 648493.089999999850988)
###Markdown
Create a line
###Code
from osgeo import ogr
line = ogr.Geometry(ogr.wkbLineString)
line.AddPoint(1116651.439379124, 637392.6969887456)
line.AddPoint(1188804.0108498496, 652655.7409537067)
line.AddPoint(1226730.3625203592, 634155.0816022386)
line.AddPoint(1281307.30760719, 636467.6640211721)
print line.ExportToWkt()
print "Geometry has %i points" % (line.GetPointCount())
for i in range(0, line.GetPointCount()):
# GetPoint returns a tuple not a Geometry
pt = line.GetPoint(i)
print "%i). POINT (%d %d)" %(i, pt[0], pt[1])
print "Length = %d" % line.Length()
line.GetGeometryName()
###Output
_____no_output_____
###Markdown
Create a polygon
###Code
from osgeo import ogr
# Create ring
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint(1179091.1646903288, 712782.8838459781)
ring.AddPoint(1161053.0218226474, 667456.2684348812)
ring.AddPoint(1214704.933941905, 641092.8288590391)
ring.AddPoint(1228580.428455506, 682719.3123998424)
ring.AddPoint(1218405.0658121984, 721108.1805541387)
ring.AddPoint(1179091.1646903288, 712782.8838459781)
# Create polygon
poly = ogr.Geometry(ogr.wkbPolygon)
poly.AddGeometry(ring)
print poly.ExportToWkt()
# Get Envelope returns a tuple (minX, maxX, minY, maxY)
env = poly.GetEnvelope()
print "minX: %d, minY: %d, maxX: %d, maxY: %d" %(env[0],env[2],env[1],env[3])
print "Area = %d" % poly.GetArea()
poly.GetGeometryName()
###Output
_____no_output_____
###Markdown
Buffer a polygon geometry
###Code
bufferDistance = 500
bufpoly = poly.Buffer(bufferDistance)
print "%s buffered by %d is %s" % (poly.ExportToWkt(), bufferDistance, bufpoly.ExportToWkt())
bufpoly.GetGeometryName()
centroid = bufpoly.Centroid()
print centroid
###Output
POINT (1198366.724720017286018 682817.35505709622521)
###Markdown
Intersection
###Code
intersection = poly.Intersection(bufpoly)
print intersection.ExportToWkt()
###Output
POLYGON ((1179091.164690328761935 712782.883845978067257 0,1218405.065812198445201 721108.180554138729349 0,1228580.428455505985767 682719.312399842427112 0,1214704.933941904921085 641092.828859039116651 0,1161053.021822647424415 667456.268434881232679 0,1179091.164690328761935 712782.883845978067257 0))
###Markdown
Union
###Code
union = poly.Union(bufpoly)
print union.ExportToWkt()
###Output
POLYGON ((1178987.579469376476482 713272.036278252722695 0,1218301.480591246159747 721597.332986413384788 0,1218327.255845648935065 721602.089056905708276 0,1218353.244916843017563 721605.487898549181409 0,1218379.376388529781252 721607.520171545096673 0,1218405.578453103313223 721608.180291338008828 0,1218431.779108972288668 721607.466443960904144 0,1218457.906358417356387 721605.380591020570137 0,1218483.888405434554443 721601.928464306984097 0,1218509.653853027150035 721597.119550042669289 0,1218535.13189939991571 721590.967062815325335 0,1218560.252532517071813 721583.487909264513291 0,1218584.946722492575645 721574.702641624142416 0,1218609.146611277479678 721564.635401245439425 0,1218632.785699131898582 721553.313852258492261 0,1218655.79902736027725 721540.769105552928522 0,1218678.123356814263389 721527.035633287159726 0,1218699.697341670282185 721512.151174160884693 0,1218720.4616980033461 721496.156629712088034 0,1218740.359366695629433 721479.095951921888627 0,1218759.335670231375843 721461.016022437135689 0,1218777.338462946936488 721441.966523743118159 0,1218794.318274323828518 721421.999802639009431 0,1218810.228444931097329 721401.170726392185315 0,1218825.025254641659558 721379.536531966528855 0,1218838.668042772216722 721357.156668739160523 0,1218851.119319817284122 721334.092635137145407 0,1218862.34487046697177 721310.407809643540531 0,1218872.313847629120573 721286.167276638094336 0,1218880.998857194790617 721261.437647548737004 0,1218888.376033315202221 721236.286877808277495 0,1229063.738676622742787 682847.418723511975259 0,1229069.826498327543959 682821.730931771569885 0,1229074.550026639830321 682795.757628210238181 0,1229077.896093794843182 682769.571218525059521 0,1229079.855371971381828 682743.244702488300391 0,1229080.422399295726791 682716.851470446330495 0,1229079.59559506806545 682690.465098729589954 0,1229077.377264167414978 682664.159144543809816 0,1229073.773590628057718 682638.006940915249288 0,1229068.79462039959617 682612.081392260151915 0,1229062.45423334161751 682586.454771149205044 0,1229054.77010453119874 682561.198516834061593 0,1215179.275590930134058 640934.714976030751131 0,1215170.472761062905192 640910.418311490793712 0,1215160.426350902067497 640886.608914935728535 0,1215149.163197130197659 640863.35038771352265 0,1215136.713386682327837 640840.704859650577419 0,1215123.110176374670118 640818.732823086786084 0,1215108.389904066454619 640797.492971283732913 0,1215092.591891593066975 640777.042041639215313 0,1215075.758339724969119 640757.43466412613634 0,1215057.934215439250693 640738.723215361125767 0,1215039.167131800204515 640720.957678692531772 0,1215019.507220772095025 640704.185510681243613 0,1214999.006999302422628 640688.451514331391081 0,1214977.721229036571458 640673.797719409340061 0,1214955.706770032411441 640660.263270170777105 0,1214933.02242887346074 640647.884320796001703 0,1214909.728801579913124 640636.693938811891712 0,1214885.888111740350723 640626.722016760264523 0,1214861.564044295111671 640617.995192345930263 0,1214836.82157541741617 640610.536777281085961 0,1214811.726798944408074 640604.366695012664422 0,1214786.346749821444973 640599.501427501789294 0,1214760.74922503484413 640595.953971195966005 0,1214735.00260250759311 640593.733802311937325 0,1214709.175658442545682 640592.846851521986537 0,1214683.337383603677154 640593.295488112256862 0,1214657.556799022480845 640595.078513652668335 0,1214631.902771623339504 640598.191165199154057 0,1214606.443830263335258 640602.625128016108647 0,1214581.24798267101869 
640608.368557787965983 0,1214556.382533780764788 640615.40611225774046 0,1214531.913905941881239 640623.718992211157456 0,1214507.907461487688124 640633.28499169414863 0,1214484.427328134421259 640644.078557330649346 0,1160832.515208876924589 667007.518133172765374 0,1160809.717457682825625 667019.458423561416566 0,1160787.564276192570105 667032.555924999644049 0,1160766.114353322656825 667046.775939167710021 0,1160745.424514894606546 667062.080793949542567 0,1160725.549573090160266 667078.429943234426901 0,1160706.542181240627542 667095.780074333189987 0,1160688.452694336185232 667114.085222723777406 0,1160671.329035623930395 667133.296893821796402 0,1160655.216569648357108 667153.364191453787498 0,1160640.157982069300488 667174.233952693175524 0,1160626.193166578421369 667195.850888701272197 0,1160613.359119211789221 667218.157731199869886 0,1160601.689840337727219 667241.095384188462049 0,1160591.216244583250955 667264.603080503526144 0,1160581.966078932862729 667288.618542804033495 0,1160573.963849221356213 667313.078148559550755 0,1160567.230755211552605 667337.917098600184545 0,1160561.784634431358427 667363.069588785408996 0,1160557.639914918225259 667388.468984333798289 0,1160554.80757699534297 667414.047996354638599 0,1160553.295124183176085 667439.738860111217946 0,1160553.106563320150599 667465.47351454582531 0,1160554.242393947206438 667491.183782589039765 0,1160556.699606986250728 667516.801551776588894 0,1160560.471692709252238 667542.258954695309512 0,1160565.548657986568287 667567.488548779278062 0,1160571.917052759323269 667592.423494981136173 0,1160579.56000567227602 667616.997734843986109 0,1160588.457268770318478 667641.14616550586652 0,1178626.600136451655999 712967.761576602701098 0,1178636.786520990310237 712991.547401081887074 0,1178648.189780931686983 713014.774401293601841 0,1178660.779377073980868 713037.38037274137605 0,1178674.521593075245619 713059.304774112883024 0,1178689.379625748377293 713080.488889416796155 0,1178705.313683625310659 713100.875985229969956 0,1178722.281093522440642 713120.41146263666451 0,1178740.236414823913947 713139.043003450147808 0,1178759.131561177317053 713156.720710326568224 0,1178778.915929273935035 713173.397240395774134 0,1178799.536534369457513 713189.027932050754316 0,1178820.938152184709907 713203.570924556348473 0,1178843.063466800609604 713216.987270156969316 0,1178865.853224157355726 713229.241038383101113 0,1178889.246390743879601 713240.299412277177908 0,1178913.180317051475868 713250.132776280865073 0,1178937.590905356686562 713258.714795548818074 0,1178962.412781382212415 713266.022486476460472 0,1178987.579469376476482 713272.036278252722695 0))
###Markdown
To GeoJson
###Code
# Create the output Driver
outDriver = ogr.GetDriverByName('GeoJSON')
# Create the output GeoJSON
outDataSource = outDriver.CreateDataSource('./test.geojson')
outLayer = outDataSource.CreateLayer('./test.geojson', geom_type=ogr.wkbPolygon )
# Get the output Layer's Feature Definition
featureDefn = outLayer.GetLayerDefn()
# create a new feature
outFeature = ogr.Feature(featureDefn)
# Set new geometry
outFeature.SetGeometry(union)
# Add new feature to output Layer
outLayer.CreateFeature(outFeature)
# destroy the feature
outFeature.Destroy
# Close DataSources
outDataSource.Destroy()
###Output
_____no_output_____
###Markdown
Export to GeoJson
###Code
geojson = poly.ExportToJson()
print geojson
###Output
{ "type": "Polygon", "coordinates": [ [ [ 1179091.164690328761935, 712782.883845978067257, 0.0 ], [ 1161053.021822647424415, 667456.268434881232679, 0.0 ], [ 1214704.933941904921085, 641092.828859039116651, 0.0 ], [ 1228580.428455505985767, 682719.312399842427112, 0.0 ], [ 1218405.065812198445201, 721108.180554138729349, 0.0 ], [ 1179091.164690328761935, 712782.883845978067257, 0.0 ] ] ] }
###Markdown
Export to KML
###Code
kml = poly.ExportToKML()
print kml
###Output
<Polygon><outerBoundaryIs><LinearRing><coordinates>91.164690328761935,712782.883845978067257,0 53.021822647424415,667456.268434881232679,0 64.933941904921085,641092.828859039116651,0 -99.571544494014233,682719.312399842427112,0 165.065812198445201,721108.180554138729349,0 91.164690328761935,712782.883845978067257,0</coordinates></LinearRing></outerBoundaryIs></Polygon>
|
vis_boardtransform.ipynb | ###Markdown
Visualize Board Transformations> Joseph P. Vantassel, The University of Texas at AustinTo cut down on the computational expense of training the tic-tac-toe model, we can utilize the inherent symmetry present in the game board. Below is a single game state shown in eight possible unique ways. These eight possible combinations correspond to the eight values the `trans_number` variable can assume. A small helper that applies any of these transformations given a `trans_number` is sketched at the end of this notebook.
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Possibility 0Base state (i.e., no manipulation).
###Code
state = np.array([[1,2,0],[2,0,1],[2,2,2]])
print(state)
###Output
[[1 2 0]
[2 0 1]
[2 2 2]]
###Markdown
Possibility 1Rotate 90 degrees clock-wise.
###Code
print(np.flip(np.transpose(state), 1))
###Output
[[2 2 1]
[2 0 2]
[2 1 0]]
###Markdown
Possibility 2Flip across the diagonal spanning from lower-left to upper-right.
###Code
print(np.flip(np.flip(np.transpose(state), 1), 0))
###Output
[[2 1 0]
[2 0 2]
[2 2 1]]
###Markdown
Possibility 3Flip across the diagonal spanning from upper-left to lower-right.
###Code
print(np.transpose(state))
###Output
[[1 2 2]
[2 0 2]
[0 1 2]]
###Markdown
Possibility 4Rotate 180 degrees clock-wise.
###Code
print(np.flip(np.flip(state, 0), 1))
###Output
[[2 2 2]
[1 0 2]
[0 2 1]]
###Markdown
Possibility 5Flip board across central vertical line.
###Code
print(np.flip(state, 1))
###Output
[[0 2 1]
[1 0 2]
[2 2 2]]
###Markdown
Possibility 6Flip board across central horizontal line.
###Code
print(np.flip(state, 0))
###Output
[[2 2 2]
[2 0 1]
[1 2 0]]
###Markdown
Possibility 7Rotate 270 degrees clock-wise.
###Code
print(np.flip(np.transpose(state), 0))
###Output
[[0 1 2]
[2 0 2]
[1 2 2]]
###Markdown
Superfluous ManipulationsSame as Possibility 0.
###Code
print(np.transpose(np.flip(np.flip(np.transpose(state), 0), 0)))
###Output
[[1 2 0]
[2 0 1]
[2 2 2]]
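###Markdown
As promised above, a small dispatch helper can apply any of the eight manipulations given a `trans_number`. This is a sketch: the function name `transform_board` and its interface are assumptions, not part of the training code, and it reuses the `state` array and NumPy import from the cells above.
###Code
def transform_board(state, trans_number):
    """Apply one of the eight symmetry transformations (0-7) shown above."""
    transforms = [
        lambda s: s,                                        # 0: identity
        lambda s: np.flip(np.transpose(s), 1),              # 1: rotate 90 degrees clock-wise
        lambda s: np.flip(np.flip(np.transpose(s), 1), 0),  # 2: flip lower-left/upper-right diagonal
        lambda s: np.transpose(s),                          # 3: flip upper-left/lower-right diagonal
        lambda s: np.flip(np.flip(s, 0), 1),                # 4: rotate 180 degrees
        lambda s: np.flip(s, 1),                            # 5: flip across central vertical line
        lambda s: np.flip(s, 0),                            # 6: flip across central horizontal line
        lambda s: np.flip(np.transpose(s), 0),              # 7: rotate 270 degrees clock-wise
    ]
    return transforms[trans_number](state)
print(transform_board(state, 1))  # matches Possibility 1 above
###Output
_____no_output_____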
|
src/tensorflow/cnn.ipynb | ###Markdown
Table of Contents
###Code
from PIL import Image
import scipy.misc
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import random
import tensorflow as tf
# taken from: https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
from matplotlib.font_manager import FontProperties
def display_image_samples(data, labels=None): # labels are used for plot titles and are optional
font = FontProperties()
font.set_family('monospace')
plt.figure(figsize=(8,4))
rows, cols = 2, 4 # these are arbitrary
random_ids = random.sample(range(len(data)), rows*cols) # randomly select the images
for i in range(rows*cols):
curr_index = random_ids[i]
image = data[curr_index]
title_str = ('shape: ' + str(image.shape))
if labels:
            title_str += ('\nclass ' + str(labels[curr_index]))  # label of the randomly chosen image, not the loop index
plt.subplot(rows, cols, i+1)
plt.title(title_str, fontproperties=font)
plt.imshow(image)
plt.axis('off')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Fetch Data
###Code
def clean_data(data):
# apply greyscale
data = data.mean(3) # dimension 3 of image shape corresponds to color channels
# data = data[:, :, :, 0] # same as above
# center-crop images
# data = data[:, :, 7:data.shape[2]-1]
print(data.shape)
return data
from sklearn.model_selection import train_test_split
def load_data(data_path, k, test_size=0.3):
x = []
y = []
for i in range(k):
curr_dir_path = data_path + 'c' + str(i) + '/'
for file in os.listdir(curr_dir_path):
file_name = os.fsdecode(file)
if file_name.endswith(".jpg"):
file_path = (os.path.join(curr_dir_path, file_name))
img = np.asarray(Image.open(file_path))#.flatten()
x.append(img)
y.append(i)
# apply greyscale and cropping
x = clean_data(np.asarray(x))
# np.asarray(x_train), np.asarray(labels)
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=test_size, random_state=42)  # use the test_size argument instead of a hard-coded 0.33
return np.asarray(x_train), np.asarray(y_train), np.asarray(x_test), np.asarray(y_test)
###Output
_____no_output_____
###Markdown
Convolutional Neural Network
###Code
def conv_layer(x, W, bias):
# given an image x, apply all the filters W to it
conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') # strides=[batch, height, width, channels]
# note: the padding determines what to do when the window runs out of pixels at the end
conv_with_bias = tf.nn.bias_add(conv, bias)
conv_out = tf.nn.relu(conv_with_bias)
return conv_out
def maxpool_layer(conv, k=2):
# k: # window size
# strides: steps taken to move across the image
return (tf.nn.max_pool(conv, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME'))
def model(x):
# define all needed variables
W1 = tf.Variable(tf.random_normal([5,5,1,64]))
b1 = tf.Variable(tf.random_normal([64]))
W2 = tf.Variable(tf.random_normal([5,5,64,64]))
b2 = tf.Variable(tf.random_normal([64]))
W3 = tf.Variable(tf.random_normal([6*6*64, 1024]))
b3 = tf.Variable(tf.random_normal([1024]))
W_out = tf.Variable(tf.random_normal([1024, len(classes)]))
b_out = tf.Variable(tf.random_normal([len(classes)]))
# define the actual model
x_reshaped = tf.reshape(x, shape=[-1,24,24,1])
# ---------- 1: CONVOLUTION + MAXPOOL ----------
# construct the first layer of convolution and maxpooling
conv_out1 = conv_layer(x_reshaped, W1, b1)
maxpool_out1 = maxpool_layer(conv_out1)
# normalizing to "prevent neurons from saturating when inputs may have varying scale, and to aid generalization."
norm1 = tf.nn.lrn(maxpool_out1, 4, bias=1.0, alpha=0.001/9.0, beta=0.75)
# ---------- 2: CONVOLUTION + MAXPOOL ----------
# construct the second layer
conv_out2 = conv_layer(norm1, W2, b2)
norm2 = tf.nn.lrn(conv_out2, 4, bias=1.0, alpha=0.001/9.0, beta=0.75)
maxpool_out2 = maxpool_layer(norm2)
# ---------- 3: FULLY CONNECTED LAYER ----------
# first, flatten the images. instead of (6,6,64), now 6*6*64 = 2304
maxpool_reshaped = tf.reshape(maxpool_out2, [-1, W3.get_shape().as_list()[0]])
local = tf.add(tf.matmul(maxpool_reshaped, W3), b3)
local_out = tf.nn.relu(local)
# obtain the final predition
out = tf.add(tf.matmul(local_out, W_out), b_out)
return out
def run_model(classes, x_train, y_train, x_test, y_test, num_epochs, num_batches):
# define the x and y placeholders
x = tf.placeholder(tf.float32, [None, 24 * 24])
y = tf.placeholder(tf.float32, [None, len(classes)])
# run the model
y_predicted = model(x)
# define the cost
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_predicted, labels=y))
# get the optimizer
train_op = tf.train.AdamOptimizer(learning_rate = 0.001).minimize(cost)
# train the model
with tf.Session() as s:
s.run(tf.global_variables_initializer())
onehot_labels = tf.one_hot(y_train, len(classes), on_value=1., off_value=0., axis=-1)
onehot_labels = s.run(onehot_labels)
batch_size = len(x_train) // num_batches
# print("batch size: ", batch_size, '\n')
for j in range(0, num_epochs):
printProgressBar(0, 1, prefix='EPOCH ' + str(j+1) + ': ', length=50)
# if j % 10 == 0: print("\nEPOCH ", j+1)
total_cost = 0
for i in range(0, len(x_train), batch_size):
batch_data = x_train[i:i+batch_size, :]
batch_onehot_vals = onehot_labels[i:i+batch_size, :]
_, c = s.run([train_op, cost], feed_dict={x: batch_data, y: batch_onehot_vals})
total_cost += c
# if (j % 10 == 0) and (i % batch_size == 0): print("batch", i + 1, ", cost =", total_cost)
printProgressBar(i, len(x_train), prefix='EPOCH ' + str(j+1) + ': ',
suffix='Cost = ' + str(total_cost), length=50)
print()
# if j % 10 == 0: print("> Total Cost =", total_cost)
        # obtain the accuracy on the trained model
        # (evaluated inside the same session so the trained weights are used;
        #  opening a new session and re-running the initializer would discard the training)
        print("Calculating Accuracy")
        correct_pred = tf.equal(tf.argmax(y_predicted, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        onehot_ytest_tf = tf.one_hot(y_test, len(classes), on_value=1., off_value=0., axis=-1)
        onehot_ytest = s.run(onehot_ytest_tf)
        accuracy_val = s.run(accuracy, feed_dict={x: x_test, y: onehot_ytest})
        print("\nACCURACY = ", accuracy_val * 100, "%")
###Output
_____no_output_____
###Markdown
Implementation
###Code
csv_path = '../dataset/driver_imgs_list.csv'
# train_data_path = '../dataset/original/train/'
train_data_path = '../dataset/resized/'
# train_data_path = '../dataset/samples/'
drivers_csv = pd.read_csv(csv_path)
classes = (np.unique(np.asarray(drivers_csv)[:,1]))
NUM_CLASSES = len(classes) # 10
# fetch images from stored dataset in path
x_train, y_train, x_test, y_test = load_data(train_data_path, NUM_CLASSES) # test perc = 0.3 (default)
print(x_train.shape)
# print a sample of images
display_image_samples(x_train)
print('\n---------------------------------------- DETAILS ---------------------------------------\n')
print('data shape (original):', x_train.shape) # (13, 24, 24)
# want to flatten it, like: (13, 576)
x_train_flattened = x_train.reshape(x_train.shape[0], -1) # the -1 would be automatically calculated as 24*24 (=576)
x_test_flattened = x_test.reshape(x_test.shape[0], -1)
print('data shape (flattened):' , x_train_flattened.shape)
print('\nclass names:', classes, '\nclass names shape:', classes.shape)
print('\nlabels shape:', y_train.shape)
print('\n------------------------------------- CONFIGURATION -------------------------------------\n')
# SIZES: names: [] x 10 , data:(50000, 576), labels:(50000,)
num_epochs = 1
num_batches = 5
print('epochs:', num_epochs)
print('number of batches:', num_batches)
print('batch size:', len(x_train) // num_batches)
print('\n-----------------------------------------------------------------------------------------\n')
run_model(classes, x_train_flattened, y_train, x_test_flattened, y_test, num_epochs, num_batches)
###Output
---------------------------------------- DETAILS ---------------------------------------
data shape (original): (15024, 24, 24)
data shape (flattened): (15024, 576)
class names: ['c0' 'c1' 'c2' 'c3' 'c4' 'c5' 'c6' 'c7' 'c8' 'c9']
class names shape: (10,)
labels shape: (15024,)
------------------------------------- CONFIGURATION -------------------------------------
epochs: 1
number of batches: 5
batch size: 3004
-----------------------------------------------------------------------------------------
EPOCH 0: |█████████████████████████████---------------------| 60.0% Cost = 64346.3671875
|
projecten data science/Comparing Cosmetics by Ingredients/notebook.ipynb | ###Markdown
1. Cosmetics, chemicals... it's complicatedWhenever I want to try a new cosmetic item, it's so difficult to choose. It's actually more than difficult. It's sometimes scary because new items that I've never tried end up giving me skin trouble. We know the information we need is on the back of each product, but it's really hard to interpret those ingredient lists unless you're a chemist. You may be able to relate to this situation.So instead of buying and hoping for the best, why don't we use data science to help us predict which products may be good fits for us? In this notebook, we are going to create a content-based recommendation system where the 'content' will be the chemical components of cosmetics. Specifically, we will process ingredient lists for 1472 cosmetics on Sephora via word embedding, then visualize ingredient similarity using a machine learning method called t-SNE and an interactive visualization library called Bokeh. Let's inspect our data first.
###Code
# Import libraries
import pandas as pd
import numpy as np
from sklearn.manifold import TSNE
# Load the data
df = pd.read_csv("datasets/cosmetics.csv")
# Check the first five rows
display(df.sample(5))
# Inspect the types of products
df.Label.value_counts()
###Output
_____no_output_____
###Markdown
2. Focus on one product category and one skin typeThere are six categories of product in our data (including moisturizers, cleansers, face masks, eye creams, and sun protection) and there are five different skin types (combination, dry, normal, oily and sensitive). Because individuals have different product needs as well as different skin types, let's set up our workflow so its outputs (a t-SNE model and a visualization of that model) can be customized. For the example in this notebook, let's focus on moisturizers for those with dry skin by filtering the data accordingly.
###Code
# Filter for moisturizers
moisturizers = df[df['Label']=='Moisturizer']
# Filter for dry skin as well
moisturizers_dry =moisturizers[moisturizers['Dry']==1]
# Reset index
moisturizers_dry = moisturizers_dry.reset_index(drop=True)
###Output
_____no_output_____
###Markdown
3. Tokenizing the ingredientsTo get to our end goal of comparing ingredients in each product, we first need to do some preprocessing tasks and bookkeeping of the actual words in each product's ingredients list. The first step will be tokenizing the list of ingredients in the Ingredients column. After splitting them into tokens, we'll make a binary bag of words. Then we will create a dictionary with the tokens, ingredient_idx, which will have the following format: { "ingredient": index value, ... }
###Code
# Initialize dictionary, list, and initial index
ingredient_idx = {}
corpus = []
idx = 0
# For loop for tokenization
for i in range(len(moisturizers_dry)):
ingredients = moisturizers_dry['Ingredients'][i]
ingredients_lower = ingredients.lower()
tokens = ingredients_lower.split(', ')
corpus.append(tokens)
for ingredient in tokens:
if ingredient not in ingredient_idx:
ingredient_idx[ingredient] = idx
idx += 1
# Check the result
print("The index for decyl oleate is", ingredient_idx['decyl oleate'])
ingredient_idx
###Output
_____no_output_____
###Markdown
4. Initializing a document-term matrix (DTM)The next step is making a document-term matrix (DTM). Here each cosmetic product will correspond to a document, and each chemical composition will correspond to a term. This means we can think of the matrix as a “cosmetic-ingredient” matrix whose size is (number of cosmetic products) × (number of ingredients). To create this matrix, we'll first make an empty matrix filled with zeros. The length of the matrix is the total number of cosmetic products in the data. The width of the matrix is the total number of ingredients. After initializing this empty matrix, we'll fill it in the following tasks.
###Code
moisturizers_dry.shape[0]
# Get the number of items and tokens
M = moisturizers_dry.shape[0]
N = len(ingredient_idx)
# Initialize a matrix of zeros
A = np.zeros((M, N))
###Output
_____no_output_____
###Markdown
5. Creating a counter functionBefore we can fill the matrix, let's create a function to count the tokens (i.e., an ingredients list) for each row. Our end goal is to fill the matrix with 1 or 0: if an ingredient is in a cosmetic, the value is 1. If not, it remains 0. The name of this function, oh_encoder, will become clear next.
###Code
# Define the oh_encoder function
def oh_encoder(tokens):
x = np.zeros(N)
for ingredient in tokens:
# Get the index for each ingredient
idx = ingredient_idx[ingredient]
# Put 1 at the corresponding indices
x[idx] = 1
return x
###Output
_____no_output_____
###Markdown
6. The Cosmetic-Ingredient matrix!Now we'll apply the oh_encoder() function to the tokens in corpus and set the values at each row of this matrix. So the result will tell us what ingredients each item is composed of. For example, if a cosmetic item contains water, niacin, decyl oleate and sh-polypeptide-1, its row will have ones at those four ingredients' indices and zeros everywhere else. This is what we call one-hot encoding. By encoding each ingredient in the items, the Cosmetic-Ingredient matrix will be filled with binary values.
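As a tiny illustration, consider a made-up five-ingredient vocabulary; the indices below are assumptions for this example only and are not the real `ingredient_idx` values:
###Code
# Toy illustration of the binary (one-hot style) encoding -- vocabulary and indices are made up
toy_idx = {'water': 0, 'niacin': 1, 'decyl oleate': 2, 'sh-polypeptide-1': 3, 'glycerin': 4}
toy_item = ['water', 'niacin', 'decyl oleate', 'sh-polypeptide-1']
toy_vector = np.zeros(len(toy_idx))
for ingredient in toy_item:
    toy_vector[toy_idx[ingredient]] = 1
print(toy_vector)  # [1. 1. 1. 1. 0.]
###Output
_____no_output_____
###Markdown
The loop below does the same thing for every product, filling the matrix A row by row with the real ingredient indices.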
###Code
# Make a document-term matrix
i = 0
for tokens in corpus:
A[i, :] = oh_encoder(tokens)
i += 1
oh_encoder(tokens)
###Output
_____no_output_____
###Markdown
7. Dimension reduction with t-SNEThe dimensions of the existing matrix are (190, 2233), which means there are 2233 features in our data. For visualization, we should downsize this to two dimensions. We'll use t-SNE for reducing the dimension of the data here. T-distributed Stochastic Neighbor Embedding (t-SNE) is a nonlinear dimensionality reduction technique that is well-suited for embedding high-dimensional data for visualization in a low-dimensional space of two or three dimensions. Specifically, this technique can reduce the dimension of the data while keeping the similarities between the instances. This enables us to make a plot on the coordinate plane, which is effectively a way of vectorizing the items. All of the cosmetic items in our data will be vectorized into two-dimensional coordinates, and the distances between the points will indicate the similarities between the items.
###Code
# Dimension reduction with t-SNE
model = TSNE(n_components=2, learning_rate=200, random_state=42)
tsne_features = model.fit_transform(A)
# Make X, Y columns
moisturizers_dry['X'] = tsne_features[:, 0]
moisturizers_dry['Y'] = tsne_features[:, 1]
moisturizers_dry
###Output
_____no_output_____
###Markdown
8. Let's map the items with BokehWe are now ready to start creating our plot. With the t-SNE values, we can plot all our items on the coordinate plane. And the coolest part here is that it will also show us the name, the brand, the price and the rank of each item. Let's make a scatter plot using Bokeh and add a hover tool to show that information. Note that we won't display the plot yet as we will make some more additions to it.
###Code
from bokeh.io import show, output_notebook, push_notebook
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, HoverTool
output_notebook()
# Make a source and a scatter plot
source = ColumnDataSource(moisturizers_dry)
plot = figure(x_axis_label = 'T-SNE 1',
y_axis_label = 'T-SNE 2',
width = 500, height = 400)
plot.circle(x = 'X',
y = 'Y',
source = source,
size = 10, color = '#FF7373', alpha = .8)
###Output
_____no_output_____
###Markdown
9. Adding a hover toolWhy don't we add a hover tool? Adding a hover tool allows us to check the information of each item whenever the cursor is directly over a glyph. We'll add tooltips with each product's name, brand, price, and rank (i.e., rating).
###Code
# Create a HoverTool object
hover = HoverTool(tooltips = [('Item', '@Name'),
('Brand', '@Brand'),
('Price', '$@Price'),
('Rank', '@Rank')])
plot.add_tools(hover)
###Output
_____no_output_____
###Markdown
10. Mapping the cosmetic itemsFinally, it's show time! Let's see what the map we've made looks like. Each point on the plot corresponds to a cosmetic item. So what do the axes mean here? The axes of a t-SNE plot aren't easily interpretable in terms of the original data. As mentioned above, t-SNE is a visualization technique for plotting high-dimensional data in a low-dimensional space, so it's not desirable to interpret a t-SNE plot quantitatively. Instead, what we can get from this map is the distance between the points (which items are close and which are far apart). The closer two items are, the more similar their compositions. This enables us to compare the items without having any chemistry background.
###Code
# Plot the map
show(plot)
###Output
_____no_output_____
###Markdown
11. Comparing two productsSince there are so many cosmetics and so many ingredients, the plot doesn't have many of the super obvious patterns that simpler t-SNE plots can have. Our plot requires some digging to find insights, but that's okay! If we enjoyed a specific product, there's an increased chance we'd enjoy another product that is similar in chemical composition. Say we enjoyed AmorePacific's Color Control Cushion Compact Broad Spectrum SPF 50+. We could find this product on the plot and see if any similar products exist. And it turns out they do! If we look at the points furthest left on the plot, we see LANEIGE's BB Cushion Hydra Radiance SPF 50 essentially overlaps with the AmorePacific product. By looking at the ingredients, we can visually confirm the compositions of the products are similar (though it is difficult to do, which is why we did this analysis in the first place!), plus LANEIGE's version is $22 cheaper and actually has higher ratings. It's not perfect, but it's useful. In real life, we can actually use our little ingredient-based recommendation engine to help us make educated cosmetic purchase choices.
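Before eyeballing ingredient lists, we can also rank neighbours numerically. The snippet below is an added sketch and not part of the original analysis: it measures Euclidean distance in the t-SNE plane to list the products closest to the AmorePacific cushion, reusing the `moisturizers_dry` dataframe and its X, Y columns from above.
###Code
# Rank products by distance in the t-SNE plane (illustrative addition)
target = moisturizers_dry[moisturizers_dry['Name'] == "Color Control Cushion Compact Broad Spectrum SPF 50+"]
tx, ty = target['X'].values[0], target['Y'].values[0]
dist = np.sqrt((moisturizers_dry['X'] - tx) ** 2 + (moisturizers_dry['Y'] - ty) ** 2)
closest = moisturizers_dry.assign(Distance=dist).sort_values('Distance')
print(closest[['Name', 'Brand', 'Price', 'Rank', 'Distance']].head())
###Output
_____no_output_____
###Markdown
The two closest items can then be inspected side by side: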
###Code
# Print the ingredients of two similar cosmetics
cosmetic_1 = moisturizers_dry[moisturizers_dry['Name'] == "Color Control Cushion Compact Broad Spectrum SPF 50+"]
cosmetic_2 = moisturizers_dry[moisturizers_dry['Name'] == "BB Cushion Hydra Radiance SPF 50"]
# Display each item's data and ingredients
display(cosmetic_1)
print(cosmetic_1.Ingredients.values)
display(cosmetic_2)
print(cosmetic_2.Ingredients.values)
###Output
_____no_output_____ |
Week-03/2_Principal Component Analysis.ipynb | ###Markdown
Week 3 - Classical ML Models - Part II Principal Component AnalysisPrincipal components analysis (or PCA) is a technique used for dimensionality reduction, enabling us to identify significant correlations in the dataset. Consequently, it allows us to reduce the number of dimensions within the dataset without losing important information. Why do we need PCA?As we have seen so far, ML models have a tendency to work better with larger datasets: a good amount of data allows training of a more accurate model. On the other hand, as we increase the number of dataset dimensions, we observe a few negative effects:- In high-dimensional datasets, there are a lot of inconsistencies reducing the model's accuracy- The usage of redundant features increases the computational time.This is where dimensionality reduction comes in - it helps to extract only the most significant correlations and reduce the number of dimensions. PCA computation steps 1. StandardizationIn short, data standardization involves taking values and scaling them into a similar range. It ensures a less biased model training process, as larger values no longer dominate the model. It is carried out by subtracting the mean from each value and dividing by the standard deviation. 2. Computing covariance matrixAs mentioned earlier, PCA helps to find the correlations within the dataset. These correlations between different dataset variables can be expressed in a covariance matrix. Mathematically speaking, the covariance matrix is a p x p matrix, where p represents the number of dimensions. For example, for a 2-dimensional dataset consisting of variables a and b, the covariance matrix contains the entries $Cov(a, a)$, $Cov(a, b)$, $Cov(b, a)$ and $Cov(b, b)$:- $Cov(a, a)$ shows the covariance between the variable and itself- $Cov(a, b)$ shows the covariance between two variables 3. Eigenvalues and eigenvectorsLet's say we have a matrix **A** built from the dataset variables. According to theory, if we multiply the matrix by a vector and the resulting vector differs from the original vector only by a scalar factor, such a vector is called an **eigenvector**, and the scalar factor is the corresponding **eigenvalue**. In mathematical terms, $Ax = \lambda x$ or $(A - \lambda I)x = 0$. For a non-trivial solution, the determinant of the characteristic matrix has to be equal to 0, in other words $\det(A - \lambda I) = 0$, where $I$ is the identity matrix. To better understand this, consider a 2-dimensional example: after multiplying $\lambda$ by the identity matrix, subtracting it from the 2-dimensional matrix and solving the resulting determinant equation, we get the following eigenvalues: $\frac{5}{2} + i\frac{\sqrt{15}}{2}$ and $\frac{5}{2} - i\frac{\sqrt{15}}{2}$. To get the eigenvectors, we simply substitute each eigenvalue back into $(A - \lambda I)x = 0$ and solve for $x$. 4. Computing principal componentsIn short, principal components are a new set of variables obtained from the initial dataset that are significant and independent of each other. Once we have computed the eigenvectors and eigenvalues, we have to order them in descending order of eigenvalue (the first component is formed from the eigenvector with the highest eigenvalue, and so on). 5. Reducing the dimensions of the datasetThe last step is to re-arrange the original data according to the components' significance. In order to do so, we simply multiply the transpose of the original data by the transpose of the feature vector.
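Python implementation Before assembling the full pipeline, the short NumPy check below makes the eigen-decomposition step (step 3) concrete. The 2x2 matrix used here is an assumption (the worked example's matrix appears only in an image that is not reproduced), chosen so that its eigenvalues come out to $\frac{5}{2} \pm i\frac{\sqrt{15}}{2}$ as quoted above:
###Code
import numpy as np
# Illustrative 2x2 matrix (an assumption; its trace is 5 and its determinant is 10,
# so its eigenvalues are (5 +/- i*sqrt(15)) / 2)
A = np.array([[3.0, -4.0],
              [1.0, 2.0]])
# Eigenvalues are the roots of det(A - lambda*I) = 0
eig_vals, eig_vecs = np.linalg.eig(A)
print("eigenvalues:", eig_vals)
print("eigenvectors (columns):\n", eig_vecs)
# Check the defining property A x = lambda x for the first eigenpair
x = eig_vecs[:, 0]
print(np.allclose(A @ x, eig_vals[0] * x))  # True
###Output
_____no_output_____
###Markdown
Putting all five steps together, such a pipeline would take the following code form: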
###Code
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
def pca(X):
#Scaling values
X = StandardScaler().fit_transform(X)
    # Computing the covariance matrix (np.cov(X.T) is equivalent to the manual
    # (X - mean).T.dot(X - mean) / (n - 1) computation, so it is only done once)
    cov_mat = np.cov(X.T)
    # Calculating eigenvectors and eigenvalues
    eig_vals, eig_vecs = np.linalg.eig(cov_mat)
    # Computing feature vectors, ordered by descending eigenvalue
    eig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:, i]) for i in range(len(eig_vals))]
    eig_pairs.sort(key=lambda pair: pair[0], reverse=True)
    return eig_pairs
###Output
_____no_output_____
###Markdown
However, as in the previous examples, the Scikit-learn library provides functions for performing the PCA computations, which reduces the coding time.
###Code
#PCA using scikit-learn
def pca(X):
pca = PCA(n_components=2)
    return pca.fit_transform(X)  # return the projected data rather than discarding it
###Output
_____no_output_____ |
site/_build/html/_sources/notebooks/09-big-data/01-intro-mapreduce.ipynb | ###Markdown
[](http://rpi.analyticsdojo.com)Introduction to Map Reduce rpi.analyticsdojo.com Adapted from work by Steve Phelps: https://github.com/phelps-sg/python-bigdata This work is licensed under the Creative Commons Attribution 4.0 International license agreement. Overview1. Recap of functional programming in Python2. Python's `map` and `reduce` functions3. Writing parallel code using `map`4. The Map-Reduce programming model History- The Map-Reduce programming model was popularised by Google (Dean and Ghemawat 2008).- The first popular open-source implementation was Apache Hadoop, first released in 2011. Functional programmingConsider the following code:
###Code
def double_everything_in(data):
result = []
for i in data:
result.append(2 * i)
return result
def quadruple_everything_in(data):
result = []
for i in data:
result.append(4 * i)
return result
double_everything_in([1, 2, 3, 4, 5])
quadruple_everything_in([1, 2, 3, 4, 5])
###Output
_____no_output_____
###Markdown
DRY - Fundamental Programming Concept- The above code violates the ["do not repeat yourself"](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself) principle of good software engineering practice.- How can we rewrite the code so that it avoids duplication?
###Code
def multiply_by_x_everything_in(x, data):
result = []
for i in data:
result.append(x * i)
return result
multiply_by_x_everything_in(2, [1, 2, 3, 4, 5])
multiply_by_x_everything_in(4, [1, 2, 3, 4, 5])
###Output
_____no_output_____
###Markdown
- Now consider the following code:
###Code
def squared(x):
return x*x
def double(x):
return x*2
def square_everything_in(data):
result = []
for i in data:
result.append(squared(i))
return result
def double_everything_in(data):
result = []
for i in data:
result.append(double(i))
return result
square_everything_in([1, 2, 3, 4, 5])
double_everything_in([1, 2, 3, 4, 5])
###Output
_____no_output_____
###Markdown
DRY - Fundamental Programming Concept- The above code violates the ["do not repeat yourself"](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself) principle of good software engineering practice.- How can we rewrite the code so that it avoids duplication? Passing Functions as Values- Functions can be passed to other functions as values.
###Code
def apply_f_to_everything_in(f, data):
result = []
for x in data:
result.append(f(x))
return result
apply_f_to_everything_in(squared, [1, 2, 3, 4, 5])
apply_f_to_everything_in(double, [1, 2, 3, 4, 5])
###Output
_____no_output_____
###Markdown
Lambda expressions- We can use anonymous functions to save having to define a function each time we want to use map.
###Code
apply_f_to_everything_in(lambda x: x*x, [1, 2, 3, 4, 5])
###Output
_____no_output_____
###Markdown
Python's `map` function- Python has a built-in function `map` which is much faster than our version.
###Code
list(map(lambda x: x*x, [1, 2, 3, 4, 5]))  # wrap in list() so the result is displayed in Python 3
###Output
_____no_output_____
###Markdown
Implementing reduce- The `reduce` function is an example of a [fold](https://en.wikipedia.org/wiki/Fold_%28higher-order_function%29).- There are different ways we can fold data.- The following implements a *left* fold.
###Code
def foldl(f, data, z):
if (len(data) == 0):
print (z)
return z
else:
head = data[0]
tail = data[1:]
print ("Folding", head, "with", tail, "using", z)
partial_result = f(z, data[0])
print ("Partial result is", partial_result)
return foldl(f, tail, partial_result)
def add(x, y):
return x + y
foldl(add, [3, 3, 3, 3, 3], 0)
foldl(lambda x, y: x + y, [1, 2, 3, 4, 5], 0)
foldl(lambda x, y: x - y, [1, 2, 3, 4, 5], 0)
(((((0 - 1) - 2) - 3) - 4) - 5)
###Output
_____no_output_____
###Markdown
- Subtraction is neither [commutative](https://en.wikipedia.org/wiki/Commutative_property) nor [associative](https://en.wikipedia.org/wiki/Associative_property), so the order in which we apply the fold matters:
###Code
(1 - (2 - (3 - (4 - (5 - 0)))))
def foldr(f, data, z):
if (len(data) == 0):
return z
else:
return f(data[0], foldr(f, data[1:], z))
foldl(lambda x, y: x - y, [1, 2, 3, 4, 5], 0)
foldr(lambda x, y: x - y, [1, 2, 3, 4, 5], 0)
###Output
_____no_output_____
###Markdown
Python's `reduce` function.- Python's built-in `reduce` function is a *left* fold.
###Code
from functools import reduce
reduce(lambda x, y: x + y, [1, 2, 3, 4, 5])
reduce(lambda x, y: x - y, [1, 2, 3, 4, 5], 0)
foldl(lambda x, y: x - y, [1, 2, 3, 4, 5], 0)
###Output
Folding 1 with [2, 3, 4, 5] using 0
Partial result is -1
Folding 2 with [3, 4, 5] using -1
Partial result is -3
Folding 3 with [4, 5] using -3
Partial result is -6
Folding 4 with [5] using -6
Partial result is -10
Folding 5 with [] using -10
Partial result is -15
-15
###Markdown
Functional programming and parallelism- Functional programming lends itself to [parallel programming](https://computing.llnl.gov/tutorials/parallel_comp/Models).- The `map` function can easily be parallelised through [data-level parallelism](https://en.wikipedia.org/wiki/Data_parallelism), - provided that the function we supply as an argument is *free from* [side-effects](https://en.wikipedia.org/wiki/Side_effect_%28computer_science%29) - (which is why we avoid working with mutable data).- We can see this by rewriting it so:
###Code
def perform_computation(f, result, data, i):
print ("Computing the ", i, "th result...")
# This could be scheduled on a different CPU
result[i] = f(data[i])
def my_map(f, data):
result = [None] * len(data)
for i in range(len(data)):
perform_computation(f, result, data, i)
# Wait for other CPUs to finish, and then..
return result
my_map(lambda x: x * x, [1, 2, 3, 4, 5])
###Output
Computing the 0 th result...
Computing the 1 th result...
Computing the 2 th result...
Computing the 3 th result...
Computing the 4 th result...
###Markdown
A multi-threaded `map` function
###Code
from threading import Thread
def schedule_computation_threaded(f, result, data, threads, i):
# Each function evaluation is scheduled on a different core.
def my_job():
print ("Processing data:", data[i], "... ")
result[i] = f(data[i])
print ("Finished job #", i)
print ("Result was", result[i])
threads[i] = Thread(target=my_job)
def my_map_multithreaded(f, data):
n = len(data)
result = [None] * n
threads = [None] * n
print ("Scheduling jobs.. ")
for i in range(n):
schedule_computation_threaded(f, result, data, threads, i)
print ("Starting jobs.. ")
for i in range(n):
threads[i].start()
print ("Waiting for jobs to finish.. ")
for i in range(n):
threads[i].join()
print ("All done.")
return result
my_map_multithreaded(lambda x: x*x, [1, 2, 3, 4, 5])
from numpy.random import uniform
from time import sleep
def a_function_which_takes_a_long_time(x):
sleep(uniform(2, 10)) # Simulate some long computation
return x*x
my_map_multithreaded(a_function_which_takes_a_long_time, [1, 2, 3, 4, 5])
###Output
Scheduling jobs..
Starting jobs..
Processing data: 1 ...
Processing data: 2 ...
Processing data: 3 ...
Processing data: 4 ...
Processing data:Waiting for jobs to finish..
5 ...
Finished job # 1
Result was 4
Finished job # 4
Result was 25
Finished job # 0
Result was 1
Finished job # 3
Result was 16
Finished job # 2
Result was 9
All done.
###Markdown
Map Reduce- Map Reduce is a _programming model_ for scalable parallel processing.- Scalable here means that it can work on big data with very large compute clusters.- There are many implementations: e.g. Apache Hadoop and Apache Spark.- We can use Map-Reduce with any programming language: - Hadoop is written in Java - Spark is written in Scala, but has a Python interface.- *Functional programming* languages such as Python or Scala fit very well with the Map Reduce model: - However, we don't *have* to use functional programming. - A MapReduce implementation will take care of the low-level functionality so that you don't have to worry about: - load balancing - network I/O - network and disk transfer optimisation - handling of machine failures - serialization of data - etc.- The model is designed to move the processing to where the data resides. Typical steps in a Map Reduce Computation1. ETL a big data set.2. _Map_ operation: extract something you care about from each row3. "Shuffle and Sort": task/node allocation4. _Reduce_ operation: aggregate, summarise, filter or transform5. Write the results. Callbacks for Map Reduce- The data set, and the state of each stage of the computation, is represented as a set of key-value pairs.- The programmer provides a map function: $\operatorname{map}(k, v) \rightarrow \; \left< k', v' \right>*$ - and a reduce function: $\operatorname{reduce}(k', \left< v' \right>*) \rightarrow \; \left< k', v'' \right>*$- The $*$ refers to a *collection* of values.- These collections are *not* ordered. Word Count Example- In this simple example, the input is a set of URLs, each record is a document.- Problem: compute how many times each word has occurred across the data set. Word Count: Map - The input to $\operatorname{map}$ is a mapping of:- Key: URL- Value: Contents of document, e.g. $\left< \text{document1}, \text{"to be or not to be"} \right>$ - In this example, our $\operatorname{map}$ function will process a given URL, and produce a mapping of: - Key: word- Value: 1- So our original data-set will be transformed to: $\left< \text{to}, 1 \right>$ $\left< \text{be}, 1 \right>$ $\left< \text{or}, 1 \right>$ $\left< \text{not}, 1 \right>$ $\left< \text{to}, 1 \right>$ $\left< \text{be}, 1 \right>$ Word Count: Reduce- The reduce operation groups values according to their key, and then performs a reduce on each key.- The collections are partitioned across different storage units.- Map-Reduce will fold the data in such a way that it minimises data-copying across the cluster.- Data in different partitions are reduced separately in parallel.- The final result is a reduce of the reduced data in each partition.- Therefore it is very important that our operator *is both commutative and associative*.- In our case the function is the `+` operator, which gives: $\left< \text{to}, 2 \right>$ $\left< \text{be}, 2 \right>$ $\left< \text{or}, 1 \right>$ $\left< \text{not}, 1 \right>$ Map and Reduce compared with Python- Notice that these functions are formulated differently from the standard Python functions of the same name.- The `reduce` function works with key-value *pairs*.- It would be more apt to call it something like `reduceByKey`.
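Written out as plain Python functions (a sketch only; the names `wordcount_map` and `wordcount_reduce` are illustrative and are not part of the framework below), the two word-count callbacks look like this:
###Code
# Sketch of the two word-count callbacks (names are illustrative)
def wordcount_map(url, document):
    # emit a <word, 1> pair for every word in the document
    return [(word, 1) for word in document.split()]
def wordcount_reduce(word, counts):
    # counts is the collection of values emitted for this key
    return (word, sum(counts))
print(wordcount_map('doc1', 'to be or not to be'))
print(wordcount_reduce('to', [1, 1]))
###Output
_____no_output_____
###Markdown
A Map-Reduce framework is responsible for calling these callbacks, grouping the intermediate pairs by key, and moving data between machines. MiniMapReduce- To illustrate how the Map-Reduce programming model works, we can implement our own Map-Reduce framework in Python.- This *illustrates* how a problem can be written in terms of `map` and `reduce` operations.- Note that these are illustrative functions; this is *not* how Hadoop or Apache Spark actually implement them.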
###Code
##########################################################
#
# MiniMapReduce
#
# A non-parallel, non-scalable Map-Reduce implementation
##########################################################

# reduce() is not a builtin in Python 3; it lives in functools
from functools import reduce

def groupByKey(data):
    # Collect an iterable of (key, value) pairs into a dict of key -> list of values
result = dict()
for key, value in data:
if key in result:
result[key].append(value)
else:
result[key] = [value]
return result
def reduceByKey(f, data):
    # Group the values by key, then fold each key's list of values with the binary function f
key_values = groupByKey(data)
return map(lambda key:
(key, reduce(f, key_values[key])),
key_values)
###Output
_____no_output_____
###Markdown
Word-count using MiniMapReduce
###Code
# materialise the pairs as a list so they can be displayed and reused (map() is lazy in Python 3)
data = list(map(lambda x: (x, 1), "to be or not to be".split()))
data
groupByKey(data)
list(reduceByKey(lambda x, y: x + y, data))  # force the lazy map so the reduced pairs are shown
###Output
_____no_output_____
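###Markdown
 A minimal added sketch (not part of the original notebook) of what the two word-count callbacks of the Map-Reduce model look like as ordinary Python functions. The names `doc_map` and `doc_reduce` are introduced here purely for illustration: `doc_map` plays the role of $\operatorname{map}(k, v)$, emitting a collection of key-value pairs from one (URL, document) record, and `doc_reduce` is the commutative, associative operator folded over each key's values.
###Code
def doc_map(url, contents):
    # map(k, v) -> <k', v'>* : emit a (word, 1) pair for every word in the document
    return [(word, 1) for word in contents.split()]

def doc_reduce(x, y):
    # the binary operator reduced over each key's collection of values
    return x + y

pairs = doc_map("doc1", "to be or not to be")
list(reduceByKey(doc_reduce, pairs))
###Output
_____no_output_____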
###Markdown
 Parallelising MiniMapReduce- We can easily turn our Map-Reduce implementation into a parallel, multi-threaded framework by using the `my_map_multithreaded` function we defined earlier.- This will allow us to perform map-reduce computations that exploit parallel processing using *multiple* cores on a *single* computer.
###Code
def reduceByKey_multithreaded(f, data):
key_values = groupByKey(data)
return my_map_multithreaded(
lambda key: (key, reduce(f, key_values[key])), key_values.keys())
reduceByKey_multithreaded(lambda x, y: x + y, data)
###Output
Scheduling jobs..
Starting jobs..
Waiting for jobs to finish..
All done.
###Markdown
 Parallelising the reduce step- Provided that our operator is both associative and commutative, we can also parallelise the reduce operation.- We partition the data into approximately equal subsets.- We then reduce each subset independently on a separate core.- The results can be combined in a final reduce step. Partitioning the data
###Code
def split_data(data, split_points):
partitions = []
n = 0
for i in split_points:
partitions.append(data[n:i])
n = i
partitions.append(data[n:])
return partitions
data = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
partitioned_data = split_data(data, [3])
partitioned_data
###Output
_____no_output_____
###Markdown
Reducing across partitions in parallel
###Code
from threading import Thread
def parallel_reduce(f, partitions):
n = len(partitions)
results = [None] * n
threads = [None] * n
def job(i):
results[i] = reduce(f, partitions[i])
for i in range(n):
        threads[i] = Thread(target = job, args = (i,))  # pass i explicitly; a bare lambda would capture i late
threads[i].start()
for i in range(n):
threads[i].join()
return reduce(f, results)
parallel_reduce(lambda x, y: x + y, partitioned_data)
###Output
_____no_output_____ |
Chapter03/Exercise03_01/Exercise03_01.ipynb | ###Markdown
Exercise 3.1 - Building a sequential model with Keras high-level API Import TensorFlow module and print its version
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
print("TensorFlow version: {}".format(tf.__version__))
###Output
TensorFlow version: 2.1.0
###Markdown
Build the model using Keras `sequential` and `add` methods and print network summary
###Code
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(units=64, activation='relu', input_dim=100))
model.add(tf.keras.layers.Dense(units=10, activation='softmax'))
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense (Dense) (None, 64) 6464
_________________________________________________________________
dense_1 (Dense) (None, 10) 650
=================================================================
Total params: 7,114
Trainable params: 7,114
Non-trainable params: 0
_________________________________________________________________
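###Markdown
 As a quick sanity check on the summary above (an added note, not part of the original exercise): each Dense layer has inputs × units weights plus units biases, so the first layer contributes 100 × 64 + 64 = 6,464 parameters and the second 64 × 10 + 10 = 650, which is where the 7,114 total comes from.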
|
qubiter/jupyter_notebooks/Teleportation-showcasing-IF_M-blocks.ipynb | ###Markdown
 Teleportation example showcasing IF_M blocks. This notebook uses Qubiter to illustrate quantum Teleportation of the pure state of one qubit (at 0) to another qubit (at 2) with the help of an ancilla qubit (at 1). The purpose of this notebook is not to teach about the "theory" behind quantum Teleportation. For that, the reader can go to numerous sources on the internet (Wikipedia, course notes, etc.). The purpose is to showcase some of the features of Qubiter, especially IF_M blocks (and also PRINT statements and calculations and plotting of various density matrices associated with any quantum circuit). For a full inventory of Qubiter English file commands, see Qubiter's Rosetta Stone pdf. IBM has posted at https://github.com/QISKit/qiskit-tutorial, a jupyter notebook similar to this one, analysing the same quantum circuit for quantum Teleportation from one qubit to another. Their notebook uses IBM's qasm language instead of Qubiter's language so you might profit from comparing their notebook to this one to decide which qc language you prefer. Qubiter includes subroutines that can translate its language to IBM qasm that can then be run on IBM qc hardware. First change your working directory to the qubiter directory in your computer, and add its path to the path environment variable.
###Code
import os
import sys
print(os.getcwd())
os.chdir('../../')
print(os.getcwd())
sys.path.insert(0,os.getcwd())
###Output
C:\Users\rrtuc\Desktop\backedup\python-projects\qubiter\qubiter\jupyter_notebooks
C:\Users\rrtuc\Desktop\backedup\python-projects\qubiter
###Markdown
Next do imports:
###Code
from qubiter.SEO_writer import *
from qubiter.SEO_simulator import *
from qubiter.StateVec import *
from qubiter.Plotter import *
import numpy as np
# np.set_printoptions(precision=5)
import pandas as pan
import seaborn as sea; sea.set()
###Output
loaded OneBitGates, WITHOUT autograd.numpy
###Markdown
 Number of qubits is 3. Note that we use "bit" for both qbits and cbits. Use a trivial circuit embedder that embeds 3 qubits into the same 3 qubits
###Code
num_bits = 3
emb = CktEmbedder(num_bits, num_bits)
###Output
_____no_output_____
###Markdown
 Open a writer, and tell it where to write to. We will use the zero bit last (ZL) convention, which is the default.
###Code
file_prefix = 'teleportation-with-ifs'
wr = SEO_writer(file_prefix, emb)
###Output
_____no_output_____
###Markdown
Write English and Picture files of the quantum circuit. Close those files once finished writing to them.
###Code
wr.write_Rn(0, list(np.pi/180*np.array([20, 68, 46])))
wr.write_PRINT("ALL")
wr.write_H(1)
wr.write_cnot(control_bit=1, target_bit=2)
#wr.write_one_bit_gate(0, OneBitGates.rot_ax, [-np.pi/8, 2])
wr.write_cnot(control_bit=0, target_bit=1)
wr.write_H(0)
wr.write_PRINT("ALL")
wr.write_MEAS(0, kind=2)
wr.write_MEAS(1, kind=2)
wr.write_PRINT("ALL")
wr.write_IF_M_beg(Controls.new_knob(num_bits, 0, True))
wr.write_Z(2)
wr.write_IF_M_end()
wr.write_PRINT("ALL")
wr.write_IF_M_beg(Controls.new_knob(num_bits, 1, True))
wr.write_X(2)
wr.write_IF_M_end()
wr.write_PRINT("ALL")
wr.close_files()
###Output
_____no_output_____
###Markdown
The English and Picture files just produced have been stored in the io_folder. Here are links to them:* ../io_folder/teleportation-with-ifs_3_eng.txt* ../io_folder/teleportation-with-ifs_3_ZLpic.txt Let's print the English file:
###Code
wr.print_eng_file()
###Output
ROTN 20.0 68.0 46.0 AT 0
PRINT ALL
HAD2 AT 1
SIGX AT 2 IF 1T
SIGX AT 1 IF 0T
HAD2 AT 0
PRINT ALL
MEAS 2 AT 0
MEAS 2 AT 1
PRINT ALL
IF_M( 0T ){
SIGZ AT 2
}IF_M
PRINT ALL
IF_M( 1T ){
SIGX AT 2
}IF_M
PRINT ALL
###Markdown
 Let's print the Picture file. Time points downward, and, since we are using the ZL convention, the 0th qubit is rightmost. Note that after an M measurement, vertical lines "|" directly under M are replaced by colons ":". Line n of the English file corresponds to line n of the Picture file.
###Code
wr.print_pic_file()
###Output
| | R
PRINT ALL
| H |
X---@ |
| X---@
| | H
PRINT ALL
| | M
| M :
PRINT ALL
IF_M( 0T ){
Z : :
}IF_M
PRINT ALL
IF_M( 1T ){
X : :
}IF_M
PRINT ALL
###Markdown
 Now we create a simulator object with the ground state (|0> for each qubit) as initial state. Creating the simulator automatically evolves the state from initial to final. The PRINT statements that we have inserted in the quantum circuit print to screen the state of the circuit at the line where they appear in the English and Picture files. Each PRINT is identified by its line number (line numbers start with 1, i.e. they are 1-based) in the Eng and Pic files.
###Code
init_st_vec = StateVec.get_ground_st_vec(num_bits)
sim = SEO_simulator(file_prefix, num_bits, init_st_vec)
###Output
*************************beginning PRINT output
PRINT line number=2
*********branch= pure
state vector:
ZL convention (Zero bit Last in state tuple)
(000)ZL (0.09587145233013179+0.5418806258342269j) , prob= 0.3028259480263821
(001)ZL (-0.8010409251462487+0.23560027210183782j) , prob= 0.697174051973618
total probability of state vector (=one if no measurements)= 1.0000000000000002
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (0.30282594802638213, 0.6971740519736178),
1: (1.0000000000000002, -2.220446049250313e-16),
2: (1.0000000000000002, -2.220446049250313e-16)}
****************************ending PRINT output
*************************beginning PRINT output
PRINT line number=7
*********branch= pure
state vector:
ZL convention (Zero bit Last in state tuple)
(000)ZL (0.04793572616506589+0.2709403129171134j) , prob= 0.07570648700659549
(100)ZL (-0.4005204625731243+0.11780013605091888j) , prob= 0.17429351299340448
(010)ZL (-0.4005204625731243+0.11780013605091888j) , prob= 0.17429351299340448
(110)ZL (0.04793572616506589+0.2709403129171134j) , prob= 0.07570648700659549
(001)ZL (0.04793572616506589+0.2709403129171134j) , prob= 0.07570648700659549
(101)ZL (0.4005204625731243-0.11780013605091888j) , prob= 0.17429351299340448
(011)ZL (0.4005204625731243-0.11780013605091888j) , prob= 0.17429351299340448
(111)ZL (0.04793572616506589+0.2709403129171134j) , prob= 0.07570648700659549
total probability of state vector (=one if no measurements)= 0.9999999999999998
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (0.4999999999999999, 0.5000000000000001),
1: (0.4999999999999999, 0.5000000000000001),
2: (0.4999999999999999, 0.5000000000000001)}
****************************ending PRINT output
*************************beginning PRINT output
PRINT line number=10
*********branch= 0T1T
state vector:
ZL convention (Zero bit Last in state tuple)
(011)ZL (0.4005204625731243-0.11780013605091888j) , prob= 0.17429351299340448
(111)ZL (0.04793572616506589+0.2709403129171134j) , prob= 0.07570648700659549
total probability of state vector (=one if no measurements)= 0.24999999999999994
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (0.0, 1.0), 1: (0.0, 1.0), 2: (0.6971740519736179, 0.3028259480263821)}
*********branch= 0T1F
state vector:
ZL convention (Zero bit Last in state tuple)
(001)ZL (0.04793572616506589+0.2709403129171134j) , prob= 0.07570648700659549
(101)ZL (0.4005204625731243-0.11780013605091888j) , prob= 0.17429351299340448
total probability of state vector (=one if no measurements)= 0.24999999999999994
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (0.0, 1.0), 1: (1.0, 0.0), 2: (0.3028259480263821, 0.6971740519736179)}
*********branch= 0F1T
state vector:
ZL convention (Zero bit Last in state tuple)
(010)ZL (-0.4005204625731243+0.11780013605091888j) , prob= 0.17429351299340448
(110)ZL (0.04793572616506589+0.2709403129171134j) , prob= 0.07570648700659549
total probability of state vector (=one if no measurements)= 0.24999999999999994
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (1.0, 0.0), 1: (0.0, 1.0), 2: (0.6971740519736179, 0.3028259480263821)}
*********branch= 0F1F
state vector:
ZL convention (Zero bit Last in state tuple)
(000)ZL (0.04793572616506589+0.2709403129171134j) , prob= 0.07570648700659549
(100)ZL (-0.4005204625731243+0.11780013605091888j) , prob= 0.17429351299340448
total probability of state vector (=one if no measurements)= 0.24999999999999994
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (1.0, 0.0), 1: (1.0, 0.0), 2: (0.3028259480263821, 0.6971740519736179)}
****************************ending PRINT output
*************************beginning PRINT output
PRINT line number=14
*********branch= 0T1T
state vector:
ZL convention (Zero bit Last in state tuple)
(011)ZL (0.4005204625731243-0.11780013605091888j) , prob= 0.17429351299340448
(111)ZL (-0.04793572616506589-0.2709403129171134j) , prob= 0.07570648700659549
total probability of state vector (=one if no measurements)= 0.24999999999999994
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (0.0, 1.0), 1: (0.0, 1.0), 2: (0.6971740519736179, 0.3028259480263821)}
*********branch= 0T1F
state vector:
ZL convention (Zero bit Last in state tuple)
(001)ZL (0.04793572616506589+0.2709403129171134j) , prob= 0.07570648700659549
(101)ZL (-0.4005204625731243+0.11780013605091888j) , prob= 0.17429351299340448
total probability of state vector (=one if no measurements)= 0.24999999999999994
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (0.0, 1.0), 1: (1.0, 0.0), 2: (0.3028259480263821, 0.6971740519736179)}
*********branch= 0F1T
state vector:
ZL convention (Zero bit Last in state tuple)
(010)ZL (-0.4005204625731243+0.11780013605091888j) , prob= 0.17429351299340448
(110)ZL (0.04793572616506589+0.2709403129171134j) , prob= 0.07570648700659549
total probability of state vector (=one if no measurements)= 0.24999999999999994
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (1.0, 0.0), 1: (0.0, 1.0), 2: (0.6971740519736179, 0.3028259480263821)}
*********branch= 0F1F
state vector:
ZL convention (Zero bit Last in state tuple)
(000)ZL (0.04793572616506589+0.2709403129171134j) , prob= 0.07570648700659549
(100)ZL (-0.4005204625731243+0.11780013605091888j) , prob= 0.17429351299340448
total probability of state vector (=one if no measurements)= 0.24999999999999994
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (1.0, 0.0), 1: (1.0, 0.0), 2: (0.3028259480263821, 0.6971740519736179)}
****************************ending PRINT output
*************************beginning PRINT output
PRINT line number=18
*********branch= 0T1T
state vector:
ZL convention (Zero bit Last in state tuple)
(011)ZL (-0.04793572616506589-0.2709403129171134j) , prob= 0.07570648700659549
(111)ZL (0.4005204625731243-0.11780013605091888j) , prob= 0.17429351299340448
total probability of state vector (=one if no measurements)= 0.24999999999999994
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (0.0, 1.0), 1: (0.0, 1.0), 2: (0.3028259480263821, 0.6971740519736179)}
*********branch= 0T1F
state vector:
ZL convention (Zero bit Last in state tuple)
(001)ZL (0.04793572616506589+0.2709403129171134j) , prob= 0.07570648700659549
(101)ZL (-0.4005204625731243+0.11780013605091888j) , prob= 0.17429351299340448
total probability of state vector (=one if no measurements)= 0.24999999999999994
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (0.0, 1.0), 1: (1.0, 0.0), 2: (0.3028259480263821, 0.6971740519736179)}
*********branch= 0F1T
state vector:
ZL convention (Zero bit Last in state tuple)
(010)ZL (0.04793572616506589+0.2709403129171134j) , prob= 0.07570648700659549
(110)ZL (-0.4005204625731243+0.11780013605091888j) , prob= 0.17429351299340448
total probability of state vector (=one if no measurements)= 0.24999999999999994
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (1.0, 0.0), 1: (0.0, 1.0), 2: (0.3028259480263821, 0.6971740519736179)}
*********branch= 0F1F
state vector:
ZL convention (Zero bit Last in state tuple)
(000)ZL (0.04793572616506589+0.2709403129171134j) , prob= 0.07570648700659549
(100)ZL (-0.4005204625731243+0.11780013605091888j) , prob= 0.17429351299340448
total probability of state vector (=one if no measurements)= 0.24999999999999994
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (1.0, 0.0), 1: (1.0, 0.0), 2: (0.3028259480263821, 0.6971740519736179)}
****************************ending PRINT output
###Markdown
 Initially, all qubits are in state |0>. Line 1 of the circuit rotates qubit 0 by an arbitrary one qubit rotation. Line 2 has a PRINT statement. If you look at the code for SEO_simulator.use_PRINT(), you will see that a PRINT statement in format "ALL" prints stuff to screen and also stores at sim.cached_sts[line_num] a copy of the current state. Next we convert the cached state at line_num=2 to a density matrix called den_mat1. Then we convert den_mat1 to a Pandas dataframe and display that dataframe as an HTML table. States are labeled in binary ZL (Zero bit last) convention.
###Code
# density matrix cached at line number 2 of eng & pic files
den_mat1 = StateVec.get_den_mat(num_bits, sim.cached_sts[2])
den_mat1_df = Plotter.get_den_mat_df(num_bits, den_mat1)
# pan.set_option('precision', 5)
# print("\nden_mat1=\n", den_mat1_df)
den_mat1_df.style.format("{:.4}")
###Output
_____no_output_____
###Markdown
 Next we convert the final state (which is the current state of the simulator) to a density matrix called den_mat2. Then we convert that to a Pandas dataframe and display that dataframe as an HTML table.
###Code
den_mat2 = StateVec.get_den_mat(num_bits, sim.cur_st_vec_dict)
den_mat2_df = Plotter.get_den_mat_df(num_bits, den_mat2)
# print("\nden_mat2=\n", den_mat2_df)
den_mat2_df.style.format("{:.3}")
###Output
_____no_output_____
###Markdown
 Teleportation example showcasing IF_M blocks. This notebook uses Qubiter to illustrate quantum Teleportation of the pure state of one qubit (at 0) to another qubit (at 2) with the help of an ancilla qubit (at 1). The purpose of this notebook is not to teach about the "theory" behind quantum Teleportation. For that, the reader can go to numerous sources on the internet (Wikipedia, course notes, etc.). The purpose is to showcase some of the features of Qubiter, especially IF_M blocks (and also PRINT statements and calculations and plotting of various density matrices associated with any quantum circuit). For a full inventory of Qubiter English file commands, see Qubiter's Rosetta Stone pdf. IBM has posted at https://github.com/QISKit/qiskit-tutorial, a jupyter notebook similar to this one, analysing the same quantum circuit for quantum Teleportation from one qubit to another. Their notebook uses IBM's qasm language instead of Qubiter's language so you might profit from comparing their notebook to this one to decide which qc language you prefer. Qubiter includes subroutines that can translate its language to IBM qasm that can then be run on IBM qc hardware. First change your working directory to the qubiter directory in your computer, and add its path to the path environment variable.
###Code
import os
import sys
print(os.getcwd())
os.chdir('../../')
print(os.getcwd())
sys.path.insert(0,os.getcwd())
###Output
/home/rrtucci/PycharmProjects/qubiter/qubiter/jupyter_notebooks
/home/rrtucci/PycharmProjects/qubiter
###Markdown
Next do imports:
###Code
from qubiter.SEO_writer import *
from qubiter.SEO_simulator import *
from qubiter.StateVec import *
from qubiter.Plotter import *
import numpy as np
# np.set_printoptions(precision=5)
import pandas as pan
import seaborn as sea; sea.set()
###Output
loaded OneQubitGate, WITHOUT autograd.numpy
###Markdown
 Number of qubits is 3. Note that we use "bit" for both qbits and cbits. Use a trivial circuit embedder that embeds 3 qubits into the same 3 qubits
###Code
num_qbits = 3
emb = CktEmbedder(num_qbits, num_qbits)
###Output
_____no_output_____
###Markdown
 Open a writer, and tell it where to write to. We will use the zero bit last (ZL) convention, which is the default.
###Code
file_prefix = 'teleportation-with-ifs'
wr = SEO_writer(file_prefix, emb)
###Output
_____no_output_____
###Markdown
Write English and Picture files of the quantum circuit. Close those files once finished writing to them.
###Code
wr.write_Rn(0, list(np.pi/180*np.array([20, 68, 46])))
wr.write_PRINT("ALL")
wr.write_H(1)
wr.write_cnot(control_bit=1, target_bit=2)
#wr.write_one_qbit_gate(0, OneQubitGate.rot_ax, [-np.pi/8, 2])
wr.write_cnot(control_bit=0, target_bit=1)
wr.write_H(0)
wr.write_PRINT("ALL")
wr.write_MEAS(0, kind=2)
wr.write_MEAS(1, kind=2)
wr.write_PRINT("ALL")
wr.write_IF_M_beg(Controls.new_single_trol(num_qbits, 0, True))
wr.write_Z(2)
wr.write_IF_M_end()
wr.write_PRINT("ALL")
wr.write_IF_M_beg(Controls.new_single_trol(num_qbits, 1, True))
wr.write_X(2)
wr.write_IF_M_end()
wr.write_PRINT("ALL")
wr.close_files()
###Output
_____no_output_____
###Markdown
The English and Picture files just produced have been stored in the io_folder. Here are links to them:* ../io_folder/teleportation-with-ifs_3_eng.txt* ../io_folder/teleportation-with-ifs_3_ZLpic.txt Let's print the English file:
###Code
wr.print_eng_file(jup=True)
###Output
_____no_output_____
###Markdown
 Let's print the Picture file. Time points downward, and, since we are using the ZL convention, the 0th qubit is rightmost. Note that after an M measurement, vertical lines "|" directly under M are replaced by colons ":". Line n of the English file corresponds to line n of the Picture file.
###Code
wr.print_pic_file(jup=True)
###Output
_____no_output_____
###Markdown
 Now we create a simulator object with the ground state (|0> for each qubit) as initial state. Creating the simulator automatically evolves the state from initial to final. The PRINT statements that we have inserted in the quantum circuit print to screen the state of the circuit at the line where they appear in the English and Picture files. Each PRINT is identified by its line number (line numbers start with 1, i.e. they are 1-based) in the Eng and Pic files.
###Code
init_st_vec = StateVec.get_ground_st_vec(num_qbits)
sim = SEO_simulator(file_prefix, num_qbits, init_st_vec)
###Output
*************************beginning PRINT output
PRINT line number=2
*********branch= pure
state vector:
ZL convention (Zero bit Last in state tuple)
(000)ZL ( 0.095871 + 0.541881j) prob=0.302826
(001)ZL (-0.801041 + 0.235600j) prob=0.697174
total probability of state vector (=one if no measurements)= 1.000000
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (0.302826, 0.697174), 1: (1.0, -0.0), 2: (1.0, -0.0)}
****************************ending PRINT output
*************************beginning PRINT output
PRINT line number=7
*********branch= pure
state vector:
ZL convention (Zero bit Last in state tuple)
(000)ZL ( 0.047936 + 0.270940j) prob=0.075706
(100)ZL (-0.400520 + 0.117800j) prob=0.174294
(010)ZL (-0.400520 + 0.117800j) prob=0.174294
(110)ZL ( 0.047936 + 0.270940j) prob=0.075706
(001)ZL ( 0.047936 + 0.270940j) prob=0.075706
(101)ZL ( 0.400520 - 0.117800j) prob=0.174294
(011)ZL ( 0.400520 - 0.117800j) prob=0.174294
(111)ZL ( 0.047936 + 0.270940j) prob=0.075706
total probability of state vector (=one if no measurements)= 1.000000
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (0.5, 0.5), 1: (0.5, 0.5), 2: (0.5, 0.5)}
****************************ending PRINT output
*************************beginning PRINT output
PRINT line number=10
*********branch= 0T1T
state vector:
ZL convention (Zero bit Last in state tuple)
(011)ZL ( 0.400520 - 0.117800j) prob=0.174294
(111)ZL ( 0.047936 + 0.270940j) prob=0.075706
total probability of state vector (=one if no measurements)= 0.250000
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (0.0, 1.0), 1: (0.0, 1.0), 2: (0.697174, 0.302826)}
*********branch= 0T1F
state vector:
ZL convention (Zero bit Last in state tuple)
(001)ZL ( 0.047936 + 0.270940j) prob=0.075706
(101)ZL ( 0.400520 - 0.117800j) prob=0.174294
total probability of state vector (=one if no measurements)= 0.250000
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (0.0, 1.0), 1: (1.0, 0.0), 2: (0.302826, 0.697174)}
*********branch= 0F1T
state vector:
ZL convention (Zero bit Last in state tuple)
(010)ZL (-0.400520 + 0.117800j) prob=0.174294
(110)ZL ( 0.047936 + 0.270940j) prob=0.075706
total probability of state vector (=one if no measurements)= 0.250000
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (1.0, 0.0), 1: (0.0, 1.0), 2: (0.697174, 0.302826)}
*********branch= 0F1F
state vector:
ZL convention (Zero bit Last in state tuple)
(000)ZL ( 0.047936 + 0.270940j) prob=0.075706
(100)ZL (-0.400520 + 0.117800j) prob=0.174294
total probability of state vector (=one if no measurements)= 0.250000
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (1.0, 0.0), 1: (1.0, 0.0), 2: (0.302826, 0.697174)}
****************************ending PRINT output
*************************beginning PRINT output
PRINT line number=14
*********branch= 0T1T
state vector:
ZL convention (Zero bit Last in state tuple)
(011)ZL ( 0.400520 - 0.117800j) prob=0.174294
(111)ZL (-0.047936 - 0.270940j) prob=0.075706
total probability of state vector (=one if no measurements)= 0.250000
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (0.0, 1.0), 1: (0.0, 1.0), 2: (0.697174, 0.302826)}
*********branch= 0T1F
state vector:
ZL convention (Zero bit Last in state tuple)
(001)ZL ( 0.047936 + 0.270940j) prob=0.075706
(101)ZL (-0.400520 + 0.117800j) prob=0.174294
total probability of state vector (=one if no measurements)= 0.250000
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (0.0, 1.0), 1: (1.0, 0.0), 2: (0.302826, 0.697174)}
*********branch= 0F1T
state vector:
ZL convention (Zero bit Last in state tuple)
(010)ZL (-0.400520 + 0.117800j) prob=0.174294
(110)ZL ( 0.047936 + 0.270940j) prob=0.075706
total probability of state vector (=one if no measurements)= 0.250000
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (1.0, 0.0), 1: (0.0, 1.0), 2: (0.697174, 0.302826)}
*********branch= 0F1F
state vector:
ZL convention (Zero bit Last in state tuple)
(000)ZL ( 0.047936 + 0.270940j) prob=0.075706
(100)ZL (-0.400520 + 0.117800j) prob=0.174294
total probability of state vector (=one if no measurements)= 0.250000
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (1.0, 0.0), 1: (1.0, 0.0), 2: (0.302826, 0.697174)}
****************************ending PRINT output
*************************beginning PRINT output
PRINT line number=18
*********branch= 0T1T
state vector:
ZL convention (Zero bit Last in state tuple)
(011)ZL (-0.047936 - 0.270940j) prob=0.075706
(111)ZL ( 0.400520 - 0.117800j) prob=0.174294
total probability of state vector (=one if no measurements)= 0.250000
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (0.0, 1.0), 1: (0.0, 1.0), 2: (0.302826, 0.697174)}
*********branch= 0T1F
state vector:
ZL convention (Zero bit Last in state tuple)
(001)ZL ( 0.047936 + 0.270940j) prob=0.075706
(101)ZL (-0.400520 + 0.117800j) prob=0.174294
total probability of state vector (=one if no measurements)= 0.250000
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (0.0, 1.0), 1: (1.0, 0.0), 2: (0.302826, 0.697174)}
*********branch= 0F1T
state vector:
ZL convention (Zero bit Last in state tuple)
(010)ZL ( 0.047936 + 0.270940j) prob=0.075706
(110)ZL (-0.400520 + 0.117800j) prob=0.174294
total probability of state vector (=one if no measurements)= 0.250000
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (1.0, 0.0), 1: (0.0, 1.0), 2: (0.302826, 0.697174)}
*********branch= 0F1F
state vector:
ZL convention (Zero bit Last in state tuple)
(000)ZL ( 0.047936 + 0.270940j) prob=0.075706
(100)ZL (-0.400520 + 0.117800j) prob=0.174294
total probability of state vector (=one if no measurements)= 0.250000
dictionary with key=qubit, value=(Prob(0), Prob(1))
{0: (1.0, 0.0), 1: (1.0, 0.0), 2: (0.302826, 0.697174)}
****************************ending PRINT output
###Markdown
 Initially, all qubits are in state |0>. Line 1 of the circuit rotates qubit 0 by an arbitrary one qubit rotation. Line 2 has a PRINT statement. If you look at the code for SEO_simulator.use_PRINT(), you will see that a PRINT statement in format "ALL" prints stuff to screen and also stores at sim.cached_sts[line_num] a copy of the current state. Next we convert the cached state at line_num=2 to a density matrix called den_mat1. Then we convert den_mat1 to a Pandas dataframe and display that dataframe as an HTML table. States are labeled in binary ZL (Zero bit last) convention.
###Code
# density matrix cached at line number 2 of eng & pic files
den_mat1 = StateVec.get_den_mat(num_qbits, sim.cached_sts[2])
den_mat1_df = Plotter.get_den_mat_df(num_qbits, den_mat1)
# pan.set_option('precision', 5)
# print("\nden_mat1=\n", den_mat1_df)
den_mat1_df.style.format("{:.4}")
###Output
_____no_output_____
###Markdown
 Next we convert the final state (which is the current state of the simulator) to a density matrix called den_mat2. Then we convert that to a Pandas dataframe and display that dataframe as an HTML table.
###Code
den_mat2 = StateVec.get_den_mat(num_qbits, sim.cur_st_vec_dict)
den_mat2_df = Plotter.get_den_mat_df(num_qbits, den_mat2)
# print("\nden_mat2=\n", den_mat2_df)
den_mat2_df.style.format("{:.3}")
###Output
_____no_output_____
###Markdown
Next we plot the entries of the square matrix den_mat2 as phasor arrows in a square grid (what is called a quiver plot).
###Code
Plotter.plot_phasors(['den_mat2'], den_mat_df_list=[den_mat2_df])
###Output
_____no_output_____
###Markdown
 Next we create a dataframe df by replacing each entry of den_mat2's dataframe by its magnitude. Then we display df as an HTML table.
###Code
df = den_mat2_df.apply(lambda x : np.sqrt((x*np.conj(x)).to_numpy().real))
df
###Output
_____no_output_____
###Markdown
df is a square dataframe of non-negative numbers so it begs to be plotted as a so called heatmap, using the wonderful package called seaborn.
###Code
plt.close('all')
ax = sea.heatmap(df, cmap="YlGnBu")
ax.set_title('den_mat2 magnitude')
plt.yticks(rotation=0)
plt.xticks(rotation=90)
plt.show()
###Output
_____no_output_____
###Markdown
 The impurity of a density matrix $\rho$ is defined as $abs({\rm tr}(\rho^2) - 1 )$. It equals zero iff $\rho$ is a pure state. Note that den_mat2 is not a pure state.
###Code
print("impurity of den_mat2=", StateVec.get_impurity(den_mat2))
###Output
impurity of den_mat2= 0.75
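###Markdown
 As a cross-check (an added illustration, not part of the original notebook), the impurity formula can also be evaluated directly with numpy, assuming den_mat2 is returned as a plain 8x8 numpy array:
###Code
# abs(tr(rho^2) - 1) computed straight from the matrix; should agree with StateVec.get_impurity
print(abs(np.trace(den_mat2 @ den_mat2) - 1))
###Output
_____no_output_____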
###Markdown
 Next we calculate the trace over bits 0 and 1 of den_mat2. We call this partial density matrix tr01_den_mat2. We convert it to a dataframe, and display that dataframe as an HTML table. Note that the state at qubit 0 in den_mat1 has been successfully duplicated at qubit 2 with density matrix tr01_den_mat2.
###Code
tr01_den_mat2 = StateVec.get_partial_tr(num_qbits, den_mat2, {0, 1})
tr01_den_mat2_df = Plotter.get_den_mat_df(1, tr01_den_mat2)
# print("\ntr01_den_mat2=\n", tr01_den_mat2_df)
tr01_den_mat2_df.style.format("{:.4}")
###Output
_____no_output_____
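###Markdown
 For the curious, here is a rough numpy sketch of the partial trace itself (added for illustration; Qubiter's own get_partial_tr may be implemented differently). It assumes den_mat2 is an 8x8 numpy array whose row/column index encodes the basis state with qubit 2 as the most significant bit and qubit 0 as the least significant, per the ZL convention:
###Code
# give each qubit its own pair of axes: (q2, q1, q0) for rows and (q2', q1', q0') for columns,
# then sum over the matched axes of qubits 1 and 0, leaving a 2x2 matrix on qubit 2
rho6 = np.asarray(den_mat2).reshape(2, 2, 2, 2, 2, 2)
print(np.einsum('aijbij->ab', rho6))  # should match tr01_den_mat2 under the stated convention
###Output
_____no_output_____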
###Markdown
As expected, tr01_den_mat2 is a pure state.
###Code
print("impurity of tr01_den_mat2=", StateVec.get_impurity(tr01_den_mat2))
###Output
impurity of tr01_den_mat2= 2.220446049250313e-16
###Markdown
Next we plot the entries of the square matrix den_mat2 as phasor arrows in a square grid (what is called a quiver plot).
###Code
Plotter.plot_phasors(['den_mat2'], den_mat_df_list=[den_mat2_df])
###Output
_____no_output_____
###Markdown
 Next we create a dataframe df by replacing each entry of den_mat2's dataframe by its magnitude. Then we display df as an HTML table.
###Code
df = den_mat2_df.apply(lambda x : np.sqrt(np.real(x*np.conj(x))))
df
###Output
_____no_output_____
###Markdown
df is a square dataframe of non-negative numbers so it begs to be plotted as a so called heatmap, using the wonderful package called seaborn.
###Code
plt.close('all')
ax = sea.heatmap(df, cmap="YlGnBu")
ax.set_title('den_mat2 magnitude')
plt.yticks(rotation=0)
plt.xticks(rotation=90)
plt.show()
###Output
_____no_output_____
###Markdown
 The impurity of a density matrix $\rho$ is defined as $abs({\rm tr}(\rho^2) - 1 )$. It equals zero iff $\rho$ is a pure state. Note that den_mat2 is not a pure state.
###Code
print("impurity of den_mat2=", StateVec.get_impurity(den_mat2))
###Output
impurity of den_mat2= 0.75
###Markdown
 Next we calculate the trace over bits 0 and 1 of den_mat2. We call this partial density matrix tr01_den_mat2. We convert it to a dataframe, and display that dataframe as an HTML table. Note that the state at qubit 0 in den_mat1 has been successfully duplicated at qubit 2 with density matrix tr01_den_mat2.
###Code
tr01_den_mat2 = StateVec.get_partial_tr(num_qbits, den_mat2, {0, 1})  # num_qbits, matching the variable defined above
tr01_den_mat2_df = Plotter.get_den_mat_df(1, tr01_den_mat2)
# print("\ntr01_den_mat2=\n", tr01_den_mat2_df)
tr01_den_mat2_df.style.format("{:.4}")
###Output
_____no_output_____
###Markdown
As expected, tr01_den_mat2 is a pure state.
###Code
print("impurity of tr01_den_mat2=", StateVec.get_impurity(tr01_den_mat2))
###Output
impurity of tr01_den_mat2= 2.220446049250313e-16
|
breast-cancer-analysis-and-prediction.ipynb | ###Markdown
1.3. Missing values
###Code
null_feat = pd.DataFrame(len(data['id']) - data.isnull().sum(), columns = ['Count'])
trace = go.Bar(x = null_feat.index, y = null_feat['Count'] ,opacity = 0.8, marker=dict(color = 'lightgrey',
line=dict(color='#000000',width=1.5)))
layout = dict(title = "Missing Values")
fig = dict(data = [trace], layout=layout)
py.iplot(fig)
###Output
_____no_output_____
###Markdown
 All features are complete; only 'Unnamed: 32' is completely null, probably an error in the dataset, so we drop it below. 1.4. Reassign target and drop useless features
###Code
# Drop useless variables
data = data.drop(['Unnamed: 32','id'],axis = 1)
# Reassign target
data.diagnosis.replace(to_replace = dict(M = 1, B = 0), inplace = True)
###Output
_____no_output_____
###Markdown
2. Exploratory Data Analysis (EDA) 2.1. Head and describe
###Code
# Head
data.head()
# describe
data.describe()
###Output
_____no_output_____
###Markdown
2.2. Target distribution (number and %)
###Code
# 2 datasets
M = data[(data['diagnosis'] != 0)]
B = data[(data['diagnosis'] == 0)]
#------------COUNT-----------------------
trace = go.Bar(x = (len(M), len(B)), y = ['malignant', 'benign'], orientation = 'h', opacity = 0.8, marker=dict(
color=[ 'gold', 'lightskyblue'],
line=dict(color='#000000',width=1.5)))
layout = dict(title = 'Count of diagnosis variable')
fig = dict(data = [trace], layout=layout)
py.iplot(fig)
#------------PERCENTAGE-------------------
trace = go.Pie(labels = ['benign','malignant'], values = data['diagnosis'].value_counts(),
textfont=dict(size=15), opacity = 0.8,
marker=dict(colors=['lightskyblue', 'gold'],
line=dict(color='#000000', width=1.5)))
layout = dict(title = 'Distribution of diagnosis variable')
fig = dict(data = [trace], layout=layout)
py.iplot(fig)
###Output
_____no_output_____
###Markdown
2.3. Features distribution (hue = diagnosis)
###Code
def plot_distribution(data_select, size_bin) :
tmp1 = M[data_select]
tmp2 = B[data_select]
hist_data = [tmp1, tmp2]
group_labels = ['malignant', 'benign']
colors = ['#FFD700', '#7EC0EE']
fig = ff.create_distplot(hist_data, group_labels, colors = colors, show_hist = True, bin_size = size_bin, curve_type='kde')
fig['layout'].update(title = data_select)
py.iplot(fig, filename = 'Density plot')
###Output
_____no_output_____
###Markdown
 Below, you can remove the '#' to show the distributions of all features (except the first line)
###Code
#plot distribution 'mean'
plot_distribution('radius_mean', .5)
plot_distribution('texture_mean', .5)
plot_distribution('perimeter_mean', 5)
plot_distribution('area_mean', 10)
#plot_distribution('smoothness_mean', .5)
#plot_distribution('compactness_mean', .5)
#plot_distribution('concavity_mean', .5)
#plot_distribution('concave points_mean', .5)
#plot_distribution('symmetry_mean', .5)
#plot_distribution('fractal_dimension_mean', .5)
#plot distribution 'se'
plot_distribution('radius_se', .1)
plot_distribution('texture_se', .1)
plot_distribution('perimeter_se', .5)
plot_distribution('area_se', 5)
#plot_distribution('smoothness_se', .5)
#plot_distribution('compactness_se', .5)
#plot_distribution('concavity_se', .5)
#plot_distribution('concave points_se', .5)
#plot_distribution('symmetry_se', .5)
#plot_distribution('fractal_dimension_se', .5)
#plot distribution 'worst'
plot_distribution('radius_worst', .5)
plot_distribution('texture_worst', .5)
plot_distribution('perimeter_worst', 5)
plot_distribution('area_worst', 10)
#plot_distribution('smoothness_worst', .5)
#plot_distribution('compactness_worst', .5)
#plot_distribution('concavity_worst', .5)
#plot_distribution('concave points_worst', .5)
#plot_distribution('symmetry_worst', .5)
#plot_distribution('fractal_dimension_worst', .5)
###Output
_____no_output_____
###Markdown
2.4. Correlation matrix
###Code
#correlation
correlation = data.corr()
#tick labels
matrix_cols = correlation.columns.tolist()
#convert to array
corr_array = np.array(correlation)
#Plotting
trace = go.Heatmap(z = corr_array,
x = matrix_cols,
y = matrix_cols,
xgap = 2,
ygap = 2,
colorscale='Viridis',
colorbar = dict() ,
)
layout = go.Layout(dict(title = 'Correlation Matrix for variables',
autosize = False,
height = 720,
width = 800,
margin = dict(r = 0 ,l = 210,
t = 25,b = 210,
),
yaxis = dict(tickfont = dict(size = 9)),
xaxis = dict(tickfont = dict(size = 9)),
)
)
fig = go.Figure(data = [trace],layout = layout)
py.iplot(fig)
###Output
_____no_output_____
###Markdown
 Let's check the correlation between a few pairs of features 2.5. Positive correlated features
###Code
def plot_feat1_feat2(feat1, feat2) :
trace0 = go.Scatter(
x = M[feat1],
y = M[feat2],
name = 'malignant',
mode = 'markers',
marker = dict(color = '#FFD700',
line = dict(
width = 1)))
trace1 = go.Scatter(
x = B[feat1],
y = B[feat2],
name = 'benign',
mode = 'markers',
marker = dict(color = '#7EC0EE',
line = dict(
width = 1)))
layout = dict(title = feat1 +" "+"vs"+" "+ feat2,
yaxis = dict(title = feat2,zeroline = False),
xaxis = dict(title = feat1, zeroline = False)
)
plots = [trace0, trace1]
fig = dict(data = plots, layout=layout)
py.iplot(fig)
plot_feat1_feat2('perimeter_mean','radius_worst')
plot_feat1_feat2('area_mean','radius_worst')
plot_feat1_feat2('texture_mean','texture_worst')
plot_feat1_feat2('area_worst','radius_worst')
#seaborn version :
palette ={0 : 'lightblue', 1 : 'gold'}
edgecolor = 'grey'
# Plot +
fig = plt.figure(figsize=(12,12))
plt.subplot(221)
ax1 = sns.scatterplot(x = data['perimeter_mean'], y = data['radius_worst'], hue = "diagnosis",
data = data, palette = palette, edgecolor=edgecolor)
plt.title('perimeter mean vs radius worst')
plt.subplot(222)
ax2 = sns.scatterplot(x = data['area_mean'], y = data['radius_worst'], hue = "diagnosis",
data = data, palette =palette, edgecolor=edgecolor)
plt.title('area mean vs radius worst')
plt.subplot(223)
ax3 = sns.scatterplot(x = data['texture_mean'], y = data['texture_worst'], hue = "diagnosis",
data = data, palette =palette, edgecolor=edgecolor)
plt.title('texture mean vs texture worst')
plt.subplot(224)
ax4 = sns.scatterplot(x = data['area_worst'], y = data['radius_worst'], hue = "diagnosis",
data = data, palette =palette, edgecolor=edgecolor)
plt.title('area mean vs radius worst')
fig.suptitle('Positive correlated features', fontsize = 20)
plt.savefig('1')
plt.show()
###Output
_____no_output_____
###Markdown
2.6. Uncorrelated features
###Code
plot_feat1_feat2('smoothness_mean','texture_mean')
plot_feat1_feat2('radius_mean','fractal_dimension_worst')
plot_feat1_feat2('texture_mean','symmetry_mean')
plot_feat1_feat2('texture_mean','symmetry_se')
# seaborn version :
fig = plt.figure(figsize=(12,12))
plt.subplot(221)
ax1 = sns.scatterplot(x = data['smoothness_mean'], y = data['texture_mean'], hue = "diagnosis",
data = data, palette =palette, edgecolor=edgecolor)
plt.title('smoothness mean vs texture mean')
plt.subplot(222)
ax2 = sns.scatterplot(x = data['radius_mean'], y = data['fractal_dimension_worst'], hue = "diagnosis",
data = data, palette =palette, edgecolor=edgecolor)
plt.title('radius mean vs fractal dimension_worst')
plt.subplot(223)
ax3 = sns.scatterplot(x = data['texture_mean'], y = data['symmetry_mean'], hue = "diagnosis",
data = data, palette =palette, edgecolor=edgecolor)
plt.title('texture mean vs symmetry mean')
plt.subplot(224)
ax4 = sns.scatterplot(x = data['texture_mean'], y = data['symmetry_se'], hue = "diagnosis",
data = data, palette =palette, edgecolor=edgecolor)
plt.title('texture mean vs symmetry se')
fig.suptitle('Uncorrelated features', fontsize = 20)
plt.savefig('2')
plt.show()
###Output
_____no_output_____
###Markdown
2.7. Negative correlated features
###Code
plot_feat1_feat2('area_mean','fractal_dimension_mean')
plot_feat1_feat2('radius_mean','fractal_dimension_mean')
plot_feat1_feat2('area_mean','smoothness_se')
plot_feat1_feat2('smoothness_se','perimeter_mean')
# seaborn version
fig = plt.figure(figsize=(12,12))
plt.subplot(221)
ax1 = sns.scatterplot(x = data['area_mean'], y = data['fractal_dimension_mean'], hue = "diagnosis",
data = data, palette =palette, edgecolor=edgecolor)
plt.title('smoothness mean vs fractal dimension mean')
plt.subplot(222)
ax2 = sns.scatterplot(x = data['radius_mean'], y = data['fractal_dimension_mean'], hue = "diagnosis",
data = data, palette =palette, edgecolor=edgecolor)
plt.title('radius mean vs fractal dimension mean')
plt.subplot(223)
ax2 = sns.scatterplot(x = data['area_mean'], y = data['smoothness_se'], hue = "diagnosis",
data = data, palette =palette, edgecolor=edgecolor)
plt.title('area mean vs fractal smoothness se')
plt.subplot(224)
ax2 = sns.scatterplot(x = data['smoothness_se'], y = data['perimeter_mean'], hue = "diagnosis",
data = data, palette =palette, edgecolor=edgecolor)
plt.title('smoothness se vs perimeter mean')
fig.suptitle('Negative correlated features', fontsize = 20)
plt.savefig('3')
plt.show()
###Output
_____no_output_____
###Markdown
3. Principal Component Analysis 3.1. Compute PCA
###Code
target_pca = data['diagnosis']
data_pca = data.drop('diagnosis', axis=1)
target_pca = pd.DataFrame(target_pca)
#To make a PCA, normalize data is essential
X_pca = data_pca.values
X_std = StandardScaler().fit_transform(X_pca)
pca = PCA(svd_solver='full')
pca_std = pca.fit(X_std, target_pca).transform(X_std)
pca_std = pd.DataFrame(pca_std)
pca_std = pca_std.merge(target_pca, left_index = True, right_index = True, how = 'left')
pca_std['diagnosis'] = pca_std['diagnosis'].replace({1:'malignant',0:'benign'})
###Output
_____no_output_____
###Markdown
3.2. PCA pie plot with 6 components (88.8%)
###Code
#explained_variance
var_pca = pd.DataFrame(pca.explained_variance_ratio_)
var_pca = var_pca.T
#----------SUM AND DROP COMP [7:30]
col_list = list(v for v in chain(pca_std.columns[6:30]))
var_pca['OTHERS_COMP'] = var_pca[col_list].sum(axis=1)
var_pca.drop(var_pca[col_list],axis=1,inplace=True)
var_pca = var_pca.T
labels = ['COMP1','COMP2','COMP3','COMP4','COMP5','COMP6', 'COMP7 - 30']
colors = ['gold', 'lightgreen', 'lightcoral', 'lightskyblue', 'lightgrey', 'orange', 'white']
trace = go.Pie(labels = labels, values = var_pca[0].values, opacity = 0.8,
textfont=dict(size=15),
marker=dict(colors=colors,
line=dict(color='#000000', width=1.5)))
layout = dict(title = 'PCA : components and explained variance (6 comp = 88.8%)')
fig = dict(data = [trace], layout=layout)
py.iplot(fig)
###Output
_____no_output_____
###Markdown
3.3. PCA scatter plot with 2 components (63.3%)
###Code
pca = PCA(n_components = 2)
pca_std = pca.fit(X_std, target_pca).transform(X_std)
pca_std = pd.DataFrame(pca_std,columns = ['COMP1','COMP2'])
pca_std = pca_std.merge(target_pca,left_index = True,right_index = True,how = 'left')
pca_std['diagnosis'] = pca_std['diagnosis'].replace({1:'malignant',0:'benign'})
def pca_scatter(target,color) :
tracer = go.Scatter(x = pca_std[pca_std['diagnosis'] == target]['COMP1'] ,
y = pca_std[pca_std['diagnosis'] == target]['COMP2'],
name = target, mode = 'markers',
marker = dict(color = color,line = dict(width = 1))
)
return tracer
layout = go.Layout(dict(title = 'PCA Scatter plot (2 comp = 63.3%)',
xaxis = dict(gridcolor = 'rgb(255, 255, 255)',
title = 'COMP1 = 44.3%',
zerolinewidth=1,ticklen=5,gridwidth=2),
yaxis = dict(gridcolor = 'rgb(255, 255, 255)',
title = 'COMP2 = 19.0%',
zerolinewidth=1,ticklen=5,gridwidth=2),
height = 800
))
trace1 = pca_scatter('malignant','#FFD700')
trace2 = pca_scatter('benign','#7EC0EE')
plots = [trace2,trace1]
fig = go.Figure(data = plots,layout = layout)
py.iplot(fig)
###Output
_____no_output_____
###Markdown
3.4. PCA scatter plot with 3 components (72.7%)
###Code
pca = PCA(n_components = 3)
pca_std = pca.fit(X_std, target_pca).transform(X_std)
pca_std = pd.DataFrame(pca_std,columns = ['COMP1','COMP2','COMP3'])
pca_std = pca_std.merge(target_pca, left_index = True, right_index = True,how = 'left')
pca_std['diagnosis'] = pca_std['diagnosis'].replace({1:'malignant',0:'benign'})
M_pca = pca_std[(pca_std['diagnosis'] == 'malignant')]
B_pca = pca_std[(pca_std['diagnosis'] == 'benign')]
trace1 = go.Scatter3d(x = M_pca['COMP1'],
y = M_pca['COMP3'],
z = M_pca['COMP2'],
mode = "markers",
name = "malignant",
marker = dict(size = 4,color = '#FFD700',line = dict(width = 1))
)
trace2 = go.Scatter3d(x = B_pca['COMP1'],
y = B_pca['COMP3'],
z = B_pca['COMP2'],
name = 'benign',
mode = 'markers',
marker = dict(size = 4,color= '#7EC0EE',line = dict(width = 1))
)
layout = go.Layout(dict(title = 'PCA Scatter plot (3 comp = 72.7%)',
scene = dict(camera = dict(up=dict(x= 0 , y=0, z=0),
center=dict(x=0, y=0, z=0),
eye=dict(x=1.25, y=1.25, z=1.25)),
xaxis = dict(title = 'COMP1',
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)'),
yaxis = dict(title = 'COMP3',
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)'
),
zaxis = dict(title = 'COMP2',
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)'
)),height = 700))
plots = [trace1,trace2]
fig = go.Figure(data = plots,layout = layout)
py.iplot(fig)
###Output
_____no_output_____
###Markdown
 4. Define functions This part is essential to measure the performance of a model : roc, cross validation, learning curve ... 4.1. Confusion matrix and show metrics The confusion matrix, also known as the error matrix, allows visualization of the performance of an algorithm :* true positive (TP) : Malignant tumour correctly identified as malignant* true negative (TN) : Benign tumour correctly identified as benign* false positive (FP) : Benign tumour incorrectly identified as malignant * false negative (FN) : Malignant tumour incorrectly identified as benign Metrics : * Accuracy : (TP + TN) / (TP + TN + FP + FN)* Precision : TP / (TP + FP)* Recall : TP / (TP + FN)
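 For instance, with purely illustrative numbers (not from this dataset): TP = 90, TN = 50, FP = 5, FN = 5 gives accuracy = 140/150 ≈ 0.93, precision = 90/95 ≈ 0.95 and recall = 90/95 ≈ 0.95.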
###Code
# Confusion matrix
def plot_confusion_matrix(cm, classes,
normalize = False,
                          title = 'Confusion matrix',
cmap = plt.cm.Blues) :
plt.imshow(cm, interpolation = 'nearest', cmap = cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation = 0)
plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])) :
plt.text(j, i, cm[i, j],
horizontalalignment = 'center',
color = 'white' if cm[i, j] > thresh else 'black')
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Show metrics
def show_metrics():
tp = cm[1,1]
fn = cm[1,0]
fp = cm[0,1]
tn = cm[0,0]
print('Accuracy = {:.3f}'.format((tp+tn)/(tp+tn+fp+fn)))
print('Precision = {:.3f}'.format(tp/(tp+fp)))
print('Recall = {:.3f}'.format(tp/(tp+fn)))
print('F1_score = {:.3f}'.format(2*(((tp/(tp+fp))*(tp/(tp+fn)))/
((tp/(tp+fp))+(tp/(tp+fn))))))
###Output
_____no_output_____
###Markdown
 4.2. Precision – Recall curve The precision-recall curve shows the tradeoff between precision and recall for different thresholds
###Code
# Precision-recall curve
def plot_precision_recall():
plt.step(recall, precision, color = 'b', alpha = 0.2,
where = 'post')
plt.fill_between(recall, precision, step ='post', alpha = 0.2,
color = 'b')
plt.plot(recall, precision, linewidth=2)
plt.xlim([0.0,1])
plt.ylim([0.0,1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision Recall Curve')
plt.show();
###Output
_____no_output_____
###Markdown
4.3. ROC curve The ROC curve is created by plotting the true positive rate (TPR) against the false positive rate (FPR) at various threshold settings.
###Code
# ROC curve
def plot_roc():
plt.plot(fpr, tpr, label = 'ROC curve', linewidth = 2)
plt.plot([0,1],[0,1], 'k--', linewidth = 2)
# plt.xlim([0.0,0.001])
# plt.ylim([0.0,1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.show();
###Output
_____no_output_____
###Markdown
4.4. Learning curve The Learning curve determines cross-validated training and test scores.
###Code
# Learning curve
def plot_learning_curve(estimator, title, X, y, ylim = None, cv = None,
n_jobs = 1, train_sizes = np.linspace(.1, 1.0, 5)):
"""
Plots a learning curve. http://scikit-learn.org/stable/modules/learning_curve.html
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel('Training examples')
plt.ylabel('Score')
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv = cv, n_jobs = n_jobs, train_sizes = train_sizes)
train_scores_mean = np.mean(train_scores, axis = 1)
train_scores_std = np.std(train_scores, axis = 1)
test_scores_mean = np.mean(test_scores, axis = 1)
test_scores_std = np.std(test_scores, axis = 1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha = 0.1, color = "g")
plt.plot(train_sizes, train_scores_mean, 'o-', color = "r",
label = "Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color = "g",
label = "Cross-validation score")
plt.legend(loc = "best")
return plt
###Output
_____no_output_____
###Markdown
4.5. Cross validation metrics Cross-validation is a technique to evaluate predictive models by partitioning the original sample into a training set to train the model, and a test set to evaluate it.
###Code
# Cross val metric
def cross_val_metrics(model) :
scores = ['accuracy', 'precision', 'recall']
for sc in scores:
scores = cross_val_score(model, X, y, cv = 5, scoring = sc)
print('[%s] : %0.5f (+/- %0.5f)'%(sc, scores.mean(), scores.std()))
###Output
_____no_output_____
###Markdown
5. Prepare dataset 5.1. Define (X, y) * y = diagnosis (target)* X = features (radius_mean, area_se, ....)
###Code
# Def X and Y
y = np.array(data.diagnosis.tolist())
data = data.drop('diagnosis', 1)
X = np.array(data.values)  # .values instead of DataFrame.as_matrix(), which was removed in newer pandas
###Output
_____no_output_____
###Markdown
 5.2. Standard scaler (X) A variable that ranges between 0 and 100 will outweigh a variable that ranges between 0 and 1. Using these variables without standardization in effect gives the variable with the larger range a bigger weight in the analysis.
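 Concretely (added note), StandardScaler rescales each feature to $z = (x - \mu)/\sigma$, so every column ends up with zero mean and unit variance.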
###Code
# Normalization
scaler = StandardScaler()
X = scaler.fit_transform(X)
###Output
_____no_output_____
###Markdown
5.3. Train test split
###Code
# Train_test split
random_state = 42
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.12, random_state = random_state)
###Output
_____no_output_____
###Markdown
6. Predictive model : Logistic Regression 6.1. Logistic Regression and GridSearch CV to optimise hyperparameters (accuracy)
###Code
# Find best hyperparameters (accuracy)
log_clf = LogisticRegression(random_state = random_state)
param_grid = {
'penalty' : ['l2','l1'],
'C' : [0.001, 0.01, 0.1, 1, 10, 100, 1000]
}
CV_log_clf = GridSearchCV(estimator = log_clf, param_grid = param_grid , scoring = 'accuracy', verbose = 1, n_jobs = -1)
CV_log_clf.fit(X_train, y_train)
best_parameters = CV_log_clf.best_params_
print('The best parameters for using this model is', best_parameters)
#Log with best hyperparameters
CV_log_clf = LogisticRegression(C = best_parameters['C'],
penalty = best_parameters['penalty'],
random_state = random_state)
CV_log_clf.fit(X_train, y_train)
y_pred = CV_log_clf.predict(X_test)
y_score = CV_log_clf.decision_function(X_test)
# Confusion maxtrix & metrics
cm = confusion_matrix(y_test, y_pred)
class_names = [0,1]
plt.figure()
plot_confusion_matrix(cm,
classes=class_names,
title='Logistic Confusion matrix')
plt.savefig('6')
plt.show()
show_metrics()
# ROC curve
fpr, tpr, t = roc_curve(y_test, y_score)
plot_roc()
###Output
_____no_output_____
###Markdown
6.2. RFE : Recursive features elimination (30 features => 15 features) Recursive feature elimination (RFE) is a feature selection method that fits a model and removes the weakest feature (or features) until the specified number of features is reached. Features are ranked by the model’s coef_ or feature_importances_
###Code
#Logistic regression with RFE
log_clf = LogisticRegression(C = best_parameters['C'],
penalty = best_parameters['penalty'],
random_state = random_state)
selector = RFE(log_clf)
selector = selector.fit(X_train, y_train)
y_pred = selector.predict(X_test)
y_score = selector.predict_proba(X_test)[:,1]
# Confusion maxtrix & metrics
cm = confusion_matrix(y_test, y_pred)
class_names = [0,1]
plt.figure()
plot_confusion_matrix(cm,
classes=class_names,
title='Logistic Confusion matrix')
plt.show()
show_metrics()
# ROC curve
fpr, tpr, t = roc_curve(y_test, y_score)
plot_roc()
# support and ranking RFE
print(selector.support_)
print(selector.ranking_)
###Output
_____no_output_____
###Markdown
6.3. Compare learning curves and cross validation scores
###Code
#Learning curve Log with best hyperpara
plot_learning_curve(CV_log_clf, 'Learning Curve For Logistic Model', X, y, (0.85,1.05), 10)
plt.savefig('7')
plt.show()
#Learning curve Log with RFE
plot_learning_curve(selector, 'Learning Curve For Logistic Model with RFE', X, y, (0.85,1.05), 10)
plt.show()
# Cross val Log
cross_log = cross_val_metrics(CV_log_clf)
# Cross val Log with RFE
cross_selector = cross_val_metrics(selector)
###Output
_____no_output_____
###Markdown
 With only 15 features and 5 folds, we got an accuracy of 97.4% with a standard deviation of 0.78. In what follows we don't use the selector; the log_clf is the most performant, but the code is here for you :) 6.4. Select threshold for a recall = 100% (all malignant tumors detected) For this study, the most important thing is to detect all malignant tumours.
###Code
# Threshold
thresholds_adj = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
plt.figure(figsize = (15,15))
j = 1
for i in thresholds_adj:
y_score = CV_log_clf.predict_proba(X_test)[:,1] > i
plt.subplot(3,3,j)
j += 1
cm = confusion_matrix(y_test, y_score)
tp = cm[1,1]
fn = cm[1,0]
fp = cm[0,1]
tn = cm[0,0]
print('Recall w/ threshold = %s :'%i, (tp/(tp+fn)))
class_names = [0,1]
plot_confusion_matrix(cm,
classes=class_names,
title='Threshold = %s'%i)
###Output
_____no_output_____
###Markdown
6.5. Predicting with recall = 100%
###Code
# Recall = 1.
y_score = CV_log_clf.predict_proba(X_test)[:,1] > 0.1
cm = confusion_matrix(y_test, y_score)
class_names = [0,1]
show_metrics()
###Output
_____no_output_____
###Markdown
With 2 models we can increase the precision while keeping a recall = 100% 7. Predictive model 2 : Ensemble Classifier to maximise precision and detect all malignant tumors 7.1. Logistic Regression and GridSearch CV to optimise hyperparameters (recall)
###Code
# Find the best parameters (recall)
log2_clf = LogisticRegression(random_state = random_state)
param_grid = {
'penalty' : ['l2','l1'],
'C' : [0.001, 0.01, 0.1, 1, 10, 100, 1000],
}
CV_log2_clf = GridSearchCV(estimator = log2_clf, param_grid = param_grid , scoring = 'recall', verbose = 1, n_jobs = -1)
CV_log2_clf.fit(X_train, y_train)
best_parameters = CV_log2_clf.best_params_
print('The best parameters for using this model is', best_parameters)
# Log w best hyperparameters (recall)
CV_log2_clf = LogisticRegression(C = best_parameters['C'],
penalty = best_parameters['penalty'],
random_state = random_state)
CV_log2_clf.fit(X_train, y_train)
y_pred = CV_log2_clf.predict(X_test)
y_score = CV_log2_clf.decision_function(X_test)
# Confusion maxtrix & metrics
cm = confusion_matrix(y_test, y_pred)
class_names = [0,1]
###Output
_____no_output_____
###Markdown
 * Grid search CV accuracy, penalty = l2 * Grid search CV recall, penalty = l1
###Code
# Cross val log2
cross_val_metrics(CV_log2_clf)
###Output
_____no_output_____
###Markdown
7.2. Voting classifier : log + log2
###Code
#Voting Classifier
voting_clf = VotingClassifier (
estimators = [('log1', CV_log_clf), ('log_2', CV_log2_clf)],
voting='soft', weights = [1, 1])
voting_clf.fit(X_train,y_train)
y_pred = voting_clf.predict(X_test)
y_score = voting_clf.predict_proba(X_test)[:,1]
# Confusion maxtrix
cm = confusion_matrix(y_test, y_pred)
class_names = [0,1]
show_metrics()
# Cross val score voting
cross_voting = cross_val_metrics(voting_clf)
#Learning curve Voting
plot_learning_curve(voting_clf, 'Learning Curve For Voting clf', X, y, (0.85,1.05), 10)
plt.savefig('9')
plt.show()
###Output
_____no_output_____
###Markdown
7.3. Voting classifier : select threshold (recall = 100%)
###Code
# Threshold
thresholds_adj = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
plt.figure(figsize = (15,15))
j = 1
for i in thresholds_adj:
y_score = voting_clf.predict_proba(X_test)[:,1] > i
plt.subplot(3,3,j)
j += 1
cm = confusion_matrix(y_test, y_score)
tp = cm[1,1]
fn = cm[1,0]
fp = cm[0,1]
tn = cm[0,0]
print('Recall w/ threshold = %s :'%i, (tp/(tp+fn)))
class_names = [0,1]
plot_confusion_matrix(cm,
classes=class_names,
title='Threshold = %s'%i)
###Output
_____no_output_____
###Markdown
7.4. Voting classifier : predicting with recall = 100% (precision = 92%)
###Code
# Ensemble, recall = 1.
y_score = voting_clf.predict_proba(X_test)[:,1] > 0.23
cm = confusion_matrix(y_test, y_score)
class_names = [0,1]
plt.figure()
plot_confusion_matrix(cm,
classes = class_names,
title = 'Ensemble Clf CM : recall = 100%')
plt.savefig('8')
plt.show()
show_metrics()
# ROC curve
fpr, tpr, t = roc_curve(y_test, y_score)
plot_roc()
# Precision-recall curve
precision, recall, thresholds = precision_recall_curve(y_test, y_score)
plot_precision_recall()
###Output
_____no_output_____
###Markdown
7.5. Models performance plot (accuracy, precision, recall)
###Code
models_metrics = {'log_clf': [0.982, 0.990, 0.962],
'selector': [0.974, 0.981, 0.948],
'log2_clf' : [0.974,0.976,0.953],
'voting_clf' : [0.979,0.985,0.958]
}
df = pd.DataFrame(data = models_metrics)
df.rename(index={0:'Accuracy',1:'Precision', 2: 'Recall'},
inplace=True)
ax = df.plot(kind='bar', figsize = (15,10), ylim = (0.94, 1),
color = ['gold', 'lightgreen', 'lightcoral', 'lightskyblue'],
rot = 0, title ='Models performance (cross val mean)',
edgecolor = 'grey', alpha = 0.5)
for p in ax.patches:
ax.annotate(str(p.get_height()), (p.get_x() * 1.01, p.get_height() * 1.0005))
plt.show()
###Output
_____no_output_____
###Markdown
----------**Breast Cancer Analysis and Prediction**=====================================***Recall = 1. Precision = .92 Accuracy = .971******Md. Mashfiq Rizvee***---------- - 1. Load libraries and read the data - 1.1. Load libraries - 1.2. Read the data - 1.3. Missing values - 1.4. Reassign target and drop useless features - 2. Exploratory Data Analysis (EDA) - 2.1. Head and describe - 2.2. Target distribution (number and %) - 2.3. Features distribution (hue = diagnosis) - 2.4. Correlation matrix - 2.5. Positive correlated features - 2.6. Uncorrelated features - 2.7. Negative correlated features - 3. Principal Component Analysis - 3.1. Compute PCA - 3.2. PCA pie plot with 6 components (88.8%) - 3.3. PCA scatter plot with 2 components (63.3%) - 3.4. PCA scatter plot with 3 components (72.7%)- 4. Define functions - 4.1. Confusion matrix and show metrics - 4.2. Precision – Recall curve - 4.3. ROC curve - 4.4. Learning curve - 4.5. Cross validation metrics - 5. Prepare dataset - 5.1. Define (X, y) - 5.2. Standard scaler (X) - 5.3. Train test split - 6. Predictive model : Logistic Regression - 6.1. Logistic Regression and GridSearch CV to optimise hyperparameters (accuracy) - 6.2. RFE : Recursive features elimination (30 features => 15 features) - 6.3. Compare learning curves and cross validation scores - 6.4. Select threshold for a recall = 100% (all malignant tumors detected) - 6.5. Predicting with recall = 100% - 7. Predictive model 2 : Ensemble Classifier to maximise precision and detect all malignant tumors - 7.1. Logistic Regression and GridSearch CV to optimise hyperparameters (recall) - 7.2. Voting classifier : log + log2 - 7.3. Voting classifier : select threshold (recall = 100%) - 7.4. Voting classifier : predicting with recall = 100% (precision = 92%) - 7.5. Models performance plot (accuracy, precision, recall) **Information : [here](https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29)**Features are computed from a digitized image of a fine needle aspirate (FNA) of a breast mass. They describe characteristics of the cell nuclei present in the image. * ID number * Diagnosis (M = malignant, B = benign) Ten real-valued features are computed for each cell nucleus: * radius (mean of distances from center to points on the perimeter) * texture (standard deviation of gray-scale values) * perimeter * area * smoothness (local variation in radius lengths) * compactness (perimeter^2 / area - 1.0) * concavity (severity of concave portions of the contour) * concave points (number of concave portions of the contour) * symmetry * fractal dimension ("coastline approximation" - 1) 1. Load libraries and read the data 1.1. Load libraries
###Code
# Python libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
import itertools
from itertools import chain
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import GridSearchCV, cross_val_score, learning_curve, train_test_split
from sklearn.metrics import precision_score, recall_score, confusion_matrix, roc_curve, precision_recall_curve, accuracy_score
import warnings
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
import plotly.figure_factory as ff
warnings.filterwarnings('ignore') #ignore warning messages
###Output
_____no_output_____
###Markdown
1.2. Read the data
###Code
# Read data
data = pd.read_csv('../input/data.csv')
###Output
_____no_output_____ |
wp/notebooks/active learning/binary/mc_dropout_metrics.ipynb | ###Markdown
MC Dropout Metrics evaluation
###Code
%load_ext autoreload
import os, sys, importlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
BASE_PATH = os.path.join(os.getcwd(), "..", "..")
MODULES_PATH = os.path.join(BASE_PATH, "modules")
METRICS_PATH = os.path.join(BASE_PATH, "metrics")
os.listdir(METRICS_PATH)
sys.path.append(MODULES_PATH)
from active_learning import Metrics
%autoreload 2
metrics_reader = Metrics(os.path.join(METRICS_PATH, "mc_dropout"))
###Output
_____no_output_____
###Markdown
Read Metrics
###Code
data = metrics_reader.read("4_mc_dropout_max_entropy.csv")
def get_frame(filename, astype = {"iteration": "int32", "binary_accuracy": "float32"}):
metrics_reader = Metrics(os.path.join(METRICS_PATH, "mc_dropout"))
data = metrics_reader.read(filename)
df = pd.DataFrame(data)
df = df.astype(astype)
return df
df_2 = get_frame("3_mc_dropout_max_entropy.csv")
df_2.head()
df_1 = get_frame("4_mc_dropout_max_entropy.csv")
df_1.head()
df_3 = get_frame("2_mc_dropout_max_entropy.csv")
df_3.head()
df = pd.DataFrame(data).astype({"iteration": "int32", "binary_accuracy": "float"})  # build a frame from the raw metrics read above
max_tick = int(df_1["iteration"].max())
min_tick = int(df_1["iteration"].min())
plt.figure(figsize=(15, 5))
sns.lineplot(x="iteration", y="binary_accuracy", color="red", marker="X", data=df_1)
sns.lineplot(x="iteration", y="binary_accuracy", marker="o", data=df_2)
sns.lineplot(x="iteration", y="binary_accuracy", color="green", marker="v", data=df_3)
plt.xticks(range(min_tick, max_tick + 2, 3), rotation=90)
plt.title("Comparison Max Entropy")
plt.ylabel("Accuracy")
plt.xlabel("Iteration")
plt.show()
###Output
_____no_output_____ |
4 - Applications of GANs/PassGAN.ipynb | ###Markdown
PassGAN Based on the paper [PassGAN: A Deep Learning Approach for Password Guessing](https://arxiv.org/abs/1709.00440) Outline- Introduction- Prerequisites- Datasets- Build Models - Generator Models - Discriminator Models- Models Settings- Training- Result Introduction Abstract: State-of-the-art password guessing tools, such as HashCat and John the Ripper, enable users to check billions of passwords per second against password hashes. In addition to performing straightforward dictionary attacks, these tools can expand password dictionaries using password generation rules, such as concatenation of words (e.g., "password123456") and leet speak (e.g., "password" becomes "p4s5w0rd"). Although these rules work well in practice, expanding them to model further passwords is a laborious task that requires specialized expertise. To address this issue, in this paper we introduce PassGAN, a novel approach that replaces human-generated password rules with theory-grounded machine learning algorithms. Instead of relying on manual password analysis, PassGAN uses a Generative Adversarial Network (GAN) to autonomously learn the distribution of real passwords from actual password leaks, and to generate high-quality password guesses. Our experiments show that this approach is very promising. When we evaluated PassGAN on two large password datasets, we were able to surpass rule-based and state-of-the-art machine learning password guessing tools. However, in contrast with the other tools, PassGAN achieved this result without any a-priori knowledge on passwords or common password structures. Additionally, when we combined the output of PassGAN with the output of HashCat, we were able to match 51%-73% more passwords than with HashCat alone. This is remarkable, because it shows that PassGAN can autonomously extract a considerable number of password properties that current state-of-the-art rules do not encode. Prerequisites
###Code
from google.colab import drive
drive.mount('/content/drive')
# import All prerequisites
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torchvision import datasets, transforms
from torch.autograd import Variable
from torchvision.utils import save_image
import numpy as np
import os
Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ROOT = "password/"
# Make dir if no exist
if not os.path.exists(ROOT):
os.makedirs(ROOT)
# Download Library
!curl --remote-name \
-H 'Accept: application/vnd.github.v3.raw' \
--location https://raw.githubusercontent.com/DSC-UI-SRIN/Introduction-to-GAN/master/4%20-%20Applications%20of%20GANs/password/datasets.py
!curl --remote-name \
-H 'Accept: application/vnd.github.v3.raw' \
--location https://raw.githubusercontent.com/DSC-UI-SRIN/Introduction-to-GAN/master/4%20-%20Applications%20of%20GANs/password/utils.py
###Output
_____no_output_____
###Markdown
Dataset
###Code
import datasets
batch_size = 100
# Rockyou Dataset
train_dataset = datasets.Rockyou(root=ROOT, train=True, download=True, input_size=(10,0), tokenize=False)
# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
examples = enumerate(train_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
###Output
_____no_output_____
###Markdown
Build Models
###Code
from torch import nn, functional
class ResBlock(nn.Module):
def __init__(self, dim, kernel_size=5):
super(ResBlock, self).__init__()
self.model = nn.Sequential(
nn.ReLU(),
nn.Conv1d(dim, dim, padding=kernel_size//2, kernel_size=kernel_size),
nn.ReLU(),
nn.Conv1d(dim, dim, padding=kernel_size//2, kernel_size=kernel_size)
)
def forward(self, input_data):
output = (self.model(input_data))
return input_data + output
###Output
_____no_output_____
###Markdown
Generator Model
###Code
class Generator(nn.Module):
def __init__(self, seq_len, layer_dim, z_dim, char_len):
super(Generator, self).__init__()
self.seq_len = seq_len
self.layer_dim = layer_dim
self.z_dim = z_dim
self.char_len = char_len
self.linear = nn.Linear(self.z_dim, self.seq_len*self.layer_dim)
self.res_blocks = nn.Sequential(
ResBlock(self.layer_dim),
ResBlock(self.layer_dim),
ResBlock(self.layer_dim),
ResBlock(self.layer_dim),
ResBlock(self.layer_dim),
)
self.conv = nn.Conv1d(self.layer_dim, self.char_len, kernel_size=1)
def softmax(self, logits, num_classes):
logits = logits.reshape(-1, num_classes)
logits = logits.softmax(1)
return logits.reshape(-1, self.seq_len, self.char_len)
def forward(self, z_input):
output = self.linear(z_input)
output = output.view(-1, self.layer_dim, self.seq_len)
output = self.res_blocks(output)
output = self.conv(output)
output = output.permute([0, 2, 1])
output = self.softmax(output, self.char_len)
return output
###Output
_____no_output_____
###Markdown
Discriminator Model
###Code
class Discriminator(nn.Module):
def __init__(self, seq_len, layer_dim, char_len):
super(Discriminator, self).__init__()
self.seq_len = seq_len
self.layer_dim = layer_dim
self.char_len = char_len
self.conv = nn.Conv1d(self.char_len, self.layer_dim, kernel_size=1)
self.res_blocks = nn.Sequential(
ResBlock(self.layer_dim),
ResBlock(self.layer_dim),
ResBlock(self.layer_dim),
ResBlock(self.layer_dim),
ResBlock(self.layer_dim),
)
self.linear = nn.Linear(self.seq_len*self.layer_dim, 1)
def forward(self, input_data):
output = input_data.permute([0, 2, 1])
output = self.conv(output)
output = self.res_blocks(output)
output = output.view(-1, self.layer_dim*self.seq_len)
output = self.linear(output)
return output
###Output
_____no_output_____
###Markdown
Build network
###Code
# build network
z_dim = 128
seq_len = 10
layer_dim = 128
G = Generator(seq_len, layer_dim, z_dim, len(train_dataset.class_to_idx)).to(device)
D = Discriminator(seq_len, layer_dim, len(train_dataset.class_to_idx)).to(device)
print(G, D)
###Output
_____no_output_____
###Markdown
Train Process Gradient Penalty
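For reference, the critic objective implemented in the next cells is the standard WGAN-GP loss $L_D = \mathbb{E}_{\tilde{x}\sim p_g}[D(\tilde{x})] - \mathbb{E}_{x\sim p_r}[D(x)] + \lambda_{gp}\,\mathbb{E}_{\hat{x}}[(\lVert \nabla_{\hat{x}} D(\hat{x}) \rVert_2 - 1)^2]$, where $\hat{x}$ is sampled uniformly along straight lines between real and generated samples; the generator minimises $-\mathbb{E}_{\tilde{x}\sim p_g}[D(\tilde{x})]$.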
###Code
def compute_gradient_penalty(D, real_data, fake_data):
# Random weight term for interpolation between real and fake samples
alpha = Tensor(
np.random.random((real_data.size(0), 1, 1)))
# Get random interpolation between real and fake samples
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
d_interpolates = D(interpolates.requires_grad_(True))
fake = Tensor(real_data.shape[0], 1).fill_(1.0)
# Get gradient w.r.t. interpolates
grads = autograd.grad(
outputs=d_interpolates,
inputs=interpolates,
grad_outputs=fake,
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
grads = grads.reshape(grads.size(0), -1)
grad_penalty = ((grads.norm(2, dim=1) - 1) ** 2).mean()
return grad_penalty
# Loss weight for gradient penalty
lambda_gp = 10
# optimizer
lr = 1e-4
n_critic = 5
b1 = 0.5
b2 = 0.999
optimizer_G = torch.optim.Adam(G.parameters(), lr=lr, betas=(b1, b2))
optimizer_D = torch.optim.Adam(D.parameters(), lr=lr, betas=(b1, b2))
from torch.utils.tensorboard import SummaryWriter
logdir = './runs'
os.makedirs(logdir, exist_ok=True)
writer = SummaryWriter(logdir)
%load_ext tensorboard
%tensorboard --logdir runs/
def check_generated_data(samples, iters, tag="result"):
"""
this function used for check the result of generator network and save it to tensorboard
:param samples(dict): samples of input network
:param tag: save the output to tensorboard log wit tag
:param iters: global iteration counts for tensorboard logging
:return:
"""
G.eval()
with torch.no_grad():
inv_charmap = train_dataset.idx_to_class
samples = G(samples)
if torch.cuda.is_available():
samples = samples.cpu().numpy()
else:
samples = samples.numpy()
samples = np.argmax(samples, axis=2)
decoded_samples = []
for i in range(len(samples)):
decoded = []
for j in range(len(samples[i])):
decoded.append(inv_charmap[samples[i][j]])
decoded_samples.append("".join(decoded).replace('`', ""))
# print(", ".join(decoded_samples))
writer.add_text(tag, ", ".join(decoded_samples), iters)
epochs = 200
list_loss_D = []
list_loss_G = []
fixed_z = Variable(Tensor(np.random.normal(0, 1, (10, z_dim))))
for epoch in range(epochs):
for i, (X, _) in enumerate(train_loader):
# Configure input
real_data = Variable(X.type(Tensor))
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
# Sample noise as generator input
z = Variable(Tensor(np.random.normal(0, 1, (real_data.shape[0], z_dim))))
# Generate a batch of images
fake_data = G(z).detach()
# Gradient penalty
gradient_penalty = compute_gradient_penalty(D, real_data.data, fake_data.data)
# Adversarial loss
d_loss = -torch.mean(D(real_data)) + torch.mean(D(fake_data)) + lambda_gp * gradient_penalty
d_loss.backward()
optimizer_D.step()
# Train the generator every n_critic iterations
if i % n_critic == 0:
# -----------------
# Train Generator
# -----------------
optimizer_G.zero_grad()
# Generate a batch of images
gen_data = G(z)
# Adversarial loss
g_loss = -torch.mean(D(gen_data))
g_loss.backward()
optimizer_G.step()
list_loss_D.append(d_loss.item())
list_loss_G.append(g_loss.item())
if i % 300 == 0:
print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"
% (epoch, epochs, i, len(train_loader), d_loss.item(), g_loss.item()))
writer.add_scalar('G_loss', g_loss.item(), epoch * len(train_loader) + i)
writer.add_scalar('D_loss', d_loss.item(), epoch * len(train_loader) + i)
if epoch % 5 == 0:
check_generated_data(fixed_z, tag="result_{}".format(epoch), iters=epoch * len(train_loader) + i)
###Output
_____no_output_____ |
BBY162_Arama.ipynb | ###Markdown
###Code
# Google Drive connection
from google.colab import drive
drive.mount('/gdrive')
dosya = "/gdrive/My Drive/Colab Notebooks/BBY162 - Arama/kitap_listesi.txt"
okunanVeri = []
f = open(dosya, "r")
for line in f.readlines():
    okunanVeri.append(line)
print(okunanVeri)
arama = input("Anahtar Kelime: ")  # search keyword
bulundu = False  # track whether any matching record was found
for ara in okunanVeri:
    if arama in ara:
        print(ara)
        bulundu = True
if not bulundu:
    print("Kayıt Yok!")  # "No record found!"
f.close()
###Output
_____no_output_____ |
Exercise-Notebooks/.ipynb_checkpoints/E3-Prelab-checkpoint.ipynb | ###Markdown
E3 Prelab Q1-2 Part 1 In Q1 you are asked to compare the titration data obtained from E2 to the model of a titration that you developed in the E2 prelab. You are then asked to modify some parameters in the model to best fit the experimental data. The model is an expression for the number of Ce(IV) added as a function of potential. A comparison will require you to convert steps of the syringe pump to the number of Ce(IV) added. This first part shows an example of a similar conversion to grams of the Ce(IV)-containing solution added, based on the 'Fine Titration Data.txt' uploaded to Canvas. Import Packages
###Code
import os
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('JGW')
import pandas as pd
###Output
_____no_output_____
###Markdown
Import titration data Import the titration data into a pandas dataframe. For the example dataset, the delimiters are commas and the first 4 rows contain details of the calibration that you will need to convert steps to the number of Ce(IV) added. We skip those rows in the first step. The format of your titration data may differ.
###Code
data_path = r"C:\Users\jgage\OneDrive - Stanford\2021\CHEM 274\Labs\E3\Fine Titration Data.txt"
titration_data = pd.read_csv(data_path, skiprows = 4, sep = ',')
###Output
_____no_output_____
###Markdown
Read calibration dataHere we simply want to see the pump calibration values. In the example file the calibration data are in the first four rows of the file. Your own calibration data might be located elsewhere e.g. in your lab notebook.
###Code
# read first 4 lines of titration data which contains the calibration info
calibration_data = pd.read_csv(data_path, nrows = 4, header = None, sep = ':')
# create dictionary containing calibration information
calibration_dict = dict(zip(calibration_data[0], calibration_data[1]))
# calibration_dict
###Output
_____no_output_____
###Markdown
Convert steps to grams of solution added Use the calibration data to convert the number of steps of Ce(IV) added in the titration to grams of the Ce(IV)-containing solution added. In the example file, 1 step corresponds to 1.355/6000 grams of Ce(IV) solution.
###Code
number_steps_in_calibration = 6000
mass_of_calibration_in_g = 1.355
# convert steps to grams of solution, then moles of Ce(IV)
titration_data['step in g sol'] = (titration_data['step'] / number_steps_in_calibration) * mass_of_calibration_in_g
titration_data['step in mol Ce(IV)'] = titration_data['step in g sol'] * (0.990 / 20.030) / 548.22
# titration_data
###Output
_____no_output_____
###Markdown
Plot dataAfter you finish the conversion of steps to number of Ce(IV) added, adjust the code below to plot your converted titration data.
###Code
# plot data
fig, ax = plt.subplots()
ax.errorbar(x = titration_data['step in mol Ce(IV)'] , y = titration_data['mean potential'],
yerr = titration_data['std dev'], fmt='o', ecolor='k', capsize=2)
ax.set_title('Cerium/Iron titration')
ax.set_xlabel('Ce(IV) added / mol')
ax.set_ylabel('Potential / V')
###Output
_____no_output_____
###Markdown
E3 Prelab Q1-2 Part 2In the second part of Q1, you will overlay the titration data with the derived titration curve (model) and estimate the redox potentials required for the model to match the experimental data. This is a visual way to estimate the parameters in the model. You are welcome to go further and use nonlinear fitting routines to further refine the parameters, but you do not need to do so. The example below shows how you can visually model a dataset. Here the data is compared to the function y = a sin(bx + c). Change a, b and c to visually match the model to the synthetic dataset.
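Below is a minimal sketch of that visual matching on an assumed synthetic dataset (all numbers and names here are illustrative, not lab data); adjust a, b and c until the curve overlays the points.
###Code
# A sketch with assumed synthetic data (not the lab data): visually match y = a*sin(b*x + c).
rng = np.random.default_rng(0)
x_syn = np.linspace(0, 10, 60)
y_syn = 2.0 * np.sin(1.5 * x_syn + 0.3) + rng.normal(0, 0.2, x_syn.size)
a, b, c = 2.0, 1.5, 0.3  # adjust these by eye until the model matches the points
fig, ax = plt.subplots()
ax.plot(x_syn, y_syn, 'o', label='Synthetic data')
ax.plot(x_syn, a * np.sin(b * x_syn + c), label='Model: a sin(bx + c)')
ax.legend()
plt.show()
###Output
_____no_output_____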
###Code
def E_predict(n_Ce4, n_Fe2, E0_Fe, E0_Ce):
'''
Returns E_cell based on the manually derived solution
'''
R, T, F = 8.314, 298, 96485
a = F/(R*T)
argument = ((np.exp(E0_Ce * a)/2)*(n_Ce4/n_Fe2 -1 + np.sqrt((1 - n_Ce4/n_Fe2)**2 + 4*np.exp((E0_Fe - E0_Ce)*a))))
return np.log(argument) / a
# Data
x_data = titration_data['step in mol Ce(IV)']
y_data = titration_data['mean potential']
# Guess some values of n_Fe2, E0_Fe, E0_Ce,
guess_n_Fe2 = 1.140 * 10**-4
guess_E0_Fe = 0.47
guess_E0_Ce = 1.23
guesses = [guess_n_Fe2, guess_E0_Fe, guess_E0_Ce]
y_model = []
for step in range(len(x_data)):
n_Ce4 = x_data[step]
y_model.append(E_predict(n_Ce4, *guesses))
# Plot data overlaid with model
fig, ax = plt.subplots()
ax.errorbar(x = titration_data['step in mol Ce(IV)'] , y = titration_data['mean potential'],
yerr = titration_data['std dev'], fmt='o', ecolor='k', capsize=2, label='Data')
ax.set_title('Cerium/Iron titration')
ax.set_xlabel('Ce(IV) added / mol')
ax.set_ylabel('Potential / V')
ax.plot(x_data, y_model, label='Model')
ax.legend()
plt.show()
# Optimize using guesses
from scipy.optimize import curve_fit
guess_n_Fe2 = 1.70 * 10**-4
guess_E0_Fe = 0.48
guess_E0_Ce = 1.20
guesses = [guess_n_Fe2, guess_E0_Fe, guess_E0_Ce]
iterations = 10
for i in range(iterations):
y_model2, _ = curve_fit(E_predict, x_data, y_data, p0=guesses)
guesses = y_model2
fig, ax = plt.subplots()
ax.errorbar(x = titration_data['step in mol Ce(IV)'] , y = titration_data['mean potential'],
yerr = titration_data['std dev'], fmt='o', ecolor='k', capsize=2, label='Data')
ax.set_title('Cerium/Iron titration')
ax.set_xlabel('Ce(IV) added / mol')
ax.set_ylabel('Potential / V')
ax.plot(x_data, E_predict(x_data, *y_model2), label='Model')
ax.legend()
plt.show()
print(y_model2)
###Output
[1.69989237e-04 5.57926647e-01 1.21039553e+00]
###Markdown
Scipy's curve_fit, a non-linear routine, is not particularly good at this unless given fantastic guesses. Perhaps this is due to the functional form of E_predict.
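One option that sometimes helps (a sketch only; the bounds below are assumptions, not values from the experiment) is to constrain the parameters with the `bounds` argument of `curve_fit` so the optimizer cannot wander into unphysical regions.
###Code
# Sketch: bounded fit of E_predict; bounds on (n_Fe2, E0_Fe, E0_Ce) are assumed and should be adjusted.
lower_bounds = [1e-5, 0.3, 1.0]
upper_bounds = [1e-3, 0.7, 1.5]
popt_bounded, _ = curve_fit(E_predict, x_data, y_data, p0=y_model2, bounds=(lower_bounds, upper_bounds))
print(popt_bounded)
###Output
_____no_output_____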
###Code
# Now adding a constant to the model
def E_predict_plus_c(n_Ce4, n_Fe2, E0_Fe, E0_Ce, c):
'''
Returns E_cell
'''
R, T, F = 8.314, 298, 96485
a = F/(R*T)
disc = (np.exp(2*E0_Ce*a))*(1 - c - n_Ce4/n_Fe2)**2 + 4*(n_Ce4/n_Fe2)*np.exp((E0_Fe + E0_Ce)*a)
argument = (1/2) * ((-np.exp(E0_Ce * a)*(1 - n_Ce4/n_Fe2 -c) + np.sqrt(disc)))
return np.log(argument) / a
# Guess some values of n_Fe2, E0_Fe, E0_Ce, and c
guess_n_Fe2 = 2.25 * 10**-4
guess_E0_Fe = 0.47
guess_E0_Ce = 1.25
guess_c = 0.5
guesses = [guess_n_Fe2, guess_E0_Fe, guess_E0_Ce, guess_c]
y_model3 = []
for step in range(len(x_data)):
n_Ce4 = x_data[step]
y_model3.append(E_predict_plus_c(n_Ce4, *guesses))
# Plot data overlaid with model
fig, ax = plt.subplots()
ax.errorbar(x = titration_data['step in mol Ce(IV)'] , y = titration_data['mean potential'],
yerr = titration_data['std dev'], fmt='o', ecolor='k', capsize=2, label='Data')
ax.set_title('Cerium/Iron titration')
ax.set_xlabel('Ce(IV) added / mol')
ax.set_ylabel('Potential / V')
ax.plot(x_data, y_model3, label='Model')
ax.legend()
plt.show()
# Optimize using guesses
from scipy.optimize import curve_fit
guess_n_Fe2 = 2.25 * 10**-4
guess_E0_Fe = 0.47
guess_E0_Ce = 1.25
guess_c = 0.5
guesses = [guess_n_Fe2, guess_E0_Fe, guess_E0_Ce, guess_c]
iterations = 10
for i in range(iterations):
y_model4, _ = curve_fit(E_predict_plus_c, x_data, y_data, p0=guesses)
guesses = y_model4
fig, ax = plt.subplots()
ax.errorbar(x = titration_data['step in mol Ce(IV)'] , y = titration_data['mean potential'],
yerr = titration_data['std dev'], fmt='o', ecolor='k', capsize=2, label='Data')
ax.set_title('Cerium/Iron titration')
ax.set_xlabel('Ce(IV) added / mol')
ax.set_ylabel('Potential / V')
ax.plot(x_data, E_predict_plus_c(x_data, *y_model4), label='Model')
ax.legend()
plt.show()
###Output
C:\Users\jgage\anaconda3\lib\site-packages\pandas\core\arraylike.py:364: RuntimeWarning: divide by zero encountered in log
result = getattr(ufunc, method)(*inputs, **kwargs)
|
compiler/simulate_processor.ipynb | ###Markdown
A Computational Design of a Programmable Biological Processor - Description of the biological compiler The compiler accepts programs written as text files. Each line of a program is translated to a specific location in the memory: the first line is translated to the first address in the memory, the second line to the second, etc. Multiple instructions can be given within the same line. In this case, instructions should be separated with semicolons (`;`). Each program is compiled into an ordinary differential equation model with simulation capabilities, which can be analysed further on. How to use the biological compiler The compiler is implemented in the Python module `generate_model.py`. You need to import the `generate_model` function from this module into a Python program.
###Code
from generate_model import generate_model
###Output
_____no_output_____
###Markdown
The `generate_model` function accepts the following arguments:* `program_name`: name of the `txt` file in which the program is stored,* `model_name`: name of the file in which the Python implementation of the model will be stored,* `n_bits`: number of flip-flops in the Johnson counter that is used for the addressing of the instruction memory (defines the maximal size of the program),* `prog_alpha`: maximal expression rate of proteins that are used as operands in the program,* `prog_delta`: degradation rate of proteins that are used as operands in the program,* `prog_n`: Hill coefficient defining the expression activation of proteins that are used as operands in the program,* `prog_Kd`: dissociation constant defining the expression activation of proteins that are used as operands in the program.An example of the function call is
###Code
n_bits = 4
generate_model("programs\\program_add.txt", "test_add", n_bits, 10, 0.1, 2, 10)
###Output
_____no_output_____
###Markdown
The above call translates the program from the file `programs\program_add.txt` into a Python implementation of the ordinary differential equation-based model, saved as `test_add.py`. This model can be imported using the `importlib` library.
###Code
import importlib
model = importlib.import_module("test_add")
###Output
_____no_output_____
###Markdown
The `generate_model` function also generates the list of operands that are used in the model. It stores this list in the file `model_name+"description.txt"`. You can read this file and display the operands used in the program.
###Code
f_description = open("test_add"+"description.txt")
ops = f_description.readline().strip().split(",")[:-1]
f_description.close()
ops
###Output
_____no_output_____
###Markdown
In order to simulate the dynamics of the obtained model, you still need to define the remaining parameter values used for a simulation. You can obtain most of the parameter values with the optimization framework proposed in *Pušnik et al., 2019*. Some feasible values are stored in the file `selected_points.txt`.
###Code
import numpy as np
points = np.loadtxt('selected_points.txt')
params = points[0]
###Output
_____no_output_____
###Markdown
The first 8 parameters define the dynamics of flip-flops and the remaining parameters define the dynamics of the addressing logic.
###Code
params_ff = list(params[:8])
params_addr = list(params[8:])
###Output
_____no_output_____
###Markdown
You still need to define the proteolysis parameters used for asynchronous set and reset D flip-flop inputs and dissociation constant used in conditional jumps.
###Code
delta_P= 250
K_M = 100
K_X = 0.1
params_proteolysis = [delta_P, K_M]
params_condition = [K_X]
###Output
_____no_output_____
###Markdown
You can now simulate the dynamics of the program with the `odeint` function from the `scipy` module `integrate`.
###Code
from scipy.integrate import odeint
T = np.linspace(0, 100, 1000)
Y0 = np.array([0]*(n_bits*6+len(ops)))
Y = odeint(model.model, Y0, T, args= (params_ff + params_proteolysis + params_addr + params_condition,))
###Output
_____no_output_____
###Markdown
Here `T` includes the timepoints at which the results of a simulation will be sampled and `Y0` represents the initial state of the system. The simulation results are now stored in the matrix `Y`. The operands occupy the last (in our case three) columns of the matrix. You can analyse the results of your program's execution by plotting the operands' concentrations through time.
###Code
import matplotlib.pyplot as plt
for i,op in enumerate(ops):
plt.plot(T,Y[:,-(i+1)], label = op)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
A faster alternative to test your programs is to use the `simulate_program` function from the `simulate_program` module, which automatically generates the program, runs the simulation and displays the simulation results. First you need to import this function into your module.
###Code
from simulate_program import simulate_program
###Output
_____no_output_____
###Markdown
The `simulate_program` function accepts the following mandatory arguments:* `program_name`: name of the `txt` file in which the program is stored,* `t_end`: simulation duration (in hours),* `N`: number of samples,* `params_ff`: flip-flop parameters,* `params_addr`: addressing parameters,* `params_proteolysis`: proteolysis parameters,* `params_condition`: params used in conditional jumps,* `params_prog`: params used for the expression of operands,* `n_bits`: number of flip-flops in the Johnson counter that is used for the addressing of the instruction memory (defines the maximal size of the program).An example of the function call and its output is
###Code
params_prog = [10, 0.1, 2, 10]
simulate_program("programs\\program_add.txt",
100,
200,
params_ff,
params_addr,
params_proteolysis,
params_condition,
params_prog,
4)
###Output
_____no_output_____
###Markdown
Currently supported instruction set ```nop```Only used as a placeholder instruction. ```generate op1```Expression of the operand with the name ```op1``` is triggered. ```add op1, op2, op3```Performs the addition of the operands ```op2``` and ```op3``` and stores the result in ```op1```:```op1<-op2+op3 ``` ```sub op1, op2, op3```Performs the subtraction of the operands ```op2``` and ```op3``` and stores the result in ```op1```:```op1<-op2-op3 ``` ```if condition instruction```Executes the ```instruction``` if the concentration of the operand ```condition``` is high. Can be followed by multiple instructions separated by semicolons (```;```), e.g., ```if D generate A; generate B; add C,A,B```. ```do-while condition instruction```The ```instruction``` is always executed in the first loop transition and is executed as long as the operand ```condition``` is high. ```while condition instruction```The ```instruction``` is executed only if and as long as the operand ```condition``` is high. ```halt```Halts the processor at the current instruction memory location. ExamplesThe following examples present the application of the proposed compiler on simple biological programs. The compiler is used to translate these programs into ordinary differential equation models, which are then used to simulate the dynamics of the program depending on the given set of kinetic parameters. Initialization Imports
###Code
from simulate_program import simulate_program
import numpy as np
import seaborn as sns
sns.set_style("white")
###Output
_____no_output_____
###Markdown
Simulation parameters
###Code
t_end = 160 # duration in hours
N = 1000 # number of samples to display
###Output
_____no_output_____
###Markdown
Program parameters
###Code
n_bits = 4 # number of bits in the instruction memory
# parameters defining the expression of operands
prog_alpha = 10
prog_delta = 0.1#0.01
prog_n = 2
prog_Kd = 10
params_prog = prog_alpha, prog_delta, prog_n, prog_Kd
# proteolysis and induction of protease (conditional jumps)
delta_P= 250
K_M = 100
K_X = 0.1
###Output
_____no_output_____
###Markdown
Processor parametersUse the parameter values that were obtained with the optimization framework proposed in *Pušnik et al., 2019*.
###Code
points = np.loadtxt('selected_points.txt')
params = points[0]
params_ff = list(params[:8])
params_addr = list(params[8:])
params_proteolysis = [delta_P, K_M]
params_condition = [K_X]
###Output
_____no_output_____
###Markdown
Examples of simple programs A few examples are given to show how to compile a program stored in a text file and produce a model that can be simulated with the parameters defined above. Addition```
generate A
generate B
add C, A, B
```
###Code
program_name = "programs\\program_add.txt"
simulate_program(program_name, t_end, N, params_ff, params_addr, params_proteolysis, params_condition, params_prog, n_bits)
###Output
_____no_output_____
###Markdown
Subtraction```
generate A
generate B
sub C, A, B
```
###Code
program_name = "programs\\program_sub.txt"
simulate_program(program_name, t_end, N, params_ff, params_addr, params_proteolysis, params_condition, params_prog, n_bits)
###Output
_____no_output_____
###Markdown
Subtraction 2```
generate A
sub C, A, B
```
###Code
program_name = "programs\\program_sub_one.txt"
simulate_program(program_name, t_end, N, params_ff, params_addr, params_proteolysis, params_condition, params_prog, n_bits)
###Output
_____no_output_____
###Markdown
Addition and halt```
generate A
generate B
add C, A, B
halt
```
###Code
program_name = "programs\\program_add_halt.txt"
simulate_program(program_name, t_end, N, params_ff, params_addr, params_proteolysis, params_condition, params_prog, n_bits)
###Output
_____no_output_____
###Markdown
Multiple additions, subtraction and halt This example demonstrates the execution of multiple instructions in the same clock period.```
generate A; generate B; add E, A, B
add C, A, B; sub D, A, B
halt
```
###Code
program_name = "programs\\program_add_multi.txt"
simulate_program(program_name, t_end, N, params_ff, params_addr, params_proteolysis, params_condition, params_prog, n_bits)
###Output
_____no_output_____
###Markdown
If-then Condition is fulfilled```
generate A
generate B
if B add C, A, B
```
###Code
program_name = "programs\\program_if.txt"
simulate_program(program_name, t_end, N, params_ff, params_addr, params_proteolysis, params_condition, params_prog, n_bits)
###Output
_____no_output_____
###Markdown
Condition is not fulfilled```
generate A
if B add C, A, B
```
###Code
program_name = "programs\\program_if_false.txt"
simulate_program(program_name, t_end, N, params_ff, params_addr, params_proteolysis, params_condition, params_prog, n_bits)
###Output
_____no_output_____
###Markdown
While```
generate A
while A generate C
```
###Code
program_name = "programs\\program_while.txt"
simulate_program(program_name, t_end, N, params_ff, params_addr, params_proteolysis, params_condition, params_prog, n_bits)
###Output
_____no_output_____
###Markdown
If-then-else Condition is fulfilled```
generate A
if A generate B else generate C
```
###Code
program_name = "programs\\program_if_else_true.txt"
simulate_program(program_name, t_end, N, params_ff, params_addr, params_proteolysis, params_condition, params_prog, n_bits)
###Output
_____no_output_____
###Markdown
Condition is not fulfilled```if A generate B else generate C```
###Code
program_name = "programs\\program_if_else_false.txt"
simulate_program(program_name, t_end, N, params_ff, params_addr, params_proteolysis, params_condition, params_prog, n_bits)
###Output
_____no_output_____ |
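###Markdown
As a final illustration, a custom program built from the instruction set above can be written to a text file and simulated in the same way; the file name and program below are hypothetical, and the simulation reuses the parameters defined earlier in this notebook.
###Code
# Sketch: write a hypothetical program and simulate it (file name and program content are illustrative only).
custom_program = "generate A\ngenerate B\nif B add C, A, B\nhalt"
with open("programs\\program_custom.txt", "w") as f:
    f.write(custom_program)
simulate_program("programs\\program_custom.txt", t_end, N, params_ff, params_addr, params_proteolysis, params_condition, params_prog, n_bits)
###Output
_____no_output_____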
Visualization-for-Company-Stakeholders/code.ipynb | ###Markdown
Visualization for Company Stakeholders Importing header files
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Reading the file
###Code
path = 'loan_prediction.csv'
data = pd.read_csv(path)
###Output
_____no_output_____
###Markdown
Step 1 Let's start with the simple task of visualizing the company's record with respect to loan approvals.
###Code
loan_status = data['Loan_Status'].value_counts()
loan_status.plot(kind='bar')
plt.show()
###Output
_____no_output_____
###Markdown
Step 2 The company provides financial assistance across the different regions of the country. One interesting statistic that stakeholders want to see is the loan approval distribution across the regions.
###Code
property_and_loan = data.groupby(['Property_Area', 'Loan_Status']).size().unstack()
property_and_loan.plot(kind='bar', stacked=False)
plt.xlabel('Property Area')
plt.ylabel('Loan Status')
plt.xticks(rotation=45)
plt.show()
###Output
_____no_output_____
###Markdown
Step 3 Higher education has always been an expensive endeavour for people, but it results in better career opportunities and stability in life. But does higher education also result in a better chance of loan approval?
###Code
education_and_loan = data.groupby(['Education', 'Loan_Status']).size().unstack()
education_and_loan.plot(kind='bar', stacked=True)
plt.xlabel('Education Status')
plt.ylabel('Loan Status')
plt.xticks(rotation=45)
plt.show()
###Output
_____no_output_____
###Markdown
Step 4 After seeing the loan status distribution, let's check whether being a graduate or not also leads to a different loan amount distribution, by plotting overlapping density plots of the two groups.
###Code
graduate = data[data['Education'] == 'Graduate']
not_graduate = data[data['Education'] == 'Not Graduate']
graduate['LoanAmount'].plot(kind='density', label='Graduate')
not_graduate['LoanAmount'].plot(kind='density', label='Not Graduate')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Step 5 For any financial institution to be successful in its loan lending system, there has to be a correlation between the borrower's income and the loan amount they are lent. Let's see how our company fares in that respect.
###Code
fig ,(ax_1,ax_2,ax_3) = plt.subplots(nrows = 3 , ncols = 1, figsize=(20, 20))
ax_1.scatter(data['ApplicantIncome'], data['LoanAmount'])
ax_1.set_title('Applicant Income')
ax_2.scatter(data['CoapplicantIncome'], data['LoanAmount'])
ax_2.set_title('Coapplicant Income')
data['TotalIncome'] = data['ApplicantIncome'] + data['CoapplicantIncome']
ax_3.scatter(data['TotalIncome'], data['LoanAmount'])
ax_3.set_title('Total Income')
plt.show()
###Output
_____no_output_____ |
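###Markdown
A small follow-up sketch (not part of the original task): quantifying the relationship discussed above with Pearson correlation coefficients.
###Code
# Pearson correlation of each income measure with the loan amount (NaN pairs are ignored by .corr()).
for col in ['ApplicantIncome', 'CoapplicantIncome', 'TotalIncome']:
    print(col, round(data[col].corr(data['LoanAmount']), 3))
###Output
_____no_output_____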
plots_tutorial/Read_and_Plot.ipynb | ###Markdown
Reading data with header There are at least 4 ways of reading data with a header (4 ways that I've seen). I will show one way using the `numpy` library. We will use the function:```np.genfromtxt('file.format', names=True)```where `file.format` is a string with the filename and format of the file we want to use. This function can work with just the file argument, but if you want to read the header and access it, you need the `names=True` argument. There are other useful arguments: delimiter=',' -> sets the delimiter between values; comments='#' -> the default comment character (the same one Python uses), useful to skip comments in the file; skip_header=0 -> the number of lines to skip at the beginning of the file; and some more... *full documentation [here](https://docs.scipy.org/doc/numpy/reference/generated/numpy.genfromtxt.html#numpy.genfromtxt)* Reading a file
###Code
data = np.genfromtxt('OGLE-LMC-CEP-0018.dat', names=True)
###Output
_____no_output_____
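###Markdown
As an aside, a quick sketch of the extra `genfromtxt` arguments mentioned above, using a hypothetical file name (not part of this tutorial's data).
###Code
# Hypothetical file: comma-separated values, '#' comments, one junk line before the header row.
other = np.genfromtxt('some_table.csv', delimiter=',', comments='#', skip_header=1, names=True)
###Output
_____no_output_____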
###Markdown
Print its header
###Code
#print(data)
print(data.dtype.names)
#print(data['Mag_I'])
###Output
('HJD', 'Mag_I', 'mag_err')
###Markdown
Plot the data
###Code
plt.plot(data['HJD'], data['Mag_I'], 'o')
plt.show()
#plt.errorbar(data['HJD'], data['Mag_I'], yerr=data['mag_err'], linestyle='', marker='o')
#plt.show()
###Output
_____no_output_____
###Markdown
But this is a Cepheid variable with period p=4.0478526
###Code
period = 4.0478526
# Rephase the data
rephase = (data['HJD'] / period ) % 1 # the '% 1' extracts the fractional part of the division
# Plot
plt.plot(rephase, data['Mag_I'], 'bo')
plt.plot(rephase + 1, data['Mag_I'], 'bo')
###Output
_____no_output_____ |
notebooks/Comparing-TF-and-PT-models-SQuAD.ipynb | ###Markdown
Comparing TensorFlow (original) and PyTorch model on the SQuAD task You can use this small notebook to compare the loss computation of the TensorFlow model with that of the PyTorch model. In the following, we compare the total loss computed by the models starting from identical initializations (position prediction linear layers with weights at 1 and bias at 0). To run this notebook, follow these instructions:- make sure that your Python environment has both TensorFlow and PyTorch installed,- download the original TensorFlow implementation,- download a pre-trained TensorFlow model as indicated in the TensorFlow implementation readme,- run the script `convert_tf_checkpoint_to_pytorch.py` as indicated in the `README` to convert the pre-trained TensorFlow model to PyTorch. If needed, change the relative paths indicated in this notebook (at the beginning of Sections 1 and 2) to point to the relevant models and code.
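As a side note, a minimal sketch (not part of the original comparison code; the layer size assumes BERT-base) of forcing a position-prediction head to that initialization on the PyTorch side:
###Code
# Sketch: a start/end position prediction head with weights at 1 and bias at 0 (hidden size 768 assumed).
import torch
qa_outputs = torch.nn.Linear(768, 2)
qa_outputs.weight.data.fill_(1.0)
qa_outputs.bias.data.zero_()
###Output
_____no_output_____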
###Code
import os
os.chdir('../')
###Output
_____no_output_____
###Markdown
1/ TensorFlow code
###Code
original_tf_inplem_dir = "./tensorflow_code/"
model_dir = "../google_models/uncased_L-12_H-768_A-12/"
vocab_file = model_dir + "vocab.txt"
bert_config_file = model_dir + "bert_config.json"
init_checkpoint = model_dir + "bert_model.ckpt"
input_file = "../data/squad_data/train-v1.1.json"
max_seq_length = 384
outside_pos = max_seq_length + 10
doc_stride = 128
max_query_length = 64
max_answer_length = 30
output_dir = "/tmp/squad_base/"
learning_rate = 3e-5
import importlib.util
import sys
spec = importlib.util.spec_from_file_location('*', original_tf_inplem_dir + '/modeling.py')
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
sys.modules['modeling_tensorflow'] = module
spec = importlib.util.spec_from_file_location('*', original_tf_inplem_dir + '/run_squad.py')
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
sys.modules['run_squad_tensorflow'] = module
import modeling_tensorflow
from run_squad_tensorflow import *
bert_config = modeling_tensorflow.BertConfig.from_json_file(bert_config_file)
tokenizer = tokenization.BertTokenizer(
vocab_file=vocab_file, do_lower_case=True)
eval_examples = read_squad_examples(
input_file=input_file, is_training=True, max_num=16)
eval_features = convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_training=True)
# You can use that to test the behavior of the models when target are outside of the model input sequence
# for feature in eval_features:
# feature.start_position = outside_pos
# feature.end_position = outside_pos
eval_unique_id_to_feature = {}
for eval_feature in eval_features:
eval_unique_id_to_feature[eval_feature.unique_id] = eval_feature
def input_fn_builder(features, seq_length, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_unique_ids = []
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_start_positions = []
all_end_positions = []
for feature in features:
all_unique_ids.append(feature.unique_id)
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_start_positions.append(feature.start_position)
all_end_positions.append(feature.end_position)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
feature_map = {
"unique_ids":
tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"start_positions":
tf.constant(
all_start_positions,
shape=[num_examples],
dtype=tf.int32),
"end_positions":
tf.constant(
all_end_positions,
shape=[num_examples],
dtype=tf.int32),
}
d = tf.data.Dataset.from_tensor_slices(feature_map)
d = d.repeat()
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
unique_ids = features["unique_ids"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(start_logits, end_logits) = create_model(
bert_config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map,
initialized_variable_names) = modeling_tensorflow.get_assigment_map_from_checkpoint(
tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
seq_length = modeling_tensorflow.get_shape_list(input_ids)[1]
def compute_loss(logits, positions):
one_hot_positions = tf.one_hot(
positions, depth=seq_length, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits, axis=-1)
loss = -tf.reduce_mean(
tf.reduce_sum(one_hot_positions * log_probs, axis=-1))
return loss
start_positions = features["start_positions"]
end_positions = features["end_positions"]
start_loss = compute_loss(start_logits, start_positions)
end_loss = compute_loss(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2.0
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.PREDICT:
batch_size = modeling_tensorflow.get_shape_list(start_logits)[0]
seq_length = modeling_tensorflow.get_shape_list(input_ids)[1]
def compute_loss(logits, positions):
one_hot_positions = tf.one_hot(
positions, depth=seq_length, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits, axis=-1)
loss = -tf.reduce_mean(
tf.reduce_sum(one_hot_positions * log_probs, axis=-1))
return loss
start_positions = features["start_positions"]
end_positions = features["end_positions"]
start_loss = compute_loss(start_logits, start_positions)
end_loss = compute_loss(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2.0
predictions = {
"unique_ids": unique_ids,
"start_logits": start_logits,
"end_logits": end_logits,
"total_loss": tf.reshape(total_loss, [batch_size, 1]),
"start_loss": tf.reshape(start_loss, [batch_size, 1]),
"end_loss": tf.reshape(end_loss, [batch_size, 1]),
}
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
else:
raise ValueError(
"Only TRAIN and PREDICT modes are supported: %s" % (mode))
return output_spec
return model_fn
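# use_tpu=False, so the TPUEstimator below runs locally; the TPU-only settings
# (num_shards, iterations_per_loop) have no effect in this configuration.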
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=None,
master=None,
model_dir=output_dir,
save_checkpoints_steps=1000,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=1000,
num_shards=8,
per_host_input_for_training=is_per_host))
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=init_checkpoint,
learning_rate=learning_rate,
num_train_steps=None,
num_warmup_steps=None,
use_tpu=False,
use_one_hot_embeddings=False)
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=False,
model_fn=model_fn,
config=run_config,
train_batch_size=12,
predict_batch_size=1)
predict_input_fn = input_fn_builder(
features=eval_features,
seq_length=max_seq_length,
drop_remainder=True)
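# The loop below runs the estimator in PREDICT mode and breaks after the first feature,
# so exactly one example is collected for the comparison with the PyTorch model.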
tensorflow_all_out = []
tensorflow_all_results = []
for result in estimator.predict(predict_input_fn, yield_single_examples=True):
unique_id = int(result["unique_ids"])
eval_feature = eval_unique_id_to_feature[unique_id]
start_logits = result["start_logits"]
end_logits = result["end_logits"]
total_loss = result["total_loss"]
start_loss = result["start_loss"]
end_loss = result["end_loss"]
output_json = collections.OrderedDict()
output_json["linex_index"] = unique_id
output_json["tokens"] = [token for (i, token) in enumerate(eval_feature.tokens)]
output_json["start_logits"] = [round(float(x), 6) for x in start_logits.flat]
output_json["end_logits"] = [round(float(x), 6) for x in end_logits.flat]
output_json["total_loss"] = [round(float(x), 6) for x in total_loss.flat]
output_json["start_loss"] = [round(float(x), 6) for x in start_loss.flat]
output_json["end_loss"] = [round(float(x), 6) for x in end_loss.flat]
tensorflow_all_out.append(output_json)
tensorflow_all_results.append(RawResult(
unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits))
break
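# The two helpers below mirror the reference SQuAD post-processing: _get_best_indexes
# keeps the n highest-scoring positions and _compute_softmax converts summed span
# logits into probabilities with a numerically stable (max-subtracted) softmax.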
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
def compute_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case):
"""Compute final predictions."""
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
all_predictions[example.qas_id] = nbest_json[0]["text"]
all_nbest_json[example.qas_id] = nbest_json
return all_predictions, all_nbest_json
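# compute_predictions is applied below to the first example/feature only, matching the
# single RawResult collected from the TF estimator above.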
all_predictions, all_nbest_json = compute_predictions(eval_examples[:1], eval_features[:1], tensorflow_all_results, 20, max_answer_length, True)
all_nbest_json
print(len(tensorflow_all_out))
print(len(tensorflow_all_out[0]))
print(tensorflow_all_out[0].keys())
print("number of tokens", len(tensorflow_all_out[0]['tokens']))
print("number of start_logits", len(tensorflow_all_out[0]['start_logits']))
print("shape of end_logits", len(tensorflow_all_out[0]['end_logits']))
tensorflow_outputs = [tensorflow_all_out[0]['start_logits'], tensorflow_all_out[0]['end_logits'],
tensorflow_all_out[0]['total_loss'], tensorflow_all_out[0]['start_loss'],
tensorflow_all_out[0]['end_loss']]
###Output
_____no_output_____
###Markdown
2/ PyTorch code
###Code
import modeling
from run_squad import *
init_checkpoint_pt = "../google_models/uncased_L-12_H-768_A-12/pytorch_model.bin"
device = torch.device("cpu")
model = modeling.BertForQuestionAnswering(bert_config)
model.bert.load_state_dict(torch.load(init_checkpoint_pt, map_location='cpu'))
model.to(device)
model.qa_outputs.weight.data.fill_(1.0)
model.qa_outputs.bias.data.zero_()
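# Reset the QA head to weights of 1 and bias of 0 so the PyTorch model starts from the
# same position-prediction initialization as the TensorFlow run (see the notebook
# introduction), making the losses directly comparable.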
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
all_start_positions = torch.tensor([[f.start_position] for f in eval_features], dtype=torch.long)
all_end_positions = torch.tensor([[f.end_position] for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_start_positions, all_end_positions, all_example_index)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=1)
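# SequentialSampler with batch_size=1 yields the features one at a time in dataset
# order, so the first PyTorch batch corresponds to the same feature as the single TF
# prediction collected above.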
model.eval()
None  # make the cell's last expression None so the notebook does not print the model repr
batch = next(iter(eval_dataloader))
input_ids, input_mask, segment_ids, start_positions, end_positions, example_index = batch
print([t.shape for t in batch])
start_positions.size()
pytorch_all_out = []
for batch in tqdm(eval_dataloader, desc="Evaluating"):
input_ids, input_mask, segment_ids, start_positions, end_positions, example_index = batch
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
start_positions = start_positions.to(device)
end_positions = end_positions.to(device)
total_loss, (start_logits, end_logits) = model(input_ids, segment_ids, input_mask, start_positions, end_positions)
eval_feature = eval_features[example_index.item()]
output_json = collections.OrderedDict()
output_json["linex_index"] = unique_id
output_json["tokens"] = [token for (i, token) in enumerate(eval_feature.tokens)]
output_json["total_loss"] = total_loss.detach().cpu().numpy()
output_json["start_logits"] = start_logits.detach().cpu().numpy()
output_json["end_logits"] = end_logits.detach().cpu().numpy()
pytorch_all_out.append(output_json)
break
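# As on the TensorFlow side, the loop stops after the first batch so exactly one
# example is compared end to end.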
print(len(pytorch_all_out))
print(len(pytorch_all_out[0]))
print(pytorch_all_out[0].keys())
print("number of tokens", len(pytorch_all_out[0]['tokens']))
print("number of start_logits", len(pytorch_all_out[0]['start_logits']))
print("number of end_logits", len(pytorch_all_out[0]['end_logits']))
pytorch_outputs = [pytorch_all_out[0]['start_logits'], pytorch_all_out[0]['end_logits'], pytorch_all_out[0]['total_loss']]
###Output
_____no_output_____
###Markdown
3/ Comparing the standard deviation of start_logits, end_logits and loss of both models
###Code
import numpy as np
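# Note: the "standard deviation" printed below is the root-mean-square difference
# between the TensorFlow and PyTorch tensors, i.e. sqrt(mean((tf - pt)**2)).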
print('shape tensorflow layer, shape pytorch layer, standard deviation')
print('\n'.join(list(str((np.array(tensorflow_outputs[i]).shape,
np.array(pytorch_outputs[i]).shape,
np.sqrt(np.mean((np.array(tensorflow_outputs[i]) - np.array(pytorch_outputs[i]))**2.0)))) for i in range(3))))
print("Total loss of the TF model {} - Total loss of the PT model {}".format(tensorflow_outputs[2][0], pytorch_outputs[2]))
###Output
Total loss of the TF model 9.06024 - Total loss of the PT model 9.0602445602417
###Markdown
Comparing TensorFlow (original) and PyTorch model on the SQuAD task. You can use this small notebook to check the loss computation of the TensorFlow model against the PyTorch model. In the following, we compare the total loss computed by the two models starting from identical initializations (position prediction linear layers with weights at 1 and bias at 0). To run this notebook, follow these instructions: - make sure that your Python environment has both TensorFlow and PyTorch installed, - download the original TensorFlow implementation, - download a pre-trained TensorFlow model as indicated in the TensorFlow implementation readme, - run the script `convert_tf_checkpoint_to_pytorch.py` as indicated in the `README` to convert the pre-trained TensorFlow model to PyTorch. If needed, change the relative paths indicated in this notebook (at the beginning of Sections 1 and 2) to point to the relevant models and code.
###Code
import os
os.chdir('../')
###Output
_____no_output_____
###Markdown
1/ TensorFlow code
###Code
original_tf_inplem_dir = "./tensorflow_code/"
model_dir = "../google_models/uncased_L-12_H-768_A-12/"
vocab_file = model_dir + "vocab.txt"
bert_config_file = model_dir + "bert_config.json"
init_checkpoint = model_dir + "bert_model.ckpt"
input_file = "../data/squad_data/train-v1.1.json"
max_seq_length = 384
outside_pos = max_seq_length + 10
doc_stride = 128
max_query_length = 64
max_answer_length = 30
output_dir = "/tmp/squad_base/"
learning_rate = 3e-5
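# Standard SQuAD windowing settings: 384-token inputs with a 128-token document stride,
# queries truncated to 64 tokens, and predicted answers capped at 30 tokens. Only the
# PREDICT path is exercised in this notebook, so learning_rate never drives an actual
# training step.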
import importlib.util
import sys
spec = importlib.util.spec_from_file_location('*', original_tf_inplem_dir + '/modeling.py')
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
sys.modules['modeling_tensorflow'] = module
spec = importlib.util.spec_from_file_location('*', original_tf_inplem_dir + '/run_bert_squad.py')
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
sys.modules['run_squad_tensorflow'] = module
import modeling_tensorflow
from run_squad_tensorflow import *
bert_config = modeling_tensorflow.BertConfig.from_json_file(bert_config_file)
tokenizer = tokenization.BertTokenizer(
vocab_file=vocab_file, do_lower_case=True)
eval_examples = read_squad_examples(
input_file=input_file, is_training=True, max_num=16)
eval_features = convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_training=True)
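# convert_examples_to_features slides a max_seq_length window over each document using
# doc_stride, tokenizes with the BERT WordPiece vocabulary, and, because is_training=True,
# also records the gold start/end token positions for every feature.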
# You can use this to test the behavior of the models when targets are outside of the model input sequence
# for feature in eval_features:
# feature.start_position = outside_pos
# feature.end_position = outside_pos
eval_unique_id_to_feature = {}
for eval_feature in eval_features:
eval_unique_id_to_feature[eval_feature.unique_id] = eval_feature
def input_fn_builder(features, seq_length, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_unique_ids = []
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_start_positions = []
all_end_positions = []
for feature in features:
all_unique_ids.append(feature.unique_id)
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_start_positions.append(feature.start_position)
all_end_positions.append(feature.end_position)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
feature_map = {
"unique_ids":
tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"start_positions":
tf.constant(
all_start_positions,
shape=[num_examples],
dtype=tf.int32),
"end_positions":
tf.constant(
all_end_positions,
shape=[num_examples],
dtype=tf.int32),
}
d = tf.data.Dataset.from_tensor_slices(feature_map)
d = d.repeat()
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
unique_ids = features["unique_ids"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(start_logits, end_logits) = create_model(
bert_config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map,
initialized_variable_names) = modeling_tensorflow.get_assigment_map_from_checkpoint(
tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
seq_length = modeling_tensorflow.get_shape_list(input_ids)[1]
def compute_loss(logits, positions):
one_hot_positions = tf.one_hot(
positions, depth=seq_length, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits, axis=-1)
loss = -tf.reduce_mean(
tf.reduce_sum(one_hot_positions * log_probs, axis=-1))
return loss
start_positions = features["start_positions"]
end_positions = features["end_positions"]
start_loss = compute_loss(start_logits, start_positions)
end_loss = compute_loss(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2.0
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.PREDICT:
batch_size = modeling_tensorflow.get_shape_list(start_logits)[0]
seq_length = modeling_tensorflow.get_shape_list(input_ids)[1]
def compute_loss(logits, positions):
one_hot_positions = tf.one_hot(
positions, depth=seq_length, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits, axis=-1)
loss = -tf.reduce_mean(
tf.reduce_sum(one_hot_positions * log_probs, axis=-1))
return loss
start_positions = features["start_positions"]
end_positions = features["end_positions"]
start_loss = compute_loss(start_logits, start_positions)
end_loss = compute_loss(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2.0
predictions = {
"unique_ids": unique_ids,
"start_logits": start_logits,
"end_logits": end_logits,
"total_loss": tf.reshape(total_loss, [batch_size, 1]),
"start_loss": tf.reshape(start_loss, [batch_size, 1]),
"end_loss": tf.reshape(end_loss, [batch_size, 1]),
}
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
else:
raise ValueError(
"Only TRAIN and PREDICT modes are supported: %s" % (mode))
return output_spec
return model_fn
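# The TPUEstimator machinery is reused below, but it runs on CPU here:
# use_tpu=False and no cluster/master are given; checkpoints go to output_dir.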
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=None,
master=None,
model_dir=output_dir,
save_checkpoints_steps=1000,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=1000,
num_shards=8,
per_host_input_for_training=is_per_host))
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=init_checkpoint,
learning_rate=learning_rate,
num_train_steps=None,
num_warmup_steps=None,
use_tpu=False,
use_one_hot_embeddings=False)
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=False,
model_fn=model_fn,
config=run_config,
train_batch_size=12,
predict_batch_size=1)
predict_input_fn = input_fn_builder(
features=eval_features,
seq_length=max_seq_length,
drop_remainder=True)
tensorflow_all_out = []
tensorflow_all_results = []
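# Collect the raw TF outputs; the break at the end of the loop stops after the
# first feature, which is all that is needed for the comparison below.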
for result in estimator.predict(predict_input_fn, yield_single_examples=True):
unique_id = int(result["unique_ids"])
eval_feature = eval_unique_id_to_feature[unique_id]
start_logits = result["start_logits"]
end_logits = result["end_logits"]
total_loss = result["total_loss"]
start_loss = result["start_loss"]
end_loss = result["end_loss"]
output_json = collections.OrderedDict()
output_json["linex_index"] = unique_id
output_json["tokens"] = [token for (i, token) in enumerate(eval_feature.tokens)]
output_json["start_logits"] = [round(float(x), 6) for x in start_logits.flat]
output_json["end_logits"] = [round(float(x), 6) for x in end_logits.flat]
output_json["total_loss"] = [round(float(x), 6) for x in total_loss.flat]
output_json["start_loss"] = [round(float(x), 6) for x in start_loss.flat]
output_json["end_loss"] = [round(float(x), 6) for x in end_loss.flat]
tensorflow_all_out.append(output_json)
tensorflow_all_results.append(RawResult(
unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits))
break
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
def compute_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case):
"""Compute final predictions."""
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
all_predictions[example.qas_id] = nbest_json[0]["text"]
all_nbest_json[example.qas_id] = nbest_json
return all_predictions, all_nbest_json
all_predictions, all_nbest_json = compute_predictions(eval_examples[:1], eval_features[:1], tensorflow_all_results, 20, max_answer_length, True)
all_nbest_json
print(len(tensorflow_all_out))
print(len(tensorflow_all_out[0]))
print(tensorflow_all_out[0].keys())
print("number of tokens", len(tensorflow_all_out[0]['tokens']))
print("number of start_logits", len(tensorflow_all_out[0]['start_logits']))
print("shape of end_logits", len(tensorflow_all_out[0]['end_logits']))
tensorflow_outputs = [tensorflow_all_out[0]['start_logits'], tensorflow_all_out[0]['end_logits'],
tensorflow_all_out[0]['total_loss'], tensorflow_all_out[0]['start_loss'],
tensorflow_all_out[0]['end_loss']]
###Output
_____no_output_____
###Markdown
2/ PyTorch code
###Code
import modeling
from run_squad import *
init_checkpoint_pt = "../google_models/uncased_L-12_H-768_A-12/pytorch_model.bin"
device = torch.device("cpu")
model = modeling.BertForQuestionAnswering(bert_config)
model.bert.load_state_dict(torch.load(init_checkpoint_pt, map_location='cpu'))
model.to(device)
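# Give the QA output layer deterministic weights (1.0) and biases (0.0) so the
# PyTorch head matches the identically initialized TF model described above.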
model.qa_outputs.weight.data.fill_(1.0)
model.qa_outputs.bias.data.zero_()
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
all_start_positions = torch.tensor([[f.start_position] for f in eval_features], dtype=torch.long)
all_end_positions = torch.tensor([[f.end_position] for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_start_positions, all_end_positions, all_example_index)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=1)
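# batch_size=1 with a SequentialSampler keeps the features in the same order as
# the TF loop, so example_index can be used to look up the matching eval_feature.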
model.eval()
None
batch = next(iter(eval_dataloader))
input_ids, input_mask, segment_ids, start_positions, end_positions, example_index = batch
print([t.shape for t in batch])
start_positions.size()
pytorch_all_out = []
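# Mirror the TF loop: run a single feature through the model, record its loss
# and logits, then break so both models are compared on the same example.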
for batch in tqdm(eval_dataloader, desc="Evaluating"):
input_ids, input_mask, segment_ids, start_positions, end_positions, example_index = batch
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
start_positions = start_positions.to(device)
end_positions = end_positions.to(device)
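    # With start/end positions supplied, the forward pass returns the total
    # span loss together with the raw start and end logits.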
total_loss, (start_logits, end_logits) = model(input_ids, segment_ids, input_mask, start_positions, end_positions)
eval_feature = eval_features[example_index.item()]
output_json = collections.OrderedDict()
output_json["linex_index"] = unique_id
output_json["tokens"] = [token for (i, token) in enumerate(eval_feature.tokens)]
output_json["total_loss"] = total_loss.detach().cpu().numpy()
output_json["start_logits"] = start_logits.detach().cpu().numpy()
output_json["end_logits"] = end_logits.detach().cpu().numpy()
pytorch_all_out.append(output_json)
break
print(len(pytorch_all_out))
print(len(pytorch_all_out[0]))
print(pytorch_all_out[0].keys())
print("number of tokens", len(pytorch_all_out[0]['tokens']))
print("number of start_logits", len(pytorch_all_out[0]['start_logits']))
print("number of end_logits", len(pytorch_all_out[0]['end_logits']))
pytorch_outputs = [pytorch_all_out[0]['start_logits'], pytorch_all_out[0]['end_logits'], pytorch_all_out[0]['total_loss']]
###Output
_____no_output_____
###Markdown
3/ Comparing the start_logits, end_logits and loss of both models (shapes and root-mean-square difference)
###Code
import numpy as np
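# For each pair of outputs, report the TF shape, the PyTorch shape, and the
# root-mean-square difference between the two sets of values.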
print('shape tensorflow layer, shape pytorch layer, standard deviation')
print('\n'.join(list(str((np.array(tensorflow_outputs[i]).shape,
np.array(pytorch_outputs[i]).shape,
np.sqrt(np.mean((np.array(tensorflow_outputs[i]) - np.array(pytorch_outputs[i]))**2.0)))) for i in range(3))))
print("Total loss of the TF model {} - Total loss of the PT model {}".format(tensorflow_outputs[2][0], pytorch_outputs[2]))
###Output
Total loss of the TF model 9.06024 - Total loss of the PT model 9.0602445602417