path | concatenated_notebook
---|---|
GeneticAlgorithm/3-SAT_AlgoritmoGenetico.ipynb
|
###Markdown
The binary 3-SAT problem using a Genetic Algorithm
###Code
import pandas as pd
import numpy as np
import math
import matplotlib.pylab as plt
#### Loading the problem instance
# The first line of the file is used as the CSV header; its 3rd and 4th tokens
# (the DIMACS "p cnf <variables> <clauses>" line) give the variable and clause counts.
X = pd.read_csv("uf20-017.cnf", sep=' ', header = 0)
numVariables = int(X.columns[2])
numClausulas = int(X.columns[3])
# Drop the trailing column (the 0 that terminates each clause line in DIMACS format)
X.drop(X.columns[3], axis = 'columns', inplace = True)
X.columns = range(X.shape[1])
print(X)
print(numVariables)
print(numClausulas)
###Output
0 1 2
0 5 -6 2
1 -14 4 -15
2 4 -2 16
3 9 -2 4
4 16 -8 6
.. .. .. ..
86 -19 7 -13
87 1 3 15
88 20 10 -9
89 11 -12 1
90 -13 -7 10
[91 rows x 3 columns]
20
91
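###Markdown
Each row of `X` is one clause, and a negative entry means that the variable appears negated: for example, the first row `(5, -6, 2)` encodes the clause $(x_5 \lor \lnot x_6 \lor x_2)$. The fitness functions below rely on this convention.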
###Markdown
Fitness functions Fitness function 1: evaluate the Boolean expression given by X
###Code
def fitness1(individuo, X, numVariables, numClausulas):
expression = True
for i in range(numClausulas):
        # OR together the three literals of clause i (a negative entry means the variable is negated)
aux = False
x1 = X[0][i]
x2 = X[1][i]
x3 = X[2][i]
if x1 < 0:
aux = aux or (not individuo[-x1-1])
else:
aux = aux or individuo[x1-1]
if x2 < 0:
aux = aux or (not individuo[-x2-1])
else:
aux = aux or individuo[x2-1]
if x3 < 0:
aux = aux or (not individuo[-x3-1])
else:
aux = aux or individuo[x3-1]
expression = expression and aux
return int(expression)
###Output
_____no_output_____
###Markdown
Fitness function 2: count the number of satisfied ("True") clauses
###Code
def fitness2(individuo, X, numVariables, numClausulas):
count = 0
for i in range(numClausulas):
        # OR together the three literals of clause i (a negative entry means the variable is negated)
aux = False
x1 = X[0][i]
x2 = X[1][i]
x3 = X[2][i]
if x1 < 0:
aux = aux or (not individuo[-x1-1])
else:
aux = aux or individuo[x1-1]
if x2 < 0:
aux = aux or (not individuo[-x2-1])
else:
aux = aux or individuo[x2-1]
if x3 < 0:
aux = aux or (not individuo[-x3-1])
else:
aux = aux or individuo[x3-1]
count += aux
return count
###Output
_____no_output_____
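###Markdown
As a quick sanity check, both fitness functions can be evaluated on a random assignment; this assumes the cells above have already been run, and the printed numbers will vary from run to run because the assignment is random.
###Code
# Random truth assignment for the 20 variables, same pattern used later to build the initial population
individuo = list(np.random.choice([True, False], numVariables))
print(fitness1(individuo, X, numVariables, numClausulas))  # 1 only if the whole formula is satisfied
print(fitness2(individuo, X, numVariables, numClausulas))  # number of satisfied clauses, between 0 and numClausulas
###Output
_____no_output_____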
###Markdown
Selection operator: binary tournament
###Code
def selectionOperator(X, numVariables, numClausulas, P, N, fitness, mu):
    # 2*mu random individuals are drawn for mu binary tournaments, so there will be mu winners
    # returns the indices of the selected individuals of P
randomIndex = np.random.choice(N, 2*mu, replace=True)
winners = []
k=0
while 2*k+1 < 2*mu:
i1 = randomIndex[2*k]
i2 = randomIndex[2*k+1]
i1_fitness = fitness(P[i1], X, numVariables, numClausulas)
i2_fitness = fitness(P[i2], X, numVariables, numClausulas)
if i1_fitness > i2_fitness:
winners.append(i1)
else:
winners.append(i2)
k += 1
return winners
###Output
_____no_output_____
###Markdown
Crossover operator: two-point crossover
###Code
def crossoverOperator(S, numVars, mu):
    # S is the subpopulation selected from P by the selection operator
    # mu is the number of parents to cross, i.e. the size of S
    # crossover is performed in place on S to avoid extra copies
randomIndex = np.random.choice(mu, mu, replace=False)
i=0
while 2*i+1<mu :
i1 = randomIndex[2*i]
i2 = randomIndex[2*i+1]
        # pick 2 distinct crossover points
crossPoints = np.random.choice(range(1,numVars-1), 2, replace=False)
crossPoints.sort()
        # cross S[i1] with S[i2] (in place, since offspring1/offspring2 are references to the lists in S)
offspring1 = S[i1]
offspring2 = S[i2]
for k in range(numVars):
            if k>=crossPoints[0] and k<=crossPoints[1]: # swap the individuals' bits between the crossover points
aux = offspring1[k]
offspring1[k] = offspring2[k]
offspring2[k] = aux
i += 1
###Output
_____no_output_____
###Markdown
Mutation operator: uniform mutation
###Code
def uniformMutationOperator(C, numVars, p):
    # C is the set of individuals produced by the crossover operator
    # p is the per-bit mutation probability
    # mutation is applied in place on C to avoid extra copies
for i in range(len(C)):
for j in range(numVars):
if np.random.uniform() <= p:
C[i][j] = not C[i][j]
###Output
_____no_output_____
###Markdown
Replacement operator: 10% elitism
###Code
def reemplazoElitismo(X, numVars, numClausulas, P, C_mut, percent, fitness):
fit = []
for i in range(len(P)):
pair = (fitness(P[i], X, numVars, numClausulas), i)
fit.append(pair)
fit.sort(reverse=True)
    print(fit)  # debug output: all (fitness, index) pairs, sorted best-first
elite = fit[:int(len(P)*percent)]
    print('Best fitness in P: ' + str(elite[0][0]))
R = []
for i in range(len(elite)):
R.append(P[elite[i][1]][:])
for i in range(len(C_mut)):
R.append(C_mut[i][:])
return R
###Output
_____no_output_____
###Markdown
Genetic algorithmIts arguments are a dataframe with the problem-instance information in CNF format, the number of Boolean variables (numVariables), the number of clauses (numClausulas), and the fitness function.
###Code
def solveSAT(X, numVariables, numClausulas, fitness, N=100, elite_percent=0.1, maxIter=100):
    mu = int((1-elite_percent)*N) # number of selected parents: the complement of the elite fraction
    prob_mutation = 1.0/numVariables
    # Initial population of size N, sampled uniformly at random
P = []
for i in range(N):
P.append(list(np.random.choice([True, False], numVariables, replace=True)))
best_fitness = []
for t in range(maxIter):
        # Evaluate P with the chosen fitness function
fit = []
for i in range(N):
pair = (fitness(P[i], X, numVariables, numClausulas), i)
fit.append(pair)
fit.sort(reverse=True)
elite = fit[:int(len(P)*elite_percent)]
        print('Generation: ' + str(t) + ' - Best fitness in P: ' + str(elite[0][0]) + " - Index " + str(elite[0][1]))
best_fitness.append(elite[0][0])
        # Selection by binary tournament
winners = selectionOperator(X, numVariables, numClausulas, P, N, fitness, mu)
P2 = []
for i in range(len(winners)):
P2.append(P[winners[i]][:])
        # Crossover
crossoverOperator(P2, numVariables, mu)
# Mutation
uniformMutationOperator(P2, numVariables, prob_mutation)
        # Replacement: the elite of P survives alongside the offspring
for i in range(len(elite)):
P2.append(P[elite[i][1]][:])
P = P2
    # Get the best fitness of the final generation (generation maxIter)
maxFit = 0
maxIndex = 0
for i in range(N):
aux = fitness(P[i], X, numVariables, numClausulas)
if aux > maxFit:
maxFit = aux
maxIndex = i
best_fitness.append(maxFit)
    print('Generation: ' + str(maxIter) + ' - Best fitness in P: ' + str(maxFit) + " - Index " + str(maxIndex))
return P[maxIndex], best_fitness
###Output
_____no_output_____
###Markdown
Applying the genetic algorithm to the 3-SAT problem instance
###Code
maxIter = 100
sol1, best_fitness1 = solveSAT(X, numVariables, numClausulas, fitness1, maxIter = maxIter)
sol1
fitness1(sol1, X, numVariables, numClausulas)
plt.plot(range(maxIter+1), best_fitness1)
plt.show()
sol2, best_fitness2 = solveSAT(X, numVariables, numClausulas, fitness2)
sol2
fitness2(sol2, X, numVariables, numClausulas)
plt.plot(range(maxIter+1), best_fitness2)
plt.show()
###Output
_____no_output_____
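###Markdown
As an additional check, the solution found with `fitness2` can also be evaluated with `fitness1`, which tells us whether it satisfies the whole formula rather than just counting satisfied clauses:
###Code
fitness1(sol2, X, numVariables, numClausulas)
###Output
_____no_output_____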
|
W3D3/W3D3_intro_to_ml_and_lr.ipynb
|
###Markdown
Lighthouse Labs - Synaptive Medical Introduction to Machine Learning W3D1 part 1 Machine Learning & Linear RegressionInstructor: Socorro Dominguez December 04, 2020 Agenda1. Machine Learning - Supervised vs. Unsupervised Learning 2. Supervised Learning 101 - `X` and `y` - Regression vs. Classification - The golden rule: train/test split3. Simple Linear Regression
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
###Output
_____no_output_____
###Markdown
Machine LearningIt is seen as a subset of AI. Machine learning algorithms build a model based on sample data (training data), in order to make predictions without being explicitly programmed to do so. Machine Learning: Supervised Learning- In supervised learning, we have a set of observations (__*X*__) with an associated response (__*y*__)- We wish to find a model function that relates __*X*__ to __*y*__- Then use that model function to predict future observations Machine Learning: Unsupervised Learning- We have __*X*__ (the data) but no __*y*__ (associated response) Supervised Learning 101Lots of terminology!For tabular data:- **examples** = rows = samples = records = instances (usually denoted by $n$)- **features** = inputs = predictors = explanatory variables = regressors = independent variables = covariates (usually denoted by $d$) = **X**- **targets** = outputs = outcomes = response variable = dependent variable = labels (if categorical) = **y**- **training** = learning = fitting Classification vs. Regression* Classification problems: predicting among two or more categories, also known as classes - Example 1: Predict whether a patient has a liver disease or not - Example 2: Predict the letter grade of a student (A, B, C, D or F)* Regression problems: predicting a continuous (in other words, a number) value - Example 1: Predict housing prices - Example 2: Predict a student's score in this course's quiz 2 Let's load some toy data
###Code
Stock_Market = {'Year': [2017,2017,2017,2017,2017,2017,2017,2017,2017,2017,2017,2017,2016,2016,2016,2016,2016,2016,2016,2016,2016,2016,2016,2016],
'Month': [12, 11,10,9,8,7,6,5,4,3,2,1,12,11,10,9,8,7,6,5,4,3,2,1],
'Interest_Rate': [2.75,2.5,2.5,2.5,2.5,2.5,2.5,2.25,2.25,2.25,2,2,2,1.75,1.75,1.75,1.75,1.75,1.75,1.75,1.75,1.75,1.75,1.75],
'Unemployment_Rate': [5.3,5.3,5.3,5.3,5.4,5.6,5.5,5.5,5.5,5.6,5.7,5.9,6,5.9,5.8,6.1,6.2,6.1,6.1,6.1,5.9,6.2,6.2,6.1],
'Stock_Index_Price': [1464,1394,1357,1293,1256,1254,1234,1195,1159,1167,1130,1075,1047,965,943,958,971,949,884,866,876,822,704,719]
}
stock_df = pd.DataFrame(Stock_Market,columns=['Year','Month','Interest_Rate','Unemployment_Rate','Stock_Index_Price'])
stock_df.head()
stock_df.shape
###Output
_____no_output_____
###Markdown
Splitting out our X and y- In this case, are we working with a regression problem? Could you say why?- Can you help me identify what the features are?- Along the same lines, what would the output or target variable be?
###Code
X = stock_df[['Interest_Rate','Unemployment_Rate']]
y = stock_df['Stock_Index_Price']
X.head()
y.head()
###Output
_____no_output_____
###Markdown
The golden rule- When you're doing machine learning, now that you've identified **X** and **y**- BEFORE YOU DO ANYTHING ELSE...- **Including** exploratory data analysis, visualization etc.- **You need to split your data into train and test**- **You only work with the training data** Why?- As soon as you start making decisions on what features to include, drop etc., you are letting a part of the test data influence your decision-making- Your results will not be truly representative of "unseen data" So... how do we split?- The most common way is to use `train_test_split` from `sklearn`- It shuffles the data first and then splits it- 80/20, 75/25, 70/30 are common splits
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train.shape
X_test.shape
###Output
_____no_output_____
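###Markdown
A small practical note: `train_test_split` shuffles randomly, so the split changes on every run. Passing a fixed `random_state` makes the split reproducible, for example:
###Code
# any fixed integer works here; 42 is just an example value
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
X_train.shape, X_test.shape
###Output
_____no_output_____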
###Markdown
The big picture- We train using the **training data**- We test what is learned by the model on the **test data**- We have two scores: **training** vs. **test** Which matters more?- It doesn't matter how good our **training score** is because the **test score is what matters**- Good models that generalize well though will have **similar training and testing scores****We want to pick models that generalize well to unseen data** The fundamental tradeoff (aka the bias-variance tradeoff)| Model | Training Score relative to Test Score | Performance ||:-|:-|:-||Complex|High training score compared to test score| Overfit ||Simple|Low training score and low test score|Underfit|- Models that have **extremely high training scores** (that are too good to be true) that are **highly complex** that learned very complex relationships in the training data **can be overfit**- On the other hand, models that have **low training scores** that are **very simple** may not have learned the necessary relationships in the training data needed to predict well on unseen data; they are **underfit** Linear Regression 101- Used as a predictive model- Assumes a linear relationship between the dependent variable (which is the variable we are trying to predict/estimate, **y**) and the independent variable/s (input variable/s used in the prediction, **X**) Let's start with **simple** linear regression- Only one independent/input variable is used to predict the dependent variable. Simple Linear Regression$$Y = C + M*X$$$Y$ = Dependent variable (output/outcome/prediction/estimation)$C$ = Constant (y-intercept)$M$ = Slope of the regression line (the effect that X has on Y)$X$ = Independent variable (input variable used in the prediction of Y) Multiple Linear Regression- Many $X$ and $M$$$Y = C + M_1X_1 + M_2X_2 + M_3X_3 ...$$- The higher the M is, the more influence the relevant X has on the variable Y Matrix representation- $\hat{y}$ is the linear function of features $x$ and weights $w$. $$\hat{y} = w^Tx + b$$ - $\hat{y} \rightarrow$ prediction- $w \rightarrow$ weight vector- $b \rightarrow$ bias- $x \rightarrow$ features$$\hat{y} = \begin{bmatrix}w_1 & w_2 & \cdots & w_d\end{bmatrix}\begin{bmatrix}x_1 \\ x_2 \\ \vdots \\ x_d\end{bmatrix} + b$$ Let's try it!Remembering our dataset...
###Code
import statsmodels.api as sm
X_train.head()
y_train.head()
###Output
_____no_output_____
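###Markdown
To connect the matrix notation above to code, here is a minimal illustration of $\hat{y} = w^Tx + b$ for a single example, using made-up weights and bias (the fitted model below will estimate the real ones):
###Code
# Illustrative only: made-up weights for Interest_Rate and Unemployment_Rate
w = np.array([100.0, -50.0])
b = 1000.0
x = X_train.iloc[0].values   # features of one training example
y_hat = w @ x + b
y_hat
###Output
_____no_output_____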
###Markdown
Fitting a linear model- Let's look at the results
###Code
X_train = sm.add_constant(X_train) # adding a constant
model = sm.OLS(y_train, X_train).fit()
model.summary()
###Output
/Users/seiryu8808/opt/anaconda3/lib/python3.7/site-packages/scipy/stats/stats.py:1604: UserWarning: kurtosistest only valid for n>=20 ... continuing anyway, n=19
"anyway, n=%i" % int(n))
|
soluciones/ja.peinado/tarea3/Tarea3final201913758l.ipynb
|
###Markdown
Assignment 3: Find the regressionYou receive data $x$ and $y$ as shown below. You must answer four questions from these data. Assume you have a model such that $y=f(x)$, but you do not know $f$.
###Code
# Imports used throughout this notebook
import numpy as np
import pandas as pd
import scipy as sp
import scipy.optimize
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
df = pd.read_pickle('ex1.gz')
sns.scatterplot(x='x',y='y',data=df)
plt.show()
df
###Output
_____no_output_____
###Markdown
(A) Slope and interceptDetermine the slope of the data on the interval $[0,1.5]$ and the value of the intercept with the $y$ axis, that is, $f(0)=?$. What is the value of $r^2$?
###Code
d = df[(df.x >= 0) & (df.x <= 1.5)]
d
X_A= d['x'].values.reshape(-1,1)
Y_A= d['y'].values.reshape(-1,1)
modelo = LinearRegression()
modelo.fit(X_A, Y_A)
print("Intercepto eje y:", modelo.intercept_)
print("Pendiente:", modelo.coef_)
print("R^2:", modelo.score(X_A,Y_A))
###Output
y-intercept: [0.18270691]
Slope: [[0.81638696]]
R^2: 0.9316416262309236
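###Markdown
A quick visual check of the fit on $[0, 1.5]$, reusing `X_A`, `Y_A` and `modelo` from the cell above:
###Code
# scatter of the restricted data with the fitted line on top
plt.scatter(X_A, Y_A, s=10, label='data in [0, 1.5]')
plt.plot(X_A, modelo.predict(X_A), color='red', label='linear fit')
plt.legend()
plt.show()
###Output
_____no_output_____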
###Markdown
(B) Polynomial regressionSuppose you want to perform the following polynomial regression,$$y=\beta_1+\beta_2x+\beta_3x^2+\beta_4x^3+\beta_5x^4+\beta_6x^5.$$Write down the cost function that allows you to compute the coefficients and compute $\beta_1$, $\beta_2$, $\beta_3$, $\beta_4$, $\beta_5$ and $\beta_6$. What is the $r^2$?Compute $f(0)$ and compare with the previous results
###Code
# Definition of the cost function (mean squared error)
def L(x,A,b):
    m,n = A.shape
    X = np.matrix(x).T
    DeltaB=(A*X-b) # residual: b_hat - b
    return (DeltaB.T*DeltaB)[0,0]/m # 1x1 matrix -> scalar
Y = df.loc[:, ['y']]
X = df.loc[:, ['x']].rename(columns={'x': 'x1'})
X.insert(0, 'x0', 1)
X['x2'] = X['x1']*X['x1']
X['x3'] = X['x2']*X['x1']
X['x4'] = X['x3']*X['x1']
X['x5'] = X['x4']*X['x1']
Xn = X.to_numpy()
Yn = Y.to_numpy()
opti = sp.optimize.minimize(fun=L,x0=np.zeros(Xn.shape[1]), args = (Xn,Yn), tol=1e-10)
print("El valor de los coeficientes es:",opti['x'])
print("El valor de f(0):",opti['x'][0])
yb = df["y"]
b = np.linspace(0,4,100)
def fb(a,b,c,d,e,f,x):
return a*x**5 + b*x**4 + c*x**3 + d*x**2 + e*x + f
prediccion = fb(opti['x'][5],opti['x'][4],opti['x'][3],opti['x'][2],opti['x'][1],opti['x'][0],b)
r2 = 1-np.sum((prediccion-yb)**2)/np.sum((yb-yb.mean())**2)
r2
print("Se observa un resultado similar al de la polinomial exacta, varian los valores decimales,ambos cuentan con una gran precisiรณn")
###Output
The result is very similar to the exact polynomial solution; only some decimal places differ, and both are highly precise
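###Markdown
As an aside, the degree-5 design matrix built column by column above can be obtained in one call with `np.vander`; this sketch assumes `df` and `Xn` from the previous cells are available:
###Code
# columns are x^0, x^1, ..., x^5 when increasing=True
Xn_alt = np.vander(df['x'].to_numpy(), N=6, increasing=True)
np.allclose(Xn_alt, Xn)
###Output
_____no_output_____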
###Markdown
(C) Exact polynomial regressionIt turns out that any polynomial regression can be solved exactly. How? Suppose that, instead of having $1$ variable ($x$), your problem has $n+1$ variables, where $n$ is the order of the polynomial to fit. That is, your new variables will be $\{x_0,\,x_1,\,x_2,\,x_3,\dots,\,x_n\}$, defining $x_j=x^j$. Then, following the same procedure used for multidimensional linear regression in the real-estate data exercise (i.e. the normal equations $\boldsymbol\beta=(X^TX)^{-1}X^Ty$), you can find the values of the coefficients $\beta_1$, $\beta_2$, $\beta_3$, $\beta_4$, $\beta_5$ and $\beta_6$. Find these values and compare with the results of section **(B)**.Compute $f(0)$ and compare with the previous results.> If you are wondering whether this is possible, the answer is yes. Moreover, it can be extended to any set of functions, with $x_j=f_j(x)$, that forms a "linearly independent" set (I am getting ahead of *Fourier*!). For those who want to explore some mathematical curiosities: when $n+1$ equals the number of points or values of $x$ (all of them different), the matrix is always invertible and turns out to be the inverse of a Vandermonde matrix.
###Code
Y = df.loc[:, ['y']]
Y
X = df.loc[:, ['x']].rename(columns={'x': 'x1'})
X.insert(0, 'x0', 1)
X['x2'] = X['x1']*X['x1']
X['x3'] = X['x2']*X['x1']
X['x4'] = X['x3']*X['x1']
X['x5'] = X['x4']*X['x1']
Xn = X.to_numpy()
Yn = Y.to_numpy()
Rpe= np.linalg.inv(Xn.T @ Xn) @ Xn.T @ Yn
b0, b1, b2, b3, b4, b5 = Rpe
coeficientes = str(b0) +','+ str(b1) + ',' + str(b2) + ',' + str(b3) + ',' + str(b4) + ',' + str(b5)
print(f"los coeficientes encontrados son = {coeficientes}")
print(f"El valor de f(0) es :", Rpe[0])
print("Se observa una gran similaridad con el valor de la regresiรณn polinomica, ambos datos se encuentran bien respecto a lo esperado con la grafรญca ")
###Output
The result is very close to the value from the numerical polynomial regression; both agree well with what the plot suggests
###Markdown
(D) Regression to a theoretical modelSuppose your theoretical model is:$$y=\frac{a}{\left[(x-b)^2+c\right]^\gamma}.$$Find $a$, $b$, $c$ and $\gamma$.Compute $f(0)$ and compare with the previous results
###Code
def f(param,x):
return (param[0])/((x-param[1])**2 + param[2])**param[3]
def Lfit(parametros,x,y): # MSE cost function (not necessarily the best choice!)
    # L = average over all points of (f(params; x) - y)^2
    # parametros: np.array([a, b, c, gamma])
deltaY=f(parametros,x) - y
return np.dot(deltaY,deltaY)/len(y)
xb = df["x"]
opti2 = sp.optimize.minimize(fun=Lfit, x0=np.array([0,0,1,0]), args = (xb,yb), method='L-BFGS-B', tol=1e-8)
print("El valor de a,b,c y omega es respectivamente:",opti2['x'])
print("El valor de f(0) es:", f(opti.x,0))
print("Este metodo es el mรกs impreciso respecto a los demas, se observa una significanete diferencia respecto a los demas metodos en el valor de f(0).")
###Output
This method is the least precise of them all; a significant difference in the value of f(0) is observed with respect to the other methods.
|
week2/1_Conditions.ipynb
|
###Markdown
ืชื ืืื ืืจืืืื ืืืืื ืืฉืืืข ืืฉื ื ืฉื ืืงืืจืก!ืื ืื ื ืคืืชืืื ืืช ืืฉืืืข ืื ืืฉื ืืขื ืืื; ืืจืืฉืื ืฉืืืคืฉืจ ืื ื ืืืฆืืจ ืชืืื ืืืช ืืืจืืืืช ืฉืืงืืืืช ืืืืืืช. ืืืืืืื ืืืืืื ืืื ื ืชืืื ืืชืืืืจืช ืื ืืืข ืืืืืืืื ืืืืืื ืืื, ืฉืืืชื ืืืจื ื ืืฉืืืข ืฉืขืืจ:
###Code
print("hello" == "Hello")
print(1 > 2)
print(2 * 3 == 1 or 1 + 1 == 2)
print('a' + 'b' == 'ab' and not 1 + 2 == 3)
###Output
_____no_output_____
###Markdown
ืื ืื, ืืืืืืื ืืืืืื ืืื ืื ืืืืืืื ืฉืชืืฆืืชื ืืื True ืื False.ืืขืืชืื ืงืจืืืืช ืืืืืืื ืืื ืืืืื ืืืคืจืืืจืื ืืืืืื, ืืื and, or ืึพnot. ืืชื ืื ืฉ... Two roads diverged in a yellow wood,And sorry I could not travel both- Robert Frost, The Road Not Taken ื ืงืคืืฅ ืืืื ืืื ืืงืืื. ืื ื ืงืื ืืขื ืืื:
###Code
user_age = int(input("Please enter your age: "))
if user_age < 18:
print("You are not allowed to enter :(")
print("This is the end of the program.")
###Output
_____no_output_____
###Markdown
ืื ืงืืข ืืงืื ืืื ืืขืฉื?ืืช ืืฉืืจื ืืจืืฉืื ื ืื ืื ื ืืืจ ืืืืขืื ืืงืจืื: ืงืืื ื ืืช ืืื ืืืฉืชืืฉ, ืืืจื ื ืืืชื ืืืกืคืจ ืฉืื, ืืขืืฉืื ืืืฉืชื ื user_age ืืฆืืืข ืขืืื.ืืฉืืจื ืืฉื ืืื ืืจืื ืืืจ ืืืืจ: ื ืืืืง ืืื ืืืื ืงืื ืึพ18. ืืื ืื ืื ืึพif ืืื? ืืฉืื ืื ื ืืขืื ืืื ืื ืืืืื ืืคืืืชืื ืืืฆืข ืจืฆืฃ ืฉื ืฉืืจืืช ืจืง ืืชื ืื ืฉืืงืจื ืืฉืื ืืกืืื, ื ืฉืชืืฉ ืืืืืช ืืืคืชื if.ื ืืืง ืืช ืืืื ื ืฉื ืชื ืื ืืคืืืชืื ืึพ2 ืืืงืื: ืืืืืื ืืืืืืื ื, ืฉืืืื ืืืื ืืืจื ืืืืช ืืืคืชื if. ืืคืงืืืืช ืฉืืฉ ืืืฆืข, ืฉืืืืื ืืฉืืจืืช ืฉืืืจืื. ืืืืืืืช ืืชื ืืืืืืืืืืืช ืืืืืช, ืืชื ืื ืืืคืืข ืื, ืืืคืขืืืืช ืฉืืงืจื ืืขืงืืืชืื ืืืคืืขืืช ืื.ืืฉืืข ืืช ืืืืืืงื ืฉื ืืฉืขืื ืืืขืืจืจ ืจืง ืืฉืืฉืขื ืืื 20:00.ืื ืืืืฅ ืืืืืก ืื ืชืงืื, ืืืจื ืื ืืกืขืื ืืช ืืกืืืช ืืืืฆื ืืืืืจ ืืื ืื ืืืื ืก ืืืฆื ืืืจืื.ืื ืืืฉืชืืฉ ืกืืื ืงืืืฅ ืืื ืื ืืืชืจ ืืืืงืฉ ืฉื ืืืฅ ืืื DEL, ืืืง ืืช ืืงืืฆืื ืืืกืืื ืื.ืจืง ืื ืขืืืช ืืงื ืืืช ืฉืื ืืืืื ืืืฆืจืื ืืฉืืื 100 ืฉ"ื ืืคืืืช, ืืคืฉืจ ืืืฉืชืืฉ ืืืฆืข ืืืื ื ืขื ืืฉืืื.ืืกืคืืื, ืืืฆื ืืืฉืชืืฉ ืืกืฃ, ืืืคืก ืงืืื ืืฆื ืืืืืฉืง ืื ืืืฉืชืืฉ ืืืื ืกืืื ืฉืืชืืืง ืึพ50, ืืื ืืืื ืืช ืืงืื ืืกืืื ืื ืืื ืืืกืืื ืืื ืงืืื ืืืฉืืื ืืื ืง ืฉืื. ืืจืืืช ืืชืืื ืืช: ืฆืืืจ ืืืืืื ืืืชืืื ืชื ืืื ืืกืื ืฉื ืชื ืื ืืืืจ ืืืืจืืืช ืืื:if ืืชื ืื ืฉืืื, ืืืืื ืืืืืื ื: ืื ืฉืืชื ืจืืฆืื ืืขืฉืืช (ืืืฃ ืืชื ืื)ื ืกืืืจ:ืืชื ืื ืืชืืื ืืืืืช ืืืคืชื if. ืืื ืคื ืื ืืฉื ืืช.ืืืจื ืืืืช ืืืคืชื if, ืืืื ืืืืื ืืืืืื ื. ืื ืชืืฆืืชื ืชืืื True, ืื ืฉื ืืฆื ืืืืฃ ืืชื ืื ืืชืืฆืข. ืื False, ืคืืืชืื ืชืชืขืื ืืืืจื ืืืืฃ ืืชื ืื.ืืืื ืืืจื ืืืืืื ืืืืืืื ื ืชืืืื ื ื ืงืืืชืืื, ืฉืืกืื ืืช ืฉืืื ื ืืืจืช ืืฉืืื (ืืืืืื ืืืืืืื ื), ืืืชืืื ืืืืง ืืืืฆืืขื โ ืืืฃ ืืชื ืื.ืืคื ื ืฉื ืืงืฉ ืืคืืืชืื ืืืฆืข ืืฉืื ืืขืงืืืช ืงืืื ืืชื ืื ืฉืืื ืึพif, ื ืฆืืจื ืืืืกืืฃ ืืคื ืื ืืืื. ืืืื ืืื ืชืืืื ืฉื ืืฉืืจื ืืืืื, ืืืื ื ืืขืื ืืกืื ืืื ืืืช ืืกืืืืช ืืงืื โ ื ื ืื ืฉืืฉืืจื ืืื ืืชืืืืกืช ืึพif ืฉื ืืฆื ืืคื ืื. ืื ืื ื ืจืืฆืื ืฉืืงืื ืฉื ืืชืื ืืืจื ืืืืื ืืจืืฅ ืจืง ืื ืืชื ืื ืฉืืคื ื ืืืืื ืืชืงืืื. ืืคืืืชืื ืืืื ืืืืืจืช ืืืจืืขื ืจืืืืื, ืืื ืืคืฉืจ ืคืฉืื ืืืืืฅ Tab โน ืืืงืืืช ืืืืืืจืช ืชืืืืฃ ืืื ืืช ืืืื ืืจืืืืื ืืขืฆืื.ืืกืืฃ ืกืืฃ ืืืขื ื ืืืืจื: ืืืจื ืืืืื, ืืชืื ืืช ืื ืฉืืชื ืจืืฆืื ืืขืฉืืช โ ืื ืคืขืืืช ืคืืืชืื ืฉืขืืื ืขื ืืขืชืื ืชืชืืื ืืื.ืชืืืื ืืืืกืืฃ ืืื ืฉืืจืืช ืฉืชืจืฆื ืืืจื ืึพif. ืืงืื ืืฉืืจื ืืจืืฅ ืืืืง ืืืชื ืื ืื ืขืื ืืื ืืืืืช ืื ืืฆืืช ืืฉืืจืืช ืืืจื ืึพif, ืื ืืืจื ืฉืืจืืช ืืืืืืช ืืืจืืช ืืฉืืจืืช ืืชืืช ืึพif. ืืืืืืืช ืงืื ื ืืกืคืืช
###Code
user_fullname = input('Please enter your full name: ')
is_temple_open = False
if 'Cohen' in user_fullname:
print('You may enter the temple!')
is_temple_open = True
print('')
print('Thank you, ' + user_fullname + '.')
print('Is the temple open?: ' + str(is_temple_open))
###Output
You may enter the temple!
Thank you, ItaiCohenBOB.
Is the temple open?: True
###Markdown
ื ืกื ืืืืื ืื ืืงืื ืืืขืื ืืืืจ. ืืจืืฆื ืืืืงื ืฉืขื ืืชื ื ืืื ื ืืคื ื ืฉืชืชืงืืื ืืืื. ืืฉืื! ืคืชืจื ืืคื ื ืฉืชืืฉืืื! ืืืืื ืืืจืช:
###Code
age = int(input('Please enter your age: '))
allowed_to_enter = False
if age >= 18:
allowed_to_enter = True
if age < 18:
print('Please wait until midnight before answering the following question:')
answer_for_birthday_is_today = input('Is it your birthday today? [yes/no]: ')
if age == 17 and answer_for_birthday_is_today == 'yes':
print('Happy birthday! You are 18 now.')
age = age + 1
allowed_to_enter = True
if allowed_to_enter:
print('Welcome!')
if not allowed_to_enter:
print('Sorry... Byebye')
###Output
Please wait until midnight before answering the following question:
Sorry... Byebye
###Markdown
ืื ืืชืจืืฉ ืืงืื ืืื?ืืฉืืจื ืืจืืฉืื ื, ืืืงืฉื ื ืืช ืืืื ืฉื ืืืฉืชืืฉ ืืืจืื ื ืืืฉืชื ื age ืืืฆืืืข ืืืื.ืืฉืืจื ืืฉื ืืื, ืืืงืฉื ื ืฉึพallowed_to_enter ืืืื False. ืื ืงืฆืช ืืจืืฉืข ืืืืื ืืจืืคื, ืืื ืื ืื ื ืืขืืืคืื ืืืจืืจืช ืืืื ืื ืืืื ืืก ืืฃ ืืื ืืืกืืื ืฉืื ื.ืืฉืื ืืื ืืืงื ื ืืื age (ืืื ืืืฉืชืืฉ ืฉืื ื) ืืื ืืคืืืช 18, ืืื ืื ืืืืจื ื ืฉืืืื ืืืื ืก ืืืกืืื. ืขื ืืื ืื ืงื.ืขืืฉืื ื ืชืืจ ืื ืชืืื ืืืชืจืืฉืืช ืื ืืืื ื ืืื ืึพ18.ืื ืื ื ืืืืื ืืืฆืืช, ืืืืงืฉืื ืืืขืช ืื ืืื ืืืืืืช ืฉื ืืืฉืชืืฉ ืืื ืืืื.ืืืื ืื ืื ื ืคืืชืืื ืชื ืื ื ืืกืฃ, ืืชืื ืืชื ืื ืืงืืื. ืื ืืืืจ ืฉืจืง ืื age ืืื ืงืื ืึพ18 ืืืืืงื ืืืื ืชืงืจื (ืืืื ืฉืื ืื ื ืขืืืื ืืชืื ืืชื ืื age < 18, ืจืื ืืช ืืืืื):ืื ืืืฉืชืืฉ ืืื ืืก ืฉืืืื ืืื ืืืืืืช ืฉืื, ืืืืื ืฉืื ืืื ืขื ืขืืฉืื 17, ืื ืขืืฉืื ืืื ืืื ืืช ืื 18 ืืืืื ืืืืื ืก ืืืกืืื ืฉืื ื. ื ืืืื ืืช ืืืื ืฉืื ืื ืจืฉื ืื ืืืืื ืก.ืขืืฉืื ื ืฉืืจ ืจืง ืืืชืืืืก ืืืฉืชื ื allowed_to_enter ืฉืืืืจื ื ืืืขืื, ืืืืืง ืืื ืืืฉืชืืฉ ืืืชืจ ืืืื ืก ืื ืื, ืืืืืคืืก ืืืืขื ืืชืืืื. ืืืคื! ืชืจืืื: ื ืกื ืืขืจืื ืืช ืืงืื ืฉืืืขืื, ืื ืฉืืฉืื ืื ืืื ืืื ืืืืืืช ืฉื ืืืฉืชืืฉ ืจืง ืื ืืื ืื 17. ืืืงื ืฉืืงืื ืขืืืื ืขืืื ืืืฆืืคื. ืืขืืช ื ืคืืฆื ืืื ืืฉืืื ืืช ืื ืงืืืชืืื ืืืจื ืึพif. ืืชื ืืื ืืืืจืื ืืขืฆืืื ืฉืื ืชืขืฉื ืืช ืื, ืืื ืืชื ืชืขืฉื ืืช ืื, ืกืืื ืขืืื. ืคืืืชืื ืืืจืงืช ืืืืขืืช ืฉืืืื ืืขืฆืื ืืช ืืื ืืื ืืืงืืืืืืช ืืฉืื ืงืืจื. ื ืกื ืืืืืืจ ืืืืืจื ืืื ๐ ืชืจืืื ืื ืืกื ืืื ืง, ืฉืื 1 ืฉื ืืืฉืชืืฉ ืฉืื ืืื ืง ืืื wrong, ืืืกืืกืื ืฉืื ืืื ads sports. ืงืืื ืืืืฉืชืืฉ ืฉื ืืฉืชืืฉ ืืกืืกืื, ืืืืคืืกื ืื ืืืืขื ืืคื ืื ืืฆืืื ืืืชืืืจ. ืื ืื ืืขืืชื ืชืงื ื ืืช ืืงืื ืืื ืื ืฉืืจืืฅ ืืืฆืืื.ืืชืื ืืขืฆืืื ืืช ืืชืืงืื ืื ืฉืืืฆืขืชื ืืื ืืืืื ืข ืืืขืืืืช ืืืืืช ืืฆืืื ืืขืชืื!
###Code
a = 3
b = 4
c = 5
if a ** 2 + b ** 2 == c ** 2:
print("This line should run for 3, 4, 5 but not for 4, 5, 6")
print("This line should run anyway")
###Output
This line should run for 3, 4, 5 but not for 4, 5, 6
This line should run anyway
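###Markdown
For comparison, the same check with the triple 4, 5, 6: the condition does not hold ($16 + 25 = 41 \neq 36$), so only the unindented `print` runs.
###Code
a = 4
b = 5
c = 6
if a ** 2 + b ** 2 == c ** 2:
    print("This line should run for 3, 4, 5 but not for 4, 5, 6")
print("This line should run anyway")
###Output
This line should run anyway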
###Markdown
ืชื ืืื ืืจืืืื ืืืืื ืืฉืืืข ืืฉื ื ืฉื ืืงืืจืก!ืื ืื ื ืคืืชืืื ืืช ืืฉืืืข ืื ืืฉื ืืขื ืืื; ืืจืืฉืื ืฉืืืคืฉืจ ืื ื ืืืฆืืจ ืชืืื ืืืช ืืืจืืืืช ืฉืืงืืืืช ืืืืืืช. ืืืืืืื ืืืืืื ืืื ื ืชืืื ืืชืืืืจืช ืื ืืืข ืืืืืืืื ืืืืืื ืืื, ืฉืืืชื ืืืจื ื ืืฉืืืข ืฉืขืืจ:
###Code
print("hello" == "Hello")
print(1 > 2)
print(2 * 3 == 1 or 1 + 1 == 2)
print('a' + 'b' == 'ab' and not 1 + 2 == 3)
###Output
_____no_output_____
###Markdown
ืื ืื, ืืืืืืื ืืืืืื ืืื ืื ืืืืืืื ืฉืชืืฆืืชื ืืื True ืื False.ืืขืืชืื ืงืจืืืืช ืืืืืืื ืืื ืืืืื ืืืคืจืืืจืื ืืืืืื, ืืื and, or ืึพnot. ืืชื ืื ืฉ... Two roads diverged in a yellow wood,And sorry I could not travel both- Robert Frost, The Road Not Taken ื ืงืคืืฅ ืืืื ืืื ืืงืืื. ืื ื ืงืื ืืขื ืืื:
###Code
user_age = int(input("Please enter your age: "))
if user_age < 18:
print("You are not allowed to enter :(")
print("This is the end of the program.")
###Output
_____no_output_____
###Markdown
ืื ืงืืข ืืงืื ืืื ืืขืฉื?ืืช ืืฉืืจื ืืจืืฉืื ื ืื ืื ื ืืืจ ืืืืขืื ืืงืจืื: ืงืืื ื ืืช ืืื ืืืฉืชืืฉ, ืืืจื ื ืืืชื ืืืกืคืจ ืฉืื, ืืขืืฉืื ืืืฉืชื ื user_age ืืฆืืืข ืขืืื.ืืฉืืจื ืืฉื ืืื ืืจืื ืืืจ ืืืืจ: ื ืืืืง ืืื ืืืื ืงืื ืึพ18. ืืื ืื ืื ืึพif ืืื? ืืฉืื ืื ื ืืขืื ืืื ืื ืืืืื ืืคืืืชืื ืืืฆืข ืจืฆืฃ ืฉื ืฉืืจืืช ืจืง ืืชื ืื ืฉืืงืจื ืืฉืื ืืกืืื, ื ืฉืชืืฉ ืืืืืช ืืืคืชื if.ื ืืืง ืืช ืืืื ื ืฉื ืชื ืื ืืคืืืชืื ืึพ2 ืืืงืื:ืืืืืื ืืืืืืื ื, ืฉืืืื ืืืื ืืืจื ืืืืช ืืืคืชื if.ืืคืงืืืืช ืฉืืฉ ืืืฆืข, ืฉืืืืื ืืฉืืจืืช ืฉืืืจืื. ืืืืืืืช ืืชื ืืืืืืืืืืืช ืืืืืช, ืืชื ืื ืืืคืืข ืื, ืืืคืขืืืืช ืฉืืงืจื ืืขืงืืืชืื ืืืคืืขืืช ืื.ืืฉืืข ืืช ืืืืืืงื ืฉื ืืฉืขืื ืืืขืืจืจ ืจืง ืืฉืืฉืขื ืืื 20:00.ืื ืืืืฅ ืืืืืก ืื ืชืงืื, ืืืจื ืื ืืกืขืื ืืช ืืกืืืช ืืืืฆื ืืืืืจ ืืื ืื ืืืื ืก ืืืฆื ืืืจืื.ืื ืืืฉืชืืฉ ืกืืื ืงืืืฅ ืืื ืื ืืืชืจ ืืืืงืฉ ืฉื ืืืฅ ืืื DEL, ืืืง ืืช ืืงืืฆืื ืืืกืืื ืื.ืจืง ืื ืขืืืช ืืงื ืืืช ืฉืื ืืืืื ืืืฆืจืื ืืฉืืื 100 ืฉ"ื ืืคืืืช, ืืคืฉืจ ืืืฉืชืืฉ ืืืฆืข ืืืื ื ืขื ืืฉืืื.ืืกืคืืื, ืืืฆื ืืืฉืชืืฉ ืืกืฃ, ืืืคืก ืงืืื ืืฆื ืืืืืฉืง ืื ืืืฉืชืืฉ ืืืื ืกืืื ืฉืืชืืืง ืึพ50, ืืื ืืืื ืืช ืืงืื ืืกืืื ืื ืืื ืืืกืืื ืืื ืงืืื ืืืฉืืื ืืื ืง ืฉืื. ืืจืืืช ืืชืืื ืืช: ืฆืืืจ ืืืืืื ืืืชืืื ืชื ืืื ืืกืื ืฉื ืชื ืื ืืืืจ ืืืืจืืืช ืืื:if ืืชื ืื ืฉืืื, ืืืืื ืืืืืื ื:ย ย ย ย ืื ืฉืืชื ืจืืฆืื ืืขืฉืืช (ืืืฃ ืืชื ืื)ื ืกืืืจ:ืืชื ืื ืืชืืื ืืืืืช ืืืคืชื if. ืืื ืคื ืื ืืฉื ืืช.ืืืจื ืืืืช ืืืคืชื if, ืืืื ืืืืื ืืืืืื ื. ืื ืชืืฆืืชื ืชืืื True, ืื ืฉื ืืฆื ืืืืฃ ืืชื ืื ืืชืืฆืข. ืื False, ืคืืืชืื ืชืชืขืื ืืืืจื ืืืืฃ ืืชื ืื.ืืืื ืืืจื ืืืืืื ืืืืืืื ื ืชืืืื ื ื ืงืืืชืืื, ืฉืืกืื ืืช ืฉืืื ื ืืืจืช ืืฉืืื (ืืืืืื ืืืืืืื ื), ืืืชืืื ืืืืง ืืืืฆืืขื โ ืืืฃ ืืชื ืื.ืืคื ื ืฉื ืืงืฉ ืืคืืืชืื ืืืฆืข ืืฉืื ืืขืงืืืช ืงืืื ืืชื ืื ืฉืืื ืึพif, ื ืฆืืจื ืืืืกืืฃ ืืคื ืื ืืืื. ืืืื ืืื ืชืืืื ืฉื ืืฉืืจื ืืืืื, ืืืื ื ืืขืื ืืกืื ืืื ืืืช ืืกืืืืช ืืงืื โ ื ื ืื ืฉืืฉืืจื ืืื ืืชืืืืกืช ืึพif ืฉื ืืฆื ืืคื ืื. ืื ืื ื ืจืืฆืื ืฉืืงืื ืฉื ืืชืื ืืืจื ืืืืื ืืจืืฅ ืจืง ืื ืืชื ืื ืฉืืคื ื ืืืืื ืืชืงืืื. ืืคืืืชืื ืืืื ืืืืืจืช ืืืจืืขื ืจืืืืื, ืืื ืืคืฉืจ ืคืฉืื ืืืืืฅ Tab โน ืืืงืืืช ืืืืืืจืช ืชืืืืฃ ืืื ืืช ืืืื ืืจืืืืื ืืขืฆืื.ืืกืืฃ ืกืืฃ ืืืขื ื ืืืืจื: ืืืจื ืืืืื, ืืชืื ืืช ืื ืฉืืชื ืจืืฆืื ืืขืฉืืช โ ืื ืคืขืืืช ืคืืืชืื ืฉืขืืื ืขื ืืขืชืื ืชืชืืื ืืื.ืชืืืื ืืืืกืืฃ ืืื ืฉืืจืืช ืฉืชืจืฆื ืืืจื ืึพif. ืืงืื ืืฉืืจื ืืจืืฅ ืืืืง ืืืชื ืื ืื ืขืื ืืื ืืืืืช ืื ืืฆืืช ืืฉืืจืืช ืืืจื ืึพif, ืื ืืืจื ืฉืืจืืช ืืืืืืช ืืืจืืช ืืฉืืจืืช ืืชืืช ืึพif. ืืืืืืืช ืงืื ื ืืกืคืืช
###Code
user_fullname = input('Please enter your full name: ')
is_temple_open = False
if 'Cohen' in user_fullname:
print('You may enter the temple!')
is_temple_open = True
print('')
print('Thank you, ' + user_fullname + '.')
print('Is the temple open?: ' + str(is_temple_open))
###Output
_____no_output_____
###Markdown
ื ืกื ืืืืื ืื ืืงืื ืืืขืื ืืืืจ. ืืจืืฆื ืืืืงื ืฉืขื ืืชื ื ืืื ื ืืคื ื ืฉืชืชืงืืื ืืืื. ืืฉืื! ืคืชืจื ืืคื ื ืฉืชืืฉืืื! ืืืืื ืืืจืช:
###Code
age = int(input('Please enter your age: '))
allowed_to_enter = False
if age >= 18:
allowed_to_enter = True
if age < 18:
print('Please wait until midnight before answering the following question:')
answer_for_birthday_is_today = input('Is it your birthday today? [yes/no]: ')
if age == 17 and answer_for_birthday_is_today == 'yes':
print('Happy birthday! You are 18 now.')
age = age + 1
allowed_to_enter = True
if allowed_to_enter:
print('Welcome!')
if not allowed_to_enter:
print('Sorry... Byebye')
###Output
_____no_output_____
###Markdown
ืื ืืชืจืืฉ ืืงืื ืืื?ืืฉืืจื ืืจืืฉืื ื, ืืืงืฉื ื ืืช ืืืื ืฉื ืืืฉืชืืฉ ืืืจืื ื ืืืฉืชื ื age ืืืฆืืืข ืืืื.ืืฉืืจื ืืฉื ืืื, ืืืงืฉื ื ืฉึพallowed_to_enter ืืืื False. ืื ืงืฆืช ืืจืืฉืข ืืืืื ืืจืืคื, ืืื ืื ืื ื ืืขืืืคืื ืืืจืืจืช ืืืื ืื ืืืื ืืก ืืฃ ืืื ืืืกืืื ืฉืื ื.ืืฉืื ืืื ืืืงื ื ืืื age (ืืื ืืืฉืชืืฉ ืฉืื ื) ืืื ืืคืืืช 18, ืืื ืื ืืืืจื ื ืฉืืืื ืืืื ืก ืืืกืืื. ืขื ืืื ืื ืงื.ืขืืฉืื ื ืชืืจ ืื ืชืืื ืืืชืจืืฉืืช ืื ืืืื ื ืืื ืึพ18.ืื ืื ื ืืืืื ืืืฆืืช, ืืืืงืฉืื ืืืขืช ืื ืืื ืืืืืืช ืฉื ืืืฉืชืืฉ ืืื ืืืื.ืืืื ืื ืื ื ืคืืชืืื ืชื ืื ื ืืกืฃ, ืืชืื ืืชื ืื ืืงืืื. ืื ืืืืจ ืฉืจืง ืื age ืืื ืงืื ืึพ18 ืืืืืงื ืืืื ืชืงืจื (ืืืื ืฉืื ืื ื ืขืืืื ืืชืื ืืชื ืื age < 18, ืจืื ืืช ืืืืื):ืื ืืืฉืชืืฉ ืืื ืืก ืฉืืืื ืืื ืืืืืืช ืฉืื, ืืืืื ืฉืื ืืื ืขื ืขืืฉืื 17, ืื ืขืืฉืื ืืื ืืื ืืช ืื 18 ืืืืื ืืืืื ืก ืืืกืืื ืฉืื ื. ื ืืืื ืืช ืืืื ืฉืื ืื ืจืฉื ืื ืืืืื ืก.ืขืืฉืื ื ืฉืืจ ืจืง ืืืชืืืืก ืืืฉืชื ื allowed_to_enter ืฉืืืืจื ื ืืืขืื, ืืืืืง ืืื ืืืฉืชืืฉ ืืืชืจ ืืืื ืก ืื ืื, ืืืืืคืืก ืืืืขื ืืชืืืื. ืืืคื! ืชืจืืื: ื ืกื ืืขืจืื ืืช ืืงืื ืฉืืืขืื, ืื ืฉืืฉืื ืื ืืื ืืื ืืืืืืช ืฉื ืืืฉืชืืฉ ืจืง ืื ืืื ืื 17. ืืืงื ืฉืืงืื ืขืืืื ืขืืื ืืืฆืืคื. ืืขืืช ื ืคืืฆื ืืื ืืฉืืื ืืช ืื ืงืืืชืืื ืืืจื ืึพif. ืืชื ืืื ืืืืจืื ืืขืฆืืื ืฉืื ืชืขืฉื ืืช ืื, ืืื ืืชื ืชืขืฉื ืืช ืื, ืกืืื ืขืืื. ืคืืืชืื ืืืจืงืช ืืืืขืืช ืฉืืืื ืืขืฆืื ืืช ืืื ืืื ืืืงืืืืืืช ืืฉืื ืงืืจื. ื ืกื ืืืืืืจ ืืืืืจื ืืื ๐ ืชืจืืื ืื ืืกื ืืื ืง, ืฉืื 1 ืฉื ืืืฉืชืืฉ ืฉืื ืืื ืง ืืื wrong, ืืืกืืกืื ืฉืื ืืื ads sports. ืงืืื ืืืืฉืชืืฉ ืฉื ืืฉืชืืฉ ืืกืืกืื, ืืืืคืืกื ืื ืืืืขื ืืคื ืื ืืฆืืื ืืืชืืืจ. ืื ืื ืืขืืชื ืชืงื ื ืืช ืืงืื ืืื ืื ืฉืืจืืฅ ืืืฆืืื.ืืชืื ืืขืฆืืื ืืช ืืชืืงืื ืื ืฉืืืฆืขืชื ืืื ืืืืื ืข ืืืขืืืืช ืืืืืช ืืฆืืื ืืขืชืื!
###Code
a = 3
b = 4
c = 5
if a ** 2 + b ** 2 == c ** 2
print("This line should run for 3, 4, 5 but not for 4, 5, 6")
print("This line should run anyway")
###Output
_____no_output_____
|
Section-04-Missing-Data-Imputation/04.17-Arbitrary-Value-Imputation-Feature-Engine.ipynb
|
###Markdown
Arbitrary Imputation ==> Feature-Engine What is Feature-EngineFeature-Engine is an open source Python package that I created alongside this course. - Feature-Engine includes all the feature engineering techniques described in the course- Feature-Engine works like Scikit-learn, so it is easy to learn- Feature-Engine allows you to apply specific engineering steps to specific feature subsets- Feature-Engine can be integrated with the Scikit-learn pipeline allowing for smooth model building- **Feature-Engine allows you to design and store a feature engineering pipeline with bespoke procedures for different variable groups.**-------------------------------------------------------------------Feature-Engine can be installed via pip ==> pip install feature-engine- Make sure you have installed feature-engine before running this notebookFor more information visit my website In this demoWe will use Feature-Engine to perform arbitrary value imputation using the Ames House Price Dataset.- To download the dataset visit the lecture **Datasets** in **Section 1** of the course.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# to split the datasets
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
# from feature-engine
from feature_engine.imputation import ArbitraryNumberImputer
# let's load the dataset with a selected group of variables
cols_to_use = [
'BsmtQual', 'FireplaceQu', 'LotFrontage', 'MasVnrArea', 'GarageYrBlt',
'SalePrice'
]
data = pd.read_csv('../houseprice.csv', usecols=cols_to_use)
data.head()
data.isnull().mean()
###Output
_____no_output_____
###Markdown
All the predictor variables contain missing data.
###Code
# let's separate into training and testing set
# first drop the target from the feature list
cols_to_use.remove('SalePrice')
X_train, X_test, y_train, y_test = train_test_split(data[cols_to_use],
data['SalePrice'],
test_size=0.3,
random_state=0)
X_train.shape, X_test.shape
###Output
_____no_output_____
###Markdown
Feature-Engine captures the numerical variables automatically
###Code
# we call the imputer from feature-engine
# we specify the arbitrary value as an argument
imputer = ArbitraryNumberImputer(arbitrary_number = -999)
# we fit the imputer
imputer.fit(X_train)
# we see that the imputer found the numerical variables to
# impute with the arbitrary value
imputer.variables_
# here we can see the arbitrary value stored
imputer.arbitrary_number
# feature-engine returns a dataframe
tmp = imputer.transform(X_train)
tmp.head()
# let's check that the numerical variables don't
# contain NA any more
tmp[imputer.variables_].isnull().mean()
###Output
_____no_output_____
###Markdown
Feature-engine allows you to specify variable groups easily
###Code
# let's do the imputation again, but this time
# over only 2 of the 3 numerical variables
imputer = ArbitraryNumberImputer(arbitrary_number=-999,
variables=['LotFrontage', 'MasVnrArea'])
imputer.fit(X_train)
# now the imputer uses only the variables we indicated
imputer.variables_
# and we can see the value assigned to each variable
imputer.arbitrary_number
# feature-engine returns a dataframe
tmp = imputer.transform(X_train)
# let's check null values are gone
tmp[imputer.variables_].isnull().mean()
###Output
_____no_output_____
###Markdown
We can impute different variables with different numbers
###Code
# let's look at the distributions to determine the
# arbitrary values to use
X_train.hist()
plt.show()
imputer = ArbitraryNumberImputer(
imputer_dict={'LotFrontage': -999, 'MasVnrArea': -999, 'GarageYrBlt': -1})
imputer.fit(X_train)
# now the imputer uses only the variables we indicated
imputer.variables_
imputer.imputer_dict_
# feature-engine returns a dataframe
tmp = imputer.transform(X_train)
# let's check null values are gone
tmp[imputer.variables_].isnull().mean()
# let's check the histograms of the variables
# after the imputation
tmp[imputer.variables_].hist()
plt.show()
###Output
_____no_output_____
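###Markdown
The imputer can also be used as a step inside a Scikit-learn `Pipeline`, which is why `Pipeline` was imported above. A minimal sketch, reusing the training data from this demo:
###Code
# a pipeline with a single imputation step; further steps (encoders, a model, ...) could follow
pipe = Pipeline([
    ('imputer', ArbitraryNumberImputer(arbitrary_number=-999,
                                       variables=['LotFrontage', 'MasVnrArea'])),
])
pipe.fit(X_train)
tmp = pipe.transform(X_train)
# the selected variables no longer contain NA
tmp[['LotFrontage', 'MasVnrArea']].isnull().mean()
###Output
_____no_output_____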
###Markdown
Arbitrary Imputation ==> Feature-Engine What is Feature-EngineFeature-Engine is an open source Python package that I created alongside this course. - Feature-Engine includes all the feature engineering techniques described in the course- Feature-Engine works like Scikit-learn, so it is easy to learn- Feature-Engine allows you to apply specific engineering steps to specific feature subsets- Feature-Engine can be integrated with the Scikit-learn pipeline allowing for smooth model building- **Feature-Engine allows you to design and store a feature engineering pipeline with bespoke procedures for different variable groups.**-------------------------------------------------------------------Feature-Engine can be installed via pip ==> pip install feature-engine- Make sure you have installed feature-engine before running this notebookFor more information visit my website In this demoWe will use Feature-Engine to perform arbitrary value imputation using the Ames House Price Dataset.- To download the dataset visit the lecture **Datasets** in **Section 1** of the course.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# to split the datasets
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
# from feature-engine
from feature_engine.imputation import ArbitraryNumberImputer
# let's load the dataset with a selected group of variables
cols_to_use = [
'BsmtQual', 'FireplaceQu', 'LotFrontage', 'MasVnrArea', 'GarageYrBlt',
'SalePrice'
]
data = pd.read_csv('../houseprice.csv', usecols=cols_to_use)
data.head()
data.isnull().mean()
###Output
_____no_output_____
###Markdown
All the predictor variables contain missing data.
###Code
# let's separate into training and testing set
# first drop the target from the feature list
cols_to_use.remove('SalePrice')
X_train, X_test, y_train, y_test = train_test_split(data[cols_to_use],
data['SalePrice'],
test_size=0.3,
random_state=0)
X_train.shape, X_test.shape
###Output
_____no_output_____
###Markdown
Feature-Engine captures the numerical variables automatically
###Code
# we call the imputer from feature-engine
# we specify the arbitrary value as an argument
imputer = ArbitraryNumberImputer(arbitrary_number = -999)
# we fit the imputer
imputer.fit(X_train)
# we see that the imputer found the numerical variables to
# impute with the arbitrary value
imputer.variables
# here we can see the arbitrary value stored
imputer.arbitrary_number
# feature-engine returns a dataframe
tmp = imputer.transform(X_train)
tmp.head()
# let's check that the numerical variables don't
# contain NA any more
tmp[imputer.variables].isnull().mean()
###Output
_____no_output_____
###Markdown
Feature-engine allows you to specify variable groups easily
###Code
# let's do the imputation again, but this time
# over only 2 of the 3 numerical variables
imputer = ArbitraryNumberImputer(arbitrary_number=-999,
variables=['LotFrontage', 'MasVnrArea'])
imputer.fit(X_train)
# now the imputer uses only the variables we indicated
imputer.variables
# and we can see the value assigned to each variable
imputer.arbitrary_number
# feature-engine returns a dataframe
tmp = imputer.transform(X_train)
# let's check null values are gone
tmp[imputer.variables].isnull().mean()
###Output
_____no_output_____
###Markdown
We can impute different variables with different numbers
###Code
# let's look at the distributions to determine the
# arbitrary values to use
X_train.hist()
imputer = ArbitraryNumberImputer(
imputer_dict={'LotFrontage': -999, 'MasVnrArea': -999, 'GarageYrBlt': -1})
imputer.fit(X_train)
# now the imputer uses only the variables we indicated
imputer.variables
imputer.imputer_dict_
# feature-engine returns a dataframe
tmp = imputer.transform(X_train)
# let's check null values are gone
tmp[imputer.variables].isnull().mean()
# let's check the histograms of the variables
# after the imputation
tmp[imputer.variables].hist()
plt.show()
###Output
_____no_output_____
###Markdown
Arbitrary Imputation ==> Feature-Engine What is Feature-EngineFeature-Engine is an open source Python package that I created alongside this course. - Feature-Engine includes all the feature engineering techniques described in the course- Feature-Engine works like Scikit-learn, so it is easy to learn- Feature-Engine allows you to apply specific engineering steps to specific feature subsets- Feature-Engine can be integrated with the Scikit-learn pipeline allowing for smooth model building- **Feature-Engine allows you to design and store a feature engineering pipeline with bespoke procedures for different variable groups.**-------------------------------------------------------------------Feature-Engine can be installed via pip ==> pip install feature-engine- Make sure you have installed feature-engine before running this notebookFor more information visit my website In this demoWe will use Feature-Engine to perform arbitrary value imputation using the Ames House Price Dataset.- To download the dataset visit the lecture **Datasets** in **Section 1** of the course.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# to split the datasets
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
# from feature-engine
from feature_engine import missing_data_imputers as mdi
# let's load the dataset with a selected group of variables
cols_to_use = [
'BsmtQual', 'FireplaceQu', 'LotFrontage', 'MasVnrArea', 'GarageYrBlt',
'SalePrice'
]
data = pd.read_csv('../houseprice.csv', usecols=cols_to_use)
data.head()
data.isnull().mean()
###Output
_____no_output_____
###Markdown
All the predictor variables contain missing data.
###Code
# let's separate into training and testing set
# first drop the target from the feature list
cols_to_use.remove('SalePrice')
X_train, X_test, y_train, y_test = train_test_split(data[cols_to_use],
data['SalePrice'],
test_size=0.3,
random_state=0)
X_train.shape, X_test.shape
###Output
_____no_output_____
###Markdown
Feature-Engine captures the numerical variables automatically
###Code
# we call the imputer from feature-engine
# we specify the arbitrary value as an argument
imputer = mdi.ArbitraryNumberImputer(arbitrary_number = -999)
# we fit the imputer
imputer.fit(X_train)
# we see that the imputer found the numerical variables to
# impute with the arbitrary value
imputer.variables
# here we can see the arbitrary value stored
imputer.arbitrary_number
# feature-engine returns a dataframe
tmp = imputer.transform(X_train)
tmp.head()
# let's check that the numerical variables don't
# contain NA any more
tmp[imputer.variables].isnull().mean()
###Output
_____no_output_____
###Markdown
Feature-engine allows you to specify variable groups easily
###Code
# let's do the imputation again, but this time
# over only 2 of the 3 numerical variables
imputer = mdi.ArbitraryNumberImputer(arbitrary_number = -999,
variables=['LotFrontage', 'MasVnrArea'])
imputer.fit(X_train)
# now the imputer uses only the variables we indicated
imputer.variables
# and we can see the value assigned to each variable
imputer.arbitrary_number
# feature-engine returns a dataframe
tmp = imputer.transform(X_train)
# let's check null values are gone
tmp[imputer.variables].isnull().mean()
###Output
_____no_output_____
###Markdown
Feature-engine can be used with the Scikit-learn pipeline
###Code
# let's look at the distributions to determine the
# arbitrary values to use
X_train.hist()
pipe = Pipeline([
('imputer_999', mdi.ArbitraryNumberImputer(arbitrary_number = -999,
variables = ['LotFrontage', 'MasVnrArea'])),
('imputer_minus1', mdi.ArbitraryNumberImputer(arbitrary_number = -1,
variables = ['GarageYrBlt'])),
])
pipe.fit(X_train)
pipe.named_steps['imputer_999'].arbitrary_number
pipe.named_steps['imputer_minus1'].arbitrary_number
# let's transform the data with the pipeline
tmp = pipe.transform(X_train)
# let's check null values are gone
tmp.isnull().mean()
###Output
_____no_output_____
|
docs/user-guide/groupby.ipynb
|
###Markdown
GroupBy"Group by" refers to an implementation of the "split-apply-combine" approach known from [pandas](https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html) and [xarray](http://xarray.pydata.org/en/stable/groupby.html).Scipp currently supports only a limited number of operations that can be applied. Grouping based on label valuesSuppose we have measured data for a number of parameter values, potentially repeating measurements with the same parameter multiple times:
###Code
import numpy as np
import scipp as sc
np.random.seed(0)
param = sc.Variable(dims=['x'], values=[1,3,1,1,5,3])
values = sc.Variable(dims=['x', 'y'], values=np.random.rand(6,16))
values += 1.0 + param
###Output
_____no_output_____
###Markdown
If we store this data as a data array we obtain the following plot:
###Code
data = sc.DataArray(
values,
coords={
'x': sc.Variable(dims=['x'], values=np.arange(6)),
'y': sc.Variable(dims=['y'], values=np.arange(16))
})
sc.plot(data)
###Output
_____no_output_____
###Markdown
Note that we chose the "measured" values such that the three distinct values of the underlying parameter are visible.We can now use the split-apply-combine mechanism to transform our data into a more useful representation.We start by storing the parameter values (or any value to be used for grouping) as a non-dimension coordinate:
###Code
data.coords['param'] = param
###Output
_____no_output_____
###Markdown
Next, we call `scipp.groupby` to split the data and call `mean` on each of the groups:
###Code
grouped = sc.groupby(data, group='param').mean('x')
sc.plot(grouped)
###Output
_____no_output_____
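###Markdown
The same grouping can also be reduced with `sum` instead of `mean`; this is a minimal variation of the call above, assuming the same call signature:
###Code
grouped_sum = sc.groupby(data, group='param').sum('x')
sc.plot(grouped_sum)
###Output
_____no_output_____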
###Markdown
Apart from `mean`, `groupby` also supports `sum`, `concatenate`, and more. See [GroupByDataArray](../generated/classes/scipp.GroupByDataArray.rst) and [GroupByDataset](../generated/classes/scipp.GroupByDataset.rst) for a full list. Grouping based on binned label valuesGrouping based on non-dimension coordinate values (also known as labels) is most useful when labels are strings or integers.If labels are floating-point values or cover a wide range, it is more convenient to group values into bins, i.e., all values within certain bounds are mapped into the same group.We modify the above example to use a continuously-valued parameter:
###Code
param = sc.Variable(dims=['x'], values=np.random.rand(16))
values = sc.Variable(dims=['x', 'y'], values=np.random.rand(16,16))
values += 1.0 + 5.0*param
data = sc.DataArray(
values,
coords={
'x': sc.Variable(dims=['x'], values=np.arange(16)),
'y': sc.Variable(dims=['y'], values=np.arange(16))
})
sc.plot(data)
###Output
_____no_output_____
###Markdown
We create a variable defining the desired binning:
###Code
bins = sc.Variable(dims=["z"], values=np.linspace(0.0, 1.0, 10))
###Output
_____no_output_____
###Markdown
As before, we can now use `groupby` and `mean` to transform the data:
###Code
data.coords['param'] = param
grouped = sc.groupby(data, group='param', bins=bins).mean('x')
sc.plot(grouped)
###Output
_____no_output_____
###Markdown
The values in the white rows are `NaN`.This is the result of empty bins, which do not have a meaningful mean value. Alternatively, grouping can be done based on groups defined as Variables rather than strings. This, however, requires bins to be specified, since bins define the new dimension label.
###Code
grouped = sc.groupby(data, group=param, bins=bins).mean('x') # note the lack of quotes around param!
sc.plot(grouped)
###Output
_____no_output_____
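###Markdown
To see which bins are empty, one option (a small sketch reusing `param` and `bins` from above) is to group a data array of ones by the same parameter and sum, which yields the number of input rows per bin:
###Code
# count how many of the 16 input rows fall into each parameter bin
ones = sc.DataArray(sc.Variable(dims=['x'], values=np.ones(16)), coords={'param': param})
counts = sc.groupby(ones, group='param', bins=bins).sum('x')
counts
###Output
_____no_output_____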
###Markdown
Usage examples Filtering a variable using `groupby.copy`Apart from reduction operations discussed above, `groupby` also supports `copy`, which allows us to extract a group without changes.We can use this, e.g., to filter data.This can be used for filtering variables:
###Code
var = sc.array(dims=['x'], values=np.random.rand(100))
select = var < 0.5 * sc.Unit('')
###Output
_____no_output_____
###Markdown
We proceed as follows:1. Create a helper data array with a dummy coord that will be used to group the data elements.2. Call `groupby`, grouping by the `dummy` coord. Here `select` contains two distinct values, `False` and `True`, so `groupby` returns an object with two groups.3. Pass `1` to `copy` to extract the second group (group indices start at 0) which contains all elements where the dummy coord value is `True`.4. Finally, the `data` property returns only the filtered variable without the temporary coords that were required for `groupby`.
###Code
helper = sc.DataArray(var, coords={'dummy':select})
grouped = sc.groupby(helper, group='dummy')
filtered_var = grouped.copy(1).data
filtered_var
###Output
_____no_output_____
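###Markdown
The complementary group, i.e. all elements where the dummy coord is `False`, can be extracted in the same way by passing `0` instead of `1`:
###Code
# group 0 holds the elements where select is False
rejected_var = grouped.copy(0).data
rejected_var
###Output
_____no_output_____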
###Markdown
Note that we can also avoid the named helpers `helper` and `grouped` and write:
###Code
filtered_var = sc.groupby(sc.DataArray(var, coords={'dummy':select}), group='dummy').copy(1).data
###Output
_____no_output_____
###Markdown
GroupBy"Group by" operations refers to an implementation of the "split-apply-combine" approach known from [pandas](https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html) and [xarray](http://xarray.pydata.org/en/stable/groupby.html).We currently support only a limited number of operations that can be applied. Grouping with binsNote that this notebook requires [Mantid](https://www.mantidproject.org/Main_Page).A [binder](https://mybinder.org/v2/gh/scipp/scipp-neutron-jupyter-demo/main) is available that can run this notebook.
###Code
import numpy as np
import scipp as sc
import scippneutron as scn
# Load event data. Here, we use `get_path` to find a data file that comes bundled
# with scippneutron. Normally, we would simply pass a file path to `scn.load`.
events = scn.load(scn.data.get_path('PG3_4844_event.nxs'), load_pulse_times=False)
###Output
_____no_output_____
###Markdown
Example 1 (dense data): split-sum-combineWe histogram the event data:
###Code
bins = sc.arange(dim='tof', start=0.0, stop=17000.0, step=50.0, unit='us')
pos_hist = sc.histogram(events, bins=bins)
###Output
_____no_output_____
###Markdown
A plot shows the shortcoming of the data representation.There is no physical meaning attached to the "spectrum" dimension and the plot is hard to interpret:
###Code
sc.plot(pos_hist)
###Output
_____no_output_____
###Markdown
To improve the plot, we first store the scattering angle as labels in the data array.Then we create a variable containing the desired target binning:
###Code
pos_hist.coords['two_theta'] = scn.two_theta(pos_hist)
two_theta = sc.linspace(dim='two_theta', unit='rad', start=0.0, stop=np.pi, num=500)
###Output
_____no_output_____
###Markdown
We use `scipp.groupby` with the desired bins and apply a `sum` over dimension `spectrum`:
###Code
theta_hist = pos_hist.groupby('two_theta', bins=two_theta).sum('spectrum')
###Output
_____no_output_____
###Markdown
The result has `spectrum` replaced by the physically meaningful `two_theta` dimension and the resulting plot is easily interpretable:
###Code
theta_hist.plot()
###Output
_____no_output_____
###Markdown
Example 2 (event data): split-flatten-combineThis is essentially the same as example 1 but avoids histogramming data too early.A plot of the original data is hard to interpret:
###Code
sc.histogram(events, bins=bins).plot()
###Output
_____no_output_____
###Markdown
Again, we improve the plot by first storing the scattering angle as labels in the data array with the events.Then we create a variable containing the desired target binning:
###Code
events.coords['two_theta'] = scn.two_theta(events)
two_theta = sc.linspace(dim='two_theta', unit='rad', start=0.0, stop=np.pi, num=500)
###Output
_____no_output_____
###Markdown
We use `scipp.groupby` with the desired bins and apply a concatenation operation on dimension `spectrum`.This is the event-data equivalent to summing histograms:
###Code
theta_events = events.groupby('two_theta', bins=two_theta).concat('spectrum')
###Output
_____no_output_____
###Markdown
The result has dimension `spectrum` replaced by the physically meaningful `two_theta` and results in the same plot as before with histogrammed data.
###Code
sc.histogram(theta_events, bins=bins).plot()
###Output
_____no_output_____
###Markdown
GroupBy"Group by" operations refers to an implementation of the "split-apply-combine" approach known from [pandas](https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html) and [xarray](http://xarray.pydata.org/en/stable/groupby.html).We currently support only a limited number of operations that can be applied. Grouping with binsNote that this notebook requires [Mantid](https://www.mantidproject.org/Main_Page).A [binder](https://mybinder.org/v2/gh/scipp/scipp-neutron-jupyter-demo/main) is available that can run this notebook.
###Code
import numpy as np
import scipp as sc
import scippneutron as scn
# Load event data. Here, we use `get_path` to find a data file that comes bundled with scippneutron.
# Normally, we would simply pass a file path to `scn.load`.
events = scn.load(scn.data.get_path('PG3_4844_event.nxs'), load_pulse_times=False)
###Output
_____no_output_____
###Markdown
Example 1 (dense data): split-sum-combineWe histogram the event data:
###Code
bins = sc.Variable(dims=['tof'], values=np.arange(0.0, 17000.0, 50.0), unit=sc.units.us)
pos_hist = sc.histogram(events, bins=bins)
###Output
_____no_output_____
###Markdown
A plot shows the shortcoming of the data representation.There is no physical meaning attached to the "spectrum" dimension and the plot is hard to interpret:
###Code
sc.plot(pos_hist)
###Output
_____no_output_____
###Markdown
To improve the plot, we first store the scattering angle as labels in the data array.Then we create a variable containing the desired target binning:
###Code
pos_hist.coords['two_theta'] = scn.two_theta(pos_hist)
two_theta = sc.Variable(dims=['two_theta'],
unit=sc.units.rad,
values=np.linspace(0.0, np.pi, num=500))
###Output
_____no_output_____
###Markdown
We use `scipp.groupby` with the desired bins and apply a `sum` over dimension `spectrum`:
###Code
theta_hist = sc.groupby(pos_hist, 'two_theta', bins=two_theta).sum('spectrum')
###Output
_____no_output_____
###Markdown
The result has `spectrum` replaced by the physically meaningful `two_theta` dimension and the resulting plot is easily interpretable:
###Code
sc.plot(theta_hist)
###Output
_____no_output_____
###Markdown
Example 2 (event data): split-flatten-combineThis is essentially the same as example 1 but avoids histogramming data too early.A plot of the original data is hard to interpret:
###Code
sc.plot(sc.histogram(events, bins=bins))
###Output
_____no_output_____
###Markdown
Again, we improve the plot by first storing the scattering angle as labels in the data array with the events.Then we create a variable containing the desired target binning:
###Code
events.coords['two_theta'] = scn.two_theta(events)
theta = sc.Variable(dims=['two_theta'],
unit=sc.units.rad,
values=np.linspace(0.0, np.pi, num=500))
###Output
_____no_output_____
###Markdown
We use `scipp.groupby` with the desired bins and apply a `concatenate` operation on dimension `spectrum`.This is the event-data equivalent to summing histograms:
###Code
theta_events = sc.groupby(events, 'two_theta', bins=theta).concatenate('spectrum')
###Output
_____no_output_____
###Markdown
The result has dimension `spectrum` replaced by the physically meaningful `two_theta` and results in the same plot as before with histogrammed data.
###Code
sc.plot(sc.histogram(theta_events, bins=bins))
###Output
_____no_output_____
###Markdown
GroupBy"Group by" refers to an implementation of the "split-apply-combine" approach known from [pandas](https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html) and [xarray](http://xarray.pydata.org/en/stable/groupby.html).Scipp currently supports only a limited number of operations that can be applied. Grouping based on label valuesSuppose we have measured data for a number of parameter values, potentially repeating measurements with the same parameter multiple times:
###Code
import numpy as np
import scipp as sc
np.random.seed(0)
param = sc.Variable(dims=['x'], values=[1,3,1,1,5,3])
values = sc.Variable(dims=['x', 'y'], values=np.random.rand(6,16))
values += 1.0 + param
###Output
_____no_output_____
###Markdown
If we store this data as a data array we obtain the following plot:
###Code
data = sc.DataArray(
values,
coords={
'x': sc.Variable(dims=['x'], values=np.arange(6)),
'y': sc.Variable(dims=['y'], values=np.arange(16))
})
sc.plot(data)
###Output
_____no_output_____
###Markdown
Note that we chose the "measured" values such that the three distinct values of the underlying parameter are visible.We can now use the split-apply-combine mechanism to transform our data into a more useful representation.We start by storing the parameter values (or any value to be used for grouping) as a non-dimension coordinate:
###Code
data.coords['param'] = param
###Output
_____no_output_____
###Markdown
Next, we call `scipp.groupby` to split the data and call `mean` on each of the groups:
###Code
grouped = sc.groupby(data, group='param').mean('x')
sc.plot(grouped)
###Output
_____no_output_____
###Markdown
Apart from `mean`, `groupby` also supports `sum`, `concat`, and more. See [GroupByDataArray](../generated/classes/scipp.GroupByDataArray.rst) and [GroupByDataset](../generated/classes/scipp.GroupByDataset.rst) for a full list. Grouping based on binned label valuesGrouping based on non-dimension coordinate values (also known as labels) is most useful when labels are strings or integers.If labels are floating-point values or cover a wide range, it is more convenient to group values into bins, i.e., all values within certain bounds are mapped into the same group.We modify the above example to use a continuously-valued parameter:
###Code
param = sc.Variable(dims=['x'], values=np.random.rand(16))
values = sc.Variable(dims=['x', 'y'], values=np.random.rand(16,16))
values += 1.0 + 5.0*param
data = sc.DataArray(
values,
coords={
'x': sc.Variable(dims=['x'], values=np.arange(16)),
'y': sc.Variable(dims=['y'], values=np.arange(16))
})
sc.plot(data)
###Output
_____no_output_____
###Markdown
We create a variable defining the desired binning:
###Code
bins = sc.Variable(dims=["z"], values=np.linspace(0.0, 1.0, 10))
###Output
_____no_output_____
###Markdown
As before, we can now use `groupby` and `mean` to transform the data:
###Code
data.coords['param'] = param
grouped = sc.groupby(data, group='param', bins=bins).mean('x')
sc.plot(grouped)
###Output
_____no_output_____
###Markdown
The values in the white rows are `NaN`.This is the result of empty bins, which do not have a meaningful mean value. Alternatively, grouping can be done based on groups defined as Variables rather than strings. This, however, requires bins to be specified, since bins define the new dimension label.
###Code
grouped = sc.groupby(data, group=param, bins=bins).mean('x') # note the lack of quotes around param!
sc.plot(grouped)
###Output
_____no_output_____
###Markdown
Usage examples Filtering a variable using `groupby.copy`Apart from reduction operations discussed above, `groupby` also supports `copy`, which allows us to extract a group without changes.We can use this, e.g., to filter data.This can be used for filtering variables:
###Code
var = sc.array(dims=['x'], values=np.random.rand(100))
select = var < 0.5 * sc.Unit('')
###Output
_____no_output_____
###Markdown
We proceed as follows:1. Create a helper data array with a dummy coord that will be used to group the data elements.2. Call `groupby`, grouping by the `dummy` coord. Here `select` contains two distinct values, `False` and `True`, so `groupby` returns an object with two groups.3. Pass `1` to `copy` to extract the second group (group indices start at 0) which contains all elements where the dummy coord value is `True`.4. Finally, the `data` property returns only the filtered variable without the temporary coords that were required for `groupby`.
###Code
helper = sc.DataArray(var, coords={'dummy':select})
grouped = sc.groupby(helper, group='dummy')
filtered_var = grouped.copy(1).data
filtered_var
###Output
_____no_output_____
###Markdown
Note that we can also avoid the named helpers `helper` and `grouped` and write:
###Code
filtered_var = sc.groupby(sc.DataArray(var, coords={'dummy':select}), group='dummy').copy(1).data
###Output
_____no_output_____
###Markdown
GroupBy"Group by" refers to an implementation of the "split-apply-combine" approach known from [pandas](https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html) and [xarray](http://xarray.pydata.org/en/stable/groupby.html).Scipp currently supports only a limited number of operations that can be applied. Grouping based on label valuesSuppose we have measured data for a number of parameter values, potentially repeating measurements with the same parameter multiple times:
###Code
import numpy as np
import scipp as sc
np.random.seed(0)
param = sc.Variable(['x'], values=[1,3,1,1,5,3])
values = sc.Variable(['x', 'y'], values=np.random.rand(6,16))
values += 1.0 + param
###Output
_____no_output_____
###Markdown
If we store this data as a data array we obtain the following plot:
###Code
data = sc.DataArray(
values,
coords={
'x': sc.Variable(['x'], values=np.arange(6)),
'y': sc.Variable(['y'], values=np.arange(16))
})
sc.plot(data)
###Output
_____no_output_____
###Markdown
Note that we chose the "measured" values such that the three distinct values of the underlying parameter are visible.We can now use the split-apply-combine mechanism to transform our data into a more useful representation.We start by storing the parameter values (or any value to be used for grouping) as a non-dimension coordinate:
###Code
data.coords['param'] = param
###Output
_____no_output_____
###Markdown
Next, we call `scipp.groupby` to split the data and call `mean` on each of the groups:
###Code
grouped = sc.groupby(data, group='param').mean('x')
sc.plot(grouped)
###Output
_____no_output_____
###Markdown
Apart from `mean`, `groupby` also supports `sum`, `concatenate`, and more. See [GroupByDataArray](../generated/scipp.GroupByDataArray.rst) and [GroupByDataset](../generated/scipp.GroupByDataset.rst) for a full list. Grouping based on binned label valuesGrouping based on non-dimension coordinate values (also known as labels) is most useful when labels are strings or integers.If labels are floating-point values or cover a wide range, it is more convenient to group values into bins, i.e., all values within certain bounds are mapped into the same group.We modify the above example to use a continuously-valued parameter:
###Code
param = sc.Variable(['x'], values=np.random.rand(16))
values = sc.Variable(['x', 'y'], values=np.random.rand(16,16))
values += 1.0 + 5.0*param
data = sc.DataArray(
values,
coords={
'x': sc.Variable(['x'], values=np.arange(16)),
'y': sc.Variable(['y'], values=np.arange(16))
})
sc.plot(data)
###Output
_____no_output_____
###Markdown
We create a variable defining the desired binning:
###Code
bins = sc.Variable(["z"], values=np.linspace(0.0, 1.0, 10))
###Output
_____no_output_____
###Markdown
As before, we can now use `groupby` and `mean` to transform the data:
###Code
data.coords['param'] = param
grouped = sc.groupby(data, group='param', bins=bins).mean('x')
sc.plot(grouped)
###Output
_____no_output_____
###Markdown
The values in the white rows are `NaN`.This is the result of empty bins, which do not have a meaningful mean value. Alternatively, grouping can be done based on groups defined as Variables rather than strings. This, however, requires bins to be specified, since bins define the new dimension label.
###Code
grouped = sc.groupby(data, group=param, bins=bins).mean('x') # note the lack of quotes around param!
sc.plot(grouped)
###Output
_____no_output_____
###Markdown
Usage examples Filtering a variable using `groupby.copy`Apart from reduction operations discussed above, `groupby` also supports `copy`, which allows us to extract a group without changes.We can use this, e.g., to filter data.This can be used for filtering variables:
###Code
var = sc.array(dims=['x'], values=np.random.rand(100))
select = var < 0.5 * sc.Unit('')
###Output
_____no_output_____
###Markdown
We proceed as follows:1. Create a helper data array with a dummy coord that will be used to group the data elements.2. Call `groupby`, grouping by the `dummy` coord. Here `select` contains two distinct values, `False` and `True`, so `groupby` returns an object with two groups.3. Pass `1` to `copy` to extract the second group (group indices start at 0) which contains all elements where the dummy coord value is `True`.4. Finally, the `data` property returns only the filtered variable without the temporary coords that were required for `groupby`.
###Code
helper = sc.DataArray(var, coords={'dummy':select})
grouped = sc.groupby(helper, group='dummy')
filtered_var = grouped.copy(1).data
filtered_var
###Output
_____no_output_____
###Markdown
Note that we can also avoid the named helpers `helper` and `grouped` and write:
###Code
filtered_var = sc.groupby(sc.DataArray(var, coords={'dummy':select}), group='dummy').copy(1).data
###Output
_____no_output_____
|
A2_ReproducibilityWorkflow.ipynb
|
###Markdown
Course Human-Centered Data Science ([HCDS](https://www.mi.fu-berlin.de/en/inf/groups/hcc/teaching/winter_term_2020_21/course_human_centered_data_science.html)) - Winter Term 2020/21 - [HCC](https://www.mi.fu-berlin.de/en/inf/groups/hcc/index.html) | [Freie Universitรคt Berlin](https://www.fu-berlin.de/)*** A2 - Reproducibility Workflow Your assignment is to create a graph that looks a lot like the one below one, starting from scratch, and following best practices for reproducible research. Before you start1. Read all instructions carefully before you begin.1. Read all API documentation carefully before you begin.1. Experiment with queries in the sandbox of the technical documentation for each API to familiarize yourself with the schema and the data.1. Ask questions if you are unsure about anything!1. When documenting your project, please keep the following questions in your mind: * _If I found this GitHub repository, and wanted to fully reproduce the analysis, what information would I want?_ * _What information would I need?_ Step 1๏ธโฃ: Data acquisitionIn order to measure Wikipedia traffic from January 2008 until October 2020, you will need to collect data from two different APIs:1. The **Legacy Pagecounts API** ([documentation](https://wikitech.wikimedia.org/wiki/Analytics/AQS/Legacy_Pagecounts), [endpoint](https://wikimedia.org/api/rest_v1/!/Pagecounts_data_(legacy)/get_metrics_legacy_pagecounts_aggregate_project_access_site_granularity_start_end)) provides access to desktop and mobile traffic data from December 2007 through July 2016.1. The **Pageviews API** ([documentation](https://wikitech.wikimedia.org/wiki/Analytics/AQS/Pageviews), [endpoint](https://wikimedia.org/api/rest_v1/!/Pageviews_data/get_metrics_pageviews_aggregate_project_access_agent_granularity_start_end)) provides access to desktop, mobile web, and mobile app traffic data from July 2015 through last month.For each API, you need to collect data for all months where data is available and then save the raw results into five (3+2) separate `JSON`files (one file per API query type) before continuing to step 2.To get you started, you can use the following **sample code for API calls**:
###Code
# Source: https://public.paws.wmcloud.org/User:Jtmorgan/data512_a1_example.ipynb?format=raw
import json
import requests
endpoint_legacy = 'https://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/{project}/{access-site}/{granularity}/{start}/{end}'
endpoint_pageviews = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/{project}/{access}/{agent}/{granularity}/{start}/{end}'
# SAMPLE parameters for getting aggregated legacy view data
# see: https://wikimedia.org/api/rest_v1/#!/Legacy_data/get_metrics_legacy_pagecounts_aggregate_project_access_site_granularity_start_end
example_params_legacy = {"project" : "en.wikipedia.org",
"access-site" : "desktop-site",
"granularity" : "monthly",
"start" : "2001010100",
# for end use 1st day of month following final month of data
"end" : "2020100100"
}
# SAMPLE parameters for getting aggregated current standard pageview data
# see: https://wikimedia.org/api/rest_v1/#!/Pageviews_data/get_metrics_pageviews_aggregate_project_access_agent_granularity_start_end
example_params_pageviews = {"project" : "en.wikipedia.org",
"access" : "desktop",
"agent" : "user",
"granularity" : "monthly",
"start" : "2001010100",
# for end use 1st day of month following final month of data
"end" : '2020101000'
}
# Customize these with your own information
headers = {
'User-Agent': 'https://github.com/mvrcx',
'From': '[email protected]'
}
def api_call(endpoint,parameters):
call = requests.get(endpoint.format(**parameters), headers=headers)
response = call.json()
return response
example_monthly_pageviews = api_call(endpoint_pageviews, example_params_pageviews)
example_monthly_pageviews
example_monthly_legacy = api_call(endpoint_legacy, example_params_legacy)
example_monthly_legacy
###Output
_____no_output_____
###Markdown
Your `JSON`-formatted source data file must contain the complete and un-edited output of your API queries. The naming convention for the source data files is: `apiname_accesstype_firstmonth-lastmonth.json`. For example, your filename for monthly page views on desktop should be: `pagecounts_desktop-site_200712-202010.json` Important notesโ1. As much as possible, we're interested in *organic* (user) traffic, as opposed to traffic by web crawlers or spiders. The Pageview API (but not the Pagecount API) allows you to filter by `agent=user`. You should do that.1. There is about one year of overlapping traffic data between the two APIs. You need to gather, and later graph, data from both APIs for this period of time. Query, collect, and store data1. Setting Parameters for Pageview desktop/mobilesite/mobileapp and Pagecount desktop/mobile2. Query the data by calling the api with respective parameters3. Creating needed folders to directory4. Saving the collected data to JSON in `raw_data/` directory
###Code
# Setting parameters for pageview desktop
pageviews_desktop_param = {"project" : "en.wikipedia.org",
"access" : "desktop",
"agent" : "user",
"granularity" : "monthly",
"start" : "2001010100",
"end" : '2020101000'
}
# Setting parameters for pageview mobile site
pageviews_mobilesite_param = {"project" : "en.wikipedia.org",
"access" : "mobile-web",
"agent" : "user",
"granularity" : "monthly",
"start" : "2001010100",
"end" : '2020101000'
}
# Setting parameters for pageview mobile app
pageviews_mobileapp_param = {"project" : "en.wikipedia.org",
"access" : "mobile-app",
"agent" : "user",
"granularity" : "monthly",
"start" : "2001010100",
"end" : '2020101000'
}
# Setting parameters for legacy desktop
legacy_desktop_param = {"project" : "en.wikipedia.org",
"access-site" : "desktop-site",
"granularity" : "monthly",
"start" : "2001010100",
"end" : "2020100100"
}
# Setting parameters for legacy mobile
legacy_mobile_param = {"project" : "en.wikipedia.org",
"access-site" : "mobile-site",
"granularity" : "monthly",
"start" : "2001010100",
"end" : "2020100100"
}
# Querying the data
pageviews_monthly_desktop = api_call(endpoint_pageviews, pageviews_desktop_param)
pageviews_monthly_mobilesite = api_call(endpoint_pageviews, pageviews_mobilesite_param)
pageviews_monthly_mobileapp = api_call(endpoint_pageviews, pageviews_mobileapp_param)
legacy_monthly_desktop = api_call(endpoint_legacy, legacy_desktop_param)
legacy_monthly_mobile = api_call(endpoint_legacy, legacy_mobile_param)
###################################################
# I MEAN THIS COULD'VE BEEN DONE MORE EFFICIENTLY #
###################################################
# Creating directories,
# Source: https://stackoverflow.com/questions/11373610/save-matplotlib-file-to-a-directory
def mkdir_p(mypath):
'''Creates a directory. equivalent to using mkdir -p on the command line'''
from errno import EEXIST
from os import makedirs,path
try:
makedirs(mypath)
except OSError as exc: # Python >2.5
if exc.errno == EEXIST and path.isdir(mypath):
pass
else: raise
# Create directory for all raw json files
mkdir_p('raw data')
mkdir_p('raw data/json')
# Saving the queries to files
with open('raw data/json/pagecounts_desktop-site_200101_202010.json', 'w', encoding='utf-8') as file:
json.dump(pageviews_monthly_desktop, file, ensure_ascii=False, indent=4)
with open('raw data/json/pagecounts_mobile-site_200101_202010.json', 'w', encoding='utf-8') as file:
json.dump(pageviews_monthly_mobilesite, file, ensure_ascii=False, indent=4)
with open('raw data/json/pagecounts_mobile-app_200101_202010.json', 'w', encoding='utf-8') as file:
json.dump(pageviews_monthly_mobileapp, file, ensure_ascii=False, indent=4)
with open('raw data/json/legacy_desktop-site_200101_202010.json', 'w', encoding='utf-8') as file:
json.dump(legacy_monthly_desktop, file, ensure_ascii=False, indent=4)
with open('raw data/json/legacy_mobile-site_200101_202010.json', 'w', encoding='utf-8') as file:
json.dump(legacy_monthly_mobile, file, ensure_ascii=False, indent=4)
###Output
_____no_output_____
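###Markdown
As a quick sanity check (a small sketch using the responses collected above), the number of monthly records returned by each query can be printed before moving on:
###Code
# each API response stores one record per month under the 'items' key
for label, response in [('pageviews desktop', pageviews_monthly_desktop),
                        ('pageviews mobile-web', pageviews_monthly_mobilesite),
                        ('pageviews mobile-app', pageviews_monthly_mobileapp),
                        ('legacy desktop', legacy_monthly_desktop),
                        ('legacy mobile', legacy_monthly_mobile)]:
    print(label, len(response.get('items', [])))
###Output
_____no_output_____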
###Markdown
Step 2: Data processingYou will need to perform a series of processing steps on these data files in order to prepare them for analysis. These steps must be followed exactly in order to prepare the data for analysis. At the end of this step, you will have a single `CSV`-formatted data file `en-wikipedia_traffic_200712-202010.csv` that can be used in your analysis (step 3) with no significant additional processing.* For data collected from the Pageviews API, combine the monthly values for `mobile-app` and `mobile-web` to create a total mobile traffic count for each month.* For all data, separate the value of `timestamp` into four-digit year (`YYYY`) and two-digit month (`MM`) and discard values for day and hour (`DDHH`).Combine all data into a single CSV file with the following headers:| year | month |pagecount_all_views|pagecount_desktop_views|pagecount_mobile_views|pageview_all_views|pageview_desktop_views|pageview_mobile_views||------| ------|-------------------|-----------------------|----------------------|------------------|----------------------|---------------------|| YYYY | MM |num_views |num_views |num_views |num_views |num_views |num_views | Creating corresponding dataframes, merging mobilesite and mobileapp data and doing some reindexing
###Code
import pandas as pd
###################################
# BEGINNING OF FIRST BULLET POINT #
###################################
# Pageviews mobile site views into dataframe
pv_mobilesite_df = pd.DataFrame((list(pageviews_monthly_mobilesite.values())[0]),columns = ['access','timestamp', 'views'])
#pv_mobilesite_df
# Pageviews mobile app views into dataframe
pv_mobileapp_df = pd.DataFrame((list(pageviews_monthly_mobileapp.values())[0]),columns = ['access','timestamp', 'views'])
#pv_mobileapp_df
# Merging the two dataframes of mobile-site and mobile-app access
new = pv_mobilesite_df.merge(pv_mobileapp_df, on='timestamp')
#new
# Swapping columns bc i didnt like it the other way around
columns_titles = ["timestamp","access_x","views_x","access_y","views_y"]
pv_total_mobile_df = new.reindex(columns=columns_titles)
# Adding new column "total mobile" as a sum of views_x and views_y
pv_total_mobile_df['total_mobile'] = pv_total_mobile_df.loc[:,['views_x','views_y']].sum(axis=1)
#pv_total_mobile_df
###################################
# END OF FIRST BULLET POINT #
###################################
###Output
_____no_output_____
###Markdown
Merging remaining dataframesPretty sure the following could've been done more efficiently ¯\\_(ツ)_/¯ Strip year and month from timestamp and add those as new columns. Also add `mobilesite + mobileapp` as `totalmobile`. See the resulting dataframe below to understand what I mean.
###Code
####################################
# BEGINNING OF SECOND BULLET POINT #
####################################
# Pageviews mobile site views into dataframe
pv_mobilesite_df = pd.DataFrame((list(pageviews_monthly_mobilesite.values())[0]),columns = ['access','timestamp', 'views'])
#pv_mobilesite_df
#Split year (first 4 characters) and month (5th and 6th character)
pv_mobilesite_df['year'] = ""
pv_mobilesite_df['month'] = ""
pv_mobilesite_df['year'] = pv_mobilesite_df.timestamp.str[:4]
pv_mobilesite_df['month'] = pv_mobilesite_df.timestamp.str[4:6]
new_pv_mobilesite_df = pv_mobilesite_df
#new_pv_mobilesite_df
# Swapping columns
columns_titles = ["year", "month", "access","views"]
new_pv_mobilesite_df = new_pv_mobilesite_df.reindex(columns=columns_titles)
#new_pv_mobilesite_df
# Doing basically the same for pageviews mobile app views
pv_mobileapp_df = pd.DataFrame((list(pageviews_monthly_mobileapp.values())[0]), columns = ['access','timestamp','views'])
pv_mobileapp_df['year'] = ""
pv_mobileapp_df['month'] = ""
pv_mobileapp_df['year'] = pv_mobileapp_df.timestamp.str[:4]
pv_mobileapp_df['month'] = pv_mobileapp_df.timestamp.str[4:6]
new_pv_mobileapp_df = pv_mobileapp_df
new_pv_mobileapp_df = new_pv_mobileapp_df.reindex(columns=["year", "month", "access","views"])
#new_pv_mobileapp_df
# Pageviews total mobile
pv_total_mobile_df['year'] = ""
pv_total_mobile_df['month'] = ""
pv_total_mobile_df['year'] = pv_total_mobile_df.timestamp.str[:4]
pv_total_mobile_df['month'] = pv_total_mobile_df.timestamp.str[4:6]
pv_total_mobile_df['access'] = "mobile"
new_pv_totalmobile_df = pv_total_mobile_df
new_pv_totalmobile_df = new_pv_totalmobile_df.reindex(columns=["year", "month", "access", "total_mobile"])
#new_pv_totalmobile_df
# Pageviews desktop views
pv_desktop_df = pd.DataFrame((list(pageviews_monthly_desktop.values())[0]),columns = ['access','timestamp', 'views'])
pv_desktop_df['year'] = ""
pv_desktop_df['month'] = ""
pv_desktop_df['year'] = pv_desktop_df.timestamp.str[:4]
pv_desktop_df['month'] = pv_desktop_df.timestamp.str[4:6]
new_pv_desktop_df = pv_desktop_df
new_pv_desktop_df = new_pv_desktop_df.reindex(columns=["year", "month", "access","views"])
#new_pv_desktop_df
# Legacy mobile views
lg_mobile_df = pd.DataFrame((list(legacy_monthly_mobile.values())[0]), columns = ['access-site', 'timestamp', 'count'])
lg_mobile_df['year'] = lg_mobile_df['month'] = ""
lg_mobile_df['year'] = lg_mobile_df.timestamp.str[:4]
lg_mobile_df['month'] = lg_mobile_df.timestamp.str[4:6]
new_lg_mobile_df = lg_mobile_df
new_lg_mobile_df= new_lg_mobile_df.reindex(columns=["year", "month", "access-site", "count"])
#new_lg_mobile_df
# Legacy Desktop views
lg_desktop_df = pd.DataFrame((list(legacy_monthly_desktop.values())[0]), columns = ['access-site', 'timestamp', 'count'])
lg_desktop_df['year'] = lg_desktop_df['month'] = ""
lg_desktop_df['year'] = lg_desktop_df.timestamp.str[:4]
lg_desktop_df['month'] = lg_desktop_df.timestamp.str[4:6]
new_lg_desktop_df = lg_desktop_df
new_lg_desktop_df= new_lg_desktop_df.reindex(columns=["year", "month", "access-site", "count"])
#new_lg_desktop_df
#new_lg_mobile_df
new_pv_totalmobile_df
#new_pv_desktop_df
###################################
# END OF SECOND BULLET POINT #
###################################
###Output
_____no_output_____
###Markdown
Combining all dataThe goal is to have a dataframe that looks like this:| year | month |pagecount_all_views|pagecount_desktop_views|pagecount_mobile_views|pageview_all_views|pageview_desktop_views|pageview_mobile_views||------| ------|-------------------|-----------------------|----------------------|------------------|----------------------|---------------------|| YYYY | MM |num_views |num_views |num_views |num_views |num_views |num_views | 1. For this, a new dataframe is being initialized containing two columns: `year`, `month`2. Setting the year and month range by copying the dataframe with the maximum amount years. I end up with this| year | month ||------|-------|| 2015 | 07 || .... | .. || 2020 | 09 |3. Start joining dataframes to combine all data into single dataframe using outer joins4. Calculate the `pageview_all_views` as sum of `pageview_desktop_views + pageview_mobile_views`5. Calculate the `pagecount_all_views` as sum of `pagecount_desktop_views + pagecount_mobile_views`6. Creating new directory for clean data7. Saving the dataframe to `clean_data/result.csv` and to `clean_data/result.xlsx` because I like excel
###Code
############################################
# COMBINING ALL DATA INTO SINGLE DATAFRAME #
############################################
# Creating DataFrame with Columns: Year, Month (will be needed for joining later)
result = pd.DataFrame(columns=['year', 'month'])
# Initialize year and month range
result['year'] = new_lg_desktop_df['year']
result['month'] = new_lg_desktop_df['month']
# Merging result table with Pagecount mobile views and renaming column afterwards
result = pd.merge(result, new_lg_mobile_df[['year','month','count']], on=['year','month'], how='outer')
result = result.rename(columns = {'count': 'pagecount_mobile_views'})
# Merging result table with pagecount desktop views, renaming and rearranging
result = pd.merge(result, new_lg_desktop_df[['year','month','count']], on=['year','month'], how='outer')
result = result.rename(columns = {'count': 'pagecount_desktop_views'})
result = result.reindex(columns=['year','month','pagecount_desktop_views', 'pagecount_mobile_views'])
# Adding pagecount desktop + mobile
sum_pagecount = result["pagecount_desktop_views"] + result["pagecount_mobile_views"]
result["pagecount_all_views"] = sum_pagecount
result = result.reindex(columns=['year','month','pagecount_all_views','pagecount_desktop_views', 'pagecount_mobile_views'])
# Adding column for pageview_all_views
result['pageview_all_views']=""
# Adding pageview_desktop_views
result = pd.merge(result, new_pv_desktop_df[['year', 'month', 'views']], on=['year','month'], how='outer')
result = result.rename(columns = {'views': 'pageview_desktop_views'})
# Adding pageview_mobile_views
result = pd.merge(result, new_pv_totalmobile_df[['year','month','total_mobile']], on=['year','month'],how='outer')
result = result.rename(columns={'total_mobile':'pageview_mobile_views'})
# Summing pageview desktop+mobile
sum_pageview = result['pageview_desktop_views']+result['pageview_mobile_views']
result['pageview_all_views'] = sum_pageview
final_result = result
# Making directory for csv file
mkdir_p('clean data')
# Exporting dataframe to Excel
final_result.to_excel('clean data/result.xlsx', index=False)
# Exporting dataframe to csv
final_result.to_csv('clean data/result.csv', index=False)
#final_result
final_result
###Output
_____no_output_____
###Markdown
Step 3: AnalysisFor this assignment, the "analysis" will be fairly straightforward: you will visualize the dataset you have created as a **time series graph**. Your visualization will track three traffic metrics: mobile traffic, desktop traffic, and all traffic (mobile + desktop). In order to complete the analysis correctly and receive full credit, your graph will need to be the right scale to view the data; all units, axes, and values should be clearly labeled; and the graph should possess a legend and a title. You must also generate a .png or .jpeg formatted image of your final graph.Please graph the data in your notebook, rather than using an external application! Analyzing the given dataOk, so i figured out, that if you have data from pageview_mobile and data from pagecount_mobile it doesnt make sense to sum those up, since they are basically counting the same access type thats why i decided to calculate the average of the two provided data sources and plot it. And thats exactly whats happening here:
###Code
# Extract all needed data: mobiletraffic, desktoptraffic, alltrafic
step3 = pd.DataFrame(columns=['year', 'month'])
step3[['year','month']] = final_result[['year','month']]
# Calculating the mean for months that provide pageview and pagecount data (desktop):
column_desktop = final_result.loc[: , ["pagecount_desktop_views","pageview_desktop_views"]]
step3['desktop traffic'] = column_desktop.mean(axis=1)
#step3
# Calculating the mean for months that provide pageviews and pagecount data (mobile):
column_mobile = final_result.loc[:,["pagecount_mobile_views", "pageview_mobile_views"]]
step3['mobile traffic'] = column_mobile.mean(axis=1)
# Adding mobile mean + desktop mean traffic
step3['all traffic'] = step3.fillna(0)['desktop traffic'] + step3.fillna(0)['mobile traffic']
# Displaying all rows
pd.set_option('display.max_rows', step3.shape[0]+1)
# As a result I get a Dataframe with the following columns:
step3 # <-- Year | Month | desktop traffic | mobile traffic | all traffic
# Exporting result to csv
mkdir_p('result')
step3.to_csv('result/result data.csv', index=False)
###Output
_____no_output_____
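###Markdown
A small consistency check (a sketch over the dataframe built above) verifies that, wherever both traffic sources are present, `all traffic` equals the sum of the desktop and mobile means:
###Code
# rows where both means exist; there 'all traffic' should be exactly their sum
both = step3.dropna(subset=['desktop traffic', 'mobile traffic'])
(both['desktop traffic'] + both['mobile traffic'] - both['all traffic']).abs().max()
###Output
_____no_output_____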
###Markdown
Plotting data
###Code
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
df = pd.read_csv("result/result data.csv")
ax = step3.plot(figsize=(20,10), x='year', colormap='tab20c', title='Pageviews on english Wikipedia')
ax.set_xlabel("Year", size=20)
ax.set_ylabel("Views [in Billions]", size=20)
####################################
### Some beauty work from now on ###
####################################
# Scaling y_axis to billions = 10^9
# Source: https://stackoverflow.com/questions/10171618/changing-plot-scale-by-a-factor-in-matplotlib
scale_y = 1e9
ticks_y = ticker.FuncFormatter(lambda y, pos: '{0:g}'.format(y/scale_y))
ax.yaxis.set_major_formatter(ticks_y)
# Limiting y_axis with y_min = 0 and y_max = 12.000.000.000 = 12e9
# Source: https://stackoverflow.com/questions/3777861/setting-y-axis-limit-in-matplotlib
ax.set_ylim([0,12e9])
# Changing position/size of legend because why tf did it cover my graph
# Source: https://stackoverflow.com/questions/7125009/how-to-change-legend-size-with-matplotlib-pyplot
ax.legend(loc=2, prop={'size': 17})
# Making title bigger
ax.set_title(label='Pageviews on english Wikipedia', fontsize=20)
ax = ax.plot()
# Making sure the output directory for the figure exists (if not existent)
mkdir_p('result')
# Saving figure to png
plt.savefig('result/result graph.png', dpi=300)
###Output
_____no_output_____
###Markdown
Human-Centered Data Science ([HCDS](https://www.mi.fu-berlin.de/en/inf/groups/hcc/teaching/winter_term_2020_21/course_human_centered_data_science.html)) - Winter Term 2020/21 - [HCC](https://www.mi.fu-berlin.de/en/inf/groups/hcc/index.html) | [Freie Universität Berlin](https://www.fu-berlin.de/)*** A2 - Reproducibility Workflow Step 1️⃣: Data acquisitionIn order to measure Wikipedia traffic from January 2008 until October 2020, you will need to collect data from two different APIs:1. The **Legacy Pagecounts API** ([documentation](https://wikitech.wikimedia.org/wiki/Analytics/AQS/Legacy_Pagecounts), [endpoint](https://wikimedia.org/api/rest_v1/!/Pagecounts_data_(legacy)/get_metrics_legacy_pagecounts_aggregate_project_access_site_granularity_start_end)) provides access to desktop and mobile traffic data from December 2007 through July 2016.1. The **Pageviews API** ([documentation](https://wikitech.wikimedia.org/wiki/Analytics/AQS/Pageviews), [endpoint](https://wikimedia.org/api/rest_v1/!/Pageviews_data/get_metrics_pageviews_aggregate_project_access_agent_granularity_start_end)) provides access to desktop, mobile web, and mobile app traffic data from July 2015 through last month.For each API, you will need to collect data for all months where data is available and then save the raw results into five (3+2) separate `JSON` files (one file per API query type) before continuing to step 2. The first step is to prepare the queries, call the API endpoints, and write the JSON files to the folder "data_raw"
###Code
import json
import requests
legacy_desktop = 'https://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/en.wikipedia.org/desktop-site/monthly/2007120100/2016070100'
legacy_mobile = 'https://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/en.wikipedia.org/mobile-site/monthly/2007120100/2016070100'
pageviews_desktop = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/en.wikipedia.org/desktop/user/monthly/2015070100/2020111400'
pageviews_m_web = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/en.wikipedia.org/mobile-web/user/monthly/2015070100/2020110100'
pageviews_m_app = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/en.wikipedia.org/mobile-app/user/monthly/2015070100/2020110100'
headers = {
'User-Agent': 'https://github.com/Francosinus',
'From': '[email protected]'
}
def api_call(endpoint):
call = requests.get(endpoint, headers=headers)
response = call.json()
return response
def write_to_folder(file,name):
with open(str(name), 'w') as f:
json.dump(file, f)
list_website = [legacy_desktop,legacy_mobile,pageviews_desktop,pageviews_m_web,pageviews_m_app]
###Output
_____no_output_____
###Markdown
Save json files into folder "raw_data"
###Code
import os

path = 'data_raw/'
os.makedirs(path, exist_ok=True)  # make sure the target folder exists before writing the files
names = ["legacy_desktop-site_200712-201607", "legacy_mobile-site_200712-201607","pagecounts_desktop-site_20150701-20201101","pagecounts_mobile-web_20150701-20201101","pagecounts_mobile-app_20150701-20201101"]
for url, name in zip(list_website, names):
    file = api_call(url)
    write_to_folder(file, path + name + ".json")
###Output
_____no_output_____
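###Markdown
A short check (a sketch using the standard library `os` module) lists the files that were just written to the data_raw folder:
###Code
import os

# confirm that all five JSON files ended up in the target folder
sorted(os.listdir(path))
###Output
_____no_output_____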
###Markdown
Step 2: Data processingYou will need to perform a series of processing steps on these data files in order to prepare them for analysis. These steps must be followed exactly in order to prepare the data for analysis. At the end of this step, you will have a single `CSV`-formatted data file `en-wikipedia_traffic_200712-202010.csv` that can be used in your analysis (step 3) with no significant additional processing.* For data collected from the Pageviews API, combine the monthly values for `mobile-app` and `mobile-web` to create a total mobile traffic count for each month.* For all data, separate the value of `timestamp` into four-digit year (`YYYY`) and two-digit month (`MM`) and discard values for day and hour (`DDHH`).Combine all data into a single CSV file with the following headers:| year | month |pagecount_all_views|pagecount_desktop_views|pagecount_mobile_views|pageview_all_views|pageview_desktop_views|pageview_mobile_views||------| ------|-------------------|-----------------------|----------------------|------------------|----------------------|---------------------|| YYYY | MM |num_views |num_views |num_views |num_views |num_views |num_views | The prepared JSON files are again loaded from the data_raw folder and converted to a pandas dataframe to do further preprocessing steps. For that the files are loaded in loop and afterwards converted to dataframes which are written into a list. All dataframes are combined to a single dataframe.
###Code
import pandas as pd
import numpy as np
dfs = [] # an empty list to store the data frames
for file in names:
f = open(path+file+".json")
d = json.load(f)
data = pd.json_normalize(d, 'items') # read data frame from json file
dfs.append(data) # append the data frame to the list
df = pd.concat(dfs,ignore_index=True)
df
###Output
_____no_output_____
###Markdown
Since the two wikipedia APIs use different naming, we have to rename the columns and combine them afterwards.
###Code
df.columns = df.columns.str.replace('access-site', 'access')
df.columns = df.columns.str.replace('count', 'views')
s=df.stack()
df = s.unstack()
df['access'] = df['access'].str.replace('mobile-web','mobile-new')
df['access'] = df['access'].str.replace('mobile-app','mobile-new')
df
###Output
_____no_output_____
###Markdown
Since the timestamp is not in a valid format, the next step is to convert it to pandas timestamp.
###Code
df['timestamp'] = pd.to_datetime(df['timestamp'], format='%Y%m%d%H')
df['month'] = df['timestamp'].dt.month
df['year'] = df['timestamp'].dt.year
df=df.drop(['timestamp'], axis=1)
df
###Output
_____no_output_____
###Markdown
Since we want to know how many pageviews from which device were made, we have to pivot the table. This makes it possible to group by year and month and sum up the number of views for the different access devices (legacy and new).
###Code
tm=pd.pivot_table(df,index=["year","month"],values=["views"],
columns=["access"],aggfunc=[np.sum])
tm
###Output
_____no_output_____
###Markdown
The table has now a multilevel index and multicolumn levels. We drop two column levels and reset the index to have a simple dataframe again.
###Code
tm.columns = tm.columns.droplevel(0)
tm.columns = tm.columns.droplevel(0)
tm.columns.name = None
tm.reset_index(inplace=True)
tm
###Output
_____no_output_____
###Markdown
Now it's time to sum up the total pageviews.
###Code
tm['pagecount_all_views'] = tm.loc[:,['desktop-site','mobile-site']].sum(axis=1)
tm['pageviews_all_views'] = tm.loc[:,['desktop','mobile-new']].sum(axis=1)
tm
tm = tm.rename(columns={'desktop': 'pageviews_desktop_views', 'mobile-new': 'pageviews_mobile_views', 'desktop-site':'pagecount_desktop_views','mobile-site':'pagecount_mobile_views'})
tm
tm.to_csv("data_clean/en-wikipedia_traffic_200712-202010.csv")
###Output
_____no_output_____
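###Markdown
Reading the exported file back (a small sketch) confirms that the CSV contains the expected columns:
###Code
# reload the clean CSV and inspect its header
check = pd.read_csv("data_clean/en-wikipedia_traffic_200712-202010.csv", index_col=0)
check.columns.tolist()
###Output
_____no_output_____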
###Markdown
Step 3: AnalysisThe data is nicely preprocessed and can now be plotted.
###Code
import matplotlib.pyplot as plt
import seaborn as sns
pl = tm
pl.set_index(pd.to_datetime({
'day':1,
'month': pl.pop('month'),
'year': pl.pop('year')
}), inplace=True)
cols = ["pageviews_all_views","pagecount_all_views"]
pl[cols] = pl[cols].replace({0:np.nan})
fig, ax = plt.subplots(figsize=(20, 10))
desk,=ax.plot(pl.index.values,
pl['pageviews_desktop_views'],
color='blue')
mobile,=ax.plot(pl.index.values,
pl['pageviews_mobile_views'],
color='red')
alle,=ax.plot(pl.index.values,
pl['pageviews_all_views'],
color='black')
ax.plot(pl.index.values,
pl['pagecount_mobile_views'],
color='red',
linestyle="--")
ax.plot(pl.index.values,
pl['pagecount_desktop_views'],
color='blue',
linestyle="--")
ax.plot(pl.index.values,
pl['pagecount_all_views'],
color='black',
linestyle='--')
ax.set(xlabel="Year",
ylabel="Views (Billion)",
title="Page Views on English Wikipedia")
plt.legend([desk,mobile,alle],['Desktop Traffic','Mobile Traffic','All Traffic'])
plt.savefig("wiki.png")
plt.show()
###Output
_____no_output_____
###Markdown
Course Human-Centered Data Science ([HCDS](https://www.mi.fu-berlin.de/en/inf/groups/hcc/teaching/winter_term_2020_21/course_human_centered_data_science.html)) - Winter Term 2020/21 - [HCC](https://www.mi.fu-berlin.de/en/inf/groups/hcc/index.html) | [Freie Universitรคt Berlin](https://www.fu-berlin.de/)*** A2 - Reproducibility Workflow Your assignment is to create a graph that looks a lot like the one below one, starting from scratch, and following best practices for reproducible research. Before you start1. Read all instructions carefully before you begin.1. Read all API documentation carefully before you begin.1. Experiment with queries in the sandbox of the technical documentation for each API to familiarize yourself with the schema and the data.1. Ask questions if you are unsure about anything!1. When documenting your project, please keep the following questions in your mind: * _If I found this GitHub repository, and wanted to fully reproduce the analysis, what information would I want?_ * _What information would I need?_ Step 1๏ธโฃ: Data acquisitionIn order to measure Wikipedia traffic from January 2008 until October 2020, you will need to collect data from two different APIs:1. The **Legacy Pagecounts API** ([documentation](https://wikitech.wikimedia.org/wiki/Analytics/AQS/Legacy_Pagecounts), [endpoint](https://wikimedia.org/api/rest_v1/!/Pagecounts_data_(legacy)/get_metrics_legacy_pagecounts_aggregate_project_access_site_granularity_start_end)) provides access to desktop and mobile traffic data from December 2007 through July 2016.1. The **Pageviews API** ([documentation](https://wikitech.wikimedia.org/wiki/Analytics/AQS/Pageviews), [endpoint](https://wikimedia.org/api/rest_v1/!/Pageviews_data/get_metrics_pageviews_aggregate_project_access_agent_granularity_start_end)) provides access to desktop, mobile web, and mobile app traffic data from July 2015 through last month.For each API, you need to collect data for all months where data is available and then save the raw results into five (3+2) separate `JSON`files (one file per API query type) before continuing to step 2.To get you started, you can use the following **sample code for API calls**:
###Code
# Source: https://public.paws.wmcloud.org/User:Jtmorgan/data512_a1_example.ipynb?format=raw
import json
import requests
endpoint_legacy = 'https://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/{project}/{access-site}/{granularity}/{start}/{end}'
endpoint_pageviews = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/{project}/{access}/{agent}/{granularity}/{start}/{end}'
# SAMPLE parameters for getting aggregated legacy view data
# see: https://wikimedia.org/api/rest_v1/#!/Legacy_data/get_metrics_legacy_pagecounts_aggregate_project_access_site_granularity_start_end
example_params_legacy = {"project" : "en.wikipedia.org",
"access-site" : "desktop-site",
"granularity" : "monthly",
"start" : "2001010100",
# for end use 1st day of month following final month of data
"end" : "2018100100"
}
# SAMPLE parameters for getting aggregated current standard pageview data
# see: https://wikimedia.org/api/rest_v1/#!/Pageviews_data/get_metrics_pageviews_aggregate_project_access_agent_granularity_start_end
example_params_pageviews = {"project" : "en.wikipedia.org",
"access" : "desktop",
"agent" : "user",
"granularity" : "monthly",
"start" : "2001010100",
# for end use 1st day of month following final month of data
"end" : '2018101000'
}
# Customize these with your own information
headers = {
'User-Agent': 'https://github.com/yuxin16',
'From': '[email protected]'
}
def api_call(endpoint,parameters):
call = requests.get(endpoint.format(**parameters), headers=headers)
response = call.json()
return response
example_monthly_pageviews = api_call(endpoint_pageviews, example_params_pageviews)
example_monthly_pageviews
example_monthly_legacy = api_call(endpoint_legacy, example_params_legacy)
example_monthly_legacy
#Data Collection from API
# Legacy Pagecounts
pagecounts_desktop_param = {"project" : "en.wikipedia.org",
"access-site" : "desktop-site",
"granularity" : "monthly",
"start" : "2001010100",
# for end use 1st day of month following final month of data
"end" : "2020100100"
}
pagecounts_mobile_param = {"project" : "en.wikipedia.org",
"access-site" : "mobile-site",
"granularity" : "monthly",
"start" : "2001010100",
# for end use 1st day of month following final month of data
"end" : "2020100100"
}
# Pageviews
pageviews_desktop_param= {"project" : "en.wikipedia.org",
"access" : "desktop",
"agent" : "user",
"granularity" : "monthly",
"start" : "2001010100",
# for end use 1st day of month following final month of data
"end" : '2018101000'
}
pageviews_mobileweb_param = {"project" : "en.wikipedia.org",
"access" : "mobile-web",
"agent" : "user",
"granularity" : "monthly",
"start" : "2001010100",
# for end use 1st day of month following final month of data
"end" : '2018101000'
}
pageviews_mobileapp_param = {"project" : "en.wikipedia.org",
"access" : "mobile-app",
"agent" : "user",
"granularity" : "monthly",
"start" : "2001010100",
# for end use 1st day of month following final month of data
"end" : '2018101000'
}
# Store response into JSON File
pagecounts_desktop = api_call(endpoint_legacy, pagecounts_desktop_param)
pagecounts_mobile = api_call(endpoint_legacy, pagecounts_mobile_param)
pageviews_desktop = api_call(endpoint_pageviews, pageviews_desktop_param)
pageviews_mobileweb = api_call(endpoint_pageviews, pageviews_mobileweb_param)
pageviews_mobileapp = api_call(endpoint_pageviews, pageviews_mobileapp_param)
#pagecounts_desktop
#pagecounts_mobile
#pageviews_desktop
#pageviews_mobileweb
#pageviews_mobileapp
#write into json File
#create a folder
import os
try:
os.mkdir("raw_data")
print('data directory created.')
except:
pass
#create JSON File
"""
Source:
https://docs.python.org/3/library/json.html;
Python Cookbook Chapter 6.2
https://python3-cookbook.readthedocs.io/zh_CN/latest/c06/p02_read-write_json_data.html
https://stackabuse.com/reading-and-writing-json-to-a-file-in-python/
"""
def create_json_file(data,api,accesstype):
startpoint = data["items"][0]["timestamp"][0:6]
endpoint = data["items"][-1]["timestamp"][0:6]
filename = api + "_"+accesstype + "_"+ startpoint + "_" + endpoint + ".json"
with open(filename, "w") as f:
json.dump(data, f)
print(filename + " is written as JSON file.")
create_json_file(pagecounts_desktop,"pagecounts","desktop")
create_json_file(pagecounts_mobile,"pagecounts","mobile")
create_json_file(pageviews_desktop,"pageviews","desktop")
create_json_file(pageviews_mobileweb,"pageviews","mobileweb")
create_json_file(pageviews_mobileapp,"pageviews","mobileapp")
###Output
pagecounts_desktop_200712_201608.json is written as JSON file.
pagecounts_mobile_201410_201608.json is written as JSON file.
pageviews_desktop_201507_201809.json is written as JSON file.
pageviews_mobileweb_201507_201809.json is written as JSON file.
pageviews_mobileapp_201507_201809.json is written as JSON file.
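###Markdown
As a quick check (a sketch reusing one of the filenames printed above), a saved file can be read back to confirm it contains the complete API response:
###Code
# reload one of the saved files and count the monthly records it contains
with open("pagecounts_desktop_200712_201608.json") as f:
    reloaded = json.load(f)
len(reloaded["items"])
###Output
_____no_output_____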
###Markdown
Your `JSON`-formatted source data file must contain the complete and un-edited output of your API queries. The naming convention for the source data files is: `apiname_accesstype_firstmonth-lastmonth.json`. For example, your filename for monthly page views on desktop should be: `pagecounts_desktop-site_200712-202010.json` Important notesโ1. As much as possible, we're interested in *organic* (user) traffic, as opposed to traffic by web crawlers or spiders. The Pageview API (but not the Pagecount API) allows you to filter by `agent=user`. You should do that.1. There is about one year of overlapping traffic data between the two APIs. You need to gather, and later graph, data from both APIs for this period of time. Step 2: Data processingYou will need to perform a series of processing steps on these data files in order to prepare them for analysis. These steps must be followed exactly in order to prepare the data for analysis. At the end of this step, you will have a single `CSV`-formatted data file `en-wikipedia_traffic_200712-202010.csv` that can be used in your analysis (step 3) with no significant additional processing.* For data collected from the Pageviews API, combine the monthly values for `mobile-app` and `mobile-web` to create a total mobile traffic count for each month.* For all data, separate the value of `timestamp` into four-digit year (`YYYY`) and two-digit month (`MM`) and discard values for day and hour (`DDHH`).Combine all data into a single CSV file with the following headers:| year | month |pagecount_all_views|pagecount_desktop_views|pagecount_mobile_views|pageview_all_views|pageview_desktop_views|pageview_mobile_views||------| ------|-------------------|-----------------------|----------------------|------------------|----------------------|---------------------|| YYYY | MM |num_views |num_views |num_views |num_views |num_views |num_views |
###Code
# combine mobile traffic for pageviews API
"""
Source:
https://datatofish.com/load-json-pandas-dataframe/
"""
import pandas as pd
with open('pageviews_mobileweb_201507_201809.json') as f:
pageviews_mobileweb = json.load(f)
df_pageviews_mobileweb = pd.json_normalize(pageviews_mobileweb,["items"])
df_pageviews_mobileweb
with open('pageviews_mobileweb_201507_201809.json') as f:
pageviews_mobileweb = json.load(f)
df_pageviews_mobileweb = pd.json_normalize(pageviews_mobileweb,["items"])
df_pageviews_mobileweb["year"]=df_pageviews_mobileweb["timestamp"].str[0:4]
df_pageviews_mobileweb["month"]=df_pageviews_mobileweb["timestamp"].str[4:6]
df_pageviews_mobileweb = df_pageviews_mobileweb.loc[:, ['year','month','access','views']]
df_pageviews_mobileweb
#pd.json_normalize(r'pageviews_mobileweb_201507_201809.json')
# pageviews_mobileweb = pd.read_json (r'pageviews_mobileweb_201507_201809.json',orient='values')
# pageviews_mobileweb
# for row in pageviews_mobileweb.iterrows():
# print (row[1],row[3],row[4])
with open('pageviews_mobileapp_201507_201809.json') as f:
pageviews_mobileapp = json.load(f)
df_pageviews_mobileapp = pd.json_normalize(pageviews_mobileapp,["items"])
df_pageviews_mobileapp
with open('pageviews_mobileapp_201507_201809.json') as f:
pageviews_mobileapp = json.load(f)
df_pageviews_mobileapp = pd.json_normalize(pageviews_mobileapp,["items"])
df_pageviews_mobileapp["year"]=df_pageviews_mobileapp["timestamp"].str[0:4]
df_pageviews_mobileapp["month"]=df_pageviews_mobileapp["timestamp"].str[4:6]
df_pageviews_mobileapp = df_pageviews_mobileapp.loc[:, ['year','month','access','views']]
df_pageviews_mobileapp
#Combine mobile APP and Mobile Web and convert into monthly view
monthly_pageviews = pd.merge(df_pageviews_mobileapp, df_pageviews_mobileweb, how='outer', on=["year","month"])
monthly_pageviews["pageview_mobile_views"]=monthly_pageviews["views_x"]+monthly_pageviews["views_y"]
monthly_pageviews=monthly_pageviews.loc[:, ['year','month','pageview_mobile_views']]
monthly_pageviews
#create DataFrame for remaining json files
#df_pageviews_desktop
with open('pageviews_desktop_201507_201809.json') as f:
pageviews_desktop = json.load(f)
df_pageviews_desktop = pd.json_normalize(pageviews_desktop,["items"])
df_pageviews_desktop
with open('pageviews_desktop_201507_201809.json') as f:
pageviews_desktop = json.load(f)
df_pageviews_desktop = pd.json_normalize(pageviews_desktop,["items"])
df_pageviews_desktop["year"]=df_pageviews_desktop["timestamp"].str[0:4]
df_pageviews_desktop["month"]=df_pageviews_desktop["timestamp"].str[4:6]
df_pageviews_desktop["pageview_desktop_views"] = df_pageviews_desktop["views"]
df_pageviews_desktop = df_pageviews_desktop.loc[:, ['year','month','pageview_desktop_views']]
df_pageviews_desktop
# df pagecounts_desktop
with open('pagecounts_desktop_200712_201608.json') as f:
pagecounts_desktop = json.load(f)
df_pagecounts_desktop = pd.json_normalize(pagecounts_desktop,["items"])
with open('pagecounts_desktop_200712_201608.json') as f:
pagecounts_desktop = json.load(f)
df_pagecounts_desktop = pd.json_normalize(pagecounts_desktop,["items"])
df_pagecounts_desktop["year"]=df_pagecounts_desktop["timestamp"].str[0:4]
df_pagecounts_desktop["month"]=df_pagecounts_desktop["timestamp"].str[4:6]
df_pagecounts_desktop["pagecount_desktop_views"] = df_pagecounts_desktop["count"]
df_pagecounts_desktop = df_pagecounts_desktop.loc[:, ['year','month','pagecount_desktop_views']]
df_pagecounts_desktop
# pagecounts_mobile_201410_201608.json is written as JSON file.
with open('pagecounts_mobile_201410_201608.json') as f:
pagecounts_mobile = json.load(f)
df_pagecounts_mobile = pd.json_normalize(pagecounts_mobile,["items"])
with open('pagecounts_mobile_201410_201608.json') as f:
pagecounts_mobile = json.load(f)
df_pagecounts_mobile = pd.json_normalize(pagecounts_mobile,["items"])
df_pagecounts_mobile["year"]=df_pagecounts_mobile["timestamp"].str[0:4]
df_pagecounts_mobile["month"]=df_pagecounts_mobile["timestamp"].str[4:6]
df_pagecounts_mobile["pagecount_mobile_views"] = df_pagecounts_mobile["count"]
df_pagecounts_mobile = df_pagecounts_mobile.loc[:, ['year','month','pagecount_mobile_views']]
df_pagecounts_mobile
#Generate summary of API traffic of different access types
summary_pagecount=pd.merge(df_pagecounts_desktop, df_pagecounts_mobile,how='outer', on=["year","month"])
summary_pagecount
summary_pageview=pd.merge(df_pageviews_desktop, monthly_pageviews,how='outer', on=["year","month"])
summary_pageview
summary=pd.merge(summary_pagecount, summary_pageview ,how='outer', on=["year","month"])
summary
summary["pagecount_all_views"]=summary["pagecount_desktop_views"]+summary["pagecount_mobile_views"]
summary["pageview_all_views"]=summary["pageview_desktop_views"]+summary["pageview_mobile_views"]
cols = ["year","month","pagecount_all_views","pagecount_desktop_views","pagecount_mobile_views","pageview_all_views","pageview_desktop_views","pageview_mobile_views"]
#summary=summary[cols]
summary=summary.fillna(0)[cols]
summary
summary.to_csv('en_wikipedia_traffic.csv')
###Output
_____no_output_____
###Markdown
Step 3: AnalysisFor this assignment, the "analysis" will be fairly straightforward: you will visualize the dataset you have created as a **time series graph**. Your visualization will track three traffic metrics: mobile traffic, desktop traffic, and all traffic (mobile + desktop). In order to complete the analysis correctly and receive full credit, your graph will need to be the right scale to view the data; all units, axes, and values should be clearly labeled; and the graph should possess a legend and a title. You must also generate a .png or .jpeg formatted image of your final graph.Please graph the data in your notebook, rather than using an external application!
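One way to get a well-scaled time axis (a sketch that assumes the `summary` DataFrame from Step 2 is in memory; it is not part of the graded cells below, and the output file name is arbitrary) is to build a real datetime index from the `year` and `month` columns before plotting:

```python
import pandas as pd
import matplotlib.pyplot as plt

df_plot = summary.copy()
# a datetime index lets matplotlib space the monthly points correctly
df_plot.index = pd.to_datetime(df_plot['year'].astype(str) + '-' + df_plot['month'].astype(str))

ax = (df_plot[['pagecount_all_views', 'pageview_all_views']] / 1e9).plot(
    figsize=(12, 4), title='Page Views on English Wikipedia')
ax.set_ylabel('Views [in Billion]')
plt.savefig('wiki_traffic_sketch.png', dpi=150)  # hypothetical file name
```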
###Code
summary
# since three traffic metrics are plotted, the summary dataframe needs to be reshaped first
df_summary=summary
df_summary["mobile traffic"]=(df_summary["pagecount_mobile_views"]+df_summary["pageview_mobile_views"])/1000000000
df_summary["desktop traffic"]=(df_summary["pagecount_desktop_views"]+df_summary["pageview_desktop_views"])/1000000000
df_summary["all traffic"]=df_summary["mobile traffic"]+df_summary["desktop traffic"]
df_summary = df_summary.loc[:,["year","month","desktop traffic","mobile traffic","all traffic"]]
df_summary
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 8})
# pass figsize directly to DataFrame.plot; a separate plt.figure() call is ignored because pandas creates its own figure
traffic_visualization = df_summary.plot(x="year", figsize=(50,20), title="Page Views on English Wikipedia", legend=True)
traffic_visualization.set_ylabel('Views [in Billion]',fontdict={'fontsize':8})
traffic_visualization.set_xlabel('years',fontdict={'fontsize':8})
traffic_visualization
plt.savefig('Page Views on English Wikipedia.png', dpi=200);
# plt.figure(figsize=(50,15))
# plt.plot(df_summary['year'], df_summary['desktop traffic'] , color="blue")
# plt.plot(df_summary['year'], df_summary['mobile traffic'] , color="red")
# plt.plot(df_summary['year'], df_summary['all traffic'] , color="black")
# plt.legend(["desktop traffic", "mobile traffic","all traffic"])
# plt.title("Page Views on English Wikipedia")
# plt.show()
###Output
_____no_output_____
|
jupyter/vehlig_macro_corr.ipynb
|
###Markdown
Correlations: Light vehicles in Mexico, 2005-2019. Data: administrative records of the light-vehicle automotive industry, INEGI. Frequency: monthly. Period: January 2005 to October 2019. Prepared by the Subsecretaría de Industria, Comercio y Competitividad, Secretaría de Economía, with INEGI data. Pedro José Martínez Alanis. Last updated: November 22, 2019.
###Code
# library
Packages <- c("tidyverse","lubridate","ggplot2", "dplyr", "seasonal", "ggseas", "ggfortify", "forecast", "mFilter", "plotly", "dynlm", "AER", "MASS", "corrr", "corrplot")
suppressMessages(invisible(lapply(Packages, library, character.only=TRUE)))
#set strings as factors to false
options(stringsAsFactors=FALSE)
#search()
# raw data
rawdata <- read_csv("data/veh_lig.csv", col_types = cols() )
rawdata <- data.frame(mutate(rawdata, time = make_date(year, mes) ))
# variable selection
lmacro <-c("trabajadores" , "masa_salarial" , "ms_wrk" , "tiie" , "tc_fix" , "cetes28" , "inpc" , "pi" , "inpc_auto" , "pi_autos" , "inpc_combusible", "pi_combustible")
lqv <- c("qv" , "qv_mex" , "qv_imp" , "qv_deu" , "qv_bra" , "qv_can" , "qv_jpn" , "qv_gbr" , "qv_usa" , "qv_ind", "qv_kor" , "qv_tha" , "qv_1" , "qv_2" , "qv_7" )
lqp <- c("qp" , "qp_1" , "qp_2" , "qp_7")
lqx <- c("qx" , "qx_deu" , "qx_nafta" , "qx_fca" , "qx_ford" , "qx_gm" , "qx_nsa" , "qx_tyo" , "qx_vw" , "qx_hmc" , "qx_nafta_fca" , "qx_nafta_ford" , "qx_nafta_gm" , "qx_nafta_nsa" , "qx_nafta_tyo" , "qx_nafta_vw" , "qx_nafta_hmc" , "qx_1" , "qx_nafta_1" , "qx_2" , "qx_nafta_2" , "qx_7" , "qx_nafta_7")
# ts data
wrk <- ts(as_tibble(subset(rawdata, select=c("year", "mes",lmacro,lqv,lqp,lqx))), frequency=12, start=c(2005,1), end=c(2019,10))
#head(wrk,3)
# lista de variables: seasonal adjustment
wlist <- lmacro
for(i in seq_along(wlist)){ wlist[[i]] <- paste0(wlist[[i]],"_sa") }
lmacro_sa <- wlist
wlist <- lqv
for(i in seq_along(wlist)){ wlist[[i]] <- paste0(wlist[[i]],"_sa") }
lqv_sa <- wlist
wlist <- lqp
for(i in seq_along(wlist)){ wlist[[i]] <- paste0(wlist[[i]],"_sa") }
lqp_sa <- wlist
wlist <- lqx
for(i in seq_along(wlist)){ wlist[[i]] <- paste0(wlist[[i]],"_sa") }
lqx_sa <- wlist
rm(wlist)
# lista de variables: tendencia-ciclo
wlist <- lmacro
for(i in seq_along(wlist)){ wlist[[i]] <- paste0(wlist[[i]],"_tr") }
lmacro_tr <- wlist
wlist <- lqv
for(i in seq_along(wlist)){ wlist[[i]] <- paste0(wlist[[i]],"_tr") }
lqv_tr <- wlist
wlist <- lqp
for(i in seq_along(wlist)){ wlist[[i]] <- paste0(wlist[[i]],"_tr") }
lqp_tr <- wlist
wlist <- lqx
for(i in seq_along(wlist)){ wlist[[i]] <- paste0(wlist[[i]],"_tr") }
lqx_tr <- wlist
rm(wlist)
# for each variable: add to wrk their seasonal adjustment + hp trend
sahp <- function(db, var){
vsa <- seas(db[,var])
#vsa$model$arima
vhp <- hpfilter(vsa$data[,"seasonaladj"])
vsahp <- ts(subset(mutate(as_tibble(vsa$data), hp=vhp$trend),select=c("seasonaladj","trend", "hp")), frequency=12, start=c(2005,1), end=c(2019,10))
db <- mutate(as_tibble(db),
sa = vsahp[,"seasonaladj"],
tr= vsahp[,"trend"],
hp= vsahp[,"hp"]
)
names(db)[names(db)=="sa"] <- paste0(var,"_sa")
names(db)[names(db)=="tr"] <- paste0(var,"_tr")
names(db)[names(db)=="hp"] <- paste0(var,"_hp")
db <- ts(db, frequency=12, start=c(2005,1), end=c(2019,10))
rm(vsa, vhp, vsahp)
return(db)
}
###Output
_____no_output_____
###Markdown
Database with the seasonally adjusted and trend-cycle variables. The code below is kept here for reference; the next cell loads the pre-computed file that it writes:

    wlist <- c(lmacro, lqv, lqp, lqx)
    suppressMessages(for(i in seq_along(wlist)) {
      print(wlist[[i]])
      db <- wrk
      var <- wlist[[i]]
      wrk <- sahp(db, var)
    })
    dim(wrk)
    write_csv(as_tibble(wrk), path="output/autos/wrk_macro.csv")
###Code
wrk <- ts(as_tibble(read_csv("output/autos/wrk_macro.csv", col_types = cols() )), frequency=12, start=c(2005,1), end=c(2019,10))
#dim(wrk)
wrk_sa <- ts(subset(as_tibble(wrk), select=c(lmacro_sa,lmacro_tr,lqv_sa,lqv_tr, lqp_sa,lqp_tr, lqx_sa, lqx_tr)), frequency=12, start=c(2005,1), end=c(2019,10))
lwrk_sa <- names(as_tibble(wrk_sa))
#head(wrk_sa,3)
###Output
_____no_output_____
###Markdown
Correlations of macro variables
###Code
#lmacro_sa
corlist <- lmacro_sa[-c(3,4,7,9,11)]
#corlist
cordata <- subset(as_tibble(wrk_sa), select=corlist)
names(cordata) <- c("trabajadores", "Masa salarial", "Tipo de Cambio", "Tasa de interรฉs", "Inflaciรณn (INPC)", "Inflaciรณn Compra Autos", "Inflaciรณn Combustible Autos")
corrplot(cor(cordata), method="number", type="upper")
round(cor(cordata),2)
rm(cordata)
###Output
_____no_output_____
###Markdown
Correlations of light-vehicle sales: domestic and imported vehicles by country of origin
###Code
cordata <-subset(as_tibble(wrk_sa), select=c('qv_sa', 'qv_mex_sa', 'qv_imp_sa', "qv_deu_sa" , "qv_bra_sa" , "qv_can_sa" , "qv_jpn_sa" , "qv_gbr_sa" , "qv_usa_sa" , "qv_ind_sa", "qv_kor_sa" , "qv_tha_sa"))
names(cordata) <- c("Ventas totales", "Ventas autos nacional", "ventas autos importados", "autos de Alemania", "autos de Brasil", "autos de Canadรก", "autos de Japรณn", "autos de Reino Unido", "autos de EEUU", "autos de India", "autos de Corea", "autos de Tailandia")
round(cor(cordata),2)
corrplot(cor(cordata), method="number", type="upper")
rm(cordata)
###Output
_____no_output_____
###Markdown
Correlations of light-vehicle sales: by vehicle type
###Code
cordata <-subset(as_tibble(wrk_sa), select=c('qv_sa', 'qv_mex_sa', 'qv_imp_sa', "qv_1_sa" , "qv_2_sa" , "qv_7_sa"))
names(cordata) <- c("Ventas totales", "Ventas autos nacional", "ventas autos importados", "ventas autos compactos", "autos subcompactos", "SUVs")
round(cor(cordata),2)
corrplot(cor(cordata), method="number", type="upper")
rm(cordata)
###Output
_____no_output_____
###Markdown
Correlations of light-vehicle production
###Code
cordata <- subset(as_tibble(wrk_sa), select=lqp_sa)
colnames(cordata) <- c("Producciรณn Total", "Autos compactos", "Autos subcompactos", "SUV")
round(cor(cordata),2)
corrplot(cor(cordata), method="number", type="upper")
rm(cordata)
###Output
_____no_output_____
###Markdown
Correlations of light-vehicle exports
###Code
cordata <- subset(as_tibble(wrk_sa), select=c('qx_sa', 'qx_nafta_sa', 'qx_1_sa', 'qx_2_sa', 'qx_7_sa'))
colnames(cordata) <- c("Exportaciรณn Total de Autos", "Exportaciรณn regiรณn NAFTA/T-MEC", "Exportaciรณn autos compactos", "Exportaciรณn autos subcompactos", "Exportaciรณn SUVs")
round(cor(cordata),2)
corrplot(cor(cordata), method="number", type="upper")
rm(cordata)
###Output
_____no_output_____
|
Mapreduce_Databricks (1).ipynb
|
###Markdown
**READING DATA FILES AND RETURNING RDD**
###Code
rdd = sc.textFile('/FileStore/tables/*.bz2',minPartitions= 4)
rdd.take(4)
###Output
_____no_output_____
###Markdown
**Total Number of Counts**
###Code
rdd.count()
###Output
_____no_output_____
###Markdown
**Removing Header**
###Code
header = rdd.first()
rdd2=rdd.filter(lambda line: line != header)
rdd2.take(2)
###Output
_____no_output_____
###Markdown
**Implementing Mapreduce**
###Code
# map each row to (UniqueCarrier, 1) using column index 8, then count occurrences per carrier
# (the intermediate sortByKey is not needed for the count; the result is sorted again below)
rdd1 = rdd2.map(lambda x: x.split(',')[8]).map(lambda a: (a, 1)).sortByKey(ascending=True).reduceByKey(lambda a, b: a + b)
###Output
_____no_output_____
###Markdown
**Sorting**
###Code
rdd2=rdd1.sortByKey()
rdd2.take(2)
###Output
_____no_output_____
###Markdown
**Count of UniqueCarrier**
###Code
rdd2.take(30)
###Output
_____no_output_____
###Markdown
**Reading data in dataframe (Alternative method)**
###Code
DF = spark.read.csv('/FileStore/tables/*.bz2', header="true", inferSchema="true")
###Output
_____no_output_____
###Markdown
**Total counts by UniqueCarrier**
###Code
sorted(DF.groupBy(['UniqueCarrier']).count().collect())
###Output
_____no_output_____
|
Deep Learning Notebooks/5. Sequence Models/Week2/Emojify/Emojify+-+v2.ipynb
|
###Markdown
Emojify! Welcome to the second assignment of Week 2. You are going to use word vector representations to build an Emojifier. Have you ever wanted to make your text messages more expressive? Your emojifier app will help you do that. So rather than writing "Congratulations on the promotion! Lets get coffee and talk. Love you!" the emojifier can automatically turn this into "Congratulations on the promotion! 👍 Lets get coffee and talk. ☕️ Love you! ❤️" You will implement a model which inputs a sentence (such as "Let's go see the baseball game tonight!") and finds the most appropriate emoji to be used with this sentence (⚾️). In many emoji interfaces, you need to remember that ❤️ is the "heart" symbol rather than the "love" symbol. But using word vectors, you'll see that even if your training set explicitly relates only a few words to a particular emoji, your algorithm will be able to generalize and associate words in the test set to the same emoji even if those words don't even appear in the training set. This allows you to build an accurate classifier mapping from sentences to emojis, even using a small training set. In this exercise, you'll start with a baseline model (Emojifier-V1) using word embeddings, then build a more sophisticated model (Emojifier-V2) that further incorporates an LSTM. Let's get started! Run the following cell to load the package you are going to use.
###Code
import numpy as np
from emo_utils import *
import emoji
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
1 - Baseline model: Emojifier-V1 1.1 - Dataset EMOJISETLet's start by building a simple baseline classifier. You have a tiny dataset (X, Y) where:- X contains 127 sentences (strings)- Y contains a integer label between 0 and 4 corresponding to an emoji for each sentence **Figure 1**: EMOJISET - a classification problem with 5 classes. A few examples of sentences are given here. Let's load the dataset using the code below. We split the dataset between training (127 examples) and testing (56 examples).
###Code
X_train, Y_train = read_csv('data/train_emoji.csv')
X_test, Y_test = read_csv('data/tesss.csv')
maxLen = len(max(X_train, key=len).split())
###Output
_____no_output_____
###Markdown
Run the following cell to print sentences from X_train and corresponding labels from Y_train. Change `index` to see different examples. Because of the font the iPython notebook uses, the heart emoji may be colored black rather than red.
###Code
index = 12
print(X_train[index], label_to_emoji(Y_train[index]))
###Output
how many points did he score ⚾
###Markdown
1.2 - Overview of the Emojifier-V1In this part, you are going to implement a baseline model called "Emojifier-v1". **Figure 2**: Baseline model (Emojifier-V1).The input of the model is a string corresponding to a sentence (e.g. "I love you"). In the code, the output will be a probability vector of shape (1,5), which you then pass through an argmax layer to extract the index of the most likely emoji output. To get our labels into a format suitable for training a softmax classifier, let's convert $Y$ from its current shape $(m, 1)$ into a "one-hot representation" $(m, 5)$, where each row is a one-hot vector giving the label of one example. You can do so using this next code snippet. Here, `Y_oh` stands for "Y-one-hot" in the variable names `Y_oh_train` and `Y_oh_test`:
###Code
Y_oh_train = convert_to_one_hot(Y_train, C = 5)
Y_oh_test = convert_to_one_hot(Y_test, C = 5)
###Output
_____no_output_____
###Markdown
Let's see what `convert_to_one_hot()` did. Feel free to change `index` to print out different values.
###Code
index = 50
print(Y_train[index], "is converted into one hot", Y_oh_train[index])
###Output
0 is converted into one hot [ 1. 0. 0. 0. 0.]
###Markdown
All the data is now ready to be fed into the Emojify-V1 model. Let's implement the model! 1.3 - Implementing Emojifier-V1As shown in Figure (2), the first step is to convert an input sentence into the word vector representation, which then get averaged together. Similar to the previous exercise, we will use pretrained 50-dimensional GloVe embeddings. Run the following cell to load the `word_to_vec_map`, which contains all the vector representations.
###Code
word_to_index, index_to_word, word_to_vec_map = read_glove_vecs('data/glove.6B.50d.txt')
###Output
_____no_output_____
###Markdown
You've loaded:- `word_to_index`: dictionary mapping from words to their indices in the vocabulary (400,001 words, with the valid indices ranging from 0 to 400,000)- `index_to_word`: dictionary mapping from indices to their corresponding words in the vocabulary- `word_to_vec_map`: dictionary mapping words to their GloVe vector representation.Run the following cell to check if it works.
###Code
word = "cucumber"
index = 289846
print("the index of", word, "in the vocabulary is", word_to_index[word])
print("the", str(index) + "th word in the vocabulary is", index_to_word[index])
###Output
the index of cucumber in the vocabulary is 113317
the 289846th word in the vocabulary is potatos
###Markdown
**Exercise**: Implement `sentence_to_avg()`. You will need to carry out two steps:1. Convert every sentence to lower-case, then split the sentence into a list of words. `X.lower()` and `X.split()` might be useful. 2. For each word in the sentence, access its GloVe representation. Then, average all these values.
###Code
# GRADED FUNCTION: sentence_to_avg
def sentence_to_avg(sentence, word_to_vec_map):
"""
Converts a sentence (string) into a list of words (strings). Extracts the GloVe representation of each word
and averages its value into a single vector encoding the meaning of the sentence.
Arguments:
sentence -- string, one training example from X
word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation
Returns:
avg -- average vector encoding information about the sentence, numpy-array of shape (50,)
"""
### START CODE HERE ###
# Step 1: Split sentence into list of lower case words (โ 1 line)
words = (sentence.lower()).split()
# Initialize the average word vector, should have the same shape as your word vectors.
avg = np.zeros((50, ))
# Step 2: average the word vectors. You can loop over the words in the list "words".
for w in words:
avg += word_to_vec_map[w]
avg = avg / len(words)
### END CODE HERE ###
return avg
avg = sentence_to_avg("Morrocan couscous is my favorite dish", word_to_vec_map)
print("avg = ", avg)
###Output
avg = [-0.008005 0.56370833 -0.50427333 0.258865 0.55131103 0.03104983
-0.21013718 0.16893933 -0.09590267 0.141784 -0.15708967 0.18525867
0.6495785 0.38371117 0.21102167 0.11301667 0.02613967 0.26037767
0.05820667 -0.01578167 -0.12078833 -0.02471267 0.4128455 0.5152061
0.38756167 -0.898661 -0.535145 0.33501167 0.68806933 -0.2156265
1.797155 0.10476933 -0.36775333 0.750785 0.10282583 0.348925
-0.27262833 0.66768 -0.10706167 -0.283635 0.59580117 0.28747333
-0.3366635 0.23393817 0.34349183 0.178405 0.1166155 -0.076433
0.1445417 0.09808667]
###Markdown
**Expected Output**: **avg= ** [-0.008005 0.56370833 -0.50427333 0.258865 0.55131103 0.03104983 -0.21013718 0.16893933 -0.09590267 0.141784 -0.15708967 0.18525867 0.6495785 0.38371117 0.21102167 0.11301667 0.02613967 0.26037767 0.05820667 -0.01578167 -0.12078833 -0.02471267 0.4128455 0.5152061 0.38756167 -0.898661 -0.535145 0.33501167 0.68806933 -0.2156265 1.797155 0.10476933 -0.36775333 0.750785 0.10282583 0.348925 -0.27262833 0.66768 -0.10706167 -0.283635 0.59580117 0.28747333 -0.3366635 0.23393817 0.34349183 0.178405 0.1166155 -0.076433 0.1445417 0.09808667] ModelYou now have all the pieces to finish implementing the `model()` function. After using `sentence_to_avg()` you need to pass the average through forward propagation, compute the cost, and then backpropagate to update the softmax's parameters. **Exercise**: Implement the `model()` function described in Figure (2). Assuming here that $Yoh$ ("Y one hot") is the one-hot encoding of the output labels, the equations you need to implement in the forward pass and to compute the cross-entropy cost are:$$ z^{(i)} = W . avg^{(i)} + b$$$$ a^{(i)} = softmax(z^{(i)})$$$$ \mathcal{L}^{(i)} = - \sum_{k = 0}^{n_y - 1} Yoh^{(i)}_k * log(a^{(i)}_k)$$It is possible to come up with a more efficient vectorized implementation. But since we are using a for-loop to convert the sentences one at a time into the avg^{(i)} representation anyway, let's not bother this time. We provided you a function `softmax()`.
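As a small numeric illustration of these equations (made-up numbers; `softmax_demo` below is only a stand-in for the provided `softmax()` helper):

```python
import numpy as np

def softmax_demo(z):
    e = np.exp(z - np.max(z))   # subtract the max for numerical stability
    return e / e.sum()

z = np.array([2.0, 1.0, 0.1, -1.0, 0.5])   # hypothetical scores W.avg + b
a = softmax_demo(z)                        # probabilities that sum to 1
y_oh = np.array([1, 0, 0, 0, 0])           # one-hot label for class 0
loss = -np.sum(y_oh * np.log(a))           # cross-entropy reduces to -log(a[0])
print(a.round(3), round(float(loss), 3))
```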
###Code
# GRADED FUNCTION: model
def model(X, Y, word_to_vec_map, learning_rate = 0.01, num_iterations = 400):
"""
Model to train word vector representations in numpy.
Arguments:
X -- input data, numpy array of sentences as strings, of shape (m, 1)
Y -- labels, numpy array of integers between 0 and 7, numpy-array of shape (m, 1)
word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation
learning_rate -- learning_rate for the stochastic gradient descent algorithm
num_iterations -- number of iterations
Returns:
pred -- vector of predictions, numpy-array of shape (m, 1)
W -- weight matrix of the softmax layer, of shape (n_y, n_h)
b -- bias of the softmax layer, of shape (n_y,)
"""
np.random.seed(1)
# Define number of training examples
m = Y.shape[0] # number of training examples
n_y = 5 # number of classes
n_h = 50 # dimensions of the GloVe vectors
# Initialize parameters using Xavier initialization
W = np.random.randn(n_y, n_h) / np.sqrt(n_h)
b = np.zeros((n_y,))
# Convert Y to Y_onehot with n_y classes
Y_oh = convert_to_one_hot(Y, C = n_y)
# Optimization loop
for t in range(num_iterations): # Loop over the number of iterations
for i in range(m): # Loop over the training examples
### START CODE HERE ### (โ 4 lines of code)
# Average the word vectors of the words from the i'th training example
avg = sentence_to_avg(X[i], word_to_vec_map)
# Forward propagate the avg through the softmax layer
z = np.dot(W, avg) + b
a = softmax(z)
# Compute cost using the i'th training label's one hot representation and "A" (the output of the softmax)
cost = -(np.sum(Y_oh[i] * np.log(a)))
### END CODE HERE ###
# Compute gradients
dz = a - Y_oh[i]
dW = np.dot(dz.reshape(n_y,1), avg.reshape(1, n_h))
db = dz
# Update parameters with Stochastic Gradient Descent
W = W - learning_rate * dW
b = b - learning_rate * db
if t % 100 == 0:
print("Epoch: " + str(t) + " --- cost = " + str(cost))
pred = predict(X, Y, W, b, word_to_vec_map)
return pred, W, b
print(X_train.shape)
print(Y_train.shape)
print(np.eye(5)[Y_train.reshape(-1)].shape)
print(X_train[0])
print(type(X_train))
Y = np.asarray([5,0,0,5, 4, 4, 4, 6, 6, 4, 1, 1, 5, 6, 6, 3, 6, 3, 4, 4])
print(Y.shape)
X = np.asarray(['I am going to the bar tonight', 'I love you', 'miss you my dear',
'Lets go party and drinks','Congrats on the new job','Congratulations',
'I am so happy for you', 'Why are you feeling bad', 'What is wrong with you',
'You totally deserve this prize', 'Let us go play football',
'Are you down for football this afternoon', 'Work hard play harder',
'It is suprising how people can be dumb sometimes',
'I am very disappointed','It is the best day in my life',
'I think I will end up alone','My life is so boring','Good job',
'Great so awesome'])
print(X.shape)
print(np.eye(5)[Y_train.reshape(-1)].shape)
print(type(X_train))
###Output
(132,)
(132,)
(132, 5)
never talk to me again
<class 'numpy.ndarray'>
(20,)
(20,)
(132, 5)
<class 'numpy.ndarray'>
###Markdown
Run the next cell to train your model and learn the softmax parameters (W,b).
###Code
pred, W, b = model(X_train, Y_train, word_to_vec_map)
# print(pred)
###Output
Epoch: 0 --- cost = 1.95204988128
Accuracy: 0.348484848485
Epoch: 100 --- cost = 0.0797181872601
Accuracy: 0.931818181818
Epoch: 200 --- cost = 0.0445636924368
Accuracy: 0.954545454545
Epoch: 300 --- cost = 0.0343226737879
Accuracy: 0.969696969697
###Markdown
**Expected Output** (on a subset of iterations): **Epoch: 0** cost = 1.95204988128 Accuracy: 0.348484848485 **Epoch: 100** cost = 0.0797181872601 Accuracy: 0.931818181818 **Epoch: 200** cost = 0.0445636924368 Accuracy: 0.954545454545 **Epoch: 300** cost = 0.0343226737879 Accuracy: 0.969696969697 Great! Your model has pretty high accuracy on the training set. Lets now see how it does on the test set. 1.4 - Examining test set performance
###Code
print("Training set:")
pred_train = predict(X_train, Y_train, W, b, word_to_vec_map)
print('Test set:')
pred_test = predict(X_test, Y_test, W, b, word_to_vec_map)
###Output
Training set:
Accuracy: 0.977272727273
Test set:
Accuracy: 0.857142857143
###Markdown
**Expected Output**: **Train set accuracy** 97.7 **Test set accuracy** 85.7 Random guessing would have had 20% accuracy given that there are 5 classes. This is pretty good performance after training on only 127 examples. In the training set, the algorithm saw the sentence "*I love you*" with the label โค๏ธ. You can check however that the word "adore" does not appear in the training set. Nonetheless, lets see what happens if you write "*I adore you*."
###Code
X_my_sentences = np.array(["i adore you", "i love you", "funny lol", "lets play with a ball", "food is ready", "not feeling happy"])
Y_my_labels = np.array([[0], [0], [2], [1], [4],[3]])
pred = predict(X_my_sentences, Y_my_labels , W, b, word_to_vec_map)
print_predictions(X_my_sentences, pred)
###Output
Accuracy: 0.833333333333
i adore you ❤️
i love you ❤️
funny lol 😄
lets play with a ball ⚾
food is ready 🍴
not feeling happy 😄
###Markdown
Amazing! Because *adore* has a similar embedding as *love*, the algorithm has generalized correctly even to a word it has never seen before. Words such as *heart*, *dear*, *beloved* or *adore* have embedding vectors similar to *love*, and so might work too---feel free to modify the inputs above and try out a variety of input sentences. How well does it work?Note though that it doesn't get "not feeling happy" correct. This algorithm ignores word ordering, so is not good at understanding phrases like "not happy." Printing the confusion matrix can also help understand which classes are more difficult for your model. A confusion matrix shows how often an example whose label is one class ("actual" class) is mislabeled by the algorithm with a different class ("predicted" class).
###Code
print(Y_test.shape)
print(' '+ label_to_emoji(0)+ ' ' + label_to_emoji(1) + ' ' + label_to_emoji(2)+ ' ' + label_to_emoji(3)+' ' + label_to_emoji(4))
print(pd.crosstab(Y_test, pred_test.reshape(56,), rownames=['Actual'], colnames=['Predicted'], margins=True))
plot_confusion_matrix(Y_test, pred_test)
###Output
(56,)
           ❤️    ⚾    😄    😞   🍴
Predicted 0.0 1.0 2.0 3.0 4.0 All
Actual
0 6 0 0 1 0 7
1 0 8 0 0 0 8
2 2 0 16 0 0 18
3 1 1 2 12 0 16
4 0 0 1 0 6 7
All 9 9 19 13 6 56
###Markdown
**What you should remember from this part**:- Even with a 127 training examples, you can get a reasonably good model for Emojifying. This is due to the generalization power word vectors gives you. - Emojify-V1 will perform poorly on sentences such as *"This movie is not good and not enjoyable"* because it doesn't understand combinations of words--it just averages all the words' embedding vectors together, without paying attention to the ordering of words. You will build a better algorithm in the next part. 2 - Emojifier-V2: Using LSTMs in Keras: Let's build an LSTM model that takes as input word sequences. This model will be able to take word ordering into account. Emojifier-V2 will continue to use pre-trained word embeddings to represent words, but will feed them into an LSTM, whose job it is to predict the most appropriate emoji. Run the following cell to load the Keras packages.
###Code
import numpy as np
np.random.seed(0)
from keras.models import Model
from keras.layers import Dense, Input, Dropout, LSTM, Activation
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.initializers import glorot_uniform
np.random.seed(1)
###Output
Using TensorFlow backend.
###Markdown
2.1 - Overview of the modelHere is the Emojifier-v2 you will implement: **Figure 3**: Emojifier-V2. A 2-layer LSTM sequence classifier. 2.2 Keras and mini-batching In this exercise, we want to train Keras using mini-batches. However, most deep learning frameworks require that all sequences in the same mini-batch have the same length. This is what allows vectorization to work: If you had a 3-word sentence and a 4-word sentence, then the computations needed for them are different (one takes 3 steps of an LSTM, one takes 4 steps) so it's just not possible to do them both at the same time.The common solution to this is to use padding. Specifically, set a maximum sequence length, and pad all sequences to the same length. For example, of the maximum sequence length is 20, we could pad every sentence with "0"s so that each input sentence is of length 20. Thus, a sentence "i love you" would be represented as $(e_{i}, e_{love}, e_{you}, \vec{0}, \vec{0}, \ldots, \vec{0})$. In this example, any sentences longer than 20 words would have to be truncated. One simple way to choose the maximum sequence length is to just pick the length of the longest sentence in the training set. 2.3 - The Embedding layerIn Keras, the embedding matrix is represented as a "layer", and maps positive integers (indices corresponding to words) into dense vectors of fixed size (the embedding vectors). It can be trained or initialized with a pretrained embedding. In this part, you will learn how to create an [Embedding()](https://keras.io/layers/embeddings/) layer in Keras, initialize it with the GloVe 50-dimensional vectors loaded earlier in the notebook. Because our training set is quite small, we will not update the word embeddings but will instead leave their values fixed. But in the code below, we'll show you how Keras allows you to either train or leave fixed this layer. The `Embedding()` layer takes an integer matrix of size (batch size, max input length) as input. This corresponds to sentences converted into lists of indices (integers), as shown in the figure below. **Figure 4**: Embedding layer. This example shows the propagation of two examples through the embedding layer. Both have been zero-padded to a length of `max_len=5`. The final dimension of the representation is `(2,max_len,50)` because the word embeddings we are using are 50 dimensional. The largest integer (i.e. word index) in the input should be no larger than the vocabulary size. The layer outputs an array of shape (batch size, max input length, dimension of word vectors).The first step is to convert all your training sentences into lists of indices, and then zero-pad all these lists so that their length is the length of the longest sentence. **Exercise**: Implement the function below to convert X (array of sentences as strings) into an array of indices corresponding to words in the sentences. The output shape should be such that it can be given to `Embedding()` (described in Figure 4).
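For illustration, zero-padding a small batch of index lists to a common length might look like the sketch below (hypothetical indices; the graded `sentences_to_indices()` builds the padded matrix directly):

```python
import numpy as np

batch = [[12, 7, 95], [4, 88, 3, 61]]   # hypothetical word indices for two sentences
max_len = 5

padded = np.zeros((len(batch), max_len), dtype=int)
for i, seq in enumerate(batch):
    padded[i, :min(len(seq), max_len)] = seq[:max_len]   # pad with zeros, truncate if too long
print(padded)
# [[12  7 95  0  0]
#  [ 4 88  3 61  0]]
```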
###Code
# GRADED FUNCTION: sentences_to_indices
def sentences_to_indices(X, word_to_index, max_len):
"""
Converts an array of sentences (strings) into an array of indices corresponding to words in the sentences.
The output shape should be such that it can be given to `Embedding()` (described in Figure 4).
Arguments:
X -- array of sentences (strings), of shape (m, 1)
word_to_index -- a dictionary containing the each word mapped to its index
max_len -- maximum number of words in a sentence. You can assume every sentence in X is no longer than this.
Returns:
X_indices -- array of indices corresponding to words in the sentences from X, of shape (m, max_len)
"""
m = X.shape[0] # number of training examples
### START CODE HERE ###
# Initialize X_indices as a numpy matrix of zeros and the correct shape (โ 1 line)
X_indices = np.zeros((m, max_len))
for i in range(m): # loop over training examples
# Convert the ith training sentence in lower case and split is into words. You should get a list of words.
sentence_words = (X[i].lower()).split()
# Initialize j to 0
j = 0
# Loop over the words of sentence_words
for w in sentence_words:
# Set the (i,j)th entry of X_indices to the index of the correct word.
X_indices[i, j] = word_to_index[w]
# Increment j to j + 1
j = j+1
### END CODE HERE ###
return X_indices
###Output
_____no_output_____
###Markdown
Run the following cell to check what `sentences_to_indices()` does, and check your results.
###Code
X1 = np.array(["funny lol", "lets play baseball", "food is ready for you"])
X1_indices = sentences_to_indices(X1,word_to_index, max_len = 5)
print("X1 =", X1)
print("X1_indices =", X1_indices)
###Output
X1 = ['funny lol' 'lets play baseball' 'food is ready for you']
X1_indices = [[ 155345. 225122. 0. 0. 0.]
[ 220930. 286375. 69714. 0. 0.]
[ 151204. 192973. 302254. 151349. 394475.]]
###Markdown
**Expected Output**: **X1 =** ['funny lol' 'lets play football' 'food is ready for you'] **X1_indices =** [[ 155345. 225122. 0. 0. 0.] [ 220930. 286375. 151266. 0. 0.] [ 151204. 192973. 302254. 151349. 394475.]] Let's build the `Embedding()` layer in Keras, using pre-trained word vectors. After this layer is built, you will pass the output of `sentences_to_indices()` to it as an input, and the `Embedding()` layer will return the word embeddings for a sentence. **Exercise**: Implement `pretrained_embedding_layer()`. You will need to carry out the following steps:1. Initialize the embedding matrix as a numpy array of zeroes with the correct shape.2. Fill in the embedding matrix with all the word embeddings extracted from `word_to_vec_map`.3. Define Keras embedding layer. Use [Embedding()](https://keras.io/layers/embeddings/). Be sure to make this layer non-trainable, by setting `trainable = False` when calling `Embedding()`. If you were to set `trainable = True`, then it will allow the optimization algorithm to modify the values of the word embeddings. 4. Set the embedding weights to be equal to the embedding matrix
###Code
# GRADED FUNCTION: pretrained_embedding_layer
def pretrained_embedding_layer(word_to_vec_map, word_to_index):
"""
Creates a Keras Embedding() layer and loads in pre-trained GloVe 50-dimensional vectors.
Arguments:
word_to_vec_map -- dictionary mapping words to their GloVe vector representation.
word_to_index -- dictionary mapping from words to their indices in the vocabulary (400,001 words)
Returns:
embedding_layer -- pretrained layer Keras instance
"""
vocab_len = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)
emb_dim = word_to_vec_map["cucumber"].shape[0] # define dimensionality of your GloVe word vectors (= 50)
### START CODE HERE ###
# Initialize the embedding matrix as a numpy array of zeros of shape (vocab_len, dimensions of word vectors = emb_dim)
emb_matrix = np.zeros((vocab_len, emb_dim))
# Set each row "index" of the embedding matrix to be the word vector representation of the "index"th word of the vocabulary
for word, index in word_to_index.items():
emb_matrix[index, :] = word_to_vec_map[word]
# Define Keras embedding layer with the correct output/input sizes. Use Embedding(...) and set trainable=False so the GloVe vectors stay fixed.
embedding_layer = Embedding(vocab_len, emb_dim, trainable=False)
### END CODE HERE ###
# Build the embedding layer, it is required before setting the weights of the embedding layer. Do not modify the "None".
embedding_layer.build((None,))
# Set the weights of the embedding layer to the embedding matrix. Your layer is now pretrained.
embedding_layer.set_weights([emb_matrix])
return embedding_layer
embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)
print("weights[0][1][3] =", embedding_layer.get_weights()[0][1][3])
###Output
weights[0][1][3] = -0.3403
###Markdown
**Expected Output**: **weights[0][1][3] =** -0.3403 2.3 Building the Emojifier-V2Lets now build the Emojifier-V2 model. You will do so using the embedding layer you have built, and feed its output to an LSTM network. **Figure 3**: Emojifier-v2. A 2-layer LSTM sequence classifier. **Exercise:** Implement `Emojify_V2()`, which builds a Keras graph of the architecture shown in Figure 3. The model takes as input an array of sentences of shape (`m`, `max_len`, ) defined by `input_shape`. It should output a softmax probability vector of shape (`m`, `C = 5`). You may need `Input(shape = ..., dtype = '...')`, [LSTM()](https://keras.io/layers/recurrent/lstm), [Dropout()](https://keras.io/layers/core/dropout), [Dense()](https://keras.io/layers/core/dense), and [Activation()](https://keras.io/activations/).
###Code
# GRADED FUNCTION: Emojify_V2
def Emojify_V2(input_shape, word_to_vec_map, word_to_index):
"""
Function creating the Emojify-v2 model's graph.
Arguments:
input_shape -- shape of the input, usually (max_len,)
word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation
word_to_index -- dictionary mapping from words to their indices in the vocabulary (400,001 words)
Returns:
model -- a model instance in Keras
"""
### START CODE HERE ###
# Define sentence_indices as the input of the graph, it should be of shape input_shape and dtype 'int32' (as it contains indices).
sentence_indices = Input(shape=input_shape, dtype='int32')
# Create the embedding layer pretrained with GloVe Vectors (โ1 line)
embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)
# Propagate sentence_indices through your embedding layer, you get back the embeddings
embeddings = embedding_layer(sentence_indices)
# Propagate the embeddings through an LSTM layer with 128-dimensional hidden state
# Be careful, the returned output should be a batch of sequences.
X = LSTM(128, return_sequences=True)(embeddings)
# Add dropout with a probability of 0.5
X = Dropout(0.5)(X)
# Propagate X trough another LSTM layer with 128-dimensional hidden state
# Be careful, the returned output should be a single hidden state, not a batch of sequences.
X = LSTM(128)(X)
# Add dropout with a probability of 0.5
X = Dropout(0.5)(X)
# Propagate X through a Dense layer to get back a batch of 5-dimensional vectors; the softmax is applied by the Activation layer below, so it is not repeated here.
X = Dense(5)(X)
# Add a softmax activation
X = Activation('softmax')(X)
# Create Model instance which converts sentence_indices into X.
model = Model(sentence_indices, X)
### END CODE HERE ###
return model
###Output
_____no_output_____
###Markdown
Run the following cell to create your model and check its summary. Because all sentences in the dataset are less than 10 words, we chose `max_len = 10`. You should see your architecture, it uses "20,223,927" parameters, of which 20,000,050 (the word embeddings) are non-trainable, and the remaining 223,877 are. Because our vocabulary size has 400,001 words (with valid indices from 0 to 400,000) there are 400,001\*50 = 20,000,050 non-trainable parameters.
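The trainable count can be verified by hand: the first LSTM layer has $4 \times (50 \times 128 + 128 \times 128 + 128) = 91648$ parameters, the second LSTM has $4 \times (128 \times 128 + 128 \times 128 + 128) = 131584$, and the Dense layer has $128 \times 5 + 5 = 645$, which together give the 223,877 trainable parameters reported in the summary.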
###Code
model = Emojify_V2((maxLen,), word_to_vec_map, word_to_index)
model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) (None, 10) 0
_________________________________________________________________
embedding_2 (Embedding) (None, 10, 50) 20000050
_________________________________________________________________
lstm_1 (LSTM) (None, 10, 128) 91648
_________________________________________________________________
dropout_1 (Dropout) (None, 10, 128) 0
_________________________________________________________________
lstm_2 (LSTM) (None, 128) 131584
_________________________________________________________________
dropout_2 (Dropout) (None, 128) 0
_________________________________________________________________
dense_1 (Dense) (None, 5) 645
_________________________________________________________________
activation_1 (Activation) (None, 5) 0
=================================================================
Total params: 20,223,927
Trainable params: 223,877
Non-trainable params: 20,000,050
_________________________________________________________________
###Markdown
As usual, after creating your model in Keras, you need to compile it and define the loss, optimizer and metrics you want to use. Compile your model using `categorical_crossentropy` loss, the `adam` optimizer and `['accuracy']` metrics:
###Code
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
It's time to train your model. Your Emojifier-V2 `model` takes as input an array of shape (`m`, `max_len`) and outputs probability vectors of shape (`m`, `number of classes`). We thus have to convert X_train (array of sentences as strings) to X_train_indices (array of sentences as list of word indices), and Y_train (labels as indices) to Y_train_oh (labels as one-hot vectors).
###Code
X_train_indices = sentences_to_indices(X_train, word_to_index, maxLen)
Y_train_oh = convert_to_one_hot(Y_train, C = 5)
###Output
_____no_output_____
###Markdown
Fit the Keras model on `X_train_indices` and `Y_train_oh`. We will use `epochs = 50` and `batch_size = 32`.
###Code
model.fit(X_train_indices, Y_train_oh, epochs = 50, batch_size = 32, shuffle=True)
###Output
Epoch 1/50
132/132 [==============================] - 0s - loss: 1.6086 - acc: 0.1667
Epoch 2/50
132/132 [==============================] - 0s - loss: 1.5873 - acc: 0.3333
Epoch 3/50
132/132 [==============================] - 0s - loss: 1.5727 - acc: 0.2652
Epoch 4/50
132/132 [==============================] - ETA: 0s - loss: 1.5586 - acc: 0.343 - 0s - loss: 1.5542 - acc: 0.3485
Epoch 5/50
132/132 [==============================] - 0s - loss: 1.5418 - acc: 0.3106
Epoch 6/50
132/132 [==============================] - 0s - loss: 1.5202 - acc: 0.3636
Epoch 7/50
132/132 [==============================] - 0s - loss: 1.5290 - acc: 0.3258
Epoch 8/50
132/132 [==============================] - 0s - loss: 1.4652 - acc: 0.4545
Epoch 9/50
132/132 [==============================] - 0s - loss: 1.4336 - acc: 0.4924
Epoch 10/50
132/132 [==============================] - 0s - loss: 1.3730 - acc: 0.6212
Epoch 11/50
132/132 [==============================] - 0s - loss: 1.3450 - acc: 0.6061
Epoch 12/50
132/132 [==============================] - 0s - loss: 1.2794 - acc: 0.6894
Epoch 13/50
132/132 [==============================] - 0s - loss: 1.2718 - acc: 0.6364
Epoch 14/50
132/132 [==============================] - 0s - loss: 1.2631 - acc: 0.6667
Epoch 15/50
132/132 [==============================] - 0s - loss: 1.2087 - acc: 0.6970
Epoch 16/50
132/132 [==============================] - 0s - loss: 1.2345 - acc: 0.7197
Epoch 17/50
132/132 [==============================] - 0s - loss: 1.2316 - acc: 0.7121
Epoch 18/50
132/132 [==============================] - 0s - loss: 1.1441 - acc: 0.7879
Epoch 19/50
132/132 [==============================] - 0s - loss: 1.1234 - acc: 0.7955
Epoch 20/50
132/132 [==============================] - 0s - loss: 1.1098 - acc: 0.7879
Epoch 21/50
132/132 [==============================] - 0s - loss: 1.0762 - acc: 0.8409
Epoch 22/50
132/132 [==============================] - 0s - loss: 1.1472 - acc: 0.7652
Epoch 23/50
132/132 [==============================] - 0s - loss: 1.1541 - acc: 0.7424
Epoch 24/50
132/132 [==============================] - 0s - loss: 1.1350 - acc: 0.7727
Epoch 25/50
132/132 [==============================] - 0s - loss: 1.0922 - acc: 0.8182
Epoch 26/50
132/132 [==============================] - 0s - loss: 1.0399 - acc: 0.8712
Epoch 27/50
132/132 [==============================] - 0s - loss: 1.0526 - acc: 0.8409
Epoch 28/50
132/132 [==============================] - 0s - loss: 1.1477 - acc: 0.7652
Epoch 29/50
132/132 [==============================] - 0s - loss: 1.2821 - acc: 0.6288
Epoch 30/50
132/132 [==============================] - 0s - loss: 1.2171 - acc: 0.6970
Epoch 31/50
132/132 [==============================] - 0s - loss: 1.2006 - acc: 0.6818
Epoch 32/50
132/132 [==============================] - 0s - loss: 1.1316 - acc: 0.7803
Epoch 33/50
132/132 [==============================] - 0s - loss: 1.0251 - acc: 0.9015
Epoch 34/50
132/132 [==============================] - 0s - loss: 1.1396 - acc: 0.7576
Epoch 35/50
132/132 [==============================] - 0s - loss: 1.0882 - acc: 0.8333
Epoch 36/50
132/132 [==============================] - 0s - loss: 1.0717 - acc: 0.8333
Epoch 37/50
132/132 [==============================] - 0s - loss: 1.1267 - acc: 0.7803
Epoch 38/50
132/132 [==============================] - 0s - loss: 1.0981 - acc: 0.8030
Epoch 39/50
132/132 [==============================] - 0s - loss: 1.0484 - acc: 0.8561
Epoch 40/50
132/132 [==============================] - 0s - loss: 1.0165 - acc: 0.9015
Epoch 41/50
132/132 [==============================] - 0s - loss: 1.0120 - acc: 0.9015
Epoch 42/50
132/132 [==============================] - 0s - loss: 1.0052 - acc: 0.9167
Epoch 43/50
132/132 [==============================] - 0s - loss: 0.9873 - acc: 0.9242
Epoch 44/50
132/132 [==============================] - 0s - loss: 0.9850 - acc: 0.9242
Epoch 45/50
132/132 [==============================] - 0s - loss: 0.9799 - acc: 0.9318
Epoch 46/50
132/132 [==============================] - 0s - loss: 0.9702 - acc: 0.9394
Epoch 47/50
132/132 [==============================] - 0s - loss: 0.9719 - acc: 0.9394
Epoch 48/50
132/132 [==============================] - 0s - loss: 0.9672 - acc: 0.9394
Epoch 49/50
132/132 [==============================] - 0s - loss: 0.9678 - acc: 0.9394
Epoch 50/50
132/132 [==============================] - 0s - loss: 0.9733 - acc: 0.9394
###Markdown
Your model should perform close to **100% accuracy** on the training set. The exact accuracy you get may be a little different. Run the following cell to evaluate your model on the test set.
###Code
X_test_indices = sentences_to_indices(X_test, word_to_index, max_len = maxLen)
Y_test_oh = convert_to_one_hot(Y_test, C = 5)
loss, acc = model.evaluate(X_test_indices, Y_test_oh)
print()
print("Test accuracy = ", acc)
###Output
32/56 [================>.............] - ETA: 0s
Test accuracy = 0.803571428571
###Markdown
You should get a test accuracy between 80% and 95%. Run the cell below to see the mislabelled examples.
###Code
# This code allows you to see the mislabelled examples
C = 5
y_test_oh = np.eye(C)[Y_test.reshape(-1)]
X_test_indices = sentences_to_indices(X_test, word_to_index, maxLen)
pred = model.predict(X_test_indices)
for i in range(len(X_test)):
x = X_test_indices
num = np.argmax(pred[i])
if(num != Y_test[i]):
print('Expected emoji:'+ label_to_emoji(Y_test[i]) + ' prediction: '+ X_test[i] + label_to_emoji(num).strip())
###Output
Expected emoji:๐ prediction: she got me a nice present โค๏ธ
Expected emoji:๐ prediction: he is a good friend โค๏ธ
Expected emoji:๐ prediction: work is hard ๐
Expected emoji:๐ prediction: This girl is messing with me โค๏ธ
Expected emoji:๐ prediction: work is horrible ๐
Expected emoji:๐ด prediction: any suggestions for dinner ๐
Expected emoji:๐ prediction: you brighten my day โค๏ธ
Expected emoji:๐ prediction: she is a bully โค๏ธ
Expected emoji:๐ prediction: My life is so boring โค๏ธ
Expected emoji:๐ prediction: will you be my valentine ๐
Expected emoji:๐ prediction: go away โพ
###Markdown
Now you can try it on your own example. Write your own sentence below.
###Code
# Change the sentence below to see your prediction. Make sure all the words are in the Glove embeddings.
x_test = np.array(['not feeling happy'])
X_test_indices = sentences_to_indices(x_test, word_to_index, maxLen)
print(x_test[0] +' '+ label_to_emoji(np.argmax(model.predict(X_test_indices))))
###Output
not feeling happy 😞
|
08_WinningestMethods/lightgbm_m5_forecasting.ipynb
|
###Markdown
Chapter 8: Winningest Methods in Time Series ForecastingCompiled by: Sebastian C. Ibaรฑez and Michael DorosanIn previous sections, we examined several models used in time series forecasting such as ARIMA, VAR, and Exponential Smoothing methods. While the main advantage of traditional statistical methods is their ability to perform more sophisticated inference tasks directly (e.g. hypothesis testing on parameters, causality testing), they usually lack predictive power because of their rigid assumptions. That is not to say that they are necessarily inferior when it comes to forecasting, but rather they are typically used as performance benchmarks.In this section, we demonstrate several of the fundamental ideas and approaches used in the recently concluded [`M5 Competition`](https://mofc.unic.ac.cy/m5-competition/) where challengers from all over the world competed in building time series forecasting models for both [`accuracy`](https://www.kaggle.com/c/m5-forecasting-accuracy) and [`uncertainty`](https://www.kaggle.com/c/m5-forecasting-uncertainty) prediction tasks. Specifically, we explore the machine learning model that majority of the competition's winners utilized: [`LightGBM`](https://lightgbm.readthedocs.io/en/latest/index.html), a tree-based gradient boosting framework designed for speed and efficiency. 1. M5 DatasetYou can download the M5 dataset from the Kaggle links above. Let's load the dataset and examine it.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plot_x_size = 15
plot_y_size = 2
np.set_printoptions(precision = 6, suppress = True)
date_list = [d.strftime('%Y-%m-%d') for d in pd.date_range(start = '2011-01-29', end = '2016-04-24')]
df_calendar = pd.read_csv('../data/m5/calendar.csv')
df_price = pd.read_csv('../data/m5/sell_prices.csv')
df_sales = pd.read_csv('../data/m5/sales_train_validation.csv')
df_sales.rename(columns = dict(zip(df_sales.columns[6:], date_list)), inplace = True)
df_sales
df_calendar
df_price
###Output
_____no_output_____
###Markdown
Sample ProductLet's choose a random product and plot it.
###Code
df_sample = df_sales.iloc[3, :]
series_sample = df_sample.iloc[6:]
df_sample
plt.rcParams['figure.figsize'] = [plot_x_size, plot_y_size]
series_sample.plot()
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Pick a Time SeriesLet's try and find an interesting time series to forecast.
###Code
df_sales_total_by_store = df_sales.groupby(['store_id']).sum()
df_sales_total_by_store
plt.rcParams['figure.figsize'] = [plot_x_size, 4]
df_sales_total_by_store.T.plot()
plt.show()
series = df_sales_total_by_store.iloc[0]
print(series.name)
print('Min Dates:' + str(series[series == series.min()].index.to_list()))
plt.rcParams['figure.figsize'] = [plot_x_size, plot_y_size]
series.plot()
plt.legend()
plt.show()
###Output
CA_1
Min Dates:['2011-12-25', '2012-12-25', '2013-12-25', '2014-12-25', '2015-12-25']
###Markdown
2. Pre-processingBefore we build a forecasting model, let's check some properties of our time series. Is the series non-stationary?Let's check.
###Code
from statsmodels.tsa.stattools import adfuller
result = adfuller(series)
print('ADF Statistic: %f' % result[0])
print('p-value: %f' % result[1])
print('Critical Values:')
for key, value in result[4].items():
print('\t%s: %.3f' % (key, value))
###Output
ADF Statistic: -2.035408
p-value: 0.271267
Critical Values:
1%: -3.434
5%: -2.863
10%: -2.568
###Markdown
Does differencing make the series stationary?Let's check.
###Code
def difference(dataset, interval = 1):
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return np.array(diff)
def inverse_difference(history, yhat, interval=1):
return yhat + history[-interval]
series_d1 = difference(series)
result = adfuller(series_d1)
print('ADF Statistic: %f' % result[0])
print('p-value: %f' % result[1])
print('Critical Values:')
for key, value in result[4].items():
print('\t%s: %.3f' % (key, value))
###Output
ADF Statistic: -20.626012
p-value: 0.000000
Critical Values:
1%: -3.434
5%: -2.863
10%: -2.568
###Markdown
Is the series seasonal?Let's check.
###Code
from statsmodels.graphics.tsaplots import plot_acf
plt.rcParams['figure.figsize'] = [plot_x_size, plot_y_size]
plot_acf(series)
plt.show()
plot_acf(series, lags = 730, use_vlines = True)
plt.show()
###Output
_____no_output_____
###Markdown
Can we remove the seasonality?Let's check.
###Code
series_d7 = difference(series, 7)
plt.rcParams['figure.figsize'] = [plot_x_size, plot_y_size]
plot_acf(series_d7)
plt.show()
plot_acf(series_d7, lags = 730, use_vlines = True)
plt.show()
series_d7_d30 = difference(series_d7, 30)
plt.rcParams['figure.figsize'] = [plot_x_size, plot_y_size]
plot_acf(series_d7_d30)
plt.show()
plot_acf(series_d7_d30, lags = 730, use_vlines = True)
plt.show()
result = adfuller(series_d7_d30)
print('ADF Statistic: %f' % result[0])
print('p-value: %f' % result[1])
print('Critical Values:')
for key, value in result[4].items():
print('\t%s: %.3f' % (key, value))
series_d7_d30 = pd.Series(series_d7_d30)
series_d7_d30.index = date_list[37:]
plt.rcParams['figure.figsize'] = [plot_x_size, plot_y_size]
series_d7_d30.plot(label = 'Differenced Series')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
What now?At this point we have two options:- Model the seasonally differenced series, then reverse the differencing after making predictions.- Model the original series directly.While (vanilla) ARIMA requires a non-stationary and non-seasonal time series, these properties are not necessary for most non-parametric ML models. 3. One-Step PredictionLet's build a model for making one-step forecasts.To do this, we first need to transform the time series data into a supervised learning dataset.In other words, we need to create a new dataset consisting of $X$ and $Y$ variables, where $X$ refers to the features and $Y$ refers to the target. How far do we lookback?To create the new $(X,Y)$ dataset, we first need to decide what the $X$ features are. For the moment, let's ignore any exogenous variables. In this case, what determines the $X$s is how far we lookback. In general, we can treat the lookback as a hyperparameter, which we will call `window_size`.Advanced note: Technically, we could build an entire methodology for feature engineering $X$. Test SetTo test our model we will use the last 28 days of the series.
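A toy illustration of the windowing idea (made-up numbers; the `create_xy` helper in the next cell does this for the real series):

```python
import numpy as np

toy = np.array([10, 20, 30, 40, 50, 60])
window_size, horizon = 3, 1

# each X row is a lookback window; each Y is the value that follows it
X = np.array([toy[i:i + window_size] for i in range(len(toy) - window_size - horizon + 1)])
Y = np.array([toy[i + window_size] for i in range(len(toy) - window_size - horizon + 1)])
print(X)   # [[10 20 30] [20 30 40] [30 40 50]]
print(Y)   # [40 50 60]
```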
###Code
### CREATE X,Y ####
def create_xy(series, window_size, prediction_horizon, shuffle = False):
x = []
y = []
for i in range(0, len(series)):
if len(series[(i + window_size):(i + window_size + prediction_horizon)]) < prediction_horizon:
break
x.append(series[i:(i + window_size)])
y.append(series[(i + window_size):(i + window_size + prediction_horizon)])
x = np.array(x)
y = np.array(y)
return x,y
### HYPERPARAMETERS ###
window_size = 365
prediction_horizon = 1
### TRAIN VAL SPLIT ### (include shuffling later)
test_size = 28
split_time = len(series) - test_size
train_series = series[:split_time]
test_series = series[split_time - window_size:]
train_x, train_y = create_xy(train_series, window_size, prediction_horizon)
test_x, test_y = create_xy(test_series, window_size, prediction_horizon)
train_y = train_y.flatten()
test_y = test_y.flatten()
print(train_x.shape)
print(train_y.shape)
print(test_x.shape)
print(test_y.shape)
plt.rcParams['figure.figsize'] = [plot_x_size, plot_y_size]
series[-test_size:].plot(label = 'CA_1 Test Series')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
LightGBMNow we can build a LightGBM model to forecast our time series.Gradient boosting is an ensemble method that combines multiple weak models to produce a single strong prediction model. The method involves constructing the model (called a gradient boosting machine) in a serial stage-wise manner by sequentially optimizing a differentiable loss function at each stage. Much like in other boosting algorithms, each new weak learner is trained on the residual errors of the current ensemble.For this work, we use LightGBM, a gradient boosting framework designed for speed and efficiency. Specifically, the framework uses tree-based learning algorithms.To tune the model's hyperparameters, we use a combination of grid search and repeated k-fold cross validation, with some manual tuning. For more details, see the Hyperparameter Tuning notebook. Now we train the model on the full dataset and test it.
###Code
import lightgbm as lgb
params = {
'n_estimators': 2000,
'max_depth': 4,
'num_leaves': 2**4,
'learning_rate': 0.1,
'boosting_type': 'dart'
}
model = lgb.LGBMRegressor(first_metric_only = True, **params)
model.fit(train_x, train_y,
eval_metric = 'l1',
eval_set = [(test_x, test_y)],
#early_stopping_rounds = 10,
verbose = 0)
forecast = model.predict(test_x)
s1_naive = series[-29:-1].to_numpy()
s7_naive = series[-35:-7].to_numpy()
s30_naive = series[-56:-28].to_numpy()
s365_naive = series[-364:-336].to_numpy()
print(' Naive MAE: %.4f' % (np.mean(np.abs(s1_naive - test_y))))
print(' s7-Naive MAE: %.4f' % (np.mean(np.abs(s7_naive - test_y))))
print(' s30-Naive MAE: %.4f' % (np.mean(np.abs(s30_naive - test_y))))
print('s365-Naive MAE: %.4f' % (np.mean(np.abs(s365_naive - test_y))))
print(' LightGBM MAE: %.4f' % (np.mean(np.abs(forecast - test_y))))
plt.rcParams['figure.figsize'] = [plot_x_size, plot_y_size]
series[-test_size:].plot(label = 'True')
plt.plot(forecast, label = 'Forecast')
plt.legend()
plt.show()
###Output
Naive MAE: 698.0000
s7-Naive MAE: 372.2857
s30-Naive MAE: 330.8214
s365-Naive MAE: 247.9286
LightGBM MAE: 200.5037
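###Markdown
As an aside, the grid search with repeated k-fold cross validation mentioned above can be sketched as follows. This is only an illustration -- the parameter grid below is hypothetical and is not the search that produced the settings used in this notebook.
###Code
from sklearn.model_selection import GridSearchCV, RepeatedKFold
# hypothetical, deliberately small grid purely for illustration
param_grid = {
    'max_depth': [3, 4, 6],
    'learning_rate': [0.05, 0.1]
}
cv = RepeatedKFold(n_splits = 10, n_repeats = 3, random_state = 123)
search = GridSearchCV(lgb.LGBMRegressor(n_estimators = 500),
                      param_grid,
                      scoring = 'neg_mean_absolute_error',
                      cv = cv,
                      n_jobs = -1)
# search.fit(train_x, train_y)
# print(search.best_params_, search.best_score_)
###Output
_____no_output_____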
###Markdown
Tuning Window SizeHow does our metric change as we extend the window size?
###Code
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedKFold
params = {
'n_estimators': 2000,
'max_depth': 4,
'num_leaves': 2**4,
'learning_rate': 0.1,
'boosting_type': 'dart'
}
windows = [7, 30, 180, 365, 545, 730]
results = []
names = []
for w in windows:
window_size = w
train_x, train_y = create_xy(train_series, window_size, prediction_horizon)
train_y = train_y.flatten()
cv = RepeatedKFold(n_splits = 10, n_repeats = 3, random_state = 123)
scores = cross_val_score(lgb.LGBMRegressor(**params), train_x, train_y, scoring = 'neg_mean_absolute_error', cv = cv, n_jobs = -1)
results.append(scores)
names.append(w)
print('%3d --- MAE: %.3f (%.3f)' % (w, np.mean(scores), np.std(scores)))
plt.rcParams['figure.figsize'] = [plot_x_size, 5]
plt.boxplot(results, labels = names, showmeans = True)
plt.show()
###Output
7 --- MAE: -333.105 (21.291)
30 --- MAE: -307.008 (21.648)
180 --- MAE: -291.474 (22.537)
365 --- MAE: -275.644 (17.895)
545 --- MAE: -277.332 (20.982)
730 --- MAE: -275.664 (23.006)
###Markdown
4. Multi-Step PredictionSuppose we were interested in forecasting the next $n$-days instead of just the next day.There are several approaches we can take to solve this problem.
###Code
### HYPERPARAMETERS ###
window_size = 365
prediction_horizon = 1
### TRAIN VAL SPLIT ###
test_size = 28
split_time = len(series) - test_size
train_series = series[:split_time]
test_series = series[split_time - window_size:]
train_x, train_y = create_xy(train_series, window_size, prediction_horizon)
test_x, test_y = create_xy(test_series, window_size, prediction_horizon)
train_y = train_y.flatten()
test_y = test_y.flatten()
###Output
_____no_output_____
###Markdown
Recursive ForecastingIn recursive forecasting, we first train a one-step model then generate a multi-step forecast by recursively feeding our predictions back into the model.
###Code
params = {
'n_estimators': 2000,
'max_depth': 4,
'num_leaves': 2**4,
'learning_rate': 0.1,
'boosting_type': 'dart'
}
model = lgb.LGBMRegressor(first_metric_only = True, **params)
model.fit(train_x, train_y,
eval_metric = 'l1',
eval_set = [(test_x, test_y)],
#early_stopping_rounds = 10,
verbose = 0)
recursive_x = test_x[0, :]
forecast_ms = []
for i in range(test_x.shape[0]):
pred = model.predict(recursive_x.reshape((1, recursive_x.shape[0])))
recursive_x = np.append(recursive_x[1:], pred)
forecast_ms.append(pred)
forecast_ms_rec = np.asarray(forecast_ms).flatten()
forecast_os = model.predict(test_x)
print(' One-Step MAE: %.4f' % (np.mean(np.abs(forecast_os - test_y))))
print('Multi-Step MAE: %.4f' % (np.mean(np.abs(forecast_ms_rec - test_y))))
plt.rcParams['figure.figsize'] = [plot_x_size, plot_y_size]
series[-test_size:].plot(label = 'True')
plt.plot(forecast_ms_rec, label = 'Forecast Multi-Step')
plt.plot(forecast_os, label = 'Forecast One-Step')
plt.legend()
plt.show()
###Output
One-Step MAE: 200.5037
Multi-Step MAE: 214.8020
###Markdown
Direct ForecastingIn direct forecasting, we train $n$ independent models and generate a multi-step forecast by concatenating the $n$ predictions.For this implementation, we need to create a new $(X,Y)$ dataset, where $Y$ is now a vector of $n$ values.
###Code
### HYPERPARAMETERS ###
window_size = 365
prediction_horizon = 28
### TRAIN VAL SPLIT ###
test_size = 28
split_time = len(series) - test_size
train_series = series[:split_time]
test_series = series[split_time - window_size:]
train_x, train_y = create_xy(train_series, window_size, prediction_horizon)
test_x, test_y = create_xy(test_series, window_size, prediction_horizon)
from sklearn.multioutput import MultiOutputRegressor
model = MultiOutputRegressor(lgb.LGBMRegressor(), n_jobs = -1)
model.fit(train_x, train_y)
forecast_ms_dir = model.predict(test_x)
print(' One-Step MAE: %.4f' % (np.mean(np.abs(forecast_os - test_y))))
print('Multi-Step MAE: %.4f' % (np.mean(np.abs(forecast_ms_dir - test_y))))
plt.rcParams['figure.figsize'] = [plot_x_size, plot_y_size]
series[-test_size:].plot(label = 'True')
plt.plot(forecast_ms_dir.T, label = 'Forecast Multi-Step')
plt.plot(forecast_os, label = 'Forecast One-Step')
plt.legend()
plt.show()
###Output
One-Step MAE: 200.5037
Multi-Step MAE: 233.6326
###Markdown
Single-Shot ForecastingIn single-shot forecasting, we create a model that attempts to predict all $n$-steps simultaneously.Unfortunately, LightGBM (tree-based methods in general) does not support multi-output models. Forecast CombinationAn easy way to improve forecast accuracy is to use several different methods on the same time series, and to average the resulting forecasts.
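As an aside, a model that natively supports multi-output targets could still produce all 28 steps in a single shot -- a minimal sketch with scikit-learn's LinearRegression, shown for illustration only and not used in the evaluation below.
###Code
from sklearn.linear_model import LinearRegression
# train_y here has shape (n_samples, 28), so a multi-output regressor predicts all steps at once
single_shot_model = LinearRegression()
single_shot_model.fit(train_x, train_y)
forecast_ms_ss = single_shot_model.predict(test_x)
###Output
_____no_output_____
###Markdown
Now, back to the forecast combination: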
###Code
forecast_ms_comb = 0.5*forecast_ms_dir.flatten() + 0.5*forecast_ms_rec
print(' Recursive MAE: %.4f' % (np.mean(np.abs(forecast_ms_rec - test_y))))
print(' Direct MAE: %.4f' % (np.mean(np.abs(forecast_ms_dir - test_y))))
print('Combination MAE: %.4f' % (np.mean(np.abs(forecast_ms_comb - test_y))))
series[-test_size:].plot(label = 'True')
plt.plot(forecast_ms_comb, label = 'Forecast Combination')
plt.show()
###Output
Recursive MAE: 214.8020
Direct MAE: 233.6326
Combination MAE: 217.0313
###Markdown
5. Feature ImportanceOne advantage of GBM models is that they can generate feature importance metrics based on the quality of the splits (or information gain).
###Code
### HYPERPARAMETERS ###
window_size = 365
prediction_horizon = 1
### TRAIN VAL SPLIT ###
test_size = 28
split_time = len(series) - test_size
train_series = series[:split_time]
test_series = series[split_time - window_size:]
train_x, train_y = create_xy(train_series, window_size, prediction_horizon)
test_x, test_y = create_xy(test_series, window_size, prediction_horizon)
train_y = train_y.flatten()
test_y = test_y.flatten()
params = {
'n_estimators': 2000,
'max_depth': 4,
'num_leaves': 2**4,
'learning_rate': 0.1,
'boosting_type': 'dart'
}
model = lgb.LGBMRegressor(first_metric_only = True, **params)
feature_name_list = ['lag_' + str(i+1) for i in range(window_size)]
model.fit(train_x, train_y,
eval_metric = 'l1',
eval_set = [(test_x, test_y)],
#early_stopping_rounds = 10,
feature_name = feature_name_list,
verbose = 0)
plt.rcParams['figure.figsize'] = [5, 5]
lgb.plot_importance(model, max_num_features = 15, importance_type = 'split')
plt.show()
###Output
_____no_output_____
|
notebooks/animation_inference_playground.ipynb
|
###Markdown
SAM: Animation Inference Playground
###Code
import os
os.chdir('/content')
CODE_DIR = 'SAM'
!git clone https://github.com/yuval-alaluf/SAM.git $CODE_DIR
!wget https://github.com/ninja-build/ninja/releases/download/v1.8.2/ninja-linux.zip
!sudo unzip ninja-linux.zip -d /usr/local/bin/
!sudo update-alternatives --install /usr/bin/ninja ninja /usr/local/bin/ninja 1 --force
os.chdir(f'./{CODE_DIR}')
from argparse import Namespace
import os
import sys
import pprint
import numpy as np
from PIL import Image
import torch
import torchvision.transforms as transforms
sys.path.append(".")
sys.path.append("..")
from datasets.augmentations import AgeTransformer
from utils.common import tensor2im
from models.psp import pSp
EXPERIMENT_TYPE = 'ffhq_aging'
###Output
_____no_output_____
###Markdown
Step 1: Download Pretrained ModelAs part of this repository, we provide our pretrained aging model.We'll download the model for the selected experiment and save it to the folder `../pretrained_models`.
###Code
def get_download_model_command(file_id, file_name):
""" Get wget download command for downloading the desired model and save to directory ../pretrained_models. """
current_directory = os.getcwd()
save_path = os.path.join(os.path.dirname(current_directory), "pretrained_models")
if not os.path.exists(save_path):
os.makedirs(save_path)
url = r"""wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id={FILE_ID}' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id={FILE_ID}" -O {SAVE_PATH}/{FILE_NAME} && rm -rf /tmp/cookies.txt""".format(FILE_ID=file_id, FILE_NAME=file_name, SAVE_PATH=save_path)
return url
MODEL_PATHS = {
"ffhq_aging": {"id": "1XyumF6_fdAxFmxpFcmPf-q84LU_22EMC", "name": "sam_ffhq_aging.pt"}
}
path = MODEL_PATHS[EXPERIMENT_TYPE]
download_command = get_download_model_command(file_id=path["id"], file_name=path["name"])
!{download_command}
###Output
_____no_output_____
###Markdown
Step 3: Define Inference Parameters Below we have a dictionary defining parameters such as the path to the pretrained model to use and the path to the image to perform inference on.While we provide default values to run this script, feel free to change as needed.
###Code
EXPERIMENT_DATA_ARGS = {
"ffhq_aging": {
"model_path": "../pretrained_models/sam_ffhq_aging.pt",
"transform": transforms.Compose([
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
}
}
EXPERIMENT_ARGS = EXPERIMENT_DATA_ARGS[EXPERIMENT_TYPE]
###Output
_____no_output_____
###Markdown
Step 4: Load Pretrained ModelWe assume that you have downloaded the pretrained aging model and placed it in the path defined above.
###Code
model_path = EXPERIMENT_ARGS['model_path']
ckpt = torch.load(model_path, map_location='cpu')
opts = ckpt['opts']
pprint.pprint(opts)
# update the training options
opts['checkpoint_path'] = model_path
opts = Namespace(**opts)
net = pSp(opts)
net.eval()
net.cuda()
print('Model successfully loaded!')
###Output
_____no_output_____
###Markdown
Utils for Generating MP4
###Code
import imageio
from tqdm import tqdm
import matplotlib
from IPython.display import HTML
from base64 import b64encode
matplotlib.use('module://ipykernel.pylab.backend_inline')
%matplotlib inline
def generate_mp4(out_name, images, kwargs):
writer = imageio.get_writer(out_name + '.mp4', **kwargs)
for image in images:
writer.append_data(image)
writer.close()
def run_on_batch_to_vecs(inputs, net):
_, result_batch = net(inputs.to("cuda").float(), return_latents=True, randomize_noise=False, resize=False)
return result_batch.cpu()
def get_result_from_vecs(vectors_a, vectors_b, alpha):
results = []
for i in range(len(vectors_a)):
cur_vec = vectors_b[i] * alpha + vectors_a[i] * (1 - alpha)
res = net(cur_vec.cuda(), randomize_noise=False, input_code=True, input_is_full=True, resize=False)
results.append(res[0])
return results
def show_mp4(filename, width=400):
mp4 = open(filename + '.mp4', 'rb').read()
data_url = "data:video/mp4;base64," + b64encode(mp4).decode()
display(HTML("""
<video width="%d" controls autoplay loop>
<source src="%s" type="video/mp4">
</video>
""" % (width, data_url)))
SEED = 42
np.random.seed(SEED)
img_transforms = EXPERIMENT_ARGS['transform']
n_transition = 25
kwargs = {'fps': 40}
save_path = "notebooks/animations"
os.makedirs(save_path, exist_ok=True)
#################################################################
# TODO: define your image paths here to be fed into the model
#################################################################
root_dir = 'notebooks/images'
ims = ['866', '1287', '2468']
im_paths = [os.path.join(root_dir, im) + '.jpg' for im in ims]
# NOTE: Please make sure the images are pre-aligned!
target_ages = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 90, 80, 70, 60, 50, 40, 30, 20, 10, 0]
age_transformers = [AgeTransformer(target_age=age) for age in target_ages]
for image_path in im_paths:
image_name = os.path.basename(image_path)
print(f'Working on image: {image_name}')
original_image = Image.open(image_path).convert("RGB")
input_image = img_transforms(original_image)
all_vecs = []
for idx, age_transformer in enumerate(age_transformers):
input_age_batch = [age_transformer(input_image.cpu()).to('cuda')]
input_age_batch = torch.stack(input_age_batch)
# get latent vector for the current target age amount
with torch.no_grad():
result_vec = run_on_batch_to_vecs(input_age_batch, net)
result_image = get_result_from_vecs([result_vec], result_vec, 0)[0]
all_vecs.append([result_vec])
images = []
for i in range(1, len(target_ages)):
alpha_vals = np.linspace(0, 1, n_transition).tolist()
for alpha in tqdm(alpha_vals):
result_image = get_result_from_vecs(all_vecs[i-1], all_vecs[i], alpha)[0]
output_im = tensor2im(result_image)
images.append(np.array(output_im))
animation_path = os.path.join(save_path, f"{image_name}_animation")
generate_mp4(animation_path, images, kwargs)
show_mp4(animation_path)
###Output
_____no_output_____
###Markdown
SAM: Animation Inference Playground
###Code
import os
os.chdir('/content')
CODE_DIR = 'SAM'
!git clone https://github.com/yuval-alaluf/SAM.git $CODE_DIR
!wget https://github.com/ninja-build/ninja/releases/download/v1.8.2/ninja-linux.zip
!sudo unzip ninja-linux.zip -d /usr/local/bin/
!sudo update-alternatives --install /usr/bin/ninja ninja /usr/local/bin/ninja 1 --force
os.chdir(f'./{CODE_DIR}')
from argparse import Namespace
import os
import sys
import pprint
import numpy as np
from PIL import Image
import torch
import torchvision.transforms as transforms
sys.path.append(".")
sys.path.append("..")
from datasets.augmentations import AgeTransformer
from utils.common import tensor2im
from models.psp import pSp
EXPERIMENT_TYPE = 'ffhq_aging'
###Output
_____no_output_____
###Markdown
Step 1: Download Pretrained ModelAs part of this repository, we provide our pretrained aging model.We'll download the model for the selected experiment and save it to the folder `../pretrained_models`.
###Code
def get_download_model_command(file_id, file_name):
""" Get wget download command for downloading the desired model and save to directory ../pretrained_models. """
current_directory = os.getcwd()
save_path = os.path.join(os.path.dirname(current_directory), "pretrained_models")
if not os.path.exists(save_path):
os.makedirs(save_path)
url = r"""wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id={FILE_ID}' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id={FILE_ID}" -O {SAVE_PATH}/{FILE_NAME} && rm -rf /tmp/cookies.txt""".format(FILE_ID=file_id, FILE_NAME=file_name, SAVE_PATH=save_path)
return url
MODEL_PATHS = {
"ffhq_aging": {"id": "1XyumF6_fdAxFmxpFcmPf-q84LU_22EMC", "name": "sam_ffhq_aging.pt"}
}
path = MODEL_PATHS[EXPERIMENT_TYPE]
download_command = get_download_model_command(file_id=path["id"], file_name=path["name"])
!{download_command}
###Output
_____no_output_____
###Markdown
Step 3: Define Inference Parameters Below we have a dictionary defining parameters such as the path to the pretrained model to use and the path to the image to perform inference on.While we provide default values to run this script, feel free to change as needed.
###Code
EXPERIMENT_DATA_ARGS = {
"ffhq_aging": {
"model_path": "../pretrained_models/sam_ffhq_aging.pt",
"transform": transforms.Compose([
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
}
}
EXPERIMENT_ARGS = EXPERIMENT_DATA_ARGS[EXPERIMENT_TYPE]
###Output
_____no_output_____
###Markdown
Step 4: Load Pretrained ModelWe assume that you have downloaded the pretrained aging model and placed it in the path defined above.
###Code
model_path = EXPERIMENT_ARGS['model_path']
ckpt = torch.load(model_path, map_location='cpu')
opts = ckpt['opts']
pprint.pprint(opts)
# update the training options
opts['checkpoint_path'] = model_path
opts = Namespace(**opts)
net = pSp(opts)
net.eval()
net.cuda()
print('Model successfully loaded!')
###Output
_____no_output_____
###Markdown
Utils for Generating MP4
###Code
import imageio
from tqdm import tqdm
import matplotlib
matplotlib.use('module://ipykernel.pylab.backend_inline')
%matplotlib inline
def generate_mp4(out_name, images, kwargs):
writer = imageio.get_writer(out_name + '.mp4', **kwargs)
for image in images:
writer.append_data(image)
writer.close()
def run_on_batch_to_vecs(inputs, net):
_, result_batch = net(inputs.to("cuda").float(), return_latents=True, randomize_noise=False, resize=False)
return result_batch.cpu()
def get_result_from_vecs(vectors_a, vectors_b, alpha):
results = []
for i in range(len(vectors_a)):
cur_vec = vectors_b[i] * alpha + vectors_a[i] * (1 - alpha)
res = net(cur_vec.cuda(), randomize_noise=False, input_code=True, input_is_full=True, resize=False)
results.append(res[0])
return results
SEED = 42
np.random.seed(SEED)
img_transforms = EXPERIMENT_ARGS['transform']
n_transition = 25
kwargs = {'fps': 40}
save_path = "notebooks/animations"
os.makedirs(save_path, exist_ok=True)
#################################################################
# TODO: define your image paths here to be fed into the model
#################################################################
root_dir = 'notebooks/images'
ims = ['866', '1287', '2468']
im_paths = [os.path.join(root_dir, im) + '.jpg' for im in ims]
# NOTE: Please make sure the images are pre-aligned!
target_ages = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 90, 80, 70, 60, 50, 40, 30, 20, 10, 0]
age_transformers = [AgeTransformer(target_age=age) for age in target_ages]
for image_path in im_paths:
image_name = os.path.basename(image_path)
print(f'Working on image: {image_name}')
original_image = Image.open(image_path).convert("RGB")
input_image = img_transforms(original_image)
all_vecs = []
for idx, age_transformer in enumerate(age_transformers):
input_age_batch = [age_transformer(input_image.cpu()).to('cuda')]
input_age_batch = torch.stack(input_age_batch)
# get latent vector for the current target age amount
with torch.no_grad():
result_vec = run_on_batch_to_vecs(input_age_batch, net)
result_image = get_result_from_vecs([result_vec],result_vec,0)[0]
all_vecs.append([result_vec])
images = []
for i in range(1, len(target_ages)):
alpha_vals = np.linspace(0, 1, n_transition).tolist()
for alpha in tqdm(alpha_vals):
result_image = get_result_from_vecs(all_vecs[i-1], all_vecs[i], alpha)[0]
output_im = tensor2im(result_image)
images.append(np.array(output_im))
animation_path = os.path.join(save_path, f"{image_name}_animation")
generate_mp4(animation_path, images, kwargs)
###Output
_____no_output_____
|
CustomAcq.ipynb
|
###Markdown
BO
###Code
def PI(mu,sigma,**kwargs):
import scipy as scp
vmax = kwargs.get('vmax')
    vmin = kwargs.get('vmin')
it = kwargs.get('it')
xi = 1.*np.sqrt(np.log(it+1.)/(it+1.))
Z = (mu-vmax-xi)/sigma
return scp.stats.norm.cdf(Z)
# %%timeit
BO = bayesopt.BayesOpt(f=f,
initial_input=np.array([0.]),
kernel=bayesopt.kernel.MaternKernel(),
acq=PI,
acq_optim=bayesopt.acquisition_optimizer.Acquisition_Grid_Optimizer(bounds=[0,18],step=0.1),
maximize=True,
)
BO.run_optim(20)
X = np.arange(0,18,0.2)
plt.plot(BO.param_history,BO.value_history,'o--')
plt.plot(BO.best_params,BO.best_value,'x')
plt.plot(X,f(X))
bayesopt.plot_history(BO)
def view_acqfunc(i=1):
X_obs,Y_obs = BO.param_history[:i+1],BO.value_history[:i+1]
gpr = GP.GPR(X_obs,Y_obs,alpha=BO.alpha,kernel=BO.kernel)
X = np.linspace(0,18,100)
Y_mu,Y_std = gpr.posterior_predictive(X,return_std=True)
acq_val = BO.acq(Y_mu,Y_std,vmax=max(BO.value_history[:i+1]),vmin=0,it=i)
# 95% confidence interval
uncertainty = 1.96 * Y_std
plt.figure(figsize=[8,8])
plt.subplot(211)
plt.plot(X,Y_mu)
plt.fill_between(X.ravel(), (Y_mu + uncertainty).ravel(), (Y_mu - uncertainty).ravel(), alpha=0.1)
plt.plot(X_obs,Y_obs,'x')
plt.subplot(212)
plt.plot(X,acq_val)
for i in range(20):
view_acqfunc(i)
###Output
_____no_output_____
###Markdown
use original kernel and acq function
###Code
dists = bayesopt.utils.pairwise(bayesopt.metric.euclid_distance,square=True)
def kernel(x,y):
return np.exp(-dists(x,y))
print(kernel(np.array([[2,3],[1,2]]),np.array([[2,3],[1,2],[1,1]])))
def acq(mu,sigma,*args,**kwargs):
    '''
    mu, sigma, it=it, vmax=vmax, vmin=vmin are passed in; these values are supplied by the acquisition optimizer.
    '''
it = kwargs.get('it',5.)
return -mu+sigma*5*np.sqrt(np.log(it+1)/(it+1))
BO = bayesopt.BayesOpt(f=f,
initial_input=np.array([0.]),
kernel=bayesopt.kernel.MaternKernel(),
acq=acq,
acq_optim=bayesopt.acquisition_optimizer.Acquisition_L_BFGS_B_Optimizer(bounds=[0,15]))
BO.run_optim(20)
X = np.arange(0,18,0.2)
plt.plot(BO.param_history,BO.value_history,'o--')
plt.plot(BO.best_params,BO.best_value,'x')
plt.plot(X,f(X))
bayesopt.plot_history(BO)
###Output
_____no_output_____
###Markdown
memo: acquisition optimizer
###Code
def AcquisitionSLSQPOptimizer(gpr, acq, it, bounds, n_trial=5):
    ## gpr, acq and it must be accepted as arguments.
    ## bounds is either fixed in advance with functools.partial or held as an internal variable.
bounds = np.atleast_2d(bounds)
vmax = np.max(gpr.Y_train)
vmin = np.min(gpr.Y_train)
ndim = len(bounds)
loc = None
value = None
import scipy.optimize
def Obj(x):
mu,sigma = gpr.posterior_predictive(np.atleast_2d(x),return_std=True)
return -1.*acq(mu,sigma, it=it, vmax=vmax, vmin=vmin).ravel()
x_seeds = onp.random.uniform(bounds[:,0],bounds[:,1], size=(n_trial,ndim))
for xtry in x_seeds:
res = scipy.optimize.fmin_slsqp(Obj,
x0=xtry,
bounds=bounds,
iprint=0,
full_output=True,
iter=100)
if (loc is None) or (res[1] < value):
loc = res[0]
value = res[1]
return loc, value
###Output
_____no_output_____
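###Markdown
As the comments above suggest, the bounds could be fixed ahead of time with `functools.partial`, leaving a callable that only takes `(gpr, acq, it)`. A minimal sketch -- whether `BayesOpt` accepts such a bare callable as `acq_optim` depends on the `bayesopt` implementation, so treat this as an assumption.
###Code
from functools import partial
# bind bounds (and the number of random restarts) in advance; the result is called as f(gpr, acq, it)
slsqp_acq_optim = partial(AcquisitionSLSQPOptimizer, bounds=[0, 18], n_trial=5)
# slsqp_acq_optim(gpr, acq, it) would then return (loc, value)
###Output
_____no_output_____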
###Markdown
memo: allow a terminate function to be set. The inputs are passed in the order it, param_history, value_history.
###Code
def terminate_function(it, param_history, value_history):
if value_history.min()<1e-1:
return True
else:
return False
###Output
_____no_output_____
|
_infty/2018/01/jp/02.ipynb
|
###Markdown
02. Numbers, Strings, Booleans and Sets [Inference Theory 1](https://lamastex.github.io/scalable-data-science/infty/2018/01/)©2018 Raazesh Sainudiin. [Attribution 4.0 International (CC BY 4.0)](https://creativecommons.org/licenses/by/4.0/) Numbers and Arithmetic OperationsWe will start by showing you some of the basic numeric capabilities of SageMath.A worksheet cell is the area enclosed by a gray rectangle. You may type any expression you want to evaluate into a worksheet cell. We have already put some expressions into this worksheet.When you are in a cell you can evaluate the expression in it by pressing or just by clicking the evaluate button below the cell. To start with, we are going to be using SAGE like a hand-held calculator. Let's perform the basic arithmetic operations of addition, subtraction, multiplication, division, exponentiation, and remainder over the three standard number systems: Integers denoted by $\mathbb{Z}$, Rational Numbers denoted by $\mathbb{Q}$ and Real Numbers denoted by $\mathbb{R}$. Let us recall the real number line and the basics of number systems next.
###Code
def showURL(url, ht=500):
"""Return an IFrame of the url to show in notebook with height ht"""
from IPython.display import IFrame
return IFrame(url, width='95%', height=ht)
showURL('https://en.wikipedia.org/wiki/Number',400)
###Output
_____no_output_____
###Markdown
The most basic numbers are called natural numbers and they are denoted by $\mathbb{N} :=\{0, 1,2,3,\ldots\}$. See [https://en.wikipedia.org/wiki/Natural_number](https://en.wikipedia.org/wiki/Natural_number).> The natural numbers are the basis from which many other number sets may be built by extension: the integers, by including (if not yet in) the neutral element 0 and an additive inverse (โn) for each nonzero natural number n; the rational numbers, by including a multiplicative inverse (1/n) for each nonzero integer n (and also the product of these inverses by integers); the real numbers by including with the rationals the limits of (converging) Cauchy sequences of rationals; the complex numbers, by including with the real numbers the unresolved square root of minus one (and also the sums and products thereof); and so on. These chains of extensions make the natural numbers canonically embedded (identified) in the other number systems.
###Code
showURL("https://en.wikipedia.org/wiki/Natural_number#Notation",300)
###Output
_____no_output_____
###Markdown
Let us get our fingers dirty with some numerical operations in SageMath. Note that anything after a '#' symbol is a comment - comments are ignored by SAGE but help programmers to know what's going on. Example 1: Integer ArithmeticTry evaluating the cell containing 1+2 below by placing the cursor in the cell and pressing Shift-Enter.
###Code
1+2 # one is being added to 2
###Output
_____no_output_____
###Markdown
Now, modify the above expression and evaluate it again. Try 3+4, for instance.
###Code
3-4 # subtracting 4 from 3
###Output
_____no_output_____
###Markdown
The multiplication operator is `*`, the division operator is `/`.
###Code
2*6 # multiplying 2 by 6
15/5 # dividing 15 by 5
type(1)
###Output
_____no_output_____
###Markdown
The exponentiation operator is `^`.
###Code
2^3 # exponentiating 2 by 3, i.e., raising 2 to the third power
###Output
_____no_output_____
###Markdown
However, Python's exponentiation operator `**` also works.
###Code
2**3
###Output
_____no_output_____
###Markdown
Being able to find the remainder after a division is surprisingly useful in computer programming.
###Code
11%3 # remainder after 11 is divided by 3; i.e., 11=3*3+2
###Output
_____no_output_____
###Markdown
Another way of referring to this is 11 modulus 3, which evaluates to 2. Here `%` is the modulus operator. You tryTry typing in and evaluating some expressions of your own. You can get new cells above or below an existing cell by clicking 'Insert' in the menu above and 'Insert Cell Above' or 'Insert Cell below'. You can also place the cursor at an existing cell and click `+` icon above to get a new cell below. What happens if you put space between the characters in your expression, like:`1 + 2` instead of `1+2`?. Example 2: Operator Precedence for Evaluating Arithmetic ExpressionsSometimes we want to perform more than one arithmetic operation with some given integers. Suppose, we want to - "divide 12 by 4 then add the product of 2 and 3 and finally subtract 1." Perhaps this can be achieved by evaluating the expression "12/4+2*3-1"?But could that also be interpreted as - "divide 12 by the sum of 4 and 2 and multiply the result by the difference of 3 and 1"?In programming, there are rules for the order in which arithmetic operations are carried out. This is called the order of precedence.The basic arithmetic operations are: +, -, *, %, /, ^. The order in which operations are evaluated are as follows:- ^ Exponents are evaluated right to left- *, %, / Then multiplication, remainder and division operations are evaluated left to right- +, - Finally, addition and subtraction are evaluated left to rightWhen operators are at the same level in the list above, what matters is the evaluation order (right to left, or left to right). Operator precedence can be forced using parenthesis.
###Code
showURL("https://en.wikipedia.org/wiki/Order_of_operations", 300)
(12/4) + (2*3) - 1 # divide 12 by 4 then add the product of 2 and 3 and finally subtract 1
12/4+2*3-1 # due to operator precedence this expression evaluates identically to the parenthesized expression above
###Output
_____no_output_____
###Markdown
Operator precedence can be forced using nested parentheses. When our expression has nested parenthesis, i.e., one pair of parentheses inside another pair, the expression inside the inner-most pair of parentheses is evaluated first.
###Code
(12/(4+2)) * (3-1) # divide 12 by the sum of 4 and 2 and multiply the result by the difference of 3 and 1
###Output
_____no_output_____
###Markdown
You tryTry writing an expression which will subtract 3 from 5 and then raise the result to the power of 3. Find out for yourself what we mean by the precedence for exponentiation (^) being from right to left: - What do you think the expression `3^3^2` would evaluate to? - Is it the same as `(3^3)^2`, i.e., `27` squared, or - `3^(3^2)`, i.e., `3` raised to the power `9`? Try typing in the different expressions to find out: Find an expression which will add the squares of four numbers together and then divide that sum of squares by 4. Find what the precedence is for the modulus operator `%` that we discussed above: try looking at the difference between the results for `10%2^2` and `10%2*2` (or `10^2+2`). Can you see how SageMath is interpreting your expressions? Note that when you have two operators at the same precedence level (like `%` and `*`), then what matters is the order - left to right or right to left. You will see this when you evaluate `10%2*2`. Does putting spaces in your expression make any difference? Using parenthesis or white spaces can improve readability a lot! So be generous with them.
###Code
10^2+2^8-4
10^2 + 2^8 -4
(((10^2) + (2^8)) - 4)
###Output
_____no_output_____
###Markdown
The lesson to learn is that it is always good to use the parentheses: you will make it clear to someone reading your code what you mean to happen as well as making sure that the computer actually does what you mean it to!Try this 10 minutes-long videos to get some practice if you are really rusty with order of operations:* [Khan Academy Order of operations - https://www.youtube.com/watch?v=ClYdw4d4OmA](https://www.youtube.com/watch?v=ClYdw4d4OmA) Example 3: Rational ArithmeticSo far we have been dealing with integers. Integers are a type in SAGE. Algebraically speaking, integers, rational numbers and real numbers form a *ring*. This is something you will learn in detail in a maths course in Group Theory or Abstract Algebra, but let's take a quick peek at the definition of a ring.
###Code
showURL("https://en.wikipedia.org/wiki/Ring_(mathematics)#Definition_and_illustration",400)
type(1) # find the data type of 1
###Output
_____no_output_____
###Markdown
The output above tells us that `1` is of type `sage.rings.integer.Integer`.
###Code
showURL("https://en.wikipedia.org/wiki/Integer",400)
###Output
_____no_output_____
###Markdown
However, life with only integers denoted by $\mathbb{Z} := \{\ldots,-3,-2,-1,0,1,2,3,\ldots\}$ is a bit limited. What about values like $1/2$ or $\frac{1}{2}$?This brings us to the rational numbers denoted by $\mathbb{Q}$.
###Code
showURL("https://en.wikipedia.org/wiki/Rational_number",400)
type(1/2) # data type of 1/2 is a sage.rings.rational.Rational
###Output
_____no_output_____
###Markdown
Try evaluating the cell containing `1/2 + 2` below.
###Code
1/2 + 2 # add one half to 2 or four halves to obtain the rational number 5/2 or five halves
###Output
_____no_output_____
###Markdown
SageMath seems to have done rational arithmetic for us when evaluating the above expression. Next, modify the expression in the cell below and evaluate it again. Try `1/3+2/4`, for instance.
###Code
1/2 + 1/3
###Output
_____no_output_____
###Markdown
You can do arithmetic with rationals just as we did with integers.
###Code
3/4 - 1/4 # subtracting 3/4 from 1/4
1/2 * 1/2 # multiplying 1/2 by 1/2
(2/5) / (1/5) # dividing 2/5 by 1/5
(1/2)^3 # exponentiating 1/2 by 3, i.e., raising 1/2 to the third power
###Output
_____no_output_____
###Markdown
You tryWrite an expression which evaluates to `1` using the rationals `1/3` and `1/12`, some integers, and some of the arithmetical operators - there are lots of different expressions you can choose, just try a few. What does SageMath do with something like `1/1/5`? Can you see how this is being interpreted? What should we do if we really want to evaluate `1` divided by `1/5`? Try adding some rationals and some integers together - what type is the result? Example 4: Real Arithmetic (multi-precision floating-point arithmetic)Recall that real numbers denoted by $\mathbb{R}$ include natural numbers ($\mathbb{N}$), integers ($\mathbb{Z}$), rational numbers ($\mathbb{Q}$) and various types of irrational numbers like:- the square root of 2 or $\sqrt{2}$- [Pi](https://en.wikipedia.org/wiki/Pi) or $\pi$ and - Euler's number $e$ and - [EulerโMascheroni constant](https://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant) $\gamma$. Real numbers can be thought of as all the numbers in the real line between negative infinity and positive infinity. Real numbers are represented in decimal format, for e.g. 234.4677878.
###Code
showURL("https://en.wikipedia.org/wiki/Real_number#Definition",400)
###Output
_____no_output_____
###Markdown
We can do arithmetic with real numbers, actually with [http://www.mpfr.org/](http://www.mpfr.org/)'s multiprecision [floating-point numbers](http://en.wikipedia.org/wiki/Floating_point), and can combine them with integer and rational types in SageMath. *Technical note:* Computers can be made to exactly compute in integer and rational arithmetic. But, because computers with finite memory (all computers today!) cannot represent the [uncountably infinitely many real numbers](http://en.wikipedia.org/wiki/Cantor%27s_diagonal_argument), they can only mimic or approximate arithmetic over real numbers using finitely many computer-representable floating-point numbers.See [SageMath Quick Start on Numerical Analysis](http://doc.sagemath.org/html/en/prep/Quickstarts/NumAnalysis.html) to understand SageMath's multiprecision real arithmetic.For now, let's compare the results of evaluating the expressions below to the equivalent expressions using rational numbers above.
###Code
type(0.5) # data type of 0.5 is a sage.rings.real_mpfr.RealLiteral
RR # Real Field with the default 53 bits of precision
RR(0.5) # RR(0.5) is the same as 0.5 in SageMath
0.5 + 2 # one half as 0.5 is being added to 2 to obtain the real number 2.500..0 in SageMath
0.75 - 0.25 # subtracting 0.75 from 0.25 is the same as subtracting 0.75 from 1/4
0.5 * 0.5 # multiplying 0.5 by 0.5 is the same as 1/2 * 1/2
(2 / 5.0) / 0.2 # dividing 2/5. by 0.2 is the same as (2/5) / (1/5)
0.5^3.0 # exponentiating 0.5 by 3.0 is the same as (1/2)^3
###Output
_____no_output_____
###Markdown
You tryFind the type of `1/2`. Try a few different ways of getting the same result as typing `((((1/5) / (1/10)) * (0.1 * 2/5) + 4/100))*5/(3/5)` - this exact expression has already been put in for you in the cell below you could try something just using floating point numbers. Then see how important the parentheses are around rationals when you have an expression like this - try taking some of the parenthesis out and just play with complex expressions like these to get familiar.
###Code
((((1/5) / (1/10)) * (0.1 * 2/5) + 4/100))*5/(3/5)
((((1/5) / (1/10)) * (1/10 * 2/5) + 4/100))*5/(3/5)
###Output
_____no_output_____
###Markdown
Example 5: Variables and assignments of numbers and expressionsLoosely speaking one can think of a *variable* as a way of referring to a memory location used by a computer program. A variable is a symbolic name for this physical location. This memory location contains values, like numbers, text or more complicated types and crucially *what is contained in a variable can change* based on operations we do to it.In SageMath, the symbol `=` is the *assignment operator*. You can assign a numerical value to a *variable* in SageMath using the assignment operator. This is a good way to store values you want to use or modify later. (If you have programmed before using a a language like C or C++ or Java, you'll see that SageMath is a bit different because in SageMath you don't have to say what type of value is going to be assigned to the variable.)Feel free to take a deeper dive into the computer science concept of assignment.
###Code
a = 1 # assign 1 to a variable named a
a # disclose a - you need to explicitly do this!
###Output
_____no_output_____
###Markdown
Just typing the name of a variable to get the value works in the SageMath Notebook, but if you are writing a program and you want to output the value of a variable, you'll probably want to use something like the print command.
###Code
print(a)
b = 2
c = 3
print a, b, c # print out the values of a and b and c
x=2^(1/2)
x
type(x) # x is a sage symbolic expression
###Output
_____no_output_____
###Markdown
Many of the commands in SageMath/Python are "methods" of objects.That is, we access them by typing:- the name of the mathematical object,- a dot/period,- the name of the method, and- parentheses (possibly with an argument).This is a huge advantage, once you get familiar with it, because it allows you to do only the things that are possible, and all such things. See [SageMath programming guide for more details on this](http://doc.sagemath.org/html/en/prep/Programming.html#methods-and-dot-notation).Let's try to hit the Tab button after the `.` following `x` below to view all available methods for `x` which is currently `sqrt(2)`.
###Code
x. # hit the Tab button after the '.' following 'x'
help(x.n)
# we can use ? after a method to get brief help
x.n(digits=10) # this gives a numerical approximation for x
s = 1; t = 2; u = 3;
print s + t + u
f=(5-3)^(6/2)+3*(7-2) # assign the expression to f
f # disclose f
type(f)
###Output
_____no_output_____
###Markdown
You tryTry assigning some values to some variables - you choose what values and you choose what variable names to use. See if you can print out the values you have assigned. You can reassign different values to variable names. Using SageMath you can also change the type of the values assigned to the variable (not all programming languages allow you to do this).
###Code
a = 1
print "Right now, a =", a, "and is of type", type(a) # using , and strings in double quotes print can be more flexible
a = 1/3 # reassign 1/3 to the variable a
print "Now, a =", a, "and is of type", type(a) # note the change in type
###Output
Right now, a = 1 and is of type <type 'sage.rings.integer.Integer'>
Now, a = 1/3 and is of type <type 'sage.rings.rational.Rational'>
###Markdown
You tryAssign the value `2` to a variable named `x`.On the next line down in the same cell, assign the value `3` to a variable named `y`.Then (on a third line) put in an expression which will evaluate `x + y` Now try reassigning a different value to x and re-evaluating x + y Example 6: StringsVariables can be strings (an not just numbers). Anything you put inside quote marks will be treated as a string by SageMath/Python.Strings as `str` and `unicode` are built-in [sequence types](https://docs.python.org/2/library/stdtypes.htmlsequence-types-str-unicode-list-tuple-bytearray-buffer-xrange) for storing strings of bytes and unicode-encoded characters and and operating over them.
###Code
myStr = "this is a string" # assign a string to the variable myStr
myStr # disclose myStr
type(myStr) # check the type for myStr
###Output
_____no_output_____
###Markdown
You can also create a string by enclosing them in single quotes or three consecutive single quotes. In SageMath/Python a character (represented by the `char` type in languages like C/C++/Scala) is just a string made up of one character.
###Code
myStr = 'this is a string' # assign a string to the variable myStr using single quotes
myStr # disclose myStr
###Output
_____no_output_____
###Markdown
You can assign values to more than one variable on the same line, by separating the assignment expressions with a semicolon `;`. However, it is usually best not to do this because it will make your code easier to read (it is hard to spot the other assignments on a single line after the first one).
###Code
myStr = '''this is a string''' # assign a string to the variable myStr using three consecutive single quotes
myStr # disclose myStr
###Output
_____no_output_____
###Markdown
Using triple single quotes is especially useful if your string has single or double quotes within it. Triple quotes are often used to create a `DocString` to document code in Python/SageMath.
###Code
myStrContainingQuotes = '''this string has "a double quoted sub-string" and some escaped characters: \,', - all OK!'''
myStrContainingQuotes
###Output
_____no_output_____
###Markdown
Str and unicode StringsIn Python/SageMath, we need to be extremely careful with strings.The type 'str' is actually a sequence of bytes while the unicode string of type `unicode` is a sequence of unicode characters (some of which can be more than a byte in size). See [this](http://pgbovine.net/unicode-python.htm) for a nice clarification of ASCII and unicode (utf-8) encoded strings. So, it is a good habit to convert strings from natural languages that are meant for processing into unicode strings using the `decode('utf-8')` method right away.
###Code
x = 'hi猫' # this is hi (each letter is encoded by one byte) followed by the Chinese character for cat (3 bytes)
type(x) # x is of type str = sequence of bytes in Python2 / SageMath
len(x) # this is a sequence of five hexadecimal numbers each requiring a byte to represent
###Output
_____no_output_____
###Markdown
Disclosing `x` below only shows the hexa-decimal numbers `68` `69` `e7` `8c` `ab`, but only `h` for `68` and `i` for `69` from [ASCII table](http://www.asciitable.com/), are displayed as characters here, while `\xe7\x8c\xab` are shown as hexadecimal numbers with prefix `\x` instead of the Chinese character for cat: 猫
###Code
x
print(x) # printing a string displays the desired if the display is unicode-compatible
###Output
hi猫
###Markdown
Generally it is safe to convert strings from natural languages to unicode in Python/SageMath.
###Code
y = x.decode('utf-8') # this decodes or converts the sequence of bytes to a sequence of unicode characters
type(y) # the type of y now is unicode
len(y) # now we have a sequence of just 3 unicode characters as we want
###Output
_____no_output_____
###Markdown
Disclosing `y` shows the two ASCII characters `h` and `i` and the Chinese cat character 猫, which is specified by the corresponding entry in the [utf-8 table](https://en.wikipedia.org/wiki/UTF-8).
###Code
y # output prepended by u shows it is a unicode sequence as opposed to a str which is a byte sequence
print y
###Output
hi猫
###Markdown
When programmatically processing sequences of unicode characters it is much safer to work with `repr` for the canonical string representation of the object.
###Code
?repr # gives the canonical string representation of the object
print repr(y)
print repr(y).decode('unicode_escape')
###Output
u'hi猫'
###Markdown
Pride and Prejudice as unicodeWe will explore frequencies of strings for the most downloaded book at [Project Gutenberg](http://www.gutenberg.org/ebooks/search/?sort_order=downloads) that publishes public domain books online.Currently, books published before 1923 are in the *public domain* - meaning anyone has the right to copy or use the text in any way. Pride and Prejudice by Jane Austen had the largest number of downloads and it's available from - [http://www.gutenberg.org/ebooks/1342](http://www.gutenberg.org/ebooks/1342).A quick exploration allows us to see the utf-encoded text [here](http://www.gutenberg.org/files/1342/1342-0.txt).For now, we will just show how to download the most popular book from the project and display its contents for processing down the road.
###Code
# this downloads the unicode text of the book from the right url we found at the Gutenberg Project
# and assigns it to a variable named prideAndPrejudiceRaw
from urllib import *
prideAndPrejudiceRaw = urlopen('http://www.gutenberg.org/files/1342/1342-0.txt').read().decode('utf-8')
prideAndPrejudiceRaw[0:1000] # just showing the first 1000 raw characters of the downloaded book as unicode
type(prideAndPrejudiceRaw) # this is a sequence of utf-8-encoded characters
len(prideAndPrejudiceRaw) # the length of the unicode string is about 700 thousand unicode characters
###Output
_____no_output_____
###Markdown
Next we will show how trivial it is to "read" all the chapters into SageMath/Python using these steps:- we use regular expressions via the `re` library to substitute all occurrences of white-space characters like one or more consecutive end-of-line, tabs, white space characters, etc with a single white space, - we split by 'Chapter ' into multiple chapters in a list- print the first 100 characters of each of the first few chapters(don't worry about the details now - we will revisit these in detail later)
###Code
myString = "strBlah"
myString
myString.split?
import re
# make a list of chapters
chapterList = re.sub('\\s+', ' ',prideAndPrejudiceRaw).split('Chapter ')[1:10]
for chapter in chapterList:
print repr(chapter[0:100]).decode('unicode_escape'), '\n';
###Output
u'1 It is a truth universally acknowledged, that a single man in possession of a good fortune, must be'
u'2 Mr. Bennet was among the earliest of those who waited on Mr. Bingley. He had always intended to vi'
u'3 Not all that Mrs. Bennet, however, with the assistance of her five daughters, could ask on the sub'
u'4 When Jane and Elizabeth were alone, the former, who had been cautious in her praise of Mr. Bingley'
u'5 Within a short walk of Longbourn lived a family with whom the Bennets were particularly intimate. '
u'6 The ladies of Longbourn soon waited on those of Netherfield. The visit was soon returned in due fo'
u"7 Mr. Bennet's property consisted almost entirely in an estate of two thousand a year, which, unfort"
u"8 At five o'clock the two ladies retired to dress, and at half-past six Elizabeth was summoned to di"
u"9 Elizabeth passed the chief of the night in her sister's room, and in the morning had the pleasure "
###Markdown
As we learn more we will return to this popular book's unicode. Assignment Gotcha!Let's examine the three assignments in the cell below.The first assignment of `x=3` is standard: Python/SageMath chooses a memory location for `x` and saves the integer value `3` in it. The second assignment of `y=x` is more interesting and *Pythonic*: Instead of finding another location for the variable `y` and copying the value of `3` in it, Python/SageMath differs from the ways of C/C++. Since both variables will have the same value after the assignment, Python/SageMath lets `y` point to the memory location of `x`.Finally, after the third assignment of `y=2`, `x` will be NOT be changed to `2` as because the behavior is not that of a C-pointer. Since `x` and `y` will not share the same value anymore, `y` gets its own memory location, containing `2` and `x` sticks to the originally assigned value `3`.
###Code
x=3
print(x) # x is 3
y=x
print(x,y) # x is 3 and y is 3
y=2
print(x,y)
###Output
3
(3, 3)
(3, 2)
###Markdown
As every instance (object or variable) has an identity or `id()`, i.e. an integer which is unique within the script or program, we can use `id()` to understand the above behavior of Python/SageMath assignments.So, let's have a look at our previous example and see how the identities change with the assignments.
###Code
x = 3
print('x and its id() are:')
print(x,id(x))
y = x
print('\ny and its id() are:')
print(y,id(y))
y = 2
print('\nx, y and their id()s are:')
print(x,y,id(x),id(y))
###Output
x and its id() are:
(3, 140085314697904)
y and its id() are:
(3, 140085314697904)
x, y and their id()s are:
(3, 2, 140085314697904, 140085314531216)
###Markdown
Example 7: Truth statements and Boolean valuesConsider statements like "Today is Friday" or "2 is greater than 1" or " 1 equals 1": statements which are either true or not true (i.e., false). SageMath has two values, True and False which you'll meet in this situation. These values are called Boolean values, or values of the type Boolean.In SageMath, we can express statements like "2 is greater than 1" or " 1 equals 1" with relational operators, also known as value comparison operators. Have a look at the list below.- `<` Less than- `>` Greater than- `<=` Less than or equal to- `>=` Greater than or equal to- `==` Equal to. - `!=` Not equal toLet's try some really simple truth statements.
###Code
1 < 1 # 1 is less than 1
###Output
_____no_output_____
###Markdown
Let us evaluate the following statement.
###Code
1 <= 1 # 1 is less than or equal to 1
###Output
_____no_output_____
###Markdown
We can use these operators on variables as well as on values. Again, try assigning different values to `x` and `y`, or try using different operators, if you want to.
###Code
x = 1 # assign the value 1 to x
y = 2 # assign the value 2 to y
x == y # evaluate the truth statement "x is equal to y"
###Output
_____no_output_____
###Markdown
Note that when we check if something equals something else, we use `==`, a double equals sign. This is because `=`, a single equals sign, is the assignment operator we talked about above. Therefore, to test if `x` equals `y` we can't write `x = y` because this would assign `y` to `x`; instead we use the equality operator `==` and write `x == y`.We can also assign a Boolean value to a variable.
###Code
# Using the same x and y as above
myBoolean = (x == y) # assign the result of x == y to the variable myBoolean
myBoolean # disclose myBoolean
type(myBoolean) # check the type of myBoolean
###Output
_____no_output_____
###Markdown
If we want to check if two things are not equal we use `!=`. As we would expect, it gives us the opposite of testing for equality:
###Code
x != y # evaluate the truth statement "x is not equal to y"
print(x,y) # Let's print x and y to make sure the above statement makes sense
###Output
(1, 2)
###Markdown
You tryTry assigning some values to two variables - you choose what values and you choose what variable names to use. Try some truth statements to check if they are equal, or one is less than the other. You tryTry some strings (we looked at strings briefly in Example 6 above). Can you check if two strings are equal? Can you check if one string is less than (`<`) another string? How do you think that Sage is ordering strings (try comparing "fred" and "freddy", for example)?
###Code
'raazb' <= 'raaza'
x = [1]
y = x
y[0] = 5
print x
print(x,id(x),y,id(y))
###Output
[5]
([5], 140085306496640, [5], 140085306496640)
###Markdown
Example 8: Mathematical constantsSage has reserved words that are defined as common mathematical constants. For example, `pi` and `e` behave as you expect. Numerical approximations can be obtained using the `.n()` method, as before.
###Code
print pi, "~", pi.n() # print a numerical approximation of the mathematical constant pi
print e, "~", e.n() # print a numerical approximation of the mathematical constant e
print I, "~", I.n() # print a numerical approximation of the imaginary number sqrt(-1)
(pi/e).n(digits=100) # print the first 100 digits of pi/e
e^(i*pi)+1 # Euler's identity symbolically - see https://en.wikipedia.org/wiki/Euler%27s_identity
###Output
_____no_output_____
###Markdown
Example 9: SageMath number types and Python number typesWe showed how you can find the type of a number value and we demonstrated that by default, SageMath makes 'real' numbers like 3.1 into Sage real literals (`sage.rings.real_mpfr.RealLiteral`). If you were just using Python (the programming language underlying most of SageMath) then a value like 3.1 would be a floating point number or float type. Python has some interesting extra operators that you can use with Python floating point numbers, which also work with the Sage rings integer type but not with Sage real literals.
###Code
X = 3.1 # convert to default Sage real literal 3.1
type(X)
X = float(3.1) # convert the default Sage real literal 3.1 to a float 3.1
type(X)
###Output
_____no_output_____
###Markdown
Floor Division (`//`) - The division of operands where the result is the quotient in which the digits after the decimal point are removed - the result is floored, i.e., rounded towards negative infinity: examples: 9//2 = 4 and 9.0//2.0 = 4.0, -11//3 = -4, -11.0//3 = -4.0
###Code
3 // 2 # floor division
3.3 // 2.0 # this will give error - floor division is undefined for Sage real literals
float(3.5) // float(2.0)
###Output
_____no_output_____
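###Markdown
The flooring towards negative infinity mentioned above can be checked directly with plain Python ints (recall from the cell above that `//` on Sage real literals gives an error).
###Code
int(-11) // int(3) # rounded towards negative infinity, so the result is -4, not -3
###Output
_____no_output_____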
###Markdown
Similarly, we have the light-weight Python integer type `int` that we may want instead of SageMath integer type for non-mathematical operations.
###Code
type(3) # the default Sage rings integer type
X = int(3) # conversion to a plain Python integer type
type(X)
3/2 # see the result you get when dividing one default Sage rings integer type by another
###Output
_____no_output_____
###Markdown
One of the differences between SageMath rings integers and plain Python integers is that the result of dividing one SageMath rings integer by another is a rational. This probably seems very sensible, but it is not what happens at the moment with Python integers.
###Code
int(7)/int(2) # division using python integers is "floor division"
###Output
_____no_output_____
###Markdown
We showed the `.n()` method. If X is some Sage real literal and we use `X.n(20)` we will be asking for 20 bits of precision, which is about how many bits in the computer's memory will be allocated to hold the number. If we ask for `X.n(digits=20)` we will be asking for 20 digits of precision, which is not the same thing. Also note that 20 digits of precision does not mean showing the number to 20 decimal places, it means all the digits including those in front of the decimal point.
###Code
help(n) # always ask for help when you need it - or lookup in help menu above
X=3.55555555
X.n(digits = 3)
X.n(3) # this will use 3 bits of precision
round(X,3)
?round # this opens a window with help information that can be closed
###Output
_____no_output_____
###Markdown
If you want to actually round a number to a specific number of decimal places, you can also use the round(...) function.For deeper dive see documents on [Python Numeric Types](https://docs.python.org/2/library/stdtypes.htmlnumeric-types-int-float-long-complex) and [SageMath Numeric Types]() SetsSet theory is at the very foundation in modern mathematics and is necessary to understand the mathematical notions of probability and statistics. We will take a practical mathemtical tour of the essential concepts from set theory that a data scientist needs to understand and build probabilistic models from the data using statistical principles.
###Code
showURL("https://en.wikipedia.org/wiki/Set_(mathematics)",500)
###Output
_____no_output_____
###Markdown
Essentials of Set Theory for Probability and Statistics**These are black-board lectures typeset here.**Let us learn or recall elementary set theory. Sets are perhaps the most fundamental concept in mathematics. Definitions**Set** *is a collection of distinct elements*. We write a set by enclosing its elements with curly brackets. Let us see some example next.- The collection of $\star$ and $\circ$ is $\{\star,\circ\}$.- We can name the set $\{\star,\circ\}$ by the letter $A$ and write $$A=\{\star,\circ\}.$$- Question: Is $\{\star,\star,\circ\}$ a set?- A set of letters and numbers that I like is $\{b,d,6,p,q,9\}$.- The set of first five Greek alphabets is $\{\alpha,\beta,\gamma,\delta,\epsilon\}$.The set that contains no elements is the **empty set**. It is denoted by $$\boxed{\emptyset = \{\}} \ .$$We say an element belongs to or does not belong to a set with the binary operators $$\boxed{\in \ \text{or} \ \notin} \ .$$ For example,- $\star \in \{\star,\circ\}$ but the element $\otimes \notin \{\star,\circ\}$- $b \in \{b,d,6,p,q,9\}$ but $8 \notin \{b,d,6,p,q,9\}$- Question: Is $9 \in \{3,4,1,5,2,8,6,7\}$?We say a set $C$ is a **subset** of a set $D$ and write$$\boxed{C \subset D}$$if every element of $C$ is also an element of $D$. For example,- $\{\star\} \subset \{\star,\circ\}$- Question: Is $\{6,9\}\subset \{b,d,6,p,q,9\}$? Set OperationsWe can add distinct new elements to an existing set by **union** operation denoted by $\cup$ symbol. For example- $\{\circ, \bullet\} \cup \{\star\} = \{\circ,\bullet,\star\}$- Question: $\{\circ, \bullet\} \cup \{\bullet\} = \quad$?More formally, we write the union of two sets $A$ and $B$ as $$\boxed{A \cup B = \{x: x \in A \ \text{or} \ x \in B \}} \ .$$The symbols above are read as *$A$ union $B$ is equal to the set of all $x$ such that $x$ belongs to $A$ or $x$ belongs to $B$* and simply means that $A$ union $B$ or $A \cup B$ is the set of elements that belong to $A$ or $B$.Similarly, the **intersection** of two sets $A$ and $B$ written as $$\boxed{A \cap B = \{x: x \in A \ \text{and} \ x \in B \}} $$ means $A$ intersection $B$ is the set of elements that belong to both $A$ and $B$.For example- $\{\circ, \bullet\} \cap \{\circ\} = \{\circ\}$- $\{\circ, \bullet\} \cap \{\bullet\} = \{\bullet\}$- $\{\circ\} \cap \{a,b,c,d\}=\emptyset$The **set difference** of two sets $A$ and $B$ written as $$\boxed{A \setminus B = \{x: x \in A \ \text{and} \ x \notin B \}} $$ means $A \setminus B$ is the set of elements that belong to $A$ and not belong to $B$.For example- $\{\circ, \bullet\} \setminus \{\circ\} = \{\bullet\}$- $\{\circ, \bullet\} \setminus \{\bullet\} = \{\circ\}$- $\{a,b,c,d\} \setminus \{a,b,c,d\}=\emptyset$The equality of two sets $A$ and $B$ is defined in terms of subsets as follows: $$\boxed{A = B \quad \text{if and only if} \quad A \subset B \ \text{and} \ B \subset A} \ .$$Two sets $A$ anb $B$ are said to be **disjoint** if $$\boxed{ A \cap B = \emptyset} \ .$$Given a **universal set** $\Omega$, we define the **complement** of a subset $A$ of the universal set by $$\boxed{A^c = \Omega \setminus A = \{x: x \in \Omega \ \text{and} \ x \notin A\}} \ .$$ An Interactive Venn DiagramLet us gain more intuition by seeing the unions and intersections of sets interactively. The following interact is from [interact/misc](https://wiki.sagemath.org/interact/miscAnInteractiveVennDiagram) page of Sage Wiki.
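Here is a minimal sketch of the complement operation (added for illustration; it is not part of the original notebook), assuming a small example universal set $\Omega$ and using plain Python sets with the set-difference operator:

```
# A complement is just a set difference from a chosen universal set.
Omega = set(range(1, 11))        # an assumed universal set {1, 2, ..., 10}
A = set([2, 4, 6, 8, 10])
A_complement = Omega - A         # {1, 3, 5, 7, 9}
print(A_complement)
```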
###Code
# ignore this code for now and focus on the interact in the output cell
def f(s, braces=True):
t = ', '.join(sorted(list(s)))
if braces: return '{' + t + '}'
return t
def g(s): return set(str(s).replace(',',' ').split())
@interact
def _(X='1,2,3,a', Y='2,a,3,4,apple', Z='a,b,10,apple'):
S = [g(X), g(Y), g(Z)]
X,Y,Z = S
XY = X & Y
XZ = X & Z
YZ = Y & Z
XYZ = XY & Z
pretty_print(html('<center>'))
pretty_print(html("$X \cap Y$ = %s"%f(XY)))
pretty_print(html("$X \cap Z$ = %s"%f(XZ)))
pretty_print(html("$Y \cap Z$ = %s"%f(YZ)))
pretty_print(html("$X \cap Y \cap Z$ = %s"%f(XYZ)))
pretty_print(html('</center>'))
centers = [(cos(n*2*pi/3), sin(n*2*pi/3)) for n in [0,1,2]]
scale = 1.7
clr = ['yellow', 'blue', 'green']
G = Graphics()
for i in range(len(S)):
G += circle(centers[i], scale, rgbcolor=clr[i],
fill=True, alpha=0.3)
for i in range(len(S)):
G += circle(centers[i], scale, rgbcolor='black')
# Plot what is in one but neither other
for i in range(len(S)):
Z = set(S[i])
for j in range(1,len(S)):
Z = Z.difference(S[(i+j)%3])
G += text(f(Z,braces=False), (1.5*centers[i][0],1.7*centers[i][1]), rgbcolor='black')
# Plot pairs of intersections
for i in range(len(S)):
Z = (set(S[i]) & S[(i+1)%3]) - set(XYZ)
C = (1.3*cos(i*2*pi/3 + pi/3), 1.3*sin(i*2*pi/3 + pi/3))
G += text(f(Z,braces=False), C, rgbcolor='black')
# Plot intersection of all three
G += text(f(XYZ,braces=False), (0,0), rgbcolor='black')
# Show it
G.show(aspect_ratio=1, axes=False)
###Output
_____no_output_____
###Markdown
Create and manipulate sets in SageMath. Example 0: Lists before SetsA `list` is a sequential collection that we will revisit in detail soon. For now, we just need to know that we can create a list by using delimiter `,` between items and by wrapping with left and right square brackets: `[` and `]`. For example, the following is a list of 4 integers:
###Code
[1,2,3,4]
myList = [1,2,3,4] # we can assign the list to a variable myList
print(myList) # print myList
type(myList) # and ask for its type
###Output
[1, 2, 3, 4]
###Markdown
List is one of the most primitive data structures and has a long history in a popular computer programming language called LISP - originally created as a practical mathematical notation for computer programs.For now, we just use lists to create sets. Example 1: Making setsIn SageMath, you do have to specifically say that you want a set when you make it.
###Code
X = set([1, 2, 3, 4]) # make the set X={1,2,3,4} from the List [1,2,3,4]
X # disclose X
type(X) # what is the type of X
###Output
_____no_output_____
###Markdown
This is a specialized datatype in Python and more details can be found in Python docs: [https://docs.python.org/2/library/datatypes.html](https://docs.python.org/2/library/datatypes.html)
###Code
4 in X # 'is 4 in X?'
5 in X # 'is 5 in X?'
Y = set([1, 2]) # make the set Y={1,2}
Y # disclose Y
4 not in Y # 'is 4 not in Y?'
1 not in Y # 'is 1 not in Y?'
###Output
_____no_output_____
###Markdown
We can add new elements to a set.
###Code
X.add(5) # add 5 to the set X
X
###Output
_____no_output_____
###Markdown
But remember from the mathematical exposition above that sets contain distinct elements.
###Code
X.add(1) # try adding another 1 to the set X
X
###Output
_____no_output_____
###Markdown
You tryTry making the set $Z=\{4,5,6,7\}$ next. The instructions are in the two cells below.
###Code
# Write in the expression to make set Z ={4, 5, 6, 7}
# (press ENTER at the end of this line to get a new line)
Z = set([4,5,6,7]) #([4],[5],[6,7]))
# Check if 4 is in Z
4 in Z
# (press ENTER at the end of this line to get a new line)
###Output
_____no_output_____
###Markdown
Make a set with the value 2/5 (as a rational) in it. Try adding 0.4 (as a floating point number) to the set. Does SageMath do what you expect? Example 2: SubsetsIn lectures we talked about subsets of sets.Recall that `Y` is a subset of `X` if each element in `Y` is also in `X`.
###Code
print "X is", X
print "Y is", Y
print "Is Y a subset of X?"
Y <= X # 'is Y a subset of X?'
###Output
X is set([1, 2, 3, 4, 5])
Y is set([1, 2])
Is Y a subset of X?
###Markdown
If you have time: We say Y is a proper subset of X if all the elements in Y are also in X but there is at least one element in X that is not in Y. If X is a (proper) subset of Y, then we also say that Y is a (proper) superset of X.
###Code
X < X # 'is X a proper subset of itself?'
X > Y # 'is X a proper superset of Y?'
X > X # 'is X a proper superset of itself?'
X >= Y # 'is X a superset of Y?' is the same as 'is Y a subset of X?'
###Output
_____no_output_____
###Markdown
Example 3: More set operationsNow let's have a look at the other set operations we talked about above: intersection, union, and difference.Recall that the intersection of X and Y is the set of elements that are in both X and Y.
###Code
X & Y # '&' is the intersection operator
###Output
_____no_output_____
###Markdown
The union of X and Y is the set of elements that are in either X or Y.
###Code
X | Y # '|' is the union operator
###Output
_____no_output_____
###Markdown
The set difference between X and Y is the set of elements in X that are not in Y.
###Code
X - Y # '-' is the set difference operator
###Output
_____no_output_____
###Markdown
You tryTry some more work with sets of strings below.
###Code
fruit = set(['orange', 'banana', 'apple'])
fruit
colours = set(['red', 'green', 'blue', 'orange'])
colours
###Output
_____no_output_____
###Markdown
Fruit and colours are different to us as people, but to the computer, the string 'orange' is just the string 'orange' whether it is in a set called fruit or a set called colours.
###Code
print "fruit intersection colours is", fruit & colours
print "fruit union colours is", fruit | colours
print "fruit - colours is", fruit - colours
print "colours - fruit is", colours - fruit
###Output
fruit intersection colours is set(['orange'])
fruit union colours is set(['blue', 'green', 'apple', 'orange', 'banana', 'red'])
fruit - colours is set(['banana', 'apple'])
colours - fruit is set(['blue', 'green', 'red'])
###Markdown
Try a few other simple subset examples - make up your own sets and try some intersections, unions, and set difference operations. The best way to try new possible operations on a set such as the X we just created is to type a period after X and hit the `<TAB>` key. This will bring up all the possible methods you can call on the set X.
###Code
mySet = set([1,2,3,4,5,6,7,8,9])
mySet. # try placing the cursor after the dot and hit <TAB> key
?mySet.add # you can get help on a method by prepending a question mark
###Output
_____no_output_____
###Markdown
In fact, there are two ways to make sets in SageMath. We have so far used [the python set](https://docs.python.org/2/library/sets.html) to make a set. However, we can use the SageMath `Set` to make sets too. SageMath `Set` is more mathematically consistent. If you are interested in the SageMath `Set`, go to the source and work through the [SageMath reference on Sets](http://doc.sagemath.org/html/en/reference/sets/sage/sets/set.html). But, first let us appreciate the difference between Python `set` and SageMath `Set`!
###Code
X = set([1, 2, 3, 4]) # make the set X={1,2,3,4} with python set
X # disclose X
type(X) # this is the set in python
anotherX = Set([1, 2, 3, 4]) # make the set anotherX={1,2,3,4} in SAGE Set
anotherX #disclose it
type(anotherX) # this is the set in SAGE and is more mathy
anotherX. # see what methods are available in SageMath Set
###Output
_____no_output_____
###Markdown
Example 4Python also provides something called a [frozenset](https://docs.python.org/2/library/stdtypes.htmlfrozenset), which you can't change like an ordinary set.
###Code
aFrozenSet = frozenset([2/5, 0.2, 1/7, 0.1])
aFrozenSet
aFrozenSet.add(0.3) # This should give an error
###Output
_____no_output_____
|
notebook/hello/quadratic_formula.ipynb
|
###Markdown
The quadratic formula The quadratic formula is the formula used to find the roots of a quadratic equation. The quadratic equation Consider a quadratic equation of the form\begin{equation}\label{eq1}\tag{1}y = ax^2 + bx + c\end{equation}whose roots can be found through the quadratic formula. The quadratic formula Such an equation has two roots, where\begin{equation}\label{eq2}\tag{2}x_1 = \frac{-b - \sqrt{b^2 - 4ac}}{2a}\end{equation}gives the first root and\begin{equation}\label{eq3}\tag{3}x_2 = \frac{-b + \sqrt{b^2 - 4ac}}{2a}\end{equation}gives the second root, with $x_1 < x_2$. An application of Equations \eqref{eq2} and \eqref{eq3} is given in the next section. Example The following is a simple example of applying the two previous equations, with the equation$$y = x^2 - 5x + 6$$so that $a = 1$, $b = -5$, and $c = 6$.
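One caveat worth noting before running the example (this note is an addition, not part of the original text): both formulas assume a non-negative discriminant $D = b^2 - 4ac$; for $D < 0$ the roots are complex and `math.sqrt` would raise an error. A small sketch of how the negative case could be handled with Python's `cmath` module (the helper name `quadratic_roots` is just an illustrative choice):

```
import cmath  # cmath.sqrt also accepts negative arguments

def quadratic_roots(a, b, c):
    # Return both roots, falling back to complex numbers when D < 0.
    D = b * b - 4 * a * c
    sqrt_D = cmath.sqrt(D)
    return (-b - sqrt_D) / (2 * a), (-b + sqrt_D) / (2 * a)

print(quadratic_roots(1, -5, 6))  # ((2+0j), (3+0j)) -- matches the example in this section
print(quadratic_roots(1, 0, 1))   # purely imaginary roots when D < 0
```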
###Code
import math
a = 1
b = -5
c = 6
D = b * b - 4 * a * c
x1 = (-b - math.sqrt(D)) / (2*a)
x2 = (-b + math.sqrt(D)) / (2*a)
print("x1 = ", x1)
print("x2 = ", x2)
###Output
x1 = 2.0
x2 = 3.0
###Markdown
Thus we obtain that the roots of the equation $y = x^2 - 5x + 6$ are $x_1 = 2.0$ and $x_2 = 3.0$. Graph For the equation $y = x^2 - 5x + 6$ the graph can be drawn as follows.
###Code
import matplotlib.pyplot as plt
def poly(x):
a = 1
b = -5
c = 6
y = a * x * x + b * x + c
return y
x = []
y = []
for i in range(0, 21):
x.append(i * 0.25)
y.append(poly(i * 0.25))
print(x)
print(y)
plt.plot(x,y)
plt.show()
###Output
_____no_output_____
|
lectures/06_Syntax/06_Syntax.ipynb
|
###Markdown
Syntax Natural Language Processing and Information Extraction, 2021 WSLecture 6, 10/19/2021Gábor Recski The lecture will begin at 13.15.This material can be downloaded from [https://github.com/tuw-nlp-ie/tuw-nlp-ie-2021WS](https://github.com/tuw-nlp-ie/tuw-nlp-ie-2021WS) Topics and SLP3 chapters- Parts-of-speech [8.1-8.4](https://web.stanford.edu/~jurafsky/slp3/8.pdf)- Constituency [12.1-12.3](https://web.stanford.edu/~jurafsky/slp3/12.pdf), [13.1-13.3](https://web.stanford.edu/~jurafsky/slp3/13.pdf)- Dependency [14.1](https://web.stanford.edu/~jurafsky/slp3/14.pdf) DependenciesTo run this notebook, you will need to install the **stanza** and **spacy** python packages.Make sure to restart the kernel afterwards.Then you can use the cells below to download and initialize the necessary models. Download models, initialize pipelines
###Code
import stanza
stanza.download('en')
stanza_nlp = stanza.Pipeline(lang='en', logging_level='WARNING')
import spacy
from spacy.cli import download as spacy_download
spacy_download('en')
spacy_nlp = spacy.load("en_core_web_sm")
import stanza
stanza.download('en')
###Output
_____no_output_____
###Markdown
Recap Tokenization, lemmatization, decompounding
###Code
doc = stanza_nlp("Did you get me those muffins?")
print("\n".join([f"{word.text:<8}\t{word.lemma}" for word in doc.sentences[0].words]))
###Output
_____no_output_____
###Markdown
What's next? ```Twas brillig, and the slithy tovesDid gyre and gimble in the wabe;All mimsy were the borogoves,And the mome raths outgrabe.```(Lewis Carroll: [Jabberwocky](https://en.wikipedia.org/wiki/Jabberwocky)) ```Es brillig war. Die schlichten TovenWirrten und wimmelten in Waben;Und aller-mümsige BurggovenDie mohmen Räth' ausgraben.```(Translated by Robert Scott) They don't make much sense, but how come they make any? Part-of-speech (POS)
###Code
print("\n".join([f"{word.text:<8}\t{word.pos}" for word in doc.sentences[0].words]))
print("\n".join([f"{word.text:<8}\t{word.xpos}" for word in doc.sentences[0].words]))
###Output
_____no_output_____
###Markdown
POS-tags are morphosyntactic categories

| Word | [UPOS](https://universaldependencies.org/u/pos/) | | [PTB](https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html) | |
| :--- | :--- | :--- | :--- | :--- |
| Did | AUX | auxiliary | VBD | verb, past tense |
| you | PRON | pronoun | PRP | personal pronoun |
| get | VERB | verb | VB | verb, base form |
| me | PRON | pronoun | PRP | personal pronoun |
| those | DET | determiner | DT | determiner |
| muffins | NOUN | noun | NNS | noun, plural |
| ? | PUNCT | punctuation | . | punctuation |

There's always more morphosyntactic features to consider:
###Code
print("\n".join([f"{word.text:<8}\t{word.pos:<8}\t{word.feats}" for word in doc.sentences[0].words]))
###Output
_____no_output_____
###Markdown
And this even makes sense for unknown words:
###Code
doc = stanza_nlp('all mimsy were the borogoves')
print("\n".join([f"{word.text:<8}\t{word.pos:<8}\t{word.feats}" for word in doc.sentences[0].words]))
###Output
_____no_output_____
###Markdown
Difficulties of POS-tagging _earnings growth took a __back/JJ__ seat__a small building in the **back/NN**__a clear majority of senators **back/VBP** the bill__Dave began to __back/VB__ toward the door__enable the country to buy __back/RP__ about debt__I was twenty-one __back/RB__ then_([SLP Ch.8](https://web.stanford.edu/~jurafsky/slp3/8.pdf)) Why not implement grammar? - grammar and vocabulary change too fast- resolving ambiguities requires probabilistic reasoning | _Time_ | _flies_ | _like_ | _an_ | _arrow_ || :----- | :------ | :----- | :--- | :------ || NOUN | VERB | ADP | DET | NOUN | | _Time_ | _flies_ | _like_ | _an_ | _arrow_ || :----- | :------ | :----- | :--- | :------ || VERB | NOUN | ADP | DET | NOUN | | _Time_ | _flies_ | _like_ | _an_ | _arrow_ || :----- | :------ | :----- | :--- | :------ || NOUN | NOUN | VERB | DET | NOUN | BTW: the second one can still have three interpretations - can you think of all of them (without googling)? Questions? _See the supplementary material in 06b_POS_tagging_HMMs.ipynb on POS-tagging with Hidden Markov Models_ Syntactic structure Two perspectives- Constituency structure (SLP3 Ch. [12](https://web.stanford.edu/~jurafsky/slp3/12.pdf))- Dependency structure (SLP3 Ch. [15](https://web.stanford.edu/~jurafsky/slp3/12.pdf)) Constituency I shot an elephant in my pyjamas
###Code
doc = stanza_nlp("I shot an elephant in my pyjamas")
print("\n".join([f"{word.text:<12}{word.pos}" for word in doc.sentences[0].words]))
###Output
_____no_output_____
###Markdown
([SLP Ch.13](https://web.stanford.edu/~jurafsky/slp3/13.pdf))  > (NP > $\quad$ (DET an) > $\quad$ (Nominal > $\quad \quad$ (Nominal > $\quad \quad \quad$ (NOUN elephant) > $\quad \quad$ ) > $\quad$ (PP > $\quad \quad$ (PREP in) > $\quad \quad$ (NP > $\quad \quad \quad$ (DET my) > $\quad \quad \quad$ (NOUN pyjamas) > $\quad \quad$ ) > $\quad $ ) > ) NP, PP, etc. are distributional categories. Just like POS-tags! (DET an) (NOUN elephant) (PREP in) (DET my) (NOUN pyjamas)(DET two) (NOUN pandas) (PREP behind) (DET his) (NOUN tent) (NP I) (VERB shot) (NP an elephant) (PP in my pyjamas)(NP My best friend) (VERB met) (NP two pandas) (PP behind his tent) (NP I) (VP shot an elephant in my pyjamas)(NP The guy driving the jeep) (VP fainted) Phrase structure grammars ```S -> NP VPVP -> VERB (NP)NP -> (DET) NOUN (PP)PP -> PREP NP(...)DET -> (an|the|my|his|...)VERB -> (shot|met|fainted...)PREP -> (in|behind|...)NOUN -> (I|elephant|pyjamas|panda|tent|jeep|guy|...)``` Probabilistic grammars ```NOUN -> I (0.8)NOUN -> elephant (0.1)(...)VP -> VERB (0.2)VP -> VERB NP (0.8)``` Constituency parsing Parsing is the task of determining the (most likely) possible derivations of a sentence, given a (probabilistic) grammar The CKY algorithm See example in [cky.pdf](cky.pdf) See SLP3 Chapters [13](https://web.stanford.edu/~jurafsky/slp3/13.pdf) and [14](https://web.stanford.edu/~jurafsky/slp3/14.pdf) for more. Questions? Dependency structure  - **NSUBJ**: nominal subject- **OBJ**: object- **DET**: determiner- **OBL**: oblique nominal- **NMOD**: nominal modifier- **POSS**: possessive
###Code
doc = stanza_nlp("I shot an elephant in my pyjamas")
print("\n".join([f"{word.id:<4}{word.text:<12}{word.deprel:<12}{word.head:<8}" for word in doc.sentences[0].words]))
###Output
_____no_output_____
|
Chapter_Preprocessing/Embedded_Method_Lasso.ipynb
|
###Markdown
Chapter: Data Preprocessing Topic: Embedded Method: Lasso
###Code
# read data
import numpy as np
VSdata = np.loadtxt('VSdata.csv', delimiter=',')
# separate X and y
y = VSdata[:,0]
X = VSdata[:,1:]
# scale data
from sklearn.preprocessing import StandardScaler
xscaler = StandardScaler()
X_scaled = xscaler.fit_transform(X)
yscaler = StandardScaler()
y_scaled = yscaler.fit_transform(y[:,None])
# fit Lasso model
from sklearn.linear_model import LassoCV
Lasso_model = LassoCV(cv=5).fit(X_scaled, y_scaled.ravel())
# find the relevant inputs using the model coefficients
top_k_inputs = np.argsort(abs(Lasso_model.coef_))[::-1][:10] + 1
print('Relevant inputs: ', top_k_inputs)
###Output
Relevant inputs: [21 22 20 23 24 19 25 18 33 14]
|
term7/mmod/lab1.ipynb
|
###Markdown
Laboratory work No. 1: Construction and study of the characteristics of generators of basic random variables
###Code
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
The middle-square method
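In outline (this summary is added here and is inferred from the implementation below): the generator squares the current $d$-digit seed, zero-pads the square to $2d$ digits, and keeps the middle $d$ digits as the next state; with $d = 8$,

$$z_{i+1} = \left\lfloor \frac{z_i^2}{10^{d/2}} \right\rfloor \bmod 10^{d}, \qquad u_i = \frac{z_{i+1}}{10^{d}} .$$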
###Code
def square_mid_sensor(n=5, z0=90909090, digit_amount=8, is_float=True):
assert n >= 0, "`n` should not be negative"
assert len(str(z0)) == digit_amount, f"`z0` Should be {digit_amount}-digit number"
generated_nums = []
next_num = str(z0)
for _ in range(n):
next_num = str(int(next_num) ** 2)
if len(next_num) < 2 * digit_amount:
next_num = next_num.zfill(2*digit_amount)
slice_len = len(next_num) // 2
next_num = next_num[slice_len//2:-slice_len//2]
generated_nums.append(float("0." + next_num) if is_float else int(next_num))
return generated_nums
square_mid_sensor(n=5)
###Output
_____no_output_____
###Markdown
The multiplicative congruential method
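For reference (added here; the recurrence is read off the code below, with $a_0$ the seed, $k$ the multiplier and $m$ the modulus; the code also rounds $z_i$ to 8 decimal places):

$$a_{i+1} = k \, a_i \bmod m, \qquad z_i = \frac{a_{i+1}}{m} .$$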
###Code
def multiplicative_congruent_sensor(n=20,
a0=19941994,
k=(2**32)-5,
m=2**64,
float_round=8):
assert a0 >= 0, "`a0` should not be negative"
assert m >= 0, "`m` should not be negative"
generated_nums = []
next_a = a0
for _ in range(n):
next_a = (k * next_a) % m
generated_nums.append(round(next_a / m, float_round))
return generated_nums
multiplicative_congruent_sensor(n=5)
###Output
_____no_output_____
###Markdown
Testing for uniformity Plotting histograms
###Code
def get_frequencies(z, k, n):
step = 1 / k
arange = np.arange(0, 1 + step, step)
z_n = [0 for _ in range(k)]
for index, (i, j) in enumerate(zip(arange, arange[1:])):
for el in z:
if i <= el <= j:
z_n[index] += 1
return arange, [i/n for i in z_n]
def draw_plot(x_orig, y_orig, title="", is_plot_show=True):
x = x_orig[:]
y = y_orig[:]
y.insert(0, 0)
plt.step(x, y)
plt.vlines(x, min(y), y, colors='C0')
plt.grid(True)
plt.suptitle(title, fontweight='bold')
if is_plot_show:
plt.show()
k = 50
n = 100
z0 = square_mid_sensor(n=n)
z1 = multiplicative_congruent_sensor(n=n)
x0, p0 = get_frequencies(z0, k, n)
x1, p1 = get_frequencies(z1, k, n)
draw_plot(x0, p0, title=f"Middle-square method with n={n} and k={k}")
draw_plot(x1, p1, title=f"Multiplicative congruential method with n={n} and k={k}")
k = 10
n = 10000
z0 = square_mid_sensor(n=n)
z1 = multiplicative_congruent_sensor(n=n)
x0, p0 = get_frequencies(z0, k, n)
x1, p1 = get_frequencies(z1, k, n)
draw_plot(x0, p0, title=f"Middle-square method with n={n} and k={k}")
draw_plot(x1, p1, title=f"Multiplicative congruential method with n={n} and k={k}")
###Output
_____no_output_____
###Markdown
From the histograms it is clear that in the second case the bars are closer to the value $$\frac{1}{k}$$
###Code
k = 10
n1 = 100
n2 = 10000
z0 = square_mid_sensor(n=n1)
z1 = square_mid_sensor(n=n2)
x0, p0 = get_frequencies(z0, k, n1)
x1, p1 = get_frequencies(z1, k, n2)
draw_plot(x0, p0, is_plot_show=False)
draw_plot(x1, p1, is_plot_show=False)
plt.legend((f"n={n1}", f"n={n2}"))
plt.show()
k = 10
n1 = 100
n2 = 10000
z0 = multiplicative_congruent_sensor(n=n1)
z1 = multiplicative_congruent_sensor(n=n2)
x0, p0 = get_frequencies(z0, k, n1)
x1, p1 = get_frequencies(z1, k, n2)
draw_plot(x0, p0, is_plot_show=False)
draw_plot(x1, p1, is_plot_show=False)
plt.legend((f"n={n1}", f"n={n2}"))
plt.show()
###Output
_____no_output_____
###Markdown
Computing the expected value and the variance
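Written out explicitly (added for clarity; these are exactly the sample estimators implemented below):

$$\hat{M} = \frac{1}{n}\sum_{i=1}^{n} z_i, \qquad \hat{D} = \frac{1}{n}\sum_{i=1}^{n}\bigl(z_i^2 - \hat{M}^2\bigr) = \frac{1}{n}\sum_{i=1}^{n} z_i^2 - \hat{M}^2 .$$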
###Code
def get_mathematical_expectation(func, n):
z = func(n=n)
return sum(z) / n
def get_variance(func, n):
m = get_mathematical_expectation(func, n)
z = func(n=n)
return sum(i**2 - m**2 for i in z) / n
def print_some(what, func, n):
    print(f"For n={n}:", what(func, n))
###Output
_____no_output_____
###Markdown
Theoretical expected value $$ M(z) = 0.5 $$
###Code
print("Middle-square method")
print_some(get_mathematical_expectation, square_mid_sensor, n=100)
print_some(get_mathematical_expectation, square_mid_sensor, n=10000)
print("Multiplicative congruential method")
print_some(get_mathematical_expectation, multiplicative_congruent_sensor, n=100)
print_some(get_mathematical_expectation, multiplicative_congruent_sensor, n=10000)
###Output
Multiplicative congruential method
For n=100: 0.4922854331999998
For n=10000: 0.49932376641400034
###Markdown
Theoretical variance $$ D(z) = \frac{1}{12} \approx 0.083333\ldots $$
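As a quick check (this derivation is an addition to the notebook): for $z \sim U(0,1)$,

$$D(z) = \mathbb{E}[z^2] - \bigl(\mathbb{E}[z]\bigr)^2 = \int_0^1 z^2\,dz - \left(\frac{1}{2}\right)^2 = \frac{1}{3} - \frac{1}{4} = \frac{1}{12} \approx 0.0833,$$

which agrees with the empirical values printed below.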
###Code
print("Middle-square method")
print_some(get_variance, square_mid_sensor, n=100)
print_some(get_variance, square_mid_sensor, n=10000)
print("Multiplicative congruential method")
print_some(get_variance, multiplicative_congruent_sensor, n=100)
print_some(get_variance, multiplicative_congruent_sensor, n=10000)
###Output
Multiplicative congruential method
For n=100: 0.07862842551145854
For n=10000: 0.08261330374336037
###Markdown
Testing independence We compute the correlation coefficient. As $n$ increases, the coefficient should converge to $0$.
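The sample statistic computed below is (spelled out here for clarity; it mirrors the code, where $x$ is the generated sequence and $y$ is the same sequence shifted by a lag of $s$ values):

$$\hat{R}(x, y) = \frac{\overline{xy} - \bar{x}\,\bar{y}}{\sqrt{\hat{D}(x)\,\hat{D}(y)}} .$$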
###Code
def get_correlation_coefficient(method, n=10000, s=5):
z = method(n=n)
x, y = z[:-s], z[s:]
M_x = sum(x) / n
D_x = sum(i**2 - M_x**2 for i in x) / n
M_y = sum(y) / n
D_y = sum(i**2 - M_y**2 for i in y) / n
M_xy = sum(i * j for i, j in zip(x, y)) / n
return (M_xy - (M_x*M_y)) / np.sqrt(D_x*D_y)
def print_correlation_coefficient(method, n):
    print(f"R(x, y) = {get_correlation_coefficient(method, n=n)}, for n={n}")
print("Middle-square method")
print_correlation_coefficient(square_mid_sensor, n=100)
print_correlation_coefficient(square_mid_sensor, n=10000)
print("Multiplicative congruential method")
n = 100
print_correlation_coefficient(multiplicative_congruent_sensor, n)
n = 10000
print_correlation_coefficient(multiplicative_congruent_sensor, n)
###Output
Multiplicative congruential method
R(x, y) = 0.09604279133581228, for n=100
R(x, y) = -0.005910065605539842, for n=10000
|
ipynb/Germany-Rheinland-Pfalz-LK-Cochem-Zell.ipynb
|
###Markdown
Germany: LK Cochem-Zell (Rheinland-Pfalz)* Homepage of project: https://oscovida.github.io* Plots are explained at http://oscovida.github.io/plots.html* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Rheinland-Pfalz-LK-Cochem-Zell.ipynb)
###Code
import datetime
import time
start = datetime.datetime.now()
print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}")
%config InlineBackend.figure_formats = ['svg']
from oscovida import *
overview(country="Germany", subregion="LK Cochem-Zell", weeks=5);
overview(country="Germany", subregion="LK Cochem-Zell");
compare_plot(country="Germany", subregion="LK Cochem-Zell", dates="2020-03-15:");
# load the data
cases, deaths = germany_get_region(landkreis="LK Cochem-Zell")
# compose into one table
table = compose_dataframe_summary(cases, deaths)
# show tables with up to 500 rows
pd.set_option("max_rows", 500)
# display the table
table
###Output
_____no_output_____
###Markdown
Explore the data in your web browser- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Rheinland-Pfalz-LK-Cochem-Zell.ipynb)- and wait (~1 to 2 minutes)- Then press SHIFT+RETURN to advance code cell to code cell- See http://jupyter.org for more details on how to use Jupyter Notebook Acknowledgements:- Johns Hopkins University provides data for countries- Robert Koch Institute provides data for within Germany- Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/)- Open source and scientific computing community for the data tools- Github for hosting repository and html files- Project Jupyter for the Notebook and binder service- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))--------------------
###Code
print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and "
f"deaths at {fetch_deaths_last_execution()}.")
# to force a fresh download of data, run "clear_cache()"
print(f"Notebook execution took: {datetime.datetime.now()-start}")
###Output
_____no_output_____
###Markdown
Germany: LK Cochem-Zell (Rheinland-Pfalz)* Homepage of project: https://oscovida.github.io* Plots are explained at http://oscovida.github.io/plots.html* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Rheinland-Pfalz-LK-Cochem-Zell.ipynb)
###Code
import datetime
import time
start = datetime.datetime.now()
print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}")
%config InlineBackend.figure_formats = ['svg']
from oscovida import *
overview(country="Germany", subregion="LK Cochem-Zell", weeks=5);
overview(country="Germany", subregion="LK Cochem-Zell");
compare_plot(country="Germany", subregion="LK Cochem-Zell", dates="2020-03-15:");
# load the data
cases, deaths = germany_get_region(landkreis="LK Cochem-Zell")
# get population of the region for future normalisation:
inhabitants = population(country="Germany", subregion="LK Cochem-Zell")
print(f'Population of country="Germany", subregion="LK Cochem-Zell": {inhabitants} people')
# compose into one table
table = compose_dataframe_summary(cases, deaths)
# show tables with up to 1000 rows
pd.set_option("max_rows", 1000)
# display the table
table
###Output
_____no_output_____
###Markdown
Explore the data in your web browser- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Rheinland-Pfalz-LK-Cochem-Zell.ipynb)- and wait (~1 to 2 minutes)- Then press SHIFT+RETURN to advance code cell to code cell- See http://jupyter.org for more details on how to use Jupyter Notebook Acknowledgements:- Johns Hopkins University provides data for countries- Robert Koch Institute provides data for within Germany- Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/)- Open source and scientific computing community for the data tools- Github for hosting repository and html files- Project Jupyter for the Notebook and binder service- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))--------------------
###Code
print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and "
f"deaths at {fetch_deaths_last_execution()}.")
# to force a fresh download of data, run "clear_cache()"
print(f"Notebook execution took: {datetime.datetime.now()-start}")
###Output
_____no_output_____
###Markdown
Germany: LK Cochem-Zell (Rheinland-Pfalz)* Homepage of project: https://oscovida.github.io* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Rheinland-Pfalz-LK-Cochem-Zell.ipynb)
###Code
import datetime
import time
start = datetime.datetime.now()
print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}")
%config InlineBackend.figure_formats = ['svg']
from oscovida import *
overview(country="Germany", subregion="LK Cochem-Zell");
# load the data
cases, deaths, region_label = germany_get_region(landkreis="LK Cochem-Zell")
# compose into one table
table = compose_dataframe_summary(cases, deaths)
# show tables with up to 500 rows
pd.set_option("max_rows", 500)
# display the table
table
###Output
_____no_output_____
###Markdown
Explore the data in your web browser- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Rheinland-Pfalz-LK-Cochem-Zell.ipynb)- and wait (~1 to 2 minutes)- Then press SHIFT+RETURN to advance code cell to code cell- See http://jupyter.org for more details on how to use Jupyter Notebook Acknowledgements:- Johns Hopkins University provides data for countries- Robert Koch Institute provides data for within Germany- Open source and scientific computing community for the data tools- Github for hosting repository and html files- Project Jupyter for the Notebook and binder service- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))--------------------
###Code
print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and "
f"deaths at {fetch_deaths_last_execution()}.")
# to force a fresh download of data, run "clear_cache()"
print(f"Notebook execution took: {datetime.datetime.now()-start}")
###Output
_____no_output_____
|
Netflix Exploration.ipynb
|
###Markdown
Netflix ExplorationThis notebook is meant to explore the trend as we move from the weekly episode model to the "all at once" release model. What genres have proven most successful with this model? How does one keep a viewer on board when episodes are no longer spaced out over time?
###Code
import pandas as pd
pd.__version__
def import_csv(filename, category):
df = pd.read_csv(filename).dropna()
# Set Category column
df['Category'] = category
# Convert Premiere to a datetime
df['Premiere'] = pd.to_datetime(df['Premiere'])
# Clean up status name
df['Status'] = df['Status'].str.replace(r"Renewed.*", "Renewed")
df['Status'] = df['Status'].str.replace(r".*to premiere.*", "Renewed")
df['Status'] = df['Status'].str.replace(r"Pending.*", "Pending")
df['Status'] = df['Status'].str.replace(r"Ended.*", "Ended")
return df
comedy = import_csv('comedy.csv', 'Comedy')
drama = import_csv('drama.csv', 'Drama')
docuseries = import_csv('docuseries.csv', 'Documentary')
kids_animated = import_csv('kids_animated.csv', 'Kids')
kids_liveaction = import_csv('kids_liveaction.csv', 'Kids')
netflix = pd.concat([comedy, drama, docuseries, kids_animated, kids_liveaction])
netflix.head()
netflix['PremiereYear'] = netflix['Premiere'].map(lambda x: x.year)
netflix.head()
netflix.groupby(['Category', 'Status']).size()
netflix.groupby(['Category', 'Status', 'PremiereYear']).size()
netflix.groupby(['PremiereYear', 'Category']).size()
###Output
_____no_output_____
###Markdown
Netflix user behaviour Requirements[Jupyter Notebook](https://jupyter.org/install) [Apache Toree](https://toree.incubator.apache.org/) [sampleDataNetflix.tsv](https://guicaro.com/sampleDataNetflix.tsv) placed in local filesystem and path updated in 1) below Notes* I used a combination of Jupyter notebook and the Apache Toree project as it makes it easy and fast to explore a dataset. * I was part of the team that came up with [Apache Toree (aka The Spark Kernel)](https://twitter.com/guicaro/status/543541995247910917), and till now I think it's still the only Jupyter kernel that ties to a Spark Session and is backed by Apache. It solved many issues for us back when we were developing applications in Spark. Future* I was hoping to use [Voila](https://github.com/voila-dashboards/voila) project to create an interactive dashboard for data scientists where they could move a slider widget to change the parameters in my SQL queries, thus, change the time window to search. So, for example, say a data scientist would want to search for users only between 8 and 9 in the morning.* I wanted to randomly generate a bigger dataset using rules so that we could at least have more data to play with 1. Let's read our data We will read in a TSV file and try to infer schema since it is not very complex data types we are using
###Code
val sessions = spark.read.option("header", "true")
.option("sep", "\t")
.option("inferSchema","true")
.csv("/Users/memo/Desktop/netflixSpark/sampleDataNetflix.tsv")
sessions.printSchema
sessions.show(2)
###Output
+-------+---------------+--------------------+----------+--------+----+----------+
|user_id|navigation_page| url|session_id| date|hour| timestamp|
+-------+---------------+--------------------+----------+--------+----+----------+
| 1001| HomePage|https://www.netfl...| 6001|20181125| 11|1543145019|
| 1001| OriginalsGenre|https://www.netfl...| 6001|20181125| 11|1543144483|
+-------+---------------+--------------------+----------+--------+----+----------+
only showing top 2 rows
###Markdown
2. Let's create a temp SQL table to use of the SQL magic in Apache Toree to get our information
###Code
sessions.registerTempTable("SESSIONS")
###Output
_____no_output_____
###Markdown
a) Find all users who have visited OurPlanetTitle Page. Using DISTINCT to show unique users
###Code
%%SQL select distinct user_id
from SESSIONS
where navigation_page = 'OurPlanetTitle'
###Output
_____no_output_____
###Markdown
b) Find all users who have visited OurPlanetTitle Page only once. Showing the page visits just for validation; it can easily be removed from the projection list in the query
###Code
%%SQL select user_id, count(user_id) as page_visits
from SESSIONS
where navigation_page = 'OurPlanetTitle'
group by user_id
having page_visits == 1
###Output
_____no_output_____
###Markdown
c) Find all users who have visited HomePage -> OriginalsGenre -> OurPlanetTitle -> HomePage We filter for the correct page order using the timestamps and make sure it all happens within the same `session_id`
###Code
%%SQL select distinct a.user_id
from sessions a,
sessions b,
sessions c,
sessions d
where a.user_id = b.user_id
and b.user_id = c.user_id
and c.user_id = d.user_id
and a.navigation_page = 'HomePage'
and b.navigation_page = 'OriginalsGenre'
and c.navigation_page = 'OurPlanetTitle'
and d.navigation_page = 'HomePage'
and a.timestamp < b.timestamp
and b.timestamp < c.timestamp
and c.timestamp < d.timestamp
and a.session_id = b.session_id
and b.session_id = c.session_id
and c.session_id = d.session_id
###Output
_____no_output_____
###Markdown
d) Find all users who landed on LogIn Page from a Title Page The like operator is not the most performant but the SQL optimizer should be able to tell that my 2nd where clause can improve selectivity of this query. I am using the `timestamp` column to make sure that before landing on a **Login** page, the user first comes from a **Title** page
###Code
%%SQL select a.user_id
from sessions a,
sessions b
where a.user_id = b.user_id
and b.navigation_page = 'LogIn'
and a.navigation_page like '%Title'
and a.timestamp < b.timestamp
###Output
_____no_output_____
###Markdown
e) Find all users who have visited only OurPlanetTitle Page We are using relation 'b' to get the total count of `url` the user has visited
###Code
%%SQL select a.user_id
from sessions a,
(select user_id, count(url) as totalUrl from sessions group by user_id) b
where a.user_id = b.user_id
and a.navigation_page = 'OurPlanetTitle'
and b.totalurl = 1
###Output
_____no_output_____
|
Linked List/0901/24. Swap Nodes in Pairs.ipynb
|
###Markdown
่ฏดๆ๏ผ ็ปๅฎไธไธช้พ่กจ๏ผๆฏ้ไธคไธช็ธ้ป่็นไบคๆขๅนถ่ฟๅๅ
ถๅคดใ ๆจไธ่ฝไฟฎๆนๅ่กจ่็นไธญ็ๅผ๏ผๅช่ฝๆดๆน่็นๆฌ่บซใExample: Given 1->2->3->4, you should return the list as 2->1->4->3.
###Code
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
    def swapPairs(self, head: ListNode) -> ListNode:
        dummy = prev = ListNode()
        prev.next = head
        while prev.next and prev.next.next:
            a = prev.next            # first node of the pair
            b = prev.next.next       # second node of the pair
            c = prev.next.next.next  # first node of the next pair (may be None)
            prev.next = b            # link the previous node to b
            b.next = a               # reverse the pair: b now points back to a
            a.next = c               # reconnect a to the rest of the list
            prev = a                 # a is now the last node of the processed pair
        return dummy.next
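# Added sanity check (not part of the original solution): build the list
# 1->2->3->4, swap the pairs, and confirm the result is 2->1->4->3.
def build_list(values):
    dummy = tail = ListNode()
    for v in values:
        tail.next = ListNode(v)
        tail = tail.next
    return dummy.next

def to_values(node):
    out = []
    while node:
        out.append(node.val)
        node = node.next
    return out

assert to_values(Solution().swapPairs(build_list([1, 2, 3, 4]))) == [2, 1, 4, 3]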
###Output
_____no_output_____
|
Allfiles/Labs/01-preprocess-data/01-process-data.ipynb
|
###Markdown
Preprocess the data with RAPIDS using GPUsIn this notebook, you'll be using a subset of high-dimensional airline data: the Airline Service Quality Performance dataset, distributed by the U.S. Bureau of Transportation Statistics. 1987-2021. https://www.bts.dot.gov/browse-statistical-products-and-data/bts-publications/airline-service-quality-performance-234-time)This dataset is open source and provided on an ongoing basis by the U.S. Bureau of Transportation Statistics.Each month, the Bureau publishes a new csv file containing all flight information for the prior month. To train a robust machine learning model, you'd want to combine data over multiple years to use as a training dataset. In this exercise, you'll use data of only 10 days for illustration purposes. However, even when working with large amounts of data, the script should execute quickly as it uses cuDF to load and preprocess the data.In addition to the flight data, you'll also be downloading a file containing metadata and geo-coordinates of each airport and a file containing the code mappings for each airline. Airlines and airports rarely change, and as such, these files are static and do not change on a monthly basis. They do, however, contain information that we will later need to be mapped to the full airline dataset. (Megginson, David. "airports.csv", distributed by OurAirports. August 2, 2021. https://ourairports.com/data/airports.csv) Get environment variablesBefore you can submit the job, you have to get all necessary environment variables such as the workspace and environment.
###Code
from azureml.core import Workspace
ws = Workspace.from_config()
from azureml.core import Environment
from azureml.core.runconfig import DockerConfiguration
rapidsai_env = Environment.get(workspace=ws, name="rapids-mlflow")
d_config = DockerConfiguration(arguments=['-it'])
###Output
_____no_output_____
###Markdown
Define the configuration and submit the runNow that you have defined all necessary variables, you can define the script run configuration and submit the run.**Warning!** Change the value of the compute_target variable to your compute cluster name before running the code below!
###Code
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory='script',
script='preprocess-rapids.py',
compute_target="<your-compute-cluster>",
environment=rapidsai_env,
docker_runtime_config=d_config)
###Output
_____no_output_____
###Markdown
To learn what is done during preprocessing, explore the script `preprocess-rapids.py` in the `script` folder.The following cell will initiate the run. Note that first, the compute cluster has to scale up from 0 nodes. Once a node is available, it will execute the script. The execution of the script should be fast and you can see the execution time in the **Details** tab of the **Experiment** run afterwards.
###Code
from azureml.core import Experiment
run = Experiment(ws,'preprocess-data').submit(src)
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
Preprocess the data with RAPIDS using GPUsIn this notebook, you'll be using a subset of high-dimensional airline data: the Airline Service Quality Performance dataset, distributed by the U.S. Bureau of Transportation Statistics. 1987-2021. https://www.bts.dot.gov/browse-statistical-products-and-data/bts-publications/airline-service-quality-performance-234-time)This dataset is open source and provided on an ongoing basis by the U.S. Bureau of Transportation Statistics.Each month, the Bureau publishes a new csv file containing all flight information for the prior month. To train a robust machine learning model, you'd want to combine data over multiple years to use as a training dataset. In this exercise, you'll use data of only 10 days for illustration purposes. However, even when working with large amounts of data, the script should execute quickly as it uses cuDF to load and preprocess the data.In addition to the flight data, you'll also be downloading a file containing metadata and geo-coordinates of each airport and a file containing the code mappings for each airline. Airlines and airports rarely change, and as such, these files are static and do not change on a monthly basis. They do, however, contain information that we will later need to be mapped to the full airline dataset. (Megginson, David. "airports.csv", distributed by OurAirports. August 2, 2021. https://ourairports.com/data/airports.csv) Get environment variablesBefore you can submit the job, you have to get all necessary environment variables such as the workspace and environment.
###Code
from azureml.core import Workspace
ws = Workspace.from_config()
from azureml.core import Environment
from azureml.core.runconfig import DockerConfiguration
rapidsai_env = Environment.get(workspace=ws, name="rapids-mlflow")
d_config = DockerConfiguration(arguments=['-it'])
###Output
_____no_output_____
###Markdown
Define the configuration and submit the runNow that you have defined all necessary variables, you can define the script run configuration and submit the run.**Warning!** Change the value of the compute_target variable to your compute cluster name before running the code below!
###Code
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory='script',
                      script='preprocess-rapids.py',  # same preprocessing script as in the run configuration earlier in this lab
                      compute_target="<your-compute-cluster>",
environment=rapidsai_env,
docker_runtime_config=d_config)
###Output
_____no_output_____
###Markdown
To learn what is done during preprocessing, explore the script `preprocess-rapids.py` in the `script` folder.The following cell will initiate the run. Note that first, the compute cluster has to scale up from 0 nodes. Once a node is available, it will execute the script. The execution of the script should be fast and you can see the execution time in the **Details** tab of the **Experiment** run afterwards.
###Code
from azureml.core import Experiment
run = Experiment(ws,'preprocess-data').submit(src)
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
|
nlp-labs/Day_01/Python_Basic/05_Functions.ipynb
|
###Markdown
> **Copyright (c) 2020 Skymind Holdings Berhad**> **Copyright (c) 2021 Skymind Education Group Sdn. Bhd.**Licensed under the Apache License, Version 2.0 (the "License");you may not use this file except in compliance with the License.You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0/Unless required by applicable law or agreed to in writing, softwaredistributed under the License is distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the License for the specific language governing permissions andlimitations under the License.**SPDX-License-Identifier: Apache-2.0** IntroductionIn this notebook, you will learn what a Python function is and how to construct it. Notebook Content* [Function Definition](Function-Definition)* [Function Invocation](Function-Invocation)* [Parameters](Parameters)* [Return Value](Return-Value)* [Examples](Example-1)* [Challenges](Challenge-1)
###Code
# Function without parameter
def printHello():
print("Hello")
print("Nice to meet you")
# Function with parameter
def greeting(name):
print("Hello", name)
print("Nice to meet you")
###Output
_____no_output_____
###Markdown
Function Invocation- We need a **function call** to execute the function- To invoke a function, put **function name** followed by **parentheses**- **Parenthesis** is the required syntax- Input **required arguments** if any- **IMPORTANT**: Function must be declared before it is invoked function_name() Function Invocation
###Code
print(type(printHello))
print(type(printHello()))
# This code will run into error
# greeting()
###Output
_____no_output_____
###Markdown
Parameters- A parameter is the **information needed** by the function to execute the sequential flow (function body)- If there are multiple parameters needed, use **comma(,)** to separate the parameter names         def function_name(parameter_1, parameter_2, parameter_3):             function_body Parameters and arguments are quite similar in general, but usually we **use "parameter" in the function declaration** while **"argument" is used at function invocation**.
###Code
def displaySum(n1, n2):
print(n1, "+", n2, "=", n1 + n2)
displaySum(5, 6)
###Output
5 + 6 = 11
###Markdown
Return Value- A function can either return a value or nothing (*Void* / *None*)- A function with a return value is called a **fruitful function** (Python jargon)
###Code
def add(n1, n2):
return n1 + n2
def minus(n1, n2):
return n1 - n2
def isEven(num):
return num % 2 == 0
print(add(10, 30))
print(minus(30, 10))
print(isEven(minus(10, 8)))
###Output
40
20
True
###Markdown
Example 1Write a function called *subtract_three* that takes an **integer or any number** as input, and **returns that number minus three**.
###Code
def subtract_three(num):
return num - 3
###Output
_____no_output_____
###Markdown
Example 2Write a function called *decision* that takes **a string** as input, and then **checks the number of characters**. If it has **over 17 characters**, return โThis is a long stringโ, if it is **shorter or has 17 characters**, return โThis is a short stringโ.
###Code
def decision(a_str):
if len(a_str) > 17:
return "This is a long string"
else:
return "This is a short string"
###Output
_____no_output_____
###Markdown
Example 3Write a function that will return the **number of digits** in an integer.
###Code
def numDigits(n):
return len(str(n))
print(numDigits(10000))
###Output
5
###Markdown
Example 4Write a function that **mirrors its string argument**, generating a string containing the original string and the string backwards.
###Code
def reverse(a_str):
return a_str[::-1]
# [<start_index> : <stop_index> : step]
def mirror_1(a_str):
return a_str + reverse(a_str)
print(mirror_1("Hello"))
def mirror_2(a_str):
return a_str + "".join(reversed(a_str))
print(mirror_2("Hello"))
###Output
HelloolleH
###Markdown
Challenge 1Write a function that **removes all occurrences** of a given letter from a string.Create your own function.
###Code
def removeLetter(a_str, letter):
temp = ""
for char in a_str:
if char != letter:
temp += char
return temp
print(removeLetter("CSDISCOVERY", "S"))
###Output
CDICOVERY
###Markdown
Challenge 2Write a function *replace(s, old, new)* that replaces all occurences of **old** with **new** in a string **s** test(replace('Mississippi', 'i', 'I'), 'MIssIssIppI') s = 'I love spom! Spom is my favorite food. Spom, spom, spom, yum!' test(replace(s, 'om', 'am'), 'I love spam! Spam is my favorite food. Spam, spam, spam, yum!') test(replace(s, 'o', 'a'), 'I lave spam! Spam is my favarite faad. Spam, spam, spam, yum!')*Hint*: use the *split* and *join* methods
###Code
def replace(s, old, new):
arr = s.split(old)
return new.join(arr)
s = 'I love spom! Spom is my favorite food. Spom, spom, spom, yum!'
print(replace(s, 'om', 'am'))
###Output
I love spam! Spam is my favorite food. Spam, spam, spam, yum!
###Markdown
Challenge 3Write a Python function that will **generate a list of 100 random integers** between 0 and 1000 and another function to **return the maximum value**.Constraint: Two functions are required
###Code
import random
def randomList(total, min, max):
temp = []
for _ in range(total):
temp.append(random.randint(min, max))
return temp
def getMaximum(lst):
max = 0
for num in lst:
if num > max:
max = num
return max
randList = randomList(100, 0, 1000)
print(randList)
print(getMaximum(randList))
###Output
[919, 282, 184, 912, 245, 388, 41, 855, 634, 371, 73, 602, 155, 107, 584, 0, 987, 931, 221, 291, 627, 650, 474, 739, 988, 248, 32, 575, 615, 936, 348, 475, 547, 782, 330, 444, 566, 303, 269, 553, 592, 1, 774, 958, 926, 655, 997, 828, 131, 97, 606, 214, 402, 648, 945, 944, 574, 780, 580, 660, 942, 979, 696, 835, 619, 450, 263, 728, 773, 591, 497, 879, 347, 104, 308, 694, 932, 784, 900, 685, 867, 651, 910, 635, 763, 109, 179, 284, 110, 380, 851, 557, 148, 993, 189, 531, 55, 56, 347, 822]
997
###Markdown
Challenge 4Write a function *sum_of_even_squares(xs)* that computes the **sum of the squares of the even numbers** in the list xs. For example, sum_of_squares([2, 3, 4]) should return 4+16 which is 20
###Code
def sum_of_even_squares(xs):
sum = 0
for x in xs:
if x % 2 == 0:
sum += x**2
return sum
print(sum_of_even_squares([3,4,5,6,7]))
###Output
52
|
windows_store_apps/Windows_Store_Apps.ipynb
|
###Markdown
###Code
import pandas as pd
df = pd.read_csv('/content/drive/My Drive/Colab Notebooks/data/msft.csv', index_col = 'Date')
df.head()
# Data Cleaning
df_clean = df
df_clean['Price'] = df['Price'].str.strip('₹') # strip the currency (₹) symbol from the Price column
df_clean['Price'] = df_clean['Price'].replace('Free', '0') # Replace the "Free" entries with 0
df_clean['Price'] = df_clean['Price'].str.replace(',', '') # remove the thousands separator that was blocking the type cast
df_clean['Price'] = df_clean['Price'].astype('float64')
df_clean = df_clean.dropna() # Get rid of the complete NaN row of record
df_clean.tail(50)
# df_clean
df_clean.describe()
df.groupby('Category')['Price'].mean()
###Output
_____no_output_____
|
Notebooks/spikeinterface_examples/plot_2_using_the_launcher.ipynb
|
###Markdown
Use the spike sorting launcher==============================This example shows how to use the spike sorting launcher. The launcher allows you to parameterize the sorter name and to run several sorters on one or multiple recordings.
###Code
import spikeinterface.extractors as se
import spikeinterface.sorters as ss
###Output
_____no_output_____
###Markdown
First, let's create the usual toy example:
###Code
recording, sorting_true = se.example_datasets.toy_example(duration=10, seed=0)
###Output
_____no_output_____
###Markdown
The launcher enables you to call any spike sorter with the same functions: :code:`run_sorter` and :code:`run_sorters`. For running multiple sorters on the same recording extractor or a collection of them, the :code:`run_sorters` function can be used. Let's first see how to run a single sorter, for example, Klusta:
###Code
# The sorter name can be now a parameter, e.g. chosen with a command line interface or a GUI
sorter_name = 'klusta'
sorting_KL = ss.run_sorter(sorter_name_or_class='klusta', recording=recording, output_folder='my_sorter_output')
print(sorting_KL.get_unit_ids())
###Output
_____no_output_____
###Markdown
This will launch the klusta sorter on the recording object.You can also run multiple sorters on the same recording:
###Code
recording_list = [recording]
sorter_list = ['klusta', 'mountainsort4', 'tridesclous']
sorting_output = ss.run_sorters(sorter_list, recording_list, working_folder='tmp_some_sorters', mode='overwrite')
###Output
_____no_output_____
###Markdown
The 'mode' argument allows you to 'overwrite' the 'working_folder' (if it exists), 'raise' an Exception, or 'keep' the folder and skip the spike sorting run. The 'sorting_output' is a dictionary that has (recording, sorter) pairs as keys and the corresponding :code:`SortingExtractor` as values. It can be accessed as follows:
###Code
for (rec, sorter), sort in sorting_output.items():
print(rec, sorter, ':', sort.get_unit_ids())
###Output
_____no_output_____
|
lgb.ipynb
|
###Markdown
Feature Engineering Groupby and aggregate
###Code
pop = data.groupby('population').mean()
pop.drop('target', 1, inplace = True)
cols = []
for i in pop.columns:
if i != 'population':
pop[i+'_population_mean_all'] = pop[i]
pop.drop(i, 1, inplace = True)
cols.append(i+'_population_mean_all')
data = pd.merge(data, pop, on='population', how = 'left')
for col in cols: data[col+'_freq'] = data[col].map(data[col].value_counts().to_dict())/len(data)
data['std_country_pop'] = data.groupby('country')['population'].transform('std')
a = data.assign(
atm_cnt = np.where(data['Q1']==1,data.Q1,0),
Q12_sum = np.where(data['Q12']==1,data.Q12,0),
Q8_sum = np.where(data['Q8']==1,data.Q8,0),
Q9_sum = np.where(data['Q9']==1,data.Q9,0),
Q27_sum = np.where(data['Q27']==1,data.Q27,0),
Q19_sum = np.where(data['Q19']==1,data.Q19,0)
).groupby('country').agg({'atm_cnt':sum, 'Q12_sum':sum, 'Q8_sum':sum, 'Q9_sum':sum, 'Q27_sum':sum, 'Q19_sum':sum})
a.head()
data = data.merge(a, on='country', how='left')
data['mean_age_country'] = data.groupby('country')['age'].transform('mean')
data['max_age_country'] = data.groupby('country')['age'].transform('max')
data['min_age_country'] = data.groupby('country')['age'].transform('min')
data['mean_age_region'] = data.groupby('region')['age'].transform('mean')
data['max_age_region'] = data.groupby('region')['age'].transform('max')
data['min_age_region'] = data.groupby('region')['age'].transform('min')
categoricals_features = ['Q1', 'Q6', 'Q10a', 'Q10b', 'Q11', 'Q12', 'Q13a',
'Q14', 'Q15', 'Q16', 'Q17a', 'Q17b', 'Q19', 'Q20', 'Q21', 'Q22',
'Q24', 'Q25', 'Q26', 'owns_mobile']
for col in categoricals_features:
for agg_type in ['mean','std']:
new_col_name = col+'_age_'+agg_type
temp_df = data.groupby([col])['age'].agg([agg_type]).reset_index().rename(
columns={agg_type: new_col_name})
temp_df.index = list(temp_df[col])
temp_df = temp_df[new_col_name].to_dict()
data[new_col_name] = data[col].map(temp_df)
###Output
_____no_output_____
###Markdown
Frequency encode
###Code
i_cols2 = ['country','region','population','Q1', 'Q6', 'Q10a', 'Q10b', 'Q11', 'Q12', 'Q13a',
'Q14', 'Q15', 'Q16', 'Q17a', 'Q17b', 'Q19', 'Q20', 'Q21', 'Q22',
'Q24', 'Q25', 'Q26', 'owns_mobile']
for col in i_cols2:
fq_encode = data[col].value_counts().to_dict()
data[col+'_fq_enc'] = data[col].map(fq_encode)
###Output
_____no_output_____
###Markdown
Bin features
###Code
def create_bin_features(input_df, features):
for bin_fe in features:
print("Binning: ",bin_fe)
input_df[bin_fe+"_BINS"] = pd.qcut(input_df[bin_fe], 5, labels=False, duplicates='drop')
return input_df
binning_num_features = ['population']
data = create_bin_features(data, binning_num_features)
def create_bin_features(input_df, features):
for bin_fe in features:
print("Binning: ",bin_fe)
input_df[bin_fe+"_BINS"] = pd.qcut(input_df[bin_fe], 3, labels=False, duplicates='drop')
return input_df
binning_num_features = ['age']
data = create_bin_features(data, binning_num_features)
data['max_region_pop'] = data.groupby('region')['population'].transform('max')
###Output
_____no_output_____
###Markdown
Label encode
###Code
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
data['region'] = le.fit_transform(data['region'].fillna('missing'))
le = LabelEncoder()
data['country'] = le.fit_transform(data['country'].fillna('missing'))
data['country_target'] = data.groupby('country')['target'].transform('mean')
data.drop("population",axis = 1,inplace = True)
###Output
_____no_output_____
###Markdown
Modelling Split data
###Code
train = data[:len_train]
test = data[len_train:]
X = train.drop(columns=['target', 'ID','Q1', 'Q16', 'std_country_pop', 'min_age_country', 'max_age_region', 'min_age_region', 'Q1_age_std', 'Q6_age_std', 'Q10a_age_mean', 'Q10a_age_std', 'Q14_age_std', 'Q21_age_std', 'Q22_age_mean', 'Q22_age_std', 'Q25_age_mean', 'Q25_age_std', 'Q1_fq_enc', 'Q10b_fq_enc', 'Q13a_fq_enc', 'Q16_fq_enc', 'Q17a_fq_enc', 'Q17b_fq_enc', 'Q21_fq_enc', 'Q22_fq_enc', 'Q26_fq_enc', 'owns_mobile_fq_enc', 'population_BINS', 'max_region_pop'])
y = train['target']
tes = test.drop(columns=['target', 'ID','Q1', 'Q16', 'std_country_pop', 'min_age_country', 'max_age_region', 'min_age_region', 'Q1_age_std', 'Q6_age_std', 'Q10a_age_mean', 'Q10a_age_std', 'Q14_age_std', 'Q21_age_std', 'Q22_age_mean', 'Q22_age_std', 'Q25_age_mean', 'Q25_age_std', 'Q1_fq_enc', 'Q10b_fq_enc', 'Q13a_fq_enc', 'Q16_fq_enc', 'Q17a_fq_enc', 'Q17b_fq_enc', 'Q21_fq_enc', 'Q22_fq_enc', 'Q26_fq_enc', 'owns_mobile_fq_enc', 'population_BINS', 'max_region_pop'])
lgb_params = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'auc',
'learning_rate': 0.03,
'subsample': 1,
'colsample_bytree': 0.6,
'reg_alpha': 3,
'reg_lambda': 4,
'scale_pos_weight': 1,
'n_estimators': 5000,
'silent': -1,
'verbose': -1,
'max_depth': -1,
'seed':2021,
}
# %%time
testsplit_store=[]
test_store=[]
from sklearn.model_selection import KFold,StratifiedKFold, TimeSeriesSplit, GroupKFold
from sklearn.metrics import mean_squared_error, f1_score, log_loss, roc_auc_score
from lightgbm import LGBMRegressor, LGBMClassifier
oofs = np.zeros((len(train)))
y_oof = np.zeros((len(train)))
preds = np.zeros((len(test)))
fold = StratifiedKFold(n_splits=10, shuffle=True, random_state=22)
i=1
for train_index, test_index in fold.split(X, y):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
y_oof[test_index] = y_test
lgb = LGBMClassifier(**lgb_params)
lgb.fit(X_train,y_train,eval_set=[(X_train,y_train),(X_test, y_test)], early_stopping_rounds=300,verbose=500)
predict = lgb.predict_proba(X_test)[:,1]
oofs[test_index] = predict
print("err: ", roc_auc_score(y_test,predict))
testsplit_store.append(roc_auc_score(y_test,predict, average='weighted'))
pred = lgb.predict_proba(tes)[:,1]
test_store.append(pred)
preds += pred/10
np.mean(testsplit_store)
ss['target'] = preds
ss.to_csv('lgb.csv', index=False)
fea_imp = pd.DataFrame({'imp':lgb.feature_importances_, 'col': X.columns})
fea_imp = fea_imp.sort_values(['imp', 'col'], ascending=[True, False]).iloc[-30:]
_ = fea_imp.plot(kind='barh', x='col', y='imp', figsize=(20, 10))
###Output
_____no_output_____
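###Markdown
The out-of-fold predictions collected in the cross-validation loop above can also be scored in one shot, giving a single cross-validated AUC to compare against the mean of the per-fold scores. A minimal sketch using the arrays already filled inside the loop:
###Code
# Overall out-of-fold AUC versus the mean per-fold AUC.
oof_auc = roc_auc_score(y_oof, oofs)
print(f'OOF AUC: {oof_auc:.5f}, mean fold AUC: {np.mean(testsplit_store):.5f}')
###Output
_____no_output_____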
|
N03_Advanced.ipynb
|
###Markdown
Advanced Concepts Copyright notice: This version (c) 2019 Fabian Offert, [MIT License](LICENSE). Learn more- Official Python tutorial: https://docs.python.org/3/tutorial/index.html- Python docs: https://docs.python.org/3- Official NumPy tutorial: https://docs.scipy.org/doc/numpy/user/quickstart.html- NumPy docs: https://docs.scipy.org/doc/numpy/reference/index.html Dictionaries Dictionaries extend lists in a useful way: they introduce key-value pairs, which allow indexing an item by name rather than by position. Dictionaries in Python use curly brackets; item names are strings, and item values follow the name, separated by a colon:
###Code
empty_dict = {}
fruit = {'apples':1, 'bananas': 2}
###Output
_____no_output_____
###Markdown
We can get an item from the second dictionary like this:
###Code
number_of_apples = fruit['apples']
print(number_of_apples)
###Output
1
###Markdown
To loop over a dictionary, Python 3 offers the `items()` function. For instance:
###Code
for k, v in fruit.items():
print(k, v)
###Output
apples 1
bananas 2
###Markdown
will print the contents of the dictionary. Keys and values can be accessed separately. The value of a key can simply be overwritten:
###Code
fruit['apples'] = 15
###Output
_____no_output_____
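###Markdown
As noted above, keys and values can also be accessed separately, and `get()` returns a default when a key is missing; a short sketch:
###Code
print(list(fruit.keys()))        # ['apples', 'bananas']
print(list(fruit.values()))      # [15, 2]
print(fruit.get('cherries', 0))  # 0, because 'cherries' is not a key
###Output
_____no_output_____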
###Markdown
To check whether a single key is in the dictionary, use the `in` keyword:
###Code
if 'apples' in fruit:
print('We have', fruit['apples'], 'apples.')
###Output
We have 15 apples.
###Markdown
Classes Python is a multi-paradigm programming language. It is not strictly [imperative](https://en.wikipedia.org/wiki/Imperative_programming) or [declarative](https://en.wikipedia.org/wiki/Declarative_programming) but combines features from both [paradigms](https://en.wikipedia.org/wiki/Programming_paradigm). The building-block-like structure of neural networks (or rather the useful abstraction of a building-block-like structure), however, lends itself to an object-oriented approach. From Wikipedia:> Object-oriented programming (OOP) is a programming paradigm based on the concept of "objects", which may contain data, in the form of fields, often known as attributes; and code, in the form of procedures, often known as methods. A feature of objects is that an object's procedures can access and often modify the data fields of the object with which they are associated (objects have a notion of "this" or "self"). In OOP, computer programs are designed by making them out of objects that interact with one another. There is significant diversity of OOP languages, but the most popular ones are class-based, meaning that objects are instances of classes, which typically also determine their type. Objects are thus arbitrary structures consisting of methods and attributes. We can regard classes as recipes for building objects, or, conversely, we can regard them as abstractions of (future) objects. Classes define which initial attributes and which methods an object will have at its disposal. Objects are *instantiated* from a class. Classes have several predefined methods starting and ending with double underscores. The most commonly used is `__init__`, which is called once when an object is created. Classes - and thus objects - define their own scope, of course. Hence, all class *methods* must take the `self` argument to pass down a reference to the calling object. An example class `Apple` that makes use of all these techniques could be defined like this:
###Code
class Apple():
color = 'red'
diameter = 4
price = '0.0'
def __init__(self):
self.price = '1.99'
def eat(self):
self.diameter-=1
if (self.diameter <=0):
self.diameter = 0
###Output
_____no_output_____
###Markdown
We can now construct an object from the `Apple` class and access its attributes and methods:
###Code
a = Apple()
print(a.color)
print(a.price)
print(a.diameter)
a.eat()
print(a.diameter)
###Output
red
1.99
4
3
###Markdown
One important technique in OOP is *inheritance*. Inheritance means that we can create new classes that extend existing classes. For instance, climbing further down the ladder of abstraction, we could define a subclass `FujiApple`, which will have all the properties of a regular `Apple` but be more expensive. The base class for a class inheriting properties is given in parentheses behind the class name. The following class will have the same attributes and methods as the base class:
###Code
class FujiApple(Apple):
def __init__(self):
self.price = 2.99
b = FujiApple()
print(b.color)
print(b.price)
print(b.diameter)
b.eat()
print(b.diameter)
###Output
red
2.99
4
3
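###Markdown
Because `FujiApple` inherits from `Apple`, instances of the subclass are also instances of the base class, which is easy to verify with `isinstance`:
###Code
print(isinstance(b, FujiApple))  # True
print(isinstance(b, Apple))      # True, since FujiApple inherits from Apple
print(isinstance(a, FujiApple))  # False, a plain Apple is not a FujiApple
###Output
_____no_output_____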
###Markdown
Numpy[NumPy](http://www.numpy.org/), an extension of the Python programming language for high-performance numerical computation, is an important part of all major machine learning frameworks (TensorFlow and PyTorch). Importantly, NumPy re-introduces multidimensional arrays with predefined *types* to Python, which helps particularly with machine learning tasks. To quote the NumPy docs:> NumPy's main object is the homogeneous multidimensional array. It is a table of elements (usually numbers), all of the same type, indexed by a tuple of positive integers. In NumPy dimensions are called axes. To import NumPy and create a new 3x3x3 *NumPy 64 bit floating point array* initialized with zeros, we can write:
###Code
import numpy as np
a = np.zeros([3, 3, 3], dtype=np.float64)
###Output
_____no_output_____
###Markdown
The most important property of a NumPy array, next to its content, is its *shape*. In machine learning, we operate on large, high-dimensional arrays of values that constantly change their shape. Thus, it is important to occasionally check what values, shape, and type an array contains:
###Code
print(a)
print(a.shape)
print(a.dtype)
###Output
[[[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]]
[[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]]
[[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]]]
(3, 3, 3)
float64
###Markdown
NumPy re-implements many Python functions in a more efficient way. For instance, the generation of random numbers in NumPy is provided by the `np.random.random()` function and its cousins. Moreover, most NumPy functions allow passing a `size` parameter (sometimes implicitly, sometimes explicitly) to create matrices directly from functions:
###Code
for x in range(10):
print(np.random.random(), np.random.randint(10))
r = np.random.random(size=(3,3))
print(r)
###Output
0.45388110144851956 2
0.9718679099913947 6
0.5245541559766417 0
0.33357366908431285 7
0.10935008593551598 2
0.2958671711506421 0
0.9671515336451018 7
0.4328181997414665 9
0.9397346739259217 9
0.35271746844916485 3
[[0.36558192 0.32759153 0.35512857]
[0.36180645 0.22334446 0.55762825]
[0.7286896 0.25111048 0.61387229]]
###Markdown
The more complex multidimensional arrays are, the more important slicing becomes. For instance, to return the n-th z-axis plane of our three-dimensional array `a`, we can write:
###Code
n=1
print(a[:,:,n])
###Output
[[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]]
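###Markdown
Slicing generalizes to any axis and to index ranges, and reshaping is just as common when preparing data; a short sketch on the same array:
###Code
print(a[0])                    # first plane along the first axis, shape (3, 3)
print(a[:, 1, :])              # middle plane along the second axis, shape (3, 3)
print(a[0, 0:2, 0:2])          # a 2x2 sub-block of the first plane
print(a.reshape(9, 3).shape)   # (9, 3): the same 27 values in a different shape
###Output
_____no_output_____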
|
Model backlog/Models/Inference/121-cassava-leaf-inf-effnetb3-scl-augment-heavy512.ipynb
|
###Markdown
Dependencies
###Code
!pip install --quiet /kaggle/input/kerasapplications
!pip install --quiet /kaggle/input/efficientnet-git
import warnings, glob
from tensorflow.keras import Sequential, Model
import efficientnet.tfkeras as efn
from cassava_scripts import *
seed = 0
seed_everything(seed)
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
Hardware configuration
###Code
# TPU or GPU detection
# Detect hardware, return appropriate distribution strategy
strategy, tpu = set_up_strategy()
AUTO = tf.data.experimental.AUTOTUNE
REPLICAS = strategy.num_replicas_in_sync
print(f'REPLICAS: {REPLICAS}')
###Output
REPLICAS: 1
###Markdown
Model parameters
###Code
BATCH_SIZE = 8 * REPLICAS
HEIGHT = 512
WIDTH = 512
CHANNELS = 3
N_CLASSES = 5
TTA_STEPS = 0 # Do TTA if > 0
###Output
_____no_output_____
###Markdown
Augmentation
###Code
def data_augment(image, label):
p_spatial = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
p_rotate = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
# p_pixel_1 = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
# p_pixel_2 = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
# p_pixel_3 = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
p_crop = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
# Flips
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
if p_spatial > .75:
image = tf.image.transpose(image)
# Rotates
if p_rotate > .75:
image = tf.image.rot90(image, k=3) # rotate 270ยบ
elif p_rotate > .5:
image = tf.image.rot90(image, k=2) # rotate 180ยบ
elif p_rotate > .25:
image = tf.image.rot90(image, k=1) # rotate 90ยบ
# # Pixel-level transforms
# if p_pixel_1 >= .4:
# image = tf.image.random_saturation(image, lower=.7, upper=1.3)
# if p_pixel_2 >= .4:
# image = tf.image.random_contrast(image, lower=.8, upper=1.2)
# if p_pixel_3 >= .4:
# image = tf.image.random_brightness(image, max_delta=.1)
# Crops
if p_crop > .7:
if p_crop > .9:
image = tf.image.central_crop(image, central_fraction=.7)
elif p_crop > .8:
image = tf.image.central_crop(image, central_fraction=.8)
else:
image = tf.image.central_crop(image, central_fraction=.9)
elif p_crop > .4:
crop_size = tf.random.uniform([], int(HEIGHT*.8), HEIGHT, dtype=tf.int32)
image = tf.image.random_crop(image, size=[crop_size, crop_size, CHANNELS])
# # Crops
# if p_crop > .6:
# if p_crop > .9:
# image = tf.image.central_crop(image, central_fraction=.5)
# elif p_crop > .8:
# image = tf.image.central_crop(image, central_fraction=.6)
# elif p_crop > .7:
# image = tf.image.central_crop(image, central_fraction=.7)
# else:
# image = tf.image.central_crop(image, central_fraction=.8)
# elif p_crop > .3:
# crop_size = tf.random.uniform([], int(HEIGHT*.6), HEIGHT, dtype=tf.int32)
# image = tf.image.random_crop(image, size=[crop_size, crop_size, CHANNELS])
return image, label
###Output
_____no_output_____
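###Markdown
To sanity-check `data_augment` outside of `get_dataset`, it can be applied to a random tensor standing in for a real image; a minimal sketch (the dummy tensor is only a placeholder):
###Code
# Quick check of the augmentation on a dummy image; the label is just passed through.
dummy_img = tf.random.uniform([HEIGHT, WIDTH, CHANNELS], 0, 1, dtype=tf.float32)
aug_img, _ = data_augment(dummy_img, None)
print(dummy_img.shape, '->', aug_img.shape)  # crops may shrink the spatial size before resize_image
###Output
_____no_output_____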
###Markdown
Auxiliary functions
###Code
# Datasets utility functions
def resize_image(image, label):
image = tf.image.resize(image, [HEIGHT, WIDTH])
image = tf.reshape(image, [HEIGHT, WIDTH, CHANNELS])
return image, label
def process_path(file_path):
name = get_name(file_path)
img = tf.io.read_file(file_path)
img = decode_image(img)
img, _ = scale_image(img, None)
# img = center_crop(img, HEIGHT, WIDTH)
return img, name
def get_dataset(files_path, shuffled=False, tta=False, extension='jpg'):
dataset = tf.data.Dataset.list_files(f'{files_path}*{extension}', shuffle=shuffled)
dataset = dataset.map(process_path, num_parallel_calls=AUTO)
if tta:
dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
dataset = dataset.map(resize_image, num_parallel_calls=AUTO)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
###Output
_____no_output_____
###Markdown
Load data
###Code
database_base_path = '/kaggle/input/cassava-leaf-disease-classification/'
submission = pd.read_csv(f'{database_base_path}sample_submission.csv')
display(submission.head())
TEST_FILENAMES = tf.io.gfile.glob(f'{database_base_path}test_tfrecords/ld_test*.tfrec')
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
print(f'GCS: test: {NUM_TEST_IMAGES}')
model_path_list = glob.glob('/kaggle/input/121-cassava-leaf-effnetb3-scl-augment-heavy-512/*.h5')
model_path_list.sort()
print('Models to predict:')
print(*model_path_list, sep='\n')
###Output
Models to predict:
/kaggle/input/121-cassava-leaf-effnetb3-scl-augment-heavy-512/model_0.h5
###Markdown
Model
###Code
class UnitNormLayer(L.Layer):
"""
Normalize vectors (euclidean norm) in batch to unit hypersphere.
"""
def __init__(self, **kwargs):
super(UnitNormLayer, self).__init__(**kwargs)
def call(self, input_tensor):
norm = tf.norm(input_tensor, axis=1)
return input_tensor / tf.reshape(norm, [-1, 1])
def encoder_fn(input_shape):
inputs = L.Input(shape=input_shape, name='input_image')
base_model = efn.EfficientNetB3(input_tensor=inputs,
include_top=False,
weights=None,
pooling='avg')
norm_embeddings = UnitNormLayer()(base_model.output)
model = Model(inputs=inputs, outputs=norm_embeddings)
return model
def classifier_fn(input_shape, N_CLASSES, encoder, trainable=True):
for layer in encoder.layers:
layer.trainable = trainable
unfreeze_model(encoder) # unfreeze all layers except "batch normalization"
inputs = L.Input(shape=input_shape, name='input_image')
features = encoder(inputs)
features = L.Dropout(.5)(features)
features = L.Dense(512, activation='relu')(features)
features = L.Dropout(.5)(features)
output = L.Dense(N_CLASSES, activation='softmax', name='output')(features)
output_healthy = L.Dense(1, activation='sigmoid', name='output_healthy')(features)
output_cmd = L.Dense(1, activation='sigmoid', name='output_cmd')(features)
model = Model(inputs=inputs, outputs=[output, output_healthy, output_cmd])
return model
with strategy.scope():
encoder = encoder_fn((None, None, CHANNELS))
model = classifier_fn((None, None, CHANNELS), N_CLASSES, encoder, trainable=True)
model.summary()
###Output
Model: "model_1"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_image (InputLayer) [(None, None, None, 0
__________________________________________________________________________________________________
model (Model) (None, 1536) 10783528 input_image[0][0]
__________________________________________________________________________________________________
dropout (Dropout) (None, 1536) 0 model[1][0]
__________________________________________________________________________________________________
dense (Dense) (None, 512) 786944 dropout[0][0]
__________________________________________________________________________________________________
dropout_1 (Dropout) (None, 512) 0 dense[0][0]
__________________________________________________________________________________________________
output (Dense) (None, 5) 2565 dropout_1[0][0]
__________________________________________________________________________________________________
output_healthy (Dense) (None, 1) 513 dropout_1[0][0]
__________________________________________________________________________________________________
output_cmd (Dense) (None, 1) 513 dropout_1[0][0]
==================================================================================================
Total params: 11,574,063
Trainable params: 11,399,471
Non-trainable params: 174,592
__________________________________________________________________________________________________
###Markdown
Test set predictions
###Code
files_path = f'{database_base_path}test_images/'
test_size = len(os.listdir(files_path))
test_preds = np.zeros((test_size, N_CLASSES))
for model_path in model_path_list:
print(model_path)
K.clear_session()
model.load_weights(model_path)
if TTA_STEPS > 0:
test_ds = get_dataset(files_path, tta=True).repeat()
ct_steps = TTA_STEPS * ((test_size/BATCH_SIZE) + 1)
preds = model.predict(test_ds, steps=ct_steps, verbose=1)[0][:(test_size * TTA_STEPS)]
preds = np.mean(preds.reshape(test_size, TTA_STEPS, N_CLASSES, order='F'), axis=1)
test_preds += preds / len(model_path_list)
else:
test_ds = get_dataset(files_path, tta=False)
x_test = test_ds.map(lambda image, image_name: image)
test_preds += model.predict(x_test)[0] / len(model_path_list)
test_preds = np.argmax(test_preds, axis=-1)
test_names_ds = get_dataset(files_path)
image_names = [img_name.numpy().decode('utf-8') for img, img_name in iter(test_names_ds.unbatch())]
submission = pd.DataFrame({'image_id': image_names, 'label': test_preds})
submission.to_csv('submission.csv', index=False)
display(submission.head())
###Output
_____no_output_____
|
notebooks/.ipynb_checkpoints/basepair_analysis-checkpoint.ipynb
|
###Markdown
Part 1: Initialize
###Code
time_interval = '0_1us' # '0_1us', '1_2us', '2_3us', '3_4us', '4_5us'
bp_agent = BasePairAgent(rootfolder, time_interval)
###Output
_____no_output_____
###Markdown
Part 2: Histogram for parameters
###Code
figsize = (12, 4)
bins = 100
sele_para = 'shear' #'shear', 'buckle', 'stretch', 'propeller', 'stagger', 'opening'
xlines = [-1, 0, 1]
xlim = (-2,2)
ylim = None
fig, axes = bp_agent.histogram_three_groups(figsize, sele_para, bins, xlines, xlim, ylim)
plt.tight_layout()
plt.savefig(f'{sele_para}.svg')
plt.show()
figsize = (12, 4)
bins = 100
sele_para = 'stretch' #'shear', 'buckle', 'stretch', 'propeller', 'stagger', 'opening'
xlines = [-0.5, 0, 0.5]
xlim = (-1,1)
ylim = None
fig, axes = bp_agent.histogram_three_groups(figsize, sele_para, bins, xlines, xlim, ylim)
plt.tight_layout()
plt.savefig(f'{sele_para}.svg')
plt.show()
figsize = (12, 4)
bins = 100
sele_para = 'buckle' #'shear', 'buckle', 'stretch', 'propeller', 'stagger', 'opening'
xlines = [-40, -20, 0, 20, 40]
xlim = (-50, 45)
ylim = None
fig, axes = bp_agent.histogram_three_groups(figsize, sele_para, bins, xlines, xlim, ylim)
plt.tight_layout()
plt.savefig(f'{sele_para}.svg')
plt.show()
figsize = (12, 4)
bins = 100
sele_para = 'propeller' #'shear', 'buckle', 'stretch', 'propeller', 'stagger', 'opening'
xlines = [-40,-30,-20,-10, 0, 10, 20]
xlim = (-45,35)
ylim = None
fig, axes = bp_agent.histogram_three_groups(figsize, sele_para, bins, xlines, xlim, ylim)
plt.tight_layout()
plt.savefig(f'{sele_para}.svg')
plt.show()
figsize = (12, 4)
bins = 100
sele_para = 'stagger' #'shear', 'buckle', 'stretch', 'propeller', 'stagger', 'opening'
xlines = [-1, 0, 1]
xlim = (-2, 2)
ylim = None
fig, axes = bp_agent.histogram_three_groups(figsize, sele_para, bins, xlines, xlim, ylim)
plt.tight_layout()
plt.savefig(f'{sele_para}.svg')
plt.show()
figsize = (12, 4)
bins = 100
sele_para = 'opening' #'shear', 'buckle', 'stretch', 'propeller', 'stagger', 'opening'
xlines = []
xlim = (-25,25)
ylim = None
fig, axes = bp_agent.histogram_three_groups(figsize, sele_para, bins, xlines, xlim, ylim)
plt.tight_layout()
plt.savefig(f'{sele_para}.svg')
plt.show()
###Output
_____no_output_____
|
Tensorflow_2X_Notebooks/Demo142_Autoencoder_Basic_MNIST_Tuned.ipynb
|
###Markdown
**Spit some [tensor] flow** We need to learn the intricacies of TensorFlow to master deep learning.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import cv2
print(tf.__version__)
from tensorflow.keras.layers import Input, Conv2D, Dropout, Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.datasets import mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train, X_test = X_train / 255.0 , X_test / 255.0
print(X_train.shape)
print(X_test.shape)
# X_train = np.expand_dims(X_train, -1)
# X_test = np.expand_dims(X_test, -1)
print(X_train.shape)
print(X_test.shape)
# SHAPE
# N x H x W x Colors
# Colors = 1 for grayscale
# MNIST is grayscale
X_train = X_train.reshape(X_train.shape[0],-1)
X_test = X_test.reshape(X_test.shape[0],-1)
print(X_train.shape)
print(X_test.shape)
classes = len(set(y_train))
print(classes)
X_train[0].shape
input_shape = X_train[0].shape
i_layer = Input(shape = input_shape)
h_layer = Dense(512, activation='relu')(i_layer)
h_layer = Dense(254, activation='relu')(h_layer)
h_layer = Dense(128, activation='relu')(h_layer)
h_layer = Dense(254, activation='relu')(h_layer)
h_layer = Dense(512, activation='relu')(h_layer)
o_layer = Dense(X_train[0].shape[0], activation=None)(h_layer)
model = Model(i_layer, o_layer)
model.compile(optimizer='adam',
loss = "mse")
report = model.fit(X_train, X_train, epochs=40, batch_size=200)
idx = np.random.randint(0, len(X_train))
fig,ax = plt.subplots(1,2,figsize=(10,4))
ax[0].imshow(X_train[idx].reshape(28,28), cmap='gray')
X_decoded = model.predict(X_train[[idx]])
ax[1].imshow(X_decoded.reshape(28,28), cmap='gray')
###Output
_____no_output_____
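###Markdown
To use the compressed representation on its own, the encoder half can be exposed as a separate model. The sketch below rebuilds the same architecture but keeps a handle on the 128-unit bottleneck layer; note that these weights are freshly initialized, not the trained weights of `model` above.
###Code
# Same architecture, but with a named bottleneck so an encoder-only model can be extracted.
i2 = Input(shape=input_shape)
h = Dense(512, activation='relu')(i2)
h = Dense(254, activation='relu')(h)
bottleneck = Dense(128, activation='relu', name='bottleneck')(h)
h = Dense(254, activation='relu')(bottleneck)
h = Dense(512, activation='relu')(h)
o2 = Dense(input_shape[0], activation=None)(h)
autoencoder = Model(i2, o2)
encoder = Model(i2, bottleneck)            # maps 784-dim images to 128-dim codes
print(encoder.predict(X_train[:5]).shape)  # (5, 128)
###Output
_____no_output_____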
|
Forecast-How_Many.ipynb
|
###Markdown
Forecasting Feasible Scope with a Monte Carlo Simulation 'How Many' Context: At our planning meeting, the team wants to know how many items can be completed in a given time frame (a 14-day sprint). Before we commit to a scope and a delivery date, we have to forecast the probability of completing the items in time. Idea: To understand our current delivery capability, we tracked the throughput and cycle times of our items. We can use this data to forecast future throughput. The data points are not bounded by date; i.e., all of the team's data (and only the team's data) is captured. We then summarize over the past `P` days.
###Code
def datesWithoutTime(item):
item['Closed Date'] = datetime.datetime.strptime(item['Closed Date'].strftime('%Y-%m-%d'), '%Y-%m-%d').date()
return item
kanban_data = pd.read_csv(
DATA_FILE_PATH, usecols=['Closed Date', 'Work Item Type'], parse_dates=['Closed Date']
).dropna().transform(datesWithoutTime, 'columns')
kanban_data.head(1)
###Output
_____no_output_____
###Markdown
Analysis: Based on the past throughput per day, a forecast can be created with a Monte Carlo simulation. Throughput is the total number of items completed per day. Calculate Throughput: We sum up the completed items per day and add the missing dates with zero throughput. We plot the throughput per day to get a brief overview of the result.
###Code
throughput = pd.crosstab(
kanban_data['Closed Date'], kanban_data['Work Item Type'], colnames=[None]).reset_index()
throughput['Throughput'] = throughput['User Story']
date_range = pd.date_range(
start=throughput['Closed Date'].min(), end=throughput['Closed Date'].max())
throughput = throughput.set_index('Closed Date').reindex(
date_range).fillna(0).astype(int).rename_axis('Date')
throughput_per_week = pd.DataFrame(
throughput['Throughput'].resample('W-Mon').sum()).reset_index()
ax = throughput_per_week.plot(
x='Date', y='Throughput', linewidth=2.5, figsize=(14, 3), legend=None)
ax.set_title("Throughput per Week", loc='left', fontdict={
'fontsize': 18, 'fontweight': 'semibold'})
ax.set_xlabel('')
ax.set_ylabel('Items Completed')
ax.axhline(y=0, color=lightgrey, alpha=.5);
###Output
_____no_output_____
###Markdown
Run Monte Carlo Simulation 'How Many': Based on the throughput data, we simulate many times how many items can be completed in the given time span. Before we run the simulation, we set the configuration values: * Date range of the data basis (last `P` days) * Number of days to simulate * Number of simulations to run (recommendation: >= 10000). We plot the simulation results to get a brief overview of the distribution of the total items completed in the given time span.
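###Markdown
If the configuration constants `LAST_DAYS` and `SIMULATION_DAYS` have not been defined yet, placeholder values such as the following can be used; these are assumed example values, not recommendations:
###Code
# Assumed placeholder configuration; adjust to your own context.
LAST_DAYS = 90          # date range of the data basis (last P days)
SIMULATION_DAYS = 14    # length of the time frame to simulate (one sprint)
###Output
_____no_output_____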
###Code
SIMULATIONS = 10000
dataset = throughput[['Throughput']].tail(LAST_DAYS).reset_index(drop=True)
samples = [dataset.sample(n=SIMULATION_DAYS, replace=True).sum(
).Throughput for i in range(SIMULATIONS)]
samples = pd.DataFrame(samples, columns=['Items'])
distribution = samples.groupby(['Items']).size().reset_index(name='Frequency')
plt.figure(figsize=(28, 7))
ax = sns.barplot(x='Items', y='Frequency', data=distribution, color=barblue)
ax.set_title(f"Distribution of Monte Carlo Simulation 'How Many' ({SIMULATIONS} Runs)", loc='left',
fontdict={'size': 18, 'weight': 'semibold'})
ax.set_xlabel(f"Total Items Completed in {SIMULATION_DAYS} Days")
ax.set_ylabel('Frequency')
ax.axhline(y=SIMULATIONS*0.001, color=darkgrey, alpha=.5);
###Output
_____no_output_____
###Markdown
Analysis of the Probabilities of Occurrence: We determine the probability for each number of completed items by accumulating the frequencies over the simulations. We plot the probability for each number of completed items and indicate the 70%, 85%, and 95% percentiles.
###Code
distribution = distribution.sort_index(ascending=False)
distribution['Probability'] = 100 * distribution.Frequency.cumsum()/distribution.Frequency.sum()
plt.figure(figsize=(28, 10))
ax = sns.barplot(x='Items', y='Probability', data=distribution, color=barblue)
ax.text(x=-1.4, y=118,
s=f"Probabilities of Completing a Scope in {SIMULATION_DAYS} Days", fontsize=18, fontweight='semibold')
ax.text(x=-1.4, y=110,
s=f"Based on a Monte Carlo Simulations ({SIMULATIONS} Runs) with data of last {LAST_DAYS} days", fontsize=16)
ax.set_ylabel('Confidence')
ax.set_xlabel('Total Items Completed')
ax.axhline(y=0.5, color=darkgrey, alpha=.5)
ax.axhline(y=70, color=darkgrey, linestyle='--')
ax.axhline(y=85, color=darkgrey, linestyle='--')
ax.axhline(y=95, color=darkgrey, linestyle='--')
label_xpos = distribution['Items'].max()-2
ax.text(y=70, x=label_xpos, s=f'70%% (%d+ Items)' % samples.Items.quantile(0.3),
va='center', ha='center', backgroundcolor='#F0F0F0')
ax.text(y=85, x=label_xpos, s=f'85%% (%d+ Items)' % samples.Items.quantile(0.15),
va='center', ha='center', backgroundcolor='#F0F0F0')
ax.text(y=95, x=label_xpos, s=f'95%% (%d+ Items)' % samples.Items.quantile(0.05),
va='center', ha='center', backgroundcolor='#F0F0F0')
###Output
_____no_output_____
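###Markdown
The same percentile figures used in the plot labels can also be printed directly, which is handy for a written forecast:
###Code
# Confidence levels and the corresponding minimum number of items.
for confidence, q in [(70, 0.30), (85, 0.15), (95, 0.05)]:
    print(f'{confidence}% confidence: at least {int(samples.Items.quantile(q))} items in {SIMULATION_DAYS} days')
###Output
_____no_output_____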
|
fitsfile_calculation.ipynb
|
###Markdown
Write and open fits file with Astropy
###Code
import numpy as np
from astropy.io import fits
data = np.ones((10, 20, 30, 5))
# hdul = fits.HDUList()
# hdul.append(fits.PrimaryHDU())
# for img in data:
# hdul.append(fits.ImageHDU(data=img))
hdu = fits.PrimaryHDU(data)
hdulist = fits.HDUList([hdu])
hdulist.writeto('output-1.fits')
from astropy.io import fits
fitsdata = fits.open('./output-1.fits')
fitsdata.info()
###Output
Filename: ./output-1.fits
No. Name Type Cards Dimensions Format
0 PRIMARY PrimaryHDU 8 (5, 30, 20, 10) float64
###Markdown
--------- Convert galactic coordinates to Cartesian coordinates following Tom Rice's paper. The code below does not work yet: `d0` (the distance to each source) is not defined.
###Code
# read table data
from astropy.io import fits
import numpy as np
from math import pi, cos, sin
hdulist = fits.open('BGPS_3D_catalogue_HCO+.fit')
table_data = hdulist[1].data
# convert the _Glat/_Glon columns from degrees to radians
b = table_data.field('_Glat')*pi/180.
l = table_data.field('_Glon')*pi/180.
# use the algorithm in Tom Rice's catalogue paper to convert galactic to Cartesian coordinates;
# np.cos/np.sin are used because l and b are arrays (math.cos/math.sin only accept scalars)
# NOTE: d0 (the heliocentric distance per source) is still undefined, so this cell cannot run yet
x = d0*np.cos(l)*np.cos(b)
y = d0*np.sin(l)*np.cos(b)
z = d0*np.sin(b)
###Output
_____no_output_____
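###Markdown
An alternative that avoids the manual trigonometry is astropy's coordinate machinery. A minimal sketch, assuming a single placeholder distance `d0` in kpc for all sources (replace it with real per-source distances):
###Code
from astropy.coordinates import SkyCoord
import astropy.units as u

d0 = 1.0  # assumed placeholder distance in kpc
coords = SkyCoord(l=table_data.field('_Glon') * u.deg,
                  b=table_data.field('_Glat') * u.deg,
                  distance=d0 * u.kpc, frame='galactic')
x, y, z = coords.cartesian.x, coords.cartesian.y, coords.cartesian.z
print(x[:3], y[:3], z[:3])
###Output
_____no_output_____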
###Markdown
Extract vtk object from fits file
###Code
import vtk
from vtk.util import numpy_support
from mayavi import mlab
from astropy.io import fits
import numpy as np
hdulist = fits.open('l1448_13co.fits')
NumPy_data = hdulist[0].data
NumPy_data_shape = NumPy_data.shape
VTK_data = numpy_support.numpy_to_vtk(num_array=NumPy_data.ravel(), deep=True, array_type=vtk.VTK_FLOAT)
scalarfield = mlab.pipeline.scalar_field(NumPy_data)
print('done', scalarfield)
%tb
# export to file not found
# http://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python
from pyevtk.hl import gridToVTK
from astropy.io import fits
import numpy as np
hdulist = fits.open('l1448_13co.fits')
NumPy_data = hdulist[0].data
noSlices = 5
juliaStacked = numpy.dstack([julia]*noSlices)
x = numpy.arange(0, w+1)
y = numpy.arange(0, h+1)
z = numpy.arange(0, noSlices+1)
gridToVTK("./julia", x, y, z, cellData = {'julia': juliaStacked})
###Output
_____no_output_____
|
tests/notebooks/LIRPA_comparison_fully_connected.ipynb
|
###Markdown
COMPARISON LIRPA VS DECOMON: FULLY CONNECTED MNIST PART A: TENSORFLOW
###Code
import tensorflow.keras as keras
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.datasets import mnist
import sys
sys.path.append('..')
import os.path
import os
import pickle as pkl
from contextlib import closing
import time
from numpy.testing import assert_almost_equal, assert_array_less
import os
import tensorflow.keras as keras
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Input, Lambda, Activation, Reshape, \
Conv2D, Add, Flatten, Dense, Layer, MaxPooling2D, Subtract, Concatenate, Multiply, Add, Subtract
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
print('Notebook run using keras:', keras.__version__)
import sys
sys.path.append('../..')
import decomon
from decomon.models.convert import clone as convert
from decomon import get_upper_box, get_lower_box, get_range_box, get_range_noise
from auto_LiRPA import BoundedModule, BoundedTensor, PerturbationLpNorm
###Output
_____no_output_____
###Markdown
Build and Train a Neural Network on a sinusoid. The sinusoid function is defined on the $[-1, 1]$ interval. We put a factor inside the sinusoid to get several periods of oscillation.
###Code
x = np.linspace(-1, 1, 1000)
y = np.sin(10*x)
###Output
_____no_output_____
###Markdown
We approximate this function with a fully connected network composed of 4 hidden layers of sizes 100, 100, 20 and 20, respectively. Rectified Linear Units (ReLU) are chosen as the activation function for all neurons.
###Code
layers = []
layers.append(Dense(100, activation='linear', input_dim=1)) # specify the dimension of the input space
layers.append(Activation('relu'))
layers.append(Dense(100, activation='linear'))
layers.append(Activation('relu'))
layers.append(Dense(20, activation='linear'))
layers.append(Activation('relu'))
layers.append(Dense(20, activation='linear'))
layers.append(Activation('relu'))
layers.append(Dense(1, activation='linear'))
model = Sequential(layers)
###Output
2022-03-08 15:06:06.882439: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
###Markdown
We specify the optimization method and the loss, in this case the classical Mean Squared Error.
###Code
model.compile('adam', 'mse')
###Output
_____no_output_____
###Markdown
we train the neural network
###Code
model.fit(x, y, batch_size=32, shuffle=True, epochs=100, verbose=0)
# verbose=0 removes the printing along the training
import torch
from torch import nn
import torch.nn.functional as F
class NeuralNet(nn.Module):
def __init__(self):
super(NeuralNet, self).__init__()
self.hidden_0 = nn.Linear(1, 100) # input_dim = 1; output_dim = 100
self.hidden_1 = nn.Linear(100, 100)
self.hidden_2 = nn.Linear(100, 20)
self.hidden_3 = nn.Linear(20, 20)
        self.hidden_4 = nn.Linear(20, 1)  # output layer: 20 -> 1, mirroring the final Dense(1) of the Keras model
self.layers = [self.hidden_0, self.hidden_1, self.hidden_2, self.hidden_3, self.hidden_4]
def forward(self, x):
x = self.hidden_0(x)
x = F.relu(x)
x = self.hidden_1(x)
x = F.relu(x)
x = self.hidden_2(x)
x = F.relu(x)
x = self.hidden_3(x)
x = F.relu(x)
x = self.hidden_4(x)
return x
#x = x.view(-1, 128)
#return x
def reset_weights(self, model):
layers = model.layers
index=0
for layer_keras in layers:
if len(layer_keras.get_weights()):
print(layer_keras.name)
layer_torch = self.layers[index]
weights = layer_keras.get_weights()
layer_torch.weight.data = torch.from_numpy(np.transpose(weights[0]))
layer_torch.bias.data = torch.from_numpy(np.transpose(weights[1]))
index+=1
model_torch = NeuralNet()
model_torch.reset_weights(model)
# convert our model into a decomon model:
decomon_model_0 = convert(model, method='crown-ibp')
decomon_model_1 = convert(model, ibp=True, forward=False, method='crown')
###Output
_____no_output_____
###Markdown
check the predictions
###Code
x_train_tensor = torch.from_numpy(x[:,None]).float().to('cpu')
y_pred_torch = model_torch(x_train_tensor).cpu().detach().numpy()
y_pred_keras = model.predict(x)
assert_almost_equal(y_pred_keras, y_pred_torch, decimal=6)
plt.plot(x, y_pred_torch, 'x')
plt.plot(x, y_pred_keras)
###Output
_____no_output_____
###Markdown
AUTO LIRPA
###Code
# define the intervals
def get_range_box_comparison(method, model_decomon_1, model_torch, x_min=x.min(), x_max=x.max(), n_split=10):
alpha = np.linspace(0, 1, n_split+1)
x_samples = (1-alpha)*x_min + alpha*x_max
X_min = x_samples[:-1][:, None]
X_max = x_samples[1:][:, None]
X_lirpa_ = (X_min + X_max)/2.
eps = 0.5*(x_max - x_min)/n_split
# convert X_lirpa into a pytorch tensor
X_lirpa = torch.from_numpy(X_lirpa_).float().to('cpu')
model_lirpa = BoundedModule(model_torch, X_lirpa)
ptb = PerturbationLpNorm(norm=np.inf, eps=eps)
input_lirpa = BoundedTensor(X_lirpa, ptb)
lb, ub = model_lirpa.compute_bounds(x=(input_lirpa,), method=method)
lb_ = lb.cpu().detach().numpy()
ub_ = ub.cpu().detach().numpy()
#upper_0, lower_0 = get_range_noise(model_decomon_0, X_lirpa_, eps, p=np.inf)
upper_, lower_ = get_range_box(model_decomon_1, X_min, X_max, fast=True)
#upper_ = np.minimum(upper_0, upper_0)
#lower_ = np.maximum(lower_1, lower_1)
return X_lirpa_, model.predict(X_lirpa_), lb_, ub_, lower_, upper_
x_samples, y_samples, lb_p_0, ub_p_0, lb_t_0, ub_t_0 = get_range_box_comparison('IBP+backward', decomon_model_0, model_torch, n_split=10)
x_samples, y_samples, lb_p_1, ub_p_1, lb_t_1, ub_t_1 = get_range_box_comparison('crown', decomon_model_1, model_torch, n_split=10)
lb_p_0
lb_t_0
assert_almost_equal(ub_p_0, ub_t_0, decimal=5)
assert_almost_equal(lb_p_0, lb_t_0, decimal=5)
assert_almost_equal(lb_p_1, lb_t_1, decimal=5)
assert_almost_equal(ub_p_1, ub_t_1, decimal=5)
###Output
_____no_output_____
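###Markdown
Since both toolboxes agree, either set of bounds can be visualised against the network output; a short sketch using the crown results computed above:
###Code
plt.plot(x, y_pred_keras, label='network')
plt.plot(x_samples, lb_p_1, 'v', label='lower bound (crown)')
plt.plot(x_samples, ub_p_1, '^', label='upper bound (crown)')
plt.legend()
plt.show()
###Output
_____no_output_____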
|
neural_networks_and_deep_learning/third_week/Planar_data_classification_with_onehidden_layer_v6c.ipynb
|
###Markdown
Updates to Assignment If you were working on the older version:* Please click on the "Coursera" icon in the top right to open up the folder directory. * Navigate to the folder: Week 3/ Planar data classification with one hidden layer. You can see your prior work in version 6b: "Planar data classification with one hidden layer v6b.ipynb" List of bug fixes and enhancements* Clarifies that the classifier will learn to classify regions as either red or blue.* compute_cost function fixes np.squeeze by casting it as a float.* compute_cost instructions clarify the purpose of np.squeeze.* compute_cost clarifies that "parameters" parameter is not needed, but is kept in the function definition until the auto-grader is also updated.* nn_model removes extraction of parameter values, as the entire parameter dictionary is passed to the invoked functions. Planar data classification with one hidden layerWelcome to your week 3 programming assignment. It's time to build your first neural network, which will have a hidden layer. You will see a big difference between this model and the one you implemented using logistic regression. **You will learn how to:**- Implement a 2-class classification neural network with a single hidden layer- Use units with a non-linear activation function, such as tanh - Compute the cross entropy loss - Implement forward and backward propagation 1 - Packages Let's first import all the packages that you will need during this assignment.- [numpy](https://www.numpy.org/) is the fundamental package for scientific computing with Python.- [sklearn](http://scikit-learn.org/stable/) provides simple and efficient tools for data mining and data analysis. - [matplotlib](http://matplotlib.org) is a library for plotting graphs in Python.- testCases provides some test examples to assess the correctness of your functions- planar_utils provide various useful functions used in this assignment
###Code
# Package imports
import numpy as np
import matplotlib.pyplot as plt
from testCases_v2 import *
import sklearn
import sklearn.datasets
import sklearn.linear_model
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets
%matplotlib inline
np.random.seed(1) # set a seed so that the results are consistent
###Output
_____no_output_____
###Markdown
2 - Dataset First, let's get the dataset you will work on. The following code will load a "flower" 2-class dataset into variables `X` and `Y`.
###Code
X, Y = load_planar_dataset()
###Output
_____no_output_____
###Markdown
Visualize the dataset using matplotlib. The data looks like a "flower" with some red (label y=0) and some blue (y=1) points. Your goal is to build a model to fit this data. In other words, we want the classifier to define regions as either red or blue.
###Code
# Visualize the data:
plt.scatter(X[0, :], X[1, :], c=Y, s=40, cmap=plt.cm.Spectral);
###Output
_____no_output_____
###Markdown
You have: - a numpy-array (matrix) X that contains your features (x1, x2) - a numpy-array (vector) Y that contains your labels (red:0, blue:1). Let's first get a better sense of what our data is like. **Exercise**: How many training examples do you have? In addition, what is the `shape` of the variables `X` and `Y`? **Hint**: How do you get the shape of a numpy array? [(help)](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.shape.html)
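###Markdown
As a reminder of how `.shape` behaves, here is a tiny standalone example (separate from the graded cell below):
###Code
example = np.zeros((3, 7))
print(example.shape)     # (3, 7)
print(example.shape[1])  # 7, the number of columns
###Output
_____no_output_____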
###Code
### START CODE HERE ### (โ 3 lines of code)
shape_X = None
shape_Y = None
m = None # training set size
### END CODE HERE ###
print ('The shape of X is: ' + str(shape_X))
print ('The shape of Y is: ' + str(shape_Y))
print ('I have m = %d training examples!' % (m))
###Output
_____no_output_____
###Markdown
**Expected Output**: **shape of X** (2, 400) **shape of Y** (1, 400) **m** 400 3 - Simple Logistic Regression Before building a full neural network, let's first see how logistic regression performs on this problem. You can use sklearn's built-in functions to do that. Run the code below to train a logistic regression classifier on the dataset.
###Code
# Train the logistic regression classifier
clf = sklearn.linear_model.LogisticRegressionCV();
clf.fit(X.T, Y.T);
###Output
_____no_output_____
###Markdown
You can now plot the decision boundary of these models. Run the code below.
###Code
# Plot the decision boundary for logistic regression
plot_decision_boundary(lambda x: clf.predict(x), X, Y)
plt.title("Logistic Regression")
# Print accuracy
LR_predictions = clf.predict(X.T)
print ('Accuracy of logistic regression: %d ' % float((np.dot(Y,LR_predictions) + np.dot(1-Y,1-LR_predictions))/float(Y.size)*100) +
'% ' + "(percentage of correctly labelled datapoints)")
###Output
_____no_output_____
###Markdown
**Expected Output**: **Accuracy** 47% **Interpretation**: The dataset is not linearly separable, so logistic regression doesn't perform well. Hopefully a neural network will do better. Let's try this now! 4 - Neural Network modelLogistic regression did not work well on the "flower dataset". You are going to train a Neural Network with a single hidden layer.**Here is our model**:**Mathematically**:For one example $x^{(i)}$:$$z^{[1] (i)} = W^{[1]} x^{(i)} + b^{[1]}\tag{1}$$ $$a^{[1] (i)} = \tanh(z^{[1] (i)})\tag{2}$$$$z^{[2] (i)} = W^{[2]} a^{[1] (i)} + b^{[2]}\tag{3}$$$$\hat{y}^{(i)} = a^{[2] (i)} = \sigma(z^{ [2] (i)})\tag{4}$$$$y^{(i)}_{prediction} = \begin{cases} 1 & \mbox{if } a^{[2](i)} > 0.5 \\ 0 & \mbox{otherwise } \end{cases}\tag{5}$$Given the predictions on all the examples, you can also compute the cost $J$ as follows: $$J = - \frac{1}{m} \sum\limits_{i = 0}^{m} \large\left(\small y^{(i)}\log\left(a^{[2] (i)}\right) + (1-y^{(i)})\log\left(1- a^{[2] (i)}\right) \large \right) \small \tag{6}$$**Reminder**: The general methodology to build a Neural Network is to: 1. Define the neural network structure ( of input units, of hidden units, etc). 2. Initialize the model's parameters 3. Loop: - Implement forward propagation - Compute loss - Implement backward propagation to get the gradients - Update parameters (gradient descent)You often build helper functions to compute steps 1-3 and then merge them into one function we call `nn_model()`. Once you've built `nn_model()` and learnt the right parameters, you can make predictions on new data. 4.1 - Defining the neural network structure **Exercise**: Define three variables: - n_x: the size of the input layer - n_h: the size of the hidden layer (set this to 4) - n_y: the size of the output layer**Hint**: Use shapes of X and Y to find n_x and n_y. Also, hard code the hidden layer size to be 4.
###Code
# GRADED FUNCTION: layer_sizes
def layer_sizes(X, Y):
"""
Arguments:
X -- input dataset of shape (input size, number of examples)
Y -- labels of shape (output size, number of examples)
Returns:
n_x -- the size of the input layer
n_h -- the size of the hidden layer
n_y -- the size of the output layer
"""
### START CODE HERE ### (โ 3 lines of code)
n_x = None # size of input layer
n_h = None
n_y = None # size of output layer
### END CODE HERE ###
return (n_x, n_h, n_y)
X_assess, Y_assess = layer_sizes_test_case()
(n_x, n_h, n_y) = layer_sizes(X_assess, Y_assess)
print("The size of the input layer is: n_x = " + str(n_x))
print("The size of the hidden layer is: n_h = " + str(n_h))
print("The size of the output layer is: n_y = " + str(n_y))
###Output
_____no_output_____
###Markdown
**Expected Output** (these are not the sizes you will use for your network, they are just used to assess the function you've just coded). **n_x** 5 **n_h** 4 **n_y** 2 4.2 - Initialize the model's parameters **Exercise**: Implement the function `initialize_parameters()`.**Instructions**:- Make sure your parameters' sizes are right. Refer to the neural network figure above if needed.- You will initialize the weights matrices with random values. - Use: `np.random.randn(a,b) * 0.01` to randomly initialize a matrix of shape (a,b).- You will initialize the bias vectors as zeros. - Use: `np.zeros((a,b))` to initialize a matrix of shape (a,b) with zeros.
###Code
# GRADED FUNCTION: initialize_parameters
def initialize_parameters(n_x, n_h, n_y):
"""
Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
params -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
b2 -- bias vector of shape (n_y, 1)
"""
np.random.seed(2) # we set up a seed so that your output matches ours although the initialization is random.
### START CODE HERE ### (โ 4 lines of code)
W1 = None
b1 = None
W2 = None
b2 = None
### END CODE HERE ###
assert (W1.shape == (n_h, n_x))
assert (b1.shape == (n_h, 1))
assert (W2.shape == (n_y, n_h))
assert (b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
n_x, n_h, n_y = initialize_parameters_test_case()
parameters = initialize_parameters(n_x, n_h, n_y)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
###Output
_____no_output_____
###Markdown
**Expected Output**: **W1** [[-0.00416758 -0.00056267] [-0.02136196 0.01640271] [-0.01793436 -0.00841747] [ 0.00502881 -0.01245288]] **b1** [[ 0.] [ 0.] [ 0.] [ 0.]] **W2** [[-0.01057952 -0.00909008 0.00551454 0.02292208]] **b2** [[ 0.]] 4.3 - The Loop **Question**: Implement `forward_propagation()`.**Instructions**:- Look above at the mathematical representation of your classifier.- You can use the function `sigmoid()`. It is built-in (imported) in the notebook.- You can use the function `np.tanh()`. It is part of the numpy library.- The steps you have to implement are: 1. Retrieve each parameter from the dictionary "parameters" (which is the output of `initialize_parameters()`) by using `parameters[".."]`. 2. Implement Forward Propagation. Compute $Z^{[1]}, A^{[1]}, Z^{[2]}$ and $A^{[2]}$ (the vector of all your predictions on all the examples in the training set).- Values needed in the backpropagation are stored in "`cache`". The `cache` will be given as an input to the backpropagation function.
###Code
# GRADED FUNCTION: forward_propagation
def forward_propagation(X, parameters):
"""
Argument:
X -- input data of size (n_x, m)
parameters -- python dictionary containing your parameters (output of initialization function)
Returns:
A2 -- The sigmoid output of the second activation
cache -- a dictionary containing "Z1", "A1", "Z2" and "A2"
"""
# Retrieve each parameter from the dictionary "parameters"
### START CODE HERE ### (โ 4 lines of code)
W1 = None
b1 = None
W2 = None
b2 = None
### END CODE HERE ###
# Implement Forward Propagation to calculate A2 (probabilities)
### START CODE HERE ### (โ 4 lines of code)
Z1 = None
A1 = None
Z2 = None
A2 = None
### END CODE HERE ###
assert(A2.shape == (1, X.shape[1]))
cache = {"Z1": Z1,
"A1": A1,
"Z2": Z2,
"A2": A2}
return A2, cache
X_assess, parameters = forward_propagation_test_case()
A2, cache = forward_propagation(X_assess, parameters)
# Note: we use the mean here just to make sure that your output matches ours.
print(np.mean(cache['Z1']) ,np.mean(cache['A1']),np.mean(cache['Z2']),np.mean(cache['A2']))
###Output
_____no_output_____
###Markdown
**Expected Output**: 0.262818640198 0.091999045227 -1.30766601287 0.212877681719 Now that you have computed $A^{[2]}$ (in the Python variable "`A2`"), which contains $a^{[2](i)}$ for every example, you can compute the cost function as follows:$$J = - \frac{1}{m} \sum\limits_{i = 1}^{m} \large{(} \small y^{(i)}\log\left(a^{[2] (i)}\right) + (1-y^{(i)})\log\left(1- a^{[2] (i)}\right) \large{)} \small\tag{13}$$**Exercise**: Implement `compute_cost()` to compute the value of the cost $J$.**Instructions**:- There are many ways to implement the cross-entropy loss. To help you, we give you how we would have implemented$- \sum\limits_{i=0}^{m} y^{(i)}\log(a^{[2](i)})$:```pythonlogprobs = np.multiply(np.log(A2),Y)cost = - np.sum(logprobs) no need to use a for loop!```(you can use either `np.multiply()` and then `np.sum()` or directly `np.dot()`). Note that if you use `np.multiply` followed by `np.sum` the end result will be a type `float`, whereas if you use `np.dot`, the result will be a 2D numpy array. We can use `np.squeeze()` to remove redundant dimensions (in the case of single float, this will be reduced to a zero-dimension array). We can cast the array as a type `float` using `float()`.
###Code
# GRADED FUNCTION: compute_cost
def compute_cost(A2, Y, parameters):
"""
Computes the cross-entropy cost given in equation (13)
Arguments:
A2 -- The sigmoid output of the second activation, of shape (1, number of examples)
Y -- "true" labels vector of shape (1, number of examples)
parameters -- python dictionary containing your parameters W1, b1, W2 and b2
[Note that the parameters argument is not used in this function,
but the auto-grader currently expects this parameter.
Future version of this notebook will fix both the notebook
and the auto-grader so that `parameters` is not needed.
For now, please include `parameters` in the function signature,
and also when invoking this function.]
Returns:
cost -- cross-entropy cost given equation (13)
"""
m = Y.shape[1] # number of example
# Compute the cross-entropy cost
### START CODE HERE ### (โ 2 lines of code)
logprobs = None
cost = None
### END CODE HERE ###
cost = float(np.squeeze(cost)) # makes sure cost is the dimension we expect.
# E.g., turns [[17]] into 17
assert(isinstance(cost, float))
return cost
A2, Y_assess, parameters = compute_cost_test_case()
print("cost = " + str(compute_cost(A2, Y_assess, parameters)))
###Output
_____no_output_____
###Markdown
**Expected Output**: **cost** 0.693058761... Using the cache computed during forward propagation, you can now implement backward propagation.**Question**: Implement the function `backward_propagation()`.**Instructions**:Backpropagation is usually the hardest (most mathematical) part in deep learning. To help you, here again is the slide from the lecture on backpropagation. You'll want to use the six equations on the right of this slide, since you are building a vectorized implementation. <!--$\frac{\partial \mathcal{J} }{ \partial z_{2}^{(i)} } = \frac{1}{m} (a^{[2](i)} - y^{(i)})$$\frac{\partial \mathcal{J} }{ \partial W_2 } = \frac{\partial \mathcal{J} }{ \partial z_{2}^{(i)} } a^{[1] (i) T} $$\frac{\partial \mathcal{J} }{ \partial b_2 } = \sum_i{\frac{\partial \mathcal{J} }{ \partial z_{2}^{(i)}}}$$\frac{\partial \mathcal{J} }{ \partial z_{1}^{(i)} } = W_2^T \frac{\partial \mathcal{J} }{ \partial z_{2}^{(i)} } * ( 1 - a^{[1] (i) 2}) $$\frac{\partial \mathcal{J} }{ \partial W_1 } = \frac{\partial \mathcal{J} }{ \partial z_{1}^{(i)} } X^T $$\frac{\partial \mathcal{J} _i }{ \partial b_1 } = \sum_i{\frac{\partial \mathcal{J} }{ \partial z_{1}^{(i)}}}$- Note that $*$ denotes elementwise multiplication.- The notation you will use is common in deep learning coding: - dW1 = $\frac{\partial \mathcal{J} }{ \partial W_1 }$ - db1 = $\frac{\partial \mathcal{J} }{ \partial b_1 }$ - dW2 = $\frac{\partial \mathcal{J} }{ \partial W_2 }$ - db2 = $\frac{\partial \mathcal{J} }{ \partial b_2 }$ !-->- Tips: - To compute dZ1 you'll need to compute $g^{[1]'}(Z^{[1]})$. Since $g^{[1]}(.)$ is the tanh activation function, if $a = g^{[1]}(z)$ then $g^{[1]'}(z) = 1-a^2$. So you can compute $g^{[1]'}(Z^{[1]})$ using `(1 - np.power(A1, 2))`.
###Code
# GRADED FUNCTION: backward_propagation
def backward_propagation(parameters, cache, X, Y):
"""
Implement the backward propagation using the instructions above.
Arguments:
parameters -- python dictionary containing our parameters
cache -- a dictionary containing "Z1", "A1", "Z2" and "A2".
X -- input data of shape (2, number of examples)
Y -- "true" labels vector of shape (1, number of examples)
Returns:
grads -- python dictionary containing your gradients with respect to different parameters
"""
m = X.shape[1]
# First, retrieve W1 and W2 from the dictionary "parameters".
### START CODE HERE ### (โ 2 lines of code)
W1 = None
W2 = None
### END CODE HERE ###
# Retrieve also A1 and A2 from dictionary "cache".
### START CODE HERE ### (โ 2 lines of code)
A1 = None
A2 = None
### END CODE HERE ###
# Backward propagation: calculate dW1, db1, dW2, db2.
### START CODE HERE ### (โ 6 lines of code, corresponding to 6 equations on slide above)
dZ2 = None
dW2 = None
db2 = None
dZ1 = None
dW1 = None
db1 = None
### END CODE HERE ###
grads = {"dW1": dW1,
"db1": db1,
"dW2": dW2,
"db2": db2}
return grads
parameters, cache, X_assess, Y_assess = backward_propagation_test_case()
grads = backward_propagation(parameters, cache, X_assess, Y_assess)
print ("dW1 = "+ str(grads["dW1"]))
print ("db1 = "+ str(grads["db1"]))
print ("dW2 = "+ str(grads["dW2"]))
print ("db2 = "+ str(grads["db2"]))
###Output
_____no_output_____
###Markdown
**Expected output**: **dW1** [[ 0.00301023 -0.00747267] [ 0.00257968 -0.00641288] [-0.00156892 0.003893 ] [-0.00652037 0.01618243]] **db1** [[ 0.00176201] [ 0.00150995] [-0.00091736] [-0.00381422]] **dW2** [[ 0.00078841 0.01765429 -0.00084166 -0.01022527]] **db2** [[-0.16655712]] **Question**: Implement the update rule. Use gradient descent. You have to use (dW1, db1, dW2, db2) in order to update (W1, b1, W2, b2).**General gradient descent rule**: $ \theta = \theta - \alpha \frac{\partial J }{ \partial \theta }$ where $\alpha$ is the learning rate and $\theta$ represents a parameter.**Illustration**: The gradient descent algorithm with a good learning rate (converging) and a bad learning rate (diverging). Images courtesy of Adam Harley.
###Code
# GRADED FUNCTION: update_parameters
def update_parameters(parameters, grads, learning_rate = 1.2):
"""
Updates parameters using the gradient descent update rule given above
Arguments:
parameters -- python dictionary containing your parameters
grads -- python dictionary containing your gradients
Returns:
parameters -- python dictionary containing your updated parameters
"""
# Retrieve each parameter from the dictionary "parameters"
### START CODE HERE ### (โ 4 lines of code)
W1 = None
b1 = None
W2 = None
b2 = None
### END CODE HERE ###
# Retrieve each gradient from the dictionary "grads"
### START CODE HERE ### (โ 4 lines of code)
dW1 = None
db1 = None
dW2 = None
db2 = None
## END CODE HERE ###
# Update rule for each parameter
### START CODE HERE ### (โ 4 lines of code)
W1 = None
b1 = None
W2 = None
b2 = None
### END CODE HERE ###
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
###Output
_____no_output_____
###Markdown
**Expected Output**: **W1** [[-0.00643025 0.01936718] [-0.02410458 0.03978052] [-0.01653973 -0.02096177] [ 0.01046864 -0.05990141]] **b1** [[ -1.02420756e-06] [ 1.27373948e-05] [ 8.32996807e-07] [ -3.20136836e-06]] **W2** [[-0.01041081 -0.04463285 0.01758031 0.04747113]] **b2** [[ 0.00010457]] 4.4 - Integrate parts 4.1, 4.2 and 4.3 in nn_model() **Question**: Build your neural network model in `nn_model()`.**Instructions**: The neural network model has to use the previous functions in the right order.
###Code
# GRADED FUNCTION: nn_model
def nn_model(X, Y, n_h, num_iterations = 10000, print_cost=False):
"""
Arguments:
X -- dataset of shape (2, number of examples)
Y -- labels of shape (1, number of examples)
n_h -- size of the hidden layer
num_iterations -- Number of iterations in gradient descent loop
print_cost -- if True, print the cost every 1000 iterations
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
np.random.seed(3)
n_x = layer_sizes(X, Y)[0]
n_y = layer_sizes(X, Y)[2]
# Initialize parameters
### START CODE HERE ### (โ 1 line of code)
parameters = None
### END CODE HERE ###
# Loop (gradient descent)
for i in range(0, num_iterations):
### START CODE HERE ### (โ 4 lines of code)
# Forward propagation. Inputs: "X, parameters". Outputs: "A2, cache".
A2, cache = None
# Cost function. Inputs: "A2, Y, parameters". Outputs: "cost".
cost = None
# Backpropagation. Inputs: "parameters, cache, X, Y". Outputs: "grads".
grads = None
# Gradient descent parameter update. Inputs: "parameters, grads". Outputs: "parameters".
parameters = None
### END CODE HERE ###
# Print the cost every 1000 iterations
if print_cost and i % 1000 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
return parameters
X_assess, Y_assess = nn_model_test_case()
parameters = nn_model(X_assess, Y_assess, 4, num_iterations=10000, print_cost=True)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
###Output
_____no_output_____
###Markdown
**Expected Output**: **cost after iteration 0** 0.692739 $\vdots$ $\vdots$ **W1** [[-0.65848169 1.21866811] [-0.76204273 1.39377573] [ 0.5792005 -1.10397703] [ 0.76773391 -1.41477129]] **b1** [[ 0.287592 ] [ 0.3511264 ] [-0.2431246 ] [-0.35772805]] **W2** [[-2.45566237 -3.27042274 2.00784958 3.36773273]] **b2** [[ 0.20459656]] 4.5 Predictions**Question**: Use your model to predict by building predict().Use forward propagation to predict results.**Reminder**: predictions = $y_{prediction} = \mathbb 1 \text{{activation > 0.5}} = \begin{cases} 1 & \text{if}\ activation > 0.5 \\ 0 & \text{otherwise} \end{cases}$ As an example, if you would like to set the entries of a matrix X to 0 and 1 based on a threshold you would do: ```X_new = (X > threshold)```
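As a tiny illustration of that thresholding step (a sketch; `A2` here is just a made-up activation matrix):
```python
import numpy as np

A2 = np.array([[0.1, 0.7, 0.6, 0.2]])   # example sigmoid activations, shape (1, m)
predictions = (A2 > 0.5).astype(int)    # 1 where activation > 0.5, else 0
print(predictions)                      # [[0 1 1 0]]
```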
###Code
# GRADED FUNCTION: predict
def predict(parameters, X):
"""
Using the learned parameters, predicts a class for each example in X
Arguments:
parameters -- python dictionary containing your parameters
X -- input data of size (n_x, m)
Returns
predictions -- vector of predictions of our model (red: 0 / blue: 1)
"""
# Computes probabilities using forward propagation, and classifies to 0/1 using 0.5 as the threshold.
### START CODE HERE ### (โ 2 lines of code)
A2, cache = None
predictions = None
### END CODE HERE ###
return predictions
parameters, X_assess = predict_test_case()
predictions = predict(parameters, X_assess)
print("predictions mean = " + str(np.mean(predictions)))
###Output
_____no_output_____
###Markdown
**Expected Output**: **predictions mean** 0.666666666667 It is time to run the model and see how it performs on a planar dataset. Run the following code to test your model with a single hidden layer of $n_h$ hidden units.
###Code
# Build a model with a n_h-dimensional hidden layer
parameters = nn_model(X, Y, n_h = 4, num_iterations = 10000, print_cost=True)
# Plot the decision boundary
plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
plt.title("Decision Boundary for hidden layer size " + str(4))
###Output
_____no_output_____
###Markdown
**Expected Output**: **Cost after iteration 9000** 0.218607
###Code
# Print accuracy
predictions = predict(parameters, X)
print ('Accuracy: %d' % float((np.dot(Y,predictions.T) + np.dot(1-Y,1-predictions.T))/float(Y.size)*100) + '%')
###Output
_____no_output_____
###Markdown
**Expected Output**: **Accuracy** 90% Accuracy is really high compared to Logistic Regression. The model has learnt the leaf patterns of the flower! Neural networks are able to learn even highly non-linear decision boundaries, unlike logistic regression. Now, let's try out several hidden layer sizes. 4.6 - Tuning hidden layer size (optional/ungraded exercise) Run the following code. It may take 1-2 minutes. You will observe different behaviors of the model for various hidden layer sizes.
###Code
# This may take about 2 minutes to run
plt.figure(figsize=(16, 32))
hidden_layer_sizes = [1, 2, 3, 4, 5, 20, 50]
for i, n_h in enumerate(hidden_layer_sizes):
plt.subplot(5, 2, i+1)
plt.title('Hidden Layer of size %d' % n_h)
parameters = nn_model(X, Y, n_h, num_iterations = 5000)
plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
predictions = predict(parameters, X)
accuracy = float((np.dot(Y,predictions.T) + np.dot(1-Y,1-predictions.T))/float(Y.size)*100)
print ("Accuracy for {} hidden units: {} %".format(n_h, accuracy))
###Output
_____no_output_____
###Markdown
**Interpretation**:- The larger models (with more hidden units) are able to fit the training set better, until eventually the largest models overfit the data. - The best hidden layer size seems to be around n_h = 5. Indeed, a value around here seems to fits the data well without also incurring noticeable overfitting.- You will also learn later about regularization, which lets you use very large models (such as n_h = 50) without much overfitting. **Optional questions**:**Note**: Remember to submit the assignment by clicking the blue "Submit Assignment" button at the upper-right. Some optional/ungraded questions that you can explore if you wish: - What happens when you change the tanh activation for a sigmoid activation or a ReLU activation?- Play with the learning_rate. What happens?- What if we change the dataset? (See part 5 below!) **You've learnt to:**- Build a complete neural network with a hidden layer- Make a good use of a non-linear unit- Implemented forward propagation and backpropagation, and trained a neural network- See the impact of varying the hidden layer size, including overfitting. Nice work! 5) Performance on other datasets If you want, you can rerun the whole notebook (minus the dataset part) for each of the following datasets.
###Code
# Datasets
noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()
datasets = {"noisy_circles": noisy_circles,
"noisy_moons": noisy_moons,
"blobs": blobs,
"gaussian_quantiles": gaussian_quantiles}
### START CODE HERE ### (choose your dataset)
dataset = "noisy_moons"
### END CODE HERE ###
X, Y = datasets[dataset]
X, Y = X.T, Y.reshape(1, Y.shape[0])
# make blobs binary
if dataset == "blobs":
Y = Y%2
# Visualize the data
plt.scatter(X[0, :], X[1, :], c=Y, s=40, cmap=plt.cm.Spectral);
###Output
_____no_output_____
|
Practical_RL/week8_scst/bonus.ipynb
|
###Markdown
Week8 bonus descriptionsHere are some cool mini-projects you can try to dive deeper into the topic. More metrics: BLEU (5+ pts)Pick BLEU or any other relevant metric, e.g. BLEU (e.g. from `nltk.bleu_score`).* Train model to maximize BLEU directly* How does levenshtein behave when maximizing BLEU and vice versa?* Compare this with how they behave when optimizing likelihood. (use default parameters for bleu: 4-gram, uniform weights) Actor-critic (5+++ pts)While self-critical training provides a large reduction of gradient variance, it has a few drawbacks:- It requires a lot of additional computation during training- It doesn't adjust V(s) between decoder steps. (one value per sequence)There's a more general way of doing the same thing: learned baselines, also known as __advantage actor-critic__.There are two main ways to apply that:- __naive way__: compute V(s) once per training example. - This only requires additional 1-unit linear dense layer that grows out of encoder, estimating V(s) - (implement this to get main points)- __every step__: compute V(s) on each decoder step - Again it's just an 1-unit dense layer (no nonlinearity), but this time it's inside decoder recurrence. - (+3 pts additional for this guy)In both cases, you should train V(s) to minimize squared error $(V(s) - R(s,a))^2$ with R being actual levenshtein.You can then use $ A(s,a) = (R(s,a) - const(V(s))) $ for policy gradient.There's also one particularly interesting approach (+5 additional pts):- __combining SCST and actor-critic__: - compute baseline $V(s)$ via self-critical sequence training (just like in main assignment) - learn correction $ C(s,a_{:t}) = R(s,a) - V(s) $ by minimizing $(R(s,a) - V(s) - C(s,a_{:t}))^2 $ - use $ A(s,a_{:t}) = R(s,a) - V(s) - const(C(s,a_{:t})) $ Implement attention (5+++ pts)Some seq2seq tasks can benefit from the attention mechanism. In addition to taking the _last_ time-step of encoder hidden state, we can allow decoder to peek on any time-step of his choice. Recommended steps:__1)__ Modify encoder-decoderLearn to feed the entire encoder into the decoder. You can do so by sending encoder rnn layer directly into decoder (make sure there's no `only_return_final=True` for encoder rnn layer).```class decoder: ... encoder_rnn_input = InputLayer(encoder.rnn.output_shape, name='encoder rnn input for decoder') ... decoder Recurrencerec = Recurrence(..., input_nonsequences = {decoder.encoder_rnn_input: encoder.rnn}, )```For starters, you can take it's last tick (via SliceLayer) inside the decoder step and feed it as input to make sure it works.__2)__ Implement attention mechanismNext thing we'll need is to implement the math of attention.The simplest way to do so is to write a special layer. We gave you a prototype and some tests below.__3)__ Use attention inside decoderThat's almost it! Now use `AttentionLayer` inside the decoder and feed it to back to lstm/gru/rnn (see code demo below).Train the full network just like you did before attention.__More points__ will be awwarded for comparing learning results of attention Vs no attention.__Bonus bonus:__ visualize attention vectors (>= +3 points)The best way to make sure your attention actually works is to visualize it.A simple way to do so is to obtain attention vectors from each tick (values __right after softmax__, not the layer outputs) and drawing those as images. 
step-by-step guide:- split AttentionLayer into two layers: _"from start to softmax"_ and _"from softmax to output"_- add outputs of the first layer to recurrence's `tracked_outputs`- compile a function that computes them- plt.imshow(them)
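Before the Lasagne layer below, here is the same attention arithmetic as a plain-NumPy sketch (the weight `W` and all shapes are illustrative assumptions, not part of the assignment API):
```python
import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

def attention_sketch(decoder_h, encoder_seq, W):
    # decoder_h:   [batch, dec_units],  encoder_seq: [batch, seq_len, enc_units]
    # W: [dec_units + enc_units, 1] -- a single linear unit producing one logit per time step
    batch, seq_len, enc_units = encoder_seq.shape
    dec_rep = np.repeat(decoder_h[:, None, :], seq_len, axis=1)   # align decoder state with every encoder step
    both = np.concatenate([dec_rep, encoder_seq], axis=-1)        # [batch, seq_len, dec+enc]
    logits = both.reshape(-1, both.shape[-1]).dot(W).reshape(batch, seq_len)
    attn = softmax(logits, axis=1)                                # attention weights per time step
    return (attn[:, :, None] * encoder_seq).sum(axis=1)           # [batch, enc_units]

out = attention_sketch(np.random.randn(5, 50), np.random.randn(5, 20, 32), np.random.randn(82, 1))
assert out.shape == (5, 32)
```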
###Code
import numpy as np
import theano,lasagne
import theano.tensor as T
from lasagne import init
from lasagne.layers import *
class AttentionLayer(MergeLayer):
def __init__(self,decoder_h,encoder_rnn):
#sanity checks
assert len(decoder_h.output_shape)==2,"please feed decoder 1 step activation as first param "
assert len(encoder_rnn.output_shape)==3, "please feed full encoder rnn sequence as second param"
self.decoder_num_units = decoder_h.output_shape[-1]
        self.encoder_num_units = encoder_rnn.output_shape[-1]
        #Here you should initialize all trainable parameters using self.add_param(...).
        #One possible completion (an illustrative choice, not the only one):
        #a single linear unit that maps [decoder_h ; encoder_h] to one attention logit.
        MergeLayer.__init__(self,[decoder_h,encoder_rnn],name="attention")
        self.W_att = self.add_param(spec=init.Normal(std=0.01),
                                    shape=(self.decoder_num_units + self.encoder_num_units, 1),
                                    name='W_att')
def get_output_shape_for(self,input_shapes,**kwargs):
"""return matrix of shape [batch_size, encoder num units]"""
return (None,self.encoder_num_units)
def get_output_for(self,inputs,**kwargs):
"""
takes (decoder_h, encoder_seq)
decoder_h has shape [batch_size, decoder num_units]
encoder_seq has shape [batch_size, sequence_length, encoder num_units]
returns attention output: matrix of shape [batch_size, encoder num units]
please read comments carefully before you start implementing
"""
decoder_h,encoder_seq = inputs
#get symbolic batch-size / seq length. Also don't forget self.decoder_num_units above
batch_size,seq_length,_ = tuple(encoder_seq.shape)
        #here's a recommended step-by-step guide for the attention mechanism.
        #You are free to ignore it altogether if you so wish
        #we repeat decoder activations to align with the encoder
        #(one possible completion of the original placeholder)
        decoder_h_repeated = T.repeat(decoder_h.dimshuffle(0, 'x', 1), seq_length, axis=1)
        # ^--shape=[batch,seq_length,decoder_n_units]
        encoder_and_decoder_together = T.concatenate([decoder_h_repeated, encoder_seq], axis=2)
# ^--shape=[batch,seq_length,enc_n_units+dec_n_units]
#here we flatten the tensor to simplify
encoder_and_decoder_flat = T.reshape(encoder_and_decoder_together,(-1,encoder_and_decoder_together.shape[-1]))
# ^--shape=[batch*seq_length,enc_n_units+dec_n_units]
        #here we use encoder_and_decoder_flat and the learned weights to predict attention logits
        #don't use softmax yet (one possible completion: the single linear unit W_att from __init__)
        attention_logits_flat = T.dot(encoder_and_decoder_flat, self.W_att)
# ^--shape=[batch*seq_length,1]
#here we reshape flat logits back into correct form
assert attention_logits_flat.ndim==2
attention_logits = attention_logits_flat.reshape((batch_size,seq_length))
# ^--shape=[batch,seq_length]
#here we apply softmax :)
attention = T.nnet.softmax(attention_logits)
# ^--shape=[batch,seq_length]
#here we compute output
output = (attention[:,:,None]*encoder_seq).sum(axis=1) #sum over seq_length
# ^--shape=[batch,enc_n_units]
return output
#demo code
from numpy.random import randn
dec_h_prev = InputLayer((None,50),T.constant(randn(5,50)),name='decoder h mock')
enc = InputLayer((None,None,32),T.constant(randn(5,20,32)),name='encoder sequence mock')
attention = AttentionLayer(dec_h_prev,enc)
#now you can use attention as additonal input to your decoder
#LSTMCell(prev_cell,prev_out,input_or_inputs=(usual_input,attention))
#sanity check
demo_output = get_output(attention).eval()
print 'actual shape:',demo_output.shape
assert demo_output.shape == (5,32)
assert np.isfinite(demo_output).all()
###Output
_____no_output_____
|
data/lec1_4_Combining_Pandas_Objects_ForOnlineLecture.ipynb
|
###Markdown
###Code
import numpy as np
import pandas as pd
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
pd.set_option('display.float_format', lambda x: '%.3f' % x)
pd.set_option('max_columns', None)
###Output
_____no_output_____
###Markdown
Adding data rows to a DataFrame. Adding rows with `loc[]`
###Code
df = pd.DataFrame(columns=['a', 'b'])
df.head()
###Output
_____no_output_____
###Markdown
Add data as 'list'
###Code
df.loc[0] = [1, 2]
df.head()
df.loc['new_row'] = [1, 2]
df.head()
###Output
_____no_output_____
###Markdown
Add data as 'dict'
###Code
df.loc[len(df)] = {'b' : 'b_val', 'a': 'a_val'}
df.head()
###Output
_____no_output_____
###Markdown
Add data as 'Series'
###Code
df.loc["yay"] = pd.Series({'a': 'ใ
', 'b' : 'ใ
'})
df.tail()
# ์ด๋ฏธ ์กด์ฌํ๋ index์ ๋ฃ๊ธฐ
df.loc["yay"] = pd.Series({'a': '1111', 'b' : '2222'})
df.tail()
###Output
_____no_output_____
###Markdown
- The methods above are all in-place. Adding rows with `append()` - unlike `loc` above, it is not in-place (returns a new copy of the DataFrame) - `append()` only accepts: - DataFrame - Series - Dictionary - a list of these (**not a bare `list` itself**)
###Code
names_df = pd.DataFrame(
    {
        'Name':['Cheolsu', 'Younghee', 'Youngsu', 'Youngmi'],
        'Age':[12, 13, 14, 15]
    },
    index = ['Canada', 'Canada', 'USA', 'USA']
)
names_df
# Error (check the error message!) => append() cannot tell what index the new row should get
names_df.append(
    {'Name':'Myungsoo', 'Age':1}
)
###Output
_____no_output_____
###Markdown
`ignore_index=True` - resets all of the existing index labels
###Code
names_df.append(
    {'Name':'Myungsoo', 'Age':100},
    ignore_index=True
)
# a list of dicts can also be passed
names_df.append(
    [
        {'Name':'Myungsoo', 'Age':1},
        {'Name':'Dongsu', 'Age':2}
    ],
    ignore_index=True
)
# append() makes a copy() internally, so the original DataFrame is left unchanged
names_df
###Output
_____no_output_____
###Markdown
Keeping the original index => when calling `append()`, pass a `Series` instead of a `dict` - Note: when appending a `Series`, the Series' index becomes the target DataFrame's columns, and its name becomes the target DataFrame's index
###Code
# give the Series a name via the `name` arg
s = pd.Series({'Name': 'Zach', 'Age': 3}, name=len(names_df))
s
names_df.append(s)
# a list of Series can also be passed
s1 = pd.Series({'Name': 'Zach', 'Age': 3}, name=len(names_df))
s2 = pd.Series({'Name': 'Zayd', 'Age': 2}, name='USA')
names_df.append([s1, s2])
###Output
_____no_output_____
###Markdown
- Note: depending on the operation, a Series' `name` can become either an index label or a column name
###Code
pd.concat([s1, s2], axis=1)
###Output
_____no_output_____
###Markdown
concat, join, and merge. `concat()` - "connects" DataFrame or Series objects vertically or horizontally - aligns on the **index (or columns)**, not on values - Defaults to an `outer` join - depending on the operation axis, the columns or index of the concatenated objects are unioned - Example
###Code
import FinanceDataReader as fdr
samsung_df = fdr.DataReader('005390', '2009-01-01', '2017-12-31')
kodex_df = fdr.DataReader('069500', '2016-01-01', '2017-12-31')
samsung_df.head(2)
kodex_df.head(2)
pd.concat(
[samsung_df, kodex_df]
)
###Output
_____no_output_____
###Markdown
- The column/index alignment behavior applies here as well!
###Code
kodex_df[['Open', 'Close']].head()
pd.concat(
[
samsung_df,
kodex_df[['Open', 'Close']]
]
).tail(2) # head()๋ ํ๋ฒ ์คํํด๋ณด์ธ์!
###Output
_____no_output_____
###Markdown
- `keys`, `names` args
###Code
pd.concat(
    [samsung_df, kodex_df],
    keys=['Samsung', 'KODEX200'],
).head()
pd.concat(
    [samsung_df, kodex_df],
    keys=['Samsung', 'KODEX200'],
    names=['ticker']
).head()
pd.concat(
    [samsung_df, kodex_df],
    keys=['Samsung', 'KODEX200'],
    names=['ticker', 'date']
).head()
###Output
_____no_output_____
###Markdown
- On `axis` = 1
###Code
pd.concat([samsung_df, kodex_df], axis=1).head()
pd.concat([samsung_df, kodex_df], keys=['Samsung', 'KODEX200'], axis=1).head(2)
###Output
_____no_output_____
###Markdown
- `join` argument - How to handle **indexes** on the **other** axis(es). That is, how to join along the axis other than the one being concatenated (the explicitly specified axis).
###Code
# default 'outer' join
pd.concat([samsung_df, kodex_df], keys=['Samsung', 'kodex'], axis=1, names=['ticker']).head()
# join = inner (date intersection)
pd.concat([samsung_df, kodex_df], keys=['Samsung', 'kodex'], axis=1, names=['ticker'], join='inner').head()
# the concat direction is axis=0, so the join applies to axis=1
pd.concat([samsung_df.head(), kodex_df[['Close']].head()], join='inner')
###Output
_____no_output_____
###Markdown
- Caution: with an `outer` join where no column names overlap, nothing aligns, so everything gets filled with NaN!
###Code
samsung_diff_col_df = samsung_df.copy()
samsung_diff_col_df.columns = ['1_' + col for col in samsung_df.columns]
samsung_diff_col_df.head()
samsung_df.head()
pd.concat([samsung_diff_col_df, kodex_df]).head()
###Output
_____no_output_____
###Markdown
Practical example: using concat to pull out only the Close data
###Code
total_df = pd.concat([samsung_df, kodex_df], keys=['Samsung', 'kodex200'], names=['ticker'])
total_df.head()
total_df.tail()
total_df = total_df.reset_index()
total_df.head()
total_df.pivot(index='Date', columns='ticker', values='Close')
###Output
_____no_output_____
###Markdown
- `pivot()` example
###Code
sample_data = pd.DataFrame(
    {
        "ticker":["Samsung", "Hyundai", "Hynix", "Samsung", "Hyundai", "Hynix"],
        "datetime":["2019-01-01", "2019-01-01", "2019-01-01", "2019-01-02", "2019-01-02", "2019-01-02"],
        "price":[1,2,3, 4,5,6]
    }
)
sample_data
sample_data.sort_values("ticker")
sample_data.pivot(index="datetime", columns="ticker", values="price")
###Output
_____no_output_____
###Markdown
`join()` - combines two DataFrames (usually with different indexes) into one - aligns the calling DataFrame's **column(s) or index** with the other DataFrame's **index**: 1. index - index 2. columns - index (the calling object uses its column(s), the called object uses its index) - the `on` arg = a column of the calling object, i.e. it says which column of the calling object the called object's index should be matched against - calling `set_index()` first and then joining index-to-index without `on` gives the same result - Cartesian product joining - Defaults to a `left` join - in most cases interchangeable with `merge` - Example 1
###Code
left = pd.DataFrame({'A': ['A0', 'A1', 'A2'],
'B': ['B0', 'B1', 'B2']},
index=['K0', 'K1', 'K2'])
right = pd.DataFrame({'C': ['C0', 'C2', 'C3'],
'D': ['D0', 'D2', 'D3']},
index=['K0', 'K2', 'K3'])
left
right
left.join(right)
left.join(right, how='outer')
###Output
_____no_output_____
###Markdown
- Example 2
###Code
left = pd.DataFrame(
{
'A':['A0', 'A1', 'A2', 'A3'],
'B':['B0', 'B1', 'B2', 'B3'],
'key':['K0', 'K1', 'K0', 'K1'],
}
)
right = pd.DataFrame(
{
'C':['C0', 'C1'],
'D':['D0', 'D1'],
},
index=['K0', 'K1']
)
left
right
###Output
_____no_output_____
###Markdown
- The two calls below give the same result
###Code
# left.join(right, on='key')
left.join(right, on='key').set_index("key")
# left.set_index('key')
left.set_index('key').join(right)
###Output
_____no_output_____
###Markdown
- l_suffix, r_suffix
###Code
a = pd.DataFrame([1,2,3], index=['a','b','c'], columns=['value'])
b = pd.DataFrame([4,2,6], index=['a','c','d'], columns=['value'])
a
b
a.join(b, lsuffix="_x", rsuffix="_y", how="inner")
###Output
_____no_output_____
###Markdown
- Example 3 (joining the median market cap from lec1_3 earlier)
###Code
a_df = pd.read_csv("my_data/Small_and_Big.csv", index_col=[0])
a_df.head()
median_df = a_df.groupby(['date']).agg({'์๊ฐ์ด์ก (๋ณดํต)(ํ๊ท )(์)': 'median'})
median_df.columns = ['์๊ฐ์ด์ก_median']
median_df.head()
joined_df = a_df.join(median_df, on="date")
joined_df.head()
joined_df[joined_df['date'] == "2000-08-31"].head()
# Hint: you can do it roughly as follows.
# cond1 = joined_df['์๊ฐ์ด์ก(๋ณดํต~~)'] < joined_df['์๊ฐ์ด์ก_median']
# joined_df.loc[cond1, "small_or_big"] = "small"
# joined_df.loc[~cond1, "small_or_big"] = "big"
###Output
_____no_output_____
###Markdown
`merge()` - Aligns the calling DataFrame's *column(s)* with the other DataFrame's *column(s)* - `left_index` / `right_index` arguments also exist (for index-index alignment) - `join()` actually calls `reset_index()` and then `merge()` internally - Cartesian product joining - Defaults to an `inner` join - unlike `concat()`, the join uses the values themselves rather than index/column labels
###Code
left = pd.DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'],
'key2': ['K0', 'K1', 'K0', 'K1'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']})
right = pd.DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'],
'key2': ['K0', 'K0', 'K0', 'K0'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']})
left
right
# default: inner join (intersection of keys)
pd.merge(left, right, on=['key1', 'key2'])
# outer join (union of keys)
pd.merge(left, right, how='outer', on=['key1', 'key2'])
pd.merge(left, right, how='right', on=['key1', 'key2'])
pd.merge(left, right, how='left', on=['key1', 'key2'])
###Output
_____no_output_____
###Markdown
- More about Cartesian product joining
###Code
left = pd.DataFrame({'A':[1,2,], 'B':[2,2]})
right = pd.DataFrame({'A':[4,5,6], 'B':[2,2,2]})
left
right
# left, right, inner, and outer all give the same result here
pd.merge(left, right, on="B", how='left')
###Output
_____no_output_____
###Markdown
- Example
###Code
close_df = samsung_df['Close'].reset_index()
vol_df = samsung_df['Volume'].reset_index()
close_df.head()
vol_df.head()
vol_df.iloc[:2]
# default is 'inner' join
pd.merge(close_df, vol_df.iloc[:2]) # columns with the same name are matched automatically
# 'outer' join
pd.merge(close_df, vol_df.iloc[:2], how="outer").head(5)
###Output
_____no_output_____
###Markdown
join & merge: when should each be used? - if at least one side is joined on its index => `join()` - if both sides must be matched on columns => `merge()` - with `merge()`, passing `left_index` / `right_index` gives the same result as `join()` - instead of `join()`, you can also `reset_index()` and then use `merge()`
###Code
a = pd.DataFrame([1,2,3], index=['a','b','c'], columns=['value'])
b = pd.DataFrame([4,2,6], index=['a','c','d'], columns=['value'])
a
b
a.merge(b)
a.reset_index().merge(b.reset_index())
a.merge(b, left_index=True, right_index=True)
a.join(b, lsuffix="_x", rsuffix="_y", how="inner")
###Output
_____no_output_____
###Markdown
Differences between concat and join/merge
###Code
a = pd.DataFrame({"a": [1,2,3],}, index=[1,2,3])
b = pd.DataFrame({"b": [1,4,5],}, index=[1,4,5])
a
b
pd.concat([a, b], axis=1)
a = pd.DataFrame({"a": [1,2,3],}, index=[1,2,2])
b = pd.DataFrame({"b": [1,4,5],}, index=[1,4,5])
a
b
# Raises an error! => concat() cannot form a cartesian product, so it fails when there are duplicate index or column labels
pd.concat([a, b], axis=1)
###Output
_____no_output_____
###Markdown
Practical example: Flipkart
###Code
product_df = pd.read_csv("my_data/product.csv", index_col=0)
review_df = pd.read_csv("my_data/review.csv", index_col=0)
product_df.shape
review_df.shape
product_df.head(2)
review_df.head(2)
flipkart_df = pd.merge(
product_df,
review_df,
left_on="id",
right_on='product__id',
    how='right', # build the data keyed on the review ids (product__id); if you want only reviews whose "product" info definitely exists, use "left" instead
)
flipkart_df.shape
flipkart_df.head(2)
# to remove columns, use drop([col1, col2, ...], axis=1)
flipkart_df = flipkart_df.drop(['id', 'product__id', 'author'], axis=1)
flipkart_df.head(2)
###Output
_____no_output_____
###Markdown
Amazon
###Code
amazon_df = pd.read_csv("my_data/amazon_review1.csv", index_col=0)
amazon_df.head(2)
###Output
_____no_output_____
###Markdown
Combining the data
###Code
amazon_df.shape
flipkart_df.shape
df = pd.concat([amazon_df, flipkart_df], axis=0) # you can also use the `keys` argument to keep track of which site each row came from
df.shape
df.head()
df['date'] = pd.to_datetime(df['date'])
df['price'] = df['price'].astype(float)
df.set_index('date', inplace=True)
df = df.loc[:"2017-12-31"] # even with a DatetimeIndex, range indexing via loc using date strings works
df.rename(columns={'title_x':'name', 'title_y':'title'}, inplace=True)
df['price_grp'] = pd.cut(df['price'], [0, 5000, 15000, 200000], labels=["low", "mid", "high"])
df.head()
###Output
_____no_output_____
###Markdown
Average product price by period, across all products
###Code
# As shown below, the argument to groupby() does not have to be a column name.
# Categorical variables derived from the df object itself (here, from df.index) work just as well.
df.groupby([df.index.year, df.index.quarter]).agg({'price':'mean'})
df.groupby([df.index.year, df.index.quarter]).agg({'price':'mean'}).plot(kind='bar')
%matplotlib inline
ax = df.resample("Q")['price'].mean().plot();
ax.set_title("Average product price by period");
ax.set_xlabel("Period");
ax.set_ylabel("Price");
###Output
_____no_output_____
###Markdown
Number of reviews per brand
###Code
df.groupby(['brand']).agg({'name':'count'})
df.groupby(['brand']).agg({'name':'count'}).plot(kind='bar', figsize=(8,5));
###Output
_____no_output_____
|
Lez11_Esercizi_Preparatori/Esercizi1.ipynb
|
###Markdown
Exam preparation exercises. **1** - Create a dictionary whose keys are: title, author, number of pages, and publisher of a book. Take the books The Jungle Book and Treasure Island and store them as dictionaries.
###Code
libro1 = {'titolo':'Il libro della giungla', 'autore':' Rudyard Kipling', 'pagine':196, 'casa editrice':'feltrinelli'}
libro2 = {'titolo':'L\'isola del tesoro', 'autore':' Robert Louis Stevenson', 'pagine':287, 'casa editrice':'feltrinelli'}
###Output
_____no_output_____
###Markdown
**2** - Store the two books from the previous exercise (as dictionaries) in a list, and define a function that takes this list as input and prints the titles of all the books it contains.
###Code
libri = [libro1,libro2]
def stampa(lista_libri):
for libro in lista_libri:
print(libro['titolo'])
stampa(libri)
def stampa_LC_join(lista_libri):
print('\n'.join([libro['titolo'] for libro in lista_libri]))
stampa_LC_join(libri)
###Output
Il libro della giungla
L'isola dl tesoro
Il libro della giungla
L'isola dl tesoro
###Markdown
**3** - Define a list containing all the multiples of 3 (but not multiples of 4) from 1 to 120.
###Code
list(filter(lambda x: x%3==0 and not x%4==0, [i for i in range(1,120)]))
lista = [x for x in range(1,120) if x%3==0 and x%4!=0]
print(lista)
###Output
[3, 6, 9, 15, 18, 21, 27, 30, 33, 39, 42, 45, 51, 54, 57, 63, 66, 69, 75, 78, 81, 87, 90, 93, 99, 102, 105, 111, 114, 117]
###Markdown
**4** - Define a procedure that asks the user for a password containing at least one uppercase letter, two digits, and one special character among: ,/()&%$£ . If the user does not enter a valid password, the procedure keeps asking for a new password, up to a maximum of 5 times.
###Code
maiuscole = 'QWERTYUIOPASDFGHJKLZXCVBNM'
cifre = '123456789'
special = ',/()&%$ยฃ'
def passwordIdonea(password):
maiuscoleCounter = 0
cifreCounter = 0
specialCounter=0
for s in password:
if s in maiuscole:
maiuscoleCounter=maiuscoleCounter+1
elif s in cifre:
cifreCounter = cifreCounter +1
elif s in special:
specialCounter=specialCounter+1
return specialCounter>=1 and maiuscoleCounter>=1 and cifreCounter>=2
counter = 0
while(counter<5):
password = input('inserisci password: ')
if (passwordIdonea(password)):
print('Password Idonea')
counter =5
counter=counter+1
maiuscole = 'QWERTYUIOPASDFGHJKLZXCVBNM'
cifre = '123456789'
special = ',/()&%$ยฃ'
def counter(password,lista,N):
counter = 0;
for lettera in password:
if(lettera in lista):
counter =counter +1;
if (counter==N):
return True
return False
def check(password):
return counter(password,maiuscole,1) and counter(password,cifre,2) and counter(password,special,1)
k =0
while(k<5):
password = input('inserisci password: ')
if (check(password)):
print('Password Idonea')
break
k=k+1
###Output
inserisci password: a
inserisci password: a
inserisci password: a
inserisci password: a
inserisci password: a
###Markdown
**5** - Download the file iris.data from https://archive.ics.uci.edu/ml/machine-learning-databases/iris/. It contains measurements taken on 3 types of flowers; specifically, the columns are: 1. sepal length in cm, 2. sepal width in cm, 3. petal length in cm, 4. petal width in cm, 5. class: Iris Setosa, Iris Versicolour, Iris Virginica. For each flower type (fifth column), compute the mean and variance of the physical characteristics (first 4 columns).
###Code
import csv
data_setosa=[];
data_versiolar=[];
data_virginica=[];
with open('file/iris.data', 'r') as csvfile:
reader = csv.reader(csvfile,delimiter=',')
for row in reader:
        if(row[4]=='Iris-setosa'):
            data_setosa.append(row[:4])
        elif(row[4]=='Iris-versicolor'):
            data_versiolar.append(row[:4])
        else:
            data_virginica.append(row[:4])
def mean(matrix,index):
return sum([float(row[index]) for row in matrix])/len(matrix)
def variance(matrix,index):
m = mean(matrix,index)
return sum([(float(row[index])-m)**2 for row in matrix])/len(matrix)
variance(data_virginica,0)
###Output
_____no_output_____
###Markdown
**6** - Import numpy and define, as a lambda expression, the function f representing sin(x). Then define a procedure (the bisection algorithm, https://it.wikipedia.org/wiki/Metodo_della_bisezione) that finds the solution of sin(x)-0.234=0 in [0, pi/2], with an uncertainty of at most epsilon (passed as an argument).
###Code
import numpy as np
epsilon = 0.000001
f = lambda x : np.sin(x)-0.234
errore = 1
a=0
b = np.pi/2  # the exercise asks for the interval [0, pi/2]
while (errore>epsilon):
c = (a+b)/2
if (f(a)*f(c)>0):
a,b = c,b
else:
a,b=a,c
errore = b-a
print('X=',c)
###Output
X= 0.2361903475331634
###Markdown
**7** - Define a function that takes as input a tuple representing a point in the Cartesian plane and returns a tuple with the number of the quadrant the point lies in and its distance from the origin. If the point lies on one of the axes, the function returns quadrant 0 and the distance from the origin.
###Code
import numpy as np
def funzione(punto):
quadrante = 0;
if (punto[0]>0):
if (punto[1]>0):
quadrante = 1
elif(punto[1]<0):
quadrante = 4
elif(punto[0]<0):
if (punto[1]>0):
quadrante = 2
elif(punto[1]<0):
quadrante = 3
    return (quadrante, np.sqrt(punto[0]**2 + punto[1]**2))  # ** is exponentiation; ^ would be bitwise XOR
funzione((0,0))
###Output
_____no_output_____
###Markdown
**8** - Define a function that takes as input a list of tuples representing points in the Cartesian plane and reorders the list by their distance from the origin (a possible sketch is given below). **9** - Define a function that takes as input a list of integers and a number N, and returns true if there is a pair of numbers whose sum is N. For example, with lista = [1,4,3,2,5] and N = 5 the function returns true because 1+4 = 5; with N = 15 it returns false.
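A possible sketch for exercise 8 (the exact function signature is an assumption):
```python
import math

def ordina_per_distanza(punti):
    # punti: list of (x, y) tuples; returns a new list sorted by distance from the origin
    return sorted(punti, key=lambda p: math.hypot(p[0], p[1]))

print(ordina_per_distanza([(3, 4), (1, 1), (0, 2)]))  # [(1, 1), (0, 2), (3, 4)]
```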
###Code
def check(lista,N):
for i in range(len(lista)):
for j in range(i):
if lista[i]+lista[j]==N:
return True
return False
check([1,4,3,2,5],10)
###Output
_____no_output_____
###Markdown
**10** - Define a function that takes as input a text file containing one word per line and returns a list with the most frequent and the least frequent word. In case of a tie, the function must return the first most-frequent and the first least-frequent word. Example: * piovra * casa * pippo * casa * pippo * lucertola * stupido — the function returns ['casa', 'piovra'] (a compact `Counter`-based sketch follows; the full solution is in the next cell).
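A compact alternative sketch using `collections.Counter` (it assumes Python 3.7+, where `Counter` preserves insertion order so ties resolve to the first-seen word):
```python
from collections import Counter

def piu_meno_frequente(path):
    with open(path) as f:
        parole = [riga.strip() for riga in f if riga.strip()]
    conteggi = Counter(parole)
    # max()/min() return the first word reaching the extreme count, matching the tie rule
    return [max(conteggi, key=conteggi.get), min(conteggi, key=conteggi.get)]

print(piu_meno_frequente('file/testo.txt'))  # expected: ['casa', 'piovra']
```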
###Code
with open('file/testo.txt') as file:
s=file.readlines()
parole = [e.strip() for e in s]
dizionario = {}
for parola in parole:
if (parola not in dizionario):
dizionario[parola]=1
else:
dizionario[parola]=dizionario[parola]+1
print(dizionario)
minValue = len(parole)+1
maxValue = 0
minKey=''
maxKey=''
for key,value in dizionario.items():
if(minValue > value):
minValue=value
minKey = key
if (maxValue < value):
maxValue=value
maxKey = key
print(minKey,maxKey)
###Output
{'piovra': 1, 'casa': 2, 'pippo': 2, 'lucertola': 1, 'stupido': 1}
piovra casa
###Markdown
**11** - Write a function that takes a string as input and returns a list with, at position 0, a new string with 1 in place of vowels and 0 in place of consonants, and, at position 1, the conversion of that binary number to decimal. Example: ['01001', 9] = function('pippo')
###Code
vocali = 'aeiouAEIOU'
def function(parola):
transformed='';
for l in parola:
if(l in vocali):
transformed = transformed + '1'
elif l.isalpha():
transformed = transformed + '0'
value = 0
for i in range(len(transformed)):
value = value + int(transformed[-(i+1)])* 2**i
return (transformed,value)
function('pippo')
###Output
_____no_output_____
|
datashader-work/geoviews-examples/gallery/matplotlib/new_york_boroughs.ipynb
|
###Markdown
Define data
###Code
tiles = gv.tile_sources.Wikipedia
# Project data to Web Mercator
nybb = gpd.read_file(gpd.datasets.get_path('nybb'))
poly_data = nybb.to_crs(ccrs.GOOGLE_MERCATOR.proj4_init)
polys = gv.Polygons(poly_data, vdims='BoroName', crs=ccrs.GOOGLE_MERCATOR)
###Output
_____no_output_____
###Markdown
Plot
###Code
tiles.opts(zoom=10) * polys.opts(color='BoroName', cmap='tab20', fig_size=200, padding=0.1)
###Output
_____no_output_____
|
demo_multi-phase_lockdown.ipynb
|
###Markdown
Using ESOP to design multi-phase lock-downs under constraints Accompanying paper available at https://arxiv.org/abs/2005.11257
###Code
%load_ext autoreload
%autoreload 2
from skopt import gp_minimize as gpm
import numpy as np
import time
from utils import Population
from matplotlib import pyplot as plt
import pylab as plb
###Output
_____no_output_____
###Markdown
**Getting started**: we load the generic population file and set the simulations to run for 500 time steps. **Notation**: We use I2, P2, L2 to denote, respectively, the initiation point, period, and level of the (third) lock-down that ESOP is trying to optimize; together they are referred to as IPL. The previous two lock-downs (which have already happened) are denoted (I0, P0, L0) and (I1, P1, L1). In this experiment, all three parameters I2, P2, L2 are optimized (while (I0, P0, L0) and (I1, P1, L1) stay fixed) under the constraints that I2 must be no less than I1 + P1 + 10 (i.e. the third lock-down may start no earlier than 10 days after the second one ended) and P2 must be no larger than 40 (i.e. no lock-down longer than 40 days).
###Code
pop = Population.fileInit( "pop_generic" )
N = pop.N
T = 500
SIDX_S = 0 # Number of susceptible but non-recovered individuals
SIDX_E = 1 # Number of exposed individuals
SIDX_I = 2 # Number of infectious individuals
SIDX_Q = 3 # Number of quanrantined individuals
SIDX_R = 4 # Number of recovered individuals
SIDX_X = 5 # Number of expired individuals
SIDX_V = 6 # Average virulence of the viral strains in E and I populations
SIDX_EI = 7 # Number of infected individuals
SIDX_D = 8 # Number of individuals infected each day
# From the normalized parameters being optimized by ESOP, recover the I2, P2, L2 values
def getIPL2( x ):
global IPL2c, IPL2w, offset, ul, ll
IPLVal = np.floor( IPL2c + (np.array(x) - 0.5) / 0.5 * IPL2w ).astype(int) + offset.astype(int)
correctedIPLVal = np.minimum( np.maximum( IPLVal, ll ), ul )
return correctedIPLVal
# Get a lockdown schedule corresponding to a certain IPL value for the 3rd lock-down.
# Make sure to incorporate the previous two lockdowns as well.
# The IPL parameter stores lockdown levels from 0 to 10 instead of 0 to 5
# Thus, before using them, divide by 2
def getLKP( IPL ):
LKP = np.zeros((T,))
LKP[I0:I0+P0] = L0
LKP[I1:I1+P1] = L1
I2 = IPL[0]
P2 = IPL[1]
L2 = IPL[2] / 2
LKP[I2:I2+P2] = L2
return LKP
# Find out the objective value corresponding to the stats sent as input
def obj( stats, IPL ):
global SIDX_EI
fEpi = np.max( stats[ SIDX_EI, : ] )
# One day of lockdown at level l causes (l/50)% of population to lose their jobs
daysLockdown = IPL[1]
# Remember, the IPL stores levels from 0 to 10 instead of 0 to 5
level = IPL[2] / 2
fEco = level / 5 * daysLockdown * N / 1000
return [fEpi, fEco, fEpi + fEco]
# Ask the VIPER simulator what does it think will happen if the 3rd lock-down
# is initiated as specified in the normalized parameter x
def askSimulator( x ):
global evalCount, globalDict, freshMask
# Before starting a simulation, turn back time to reset everything
pop.reset()
# Also seed the RNG so that simulations are replicatable
np.random.seed(0)
IPL = getIPL2( x )
# Avoid simulating again for previously queried points
if tuple(IPL) in globalDict:
freshMask.append( False )
return globalDict[tuple(IPL)][-1]
LKP = getLKP( IPL )
stats = pop.simulate( T = T, LKP = LKP, minimal = True )
globalDict[tuple(IPL)] = obj( stats, IPL )
freshMask.append( True )
evalCount += 1
return obj( stats, IPL )[-1]
# Once ESOP is done, find out which parameter, i.e. which values of I2, P2, L2 won!
def getWinner():
global globalDict
keys = list( globalDict.keys() )
vals = np.array( list( globalDict.values() ) )[:,-1]
winnerIdx = np.argmin( vals )
winner = np.array( keys[ winnerIdx ] )
return winner
###Output
_____no_output_____
###Markdown
**Multi-scale Optimization**: ESOP performs Bayesian optimization in a multi-scale manner in several epochs. Initial epochs perform coarse optimization to roughly identify the region with promising objective values. Later epochs zoom into those regions to more and more finely search to obtain points offering highly optimal objective values.In multi-dimensional optimization cases such as this, where 3 parameters are simultaneously getting optimized, this procedure is made slightly more robust by retaining a diverse _active set_ of good-performing candidates from each epoch and searching around them in the next epoch.
###Code
def getAllPairsDistances( A, B ):
weights = np.array( [ 0.01, 0.01, 0.1 ] )
Aw = weights * A
Bw = weights * B
squaredNormsA = np.square( np.linalg.norm( Aw, axis = 1 ) )
squaredNormsB = np.square( np.linalg.norm( Bw, axis = 1 ) )
dists = squaredNormsA[:, np.newaxis] + squaredNormsB - 2 * Aw.dot( Bw.T )
dists[ dists < 0 ] = 0
return dists
def addCandidates( res ):
global branchBound, candidates
perm = np.argsort( res.func_vals )
for i in range( 2 * branchBound ):
key = tuple( getIPL2( res.x_iters[ perm[i] ] ) )
val = res.func_vals[ perm[i] ]
if key not in candidates:
candidates[key] = val
def updateActive():
global branchBound, candidates
keys = list( candidates.keys() )
vals = list( candidates.values() )
perm = np.argsort( vals )
# Screen the top few best performing keys
n = min( 2 * branchBound, len( candidates ) )
keysScreened = np.array( [ keys[ perm[j] ] for j in range(n) ] )
activeSet = np.zeros( (branchBound, keysScreened.shape[1]) )
# Definitely include the best performing candidate in the active set
i = 0
activeSet[0,:] = keysScreened[i,:]
# To select the rest of the active set, perform the k-means++ algorithm
for t in range( 1 , min( branchBound, len( candidates ) ) ):
dist = np.min( getAllPairsDistances( keysScreened, activeSet[0:t,:] ), axis = 1 )
probs = dist/np.sum(dist)
i = np.argmax( probs ) # Alternatively, we could have used np.random.choice( np.arange(n), p = probs )
activeSet[t,:] = keysScreened[i,:]
return activeSet
# The first two lock-downs have already happened
# Their parameters are fixed and not optimizable anymore
I0 = 15
P0 = 15
L0 = 4
I1 = 56
P1 = 35
L1 = 4
# All parameters are optimized in various ranges that are described using
# the center point of the range, the half-width of the range and the initial
# offset of the range e.g. the range with center c, width w and offset o is
# the range [o + c - w, o + c + w]. Each of the three parameters I2, P2, L2
# have their own centers, widths and offsets specified below
IPL2c = np.array( [25, 20, 5] )
IPL2w = np.array( [25, 20, 5] )
offset = np.array( [100, 0, 0] )
# Upper and lower bounds on legal values of lock-down initiation points
ul = np.array( [150, 40, 10] )
ll = np.array( [100, 0, 0] )
branchBound = 2
activeSet = []
evalCount = 0
globalDict = {}
freshMask = []
candidates = {}
res = gpm( askSimulator, [(0.0, 1.0), (0.0, 1.0), (0.0, 1.0)], acq_func = "EI", n_calls = 20, n_random_starts = 5, random_state = 0 )
addCandidates( res )
activeSet = updateActive()
# Halve the width of the region in which we are searching
IPL2w = IPL2w / 2
for epoch in range(5):
print( "epoch = ", epoch )
candidates = {}
for subepoch in range( len( activeSet ) ):
print( "subepoch %d of %d" % ( subepoch + 1, len( activeSet ) ) )
IPL2c = activeSet[subepoch]
res = gpm( askSimulator, [(0.0, 1.0), (0.0, 1.0), (0.0, 1.0)], acq_func = "EI", n_calls = 20 - epoch, n_random_starts = 5, random_state = 0 )
addCandidates( res )
# Refine the search in the next epoch
# Choose new candidates to populate the active set
activeSet = updateActive()
# Halve the width of the region in which we are searching
IPL2w = IPL2w / 2
###Output
epoch = 0
subepoch 1 of 2
###Markdown
**Convergence Plot**: The following plot shows how fast ESOP converges to a near-optimal point
###Code
# Find out the suggestion made by ESOP
IPLBest = getWinner()
print( "Start on day %d, last for %d days, at level %f" % ( IPLBest[0], IPLBest[1], IPLBest[2]/2 ) )
validVals = np.array( list( globalDict.values() ) )
mins = np.minimum.accumulate( validVals[:,2] )
plt.figure()
plt.plot( np.arange(1, len( validVals ) + 1), mins, color = 'k', linewidth = 4 )
plt.xlabel( "# Calls to VIPER" )
plt.ylabel( "Objective value" )
plt.tight_layout()
###Output
Start on day 114, last for 25 days, at level 3.500000
###Markdown
**Results of partial lock-downs**: to understand what would have happened had the third lockdown not taken place, we perform hypothetical experiments with just the first and then just the first two lock-downs
###Code
# Get hold of outcomes with just the first lockdown
pop.reset()
np.random.seed(0)
LKP = np.zeros((T,))
LKP[I0:I0+P0] = L0
(statsLKP1, tInfect, tQuarantine, tRecovery, tExpiry) = pop.simulate( T = T, LKP = LKP )
# Get hold of outcomes with just the first two lockdowns
pop.reset()
np.random.seed(0)
LKP = np.zeros((T,))
LKP[I0:I0+P0] = L0
LKP[I1:I1+P1] = L1
(statsLKP2, tInfect, tQuarantine, tRecovery, tExpiry) = pop.simulate( T = T, LKP = LKP )
###Output
_____no_output_____
###Markdown
Final Results
###Code
pop.reset()
np.random.seed(0)
LKP = getLKP( IPLBest )
(stats, tInfect, tQuarantine, tRecovery, tExpiry) = pop.simulate( T = T, LKP = LKP )
outcome = obj( stats, IPLBest )
print( "%d Infected" % len(tInfect) )
print( "%d Expired" % len(tExpiry) )
print( "Peak %d" % max(stats[SIDX_EI,:]) )
print( "Recovery time %f" % np.mean( tRecovery ) )
print( "Expiry time %f" % np.mean( tExpiry ) )
print( "Quarantine time %f" % np.mean( tQuarantine ) )
print( "%d Quarantined" % len( tQuarantine ) )
print( "Objective acheived is %f (fEpi) + %f (fEco) = %f (overall)" % (outcome[0], outcome[1], outcome[2]) )
###Output
13977 Infected
1999 Expired
Peak 6052
Recovery time 82.791667
Expiry time 22.046523
Quarantine time 11.152221
13303 Quarantined
Objective acheived is 6052.000000 (fEpi) + 350.000000 (fEco) = 6402.000000 (overall)
###Markdown
**Outcome of Lock-down suggested by ESOP**: The following code is meant to reproduce Fig 4(d) in the accompanying paper.
###Code
names = [ "S", "E", "I", "Q", "R", "X", "V", "EI", "D" ]
fullnames = [ "Susceptible (S)", "Exposed (E)", "Infectious (I)", "Quarantined (Q)", "Recovered (R)", "Expired (X)", "V", "Exposed + Infectious (E + I)", "Daily" ]
fig = plt.figure()
plt.plot( statsLKP1[SIDX_EI,:], color = "#ff7f0e", linestyle = ":", linewidth = 4 )
plt.plot( statsLKP2[SIDX_EI,:], color = "#ff7f0e", linestyle = ":", linewidth = 4 )
plt.plot( stats[SIDX_EI,:], label = fullnames[SIDX_EI], color = "#ff7f0e", linewidth = 4 )
plt.plot( stats[SIDX_X,:], label = fullnames[SIDX_X], color = "#d62728", linewidth = 4 )
plt.plot( stats[SIDX_I,:] - stats[SIDX_Q], label = "Non-quaran. Infectious (I - Q)", color = "#9467bd", linewidth = 4 )
ax = plt.gca()
plt.grid()
plt.xlabel("Time", size = 16)
plt.ylabel("Number of Individuals", size = 16)
ax.tick_params( axis='both', which='major', labelsize = 12 )
plt.legend( loc = "upper right", fontsize = 10 )
ax.set_xlim( 0, 300 )
ax2 = ax.twinx()
fillLimit0 = getLKP( [0,0,0] )
plb.fill( np.arange( T ), fillLimit0, facecolor = "green", label = "earlier lock-downs", alpha = 0.15 )
fillLimit1 = LKP
fillLimit1[:100] = 0
plb.fill( np.arange( T ), fillLimit1, facecolor = "blue", label = "lock-down #3 (by ESOP)", alpha = 0.15 )
ax2.set_ylim([0,5])
plt.ylabel( "Lock-down level", size = 16 )
plt.legend( loc = "center right", fontsize = 10 )
plt.tight_layout()
###Output
_____no_output_____
|
notebooks/FixTimeShifts.ipynb
|
###Markdown
Time Shift Detection and Fixing. This notebook illustrates the usage of the `fix_time_shifts` function. This algorithm determines if the time stamps provided with the data have "shifted" at any point and then corrects the shift if found. These shifts can often be caused by incorrect handling of daylight savings time, but can come from other sources as well. They are best visualized by viewing the 2D time series data as an image.
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import sys
sys.path.append('..')
from solardatatools.data_transforms import fix_time_shifts, make_2d
from solardatatools.dataio import get_pvdaq_data
from solardatatools.plotting import plot_2d
df1 = get_pvdaq_data(sysid=1199, year=[2015, 2016, 2017], api_key='DEMO_KEY')
D = make_2d(df1, key='dc_power')
###Output
_____no_output_____
###Markdown
Use the provided plotting function to view the 2D representation of the power data. Power output is represented by the color of the pixel.
###Code
plot_2d(D);
Dfixed, ixs = fix_time_shifts(D, verbose=True, return_ixs=True)
_ = plot_2d(Dfixed)
if len(ixs) > 0:
for ix in ixs:
plt.axvline(ix, color='green', alpha=0.7)
###Output
_____no_output_____
###Markdown
Next, we show that the algorithm correctly determines that there are no time shifts in a clean data set.
###Code
df2 = get_pvdaq_data(sysid=35, year=[2011, 2012, 2013], api_key='DEMO_KEY')
D = make_2d(df2, key='dc_power')
plot_2d(D);
Dfixed, ixs = fix_time_shifts(D, verbose=True, return_ixs=True)
###Output
No time shifts found
###Markdown
Time Shift Detection and Fixing. This notebook illustrates the usage of the `fix_time_shifts` function. This algorithm determines if the time stamps provided with the data have "shifted" at any point and then corrects the shift if found. These shifts can often be caused by incorrect handling of daylight savings time, but can come from other sources as well. They are best visualized by viewing the 2D time series data as an image.
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import sys
sys.path.append('..')
from solardatatools.matrix_embedding import make_2d
from solardatatools.time_axis_manipulation import fix_time_shifts
from solardatatools.data_filling import zero_nighttime, interp_missing
from solardatatools.dataio import get_pvdaq_data
from solardatatools.plotting import plot_2d
df1 = get_pvdaq_data(sysid=1199, year=[2015, 2016, 2017], api_key='DEMO_KEY')
D = interp_missing(zero_nighttime(make_2d(df1, key='dc_power')))
###Output
_____no_output_____
###Markdown
Use the provided plotting function to view the 2D representation of the power data. Power output is represented by the color of the pixel.
###Code
plot_2d(D);
Dfixed, ixs = fix_time_shifts(D, verbose=True, return_ixs=True, c1=1, c2=500, solar_noon_estimator='com')
ixs
_ = plot_2d(Dfixed)
if len(ixs) > 0:
for ix in ixs:
plt.axvline(ix, color='green', alpha=0.7)
###Output
_____no_output_____
###Markdown
Next, we show that the algorithm correctly determines that there are no time shifts in a clean data set.
###Code
df2 = get_pvdaq_data(sysid=35, year=[2011, 2012, 2013], api_key='DEMO_KEY')
D = interp_missing(zero_nighttime(make_2d(df2, key='dc_power')))
plot_2d(D);
Dfixed, ixs = fix_time_shifts(D, verbose=True, return_ixs=True, c1=1, c2=500, solar_noon_estimator='com')
ixs
###Output
_____no_output_____
|
docs/examples/Manual Workflow Example.ipynb
|
###Markdown
Support for Current CCT VIS Workflow in Hatchet. We begin by loading real data, downloaded from Kaggle, into a pandas dataframe and computing derived information from it.
###Code
import os
import json
import pandas as pd
import pycountry_convert as pc

# `example_code` (the path to the example directory) is assumed to be defined earlier in the notebook
dataset = pd.read_csv(os.path.join(example_code, 'data/Electricity_Production_By_Source.csv'))
vis_in = dataset.groupby(['Entity']).max().reset_index()
def getContinent(row):
try:
c_code = pc.country_name_to_country_alpha2(row['Entity'], cn_name_format='default')
return pc.country_alpha2_to_continent_code(c_code)
except:
return 'OTH'
vis_in['Continent'] = vis_in.apply(getContinent, axis=1)
vis_in
###Output
_____no_output_____
###Markdown
We can use the basic functionality in roundtrip to pass this data into our scatterplot as a DataFrame (or other complex datatype). The visualization developer -- whoever built scatterplot -- will manage the necessary conversions.
###Code
%scatter_plt vis_in
###Output
_____no_output_____
###Markdown
Once visualized, we can return our filter selections to our notebook with `get_filter`, a magic function provided by the visualization developer. This function makes a pass-through call to a "return" function provided by our Roundtrip API and stores the data in `fltr`.
###Code
%get_filter fltr
fltr = json.loads(fltr)
###Output
_____no_output_____
###Markdown
`fltr` is returned from the `get_filter` function as a JSON string, so we load it and then filter our data. If we re-run the `scatter_plt` cell above, we will see that the selected circles are now gone.
###Code
vis_in = vis_in[~vis_in.index.isin(fltr)]
vis_in
###Output
_____no_output_____
|
tutorials/notebook/cx_site_chart_examples/boxplot_9.ipynb
|
###Markdown
Example: CanvasXpress boxplot Chart No. 9This example page demonstrates how to, using the Python package, create a chart that matches the CanvasXpress online example located at:https://www.canvasxpress.org/examples/boxplot-9.htmlThis example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function.Everything required for the chart to render is included in the code below. Simply run the code block.
###Code
from canvasxpress.canvas import CanvasXpress
from canvasxpress.js.collection import CXEvents
from canvasxpress.render.jupyter import CXNoteBook
cx = CanvasXpress(
render_to="boxplot9",
data={
"y": {
"smps": [
"Var1",
"Var2",
"Var3",
"Var4",
"Var5",
"Var6",
"Var7",
"Var8",
"Var9",
"Var10",
"Var11",
"Var12",
"Var13",
"Var14",
"Var15",
"Var16",
"Var17",
"Var18",
"Var19",
"Var20",
"Var21",
"Var22",
"Var23",
"Var24",
"Var25",
"Var26",
"Var27",
"Var28",
"Var29",
"Var30",
"Var31",
"Var32",
"Var33",
"Var34",
"Var35",
"Var36",
"Var37",
"Var38",
"Var39",
"Var40",
"Var41",
"Var42",
"Var43",
"Var44",
"Var45",
"Var46",
"Var47",
"Var48",
"Var49",
"Var50",
"Var51",
"Var52",
"Var53",
"Var54",
"Var55",
"Var56",
"Var57",
"Var58",
"Var59",
"Var60"
],
"data": [
[
4.2,
11.5,
7.3,
5.8,
6.4,
10,
11.2,
11.2,
5.2,
7,
16.5,
16.5,
15.2,
17.3,
22.5,
17.3,
13.6,
14.5,
18.8,
15.5,
23.6,
18.5,
33.9,
25.5,
26.4,
32.5,
26.7,
21.5,
23.3,
29.5,
15.2,
21.5,
17.6,
9.7,
14.5,
10,
8.2,
9.4,
16.5,
9.7,
19.7,
23.3,
23.6,
26.4,
20,
25.2,
25.8,
21.2,
14.5,
27.3,
25.5,
26.4,
22.4,
24.5,
24.8,
30.9,
26.4,
27.3,
29.4,
23
]
],
"vars": [
"len"
]
},
"x": {
"supp": [
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ"
],
"order": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
],
"dose": [
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2
]
}
},
config={
"axisAlgorithm": "rPretty",
"axisTickScaleFontFactor": 1.8,
"axisTitleFontStyle": "bold",
"axisTitleScaleFontFactor": 1.8,
"binAlignment": "center",
"binned": True,
"graphOrientation": "vertical",
"graphType": "Boxplot",
"groupingFactors": [
"dose"
],
"jitter": False,
"showBoxplotOriginalData": True,
"showLegend": False,
"smpLabelRotate": 90,
"smpLabelScaleFontFactor": 1.8,
"smpTitle": "dose",
"smpTitleFontStyle": "bold",
"smpTitleScaleFontFactor": 1.8,
"theme": "CanvasXpress",
"title": "The Effect of Vitamin C on Tooth Growth in Guinea Pigs",
"xAxis2Show": False,
"xAxisMinorTicks": False,
"xAxisTitle": "len"
},
width=613,
height=613,
events=CXEvents(),
after_render=[],
other_init_params={
"version": 35,
"events": False,
"info": False,
"afterRenderInit": False,
"noValidate": True
}
)
display = CXNoteBook(cx)
display.render(output_file="boxplot_9.html")
###Output
_____no_output_____
|
sess2_imageClassification/exercise-2_data-augmentation.ipynb
|
###Markdown
Exercise 2: Data Augmentation. Taking a picture of an object from a different angle does not change the type of the object. Yet machine learning models can overfit, learning to recognize an object only from a single angle or, worse, only under certain lighting conditions or against certain backgrounds. Luckily, it is possible to lower this risk without collecting new data by instead altering the existing images in a dataset. This process is called "data augmentation" and it is the focus of this exercise.
###Code
%matplotlib inline
from matplotlib import pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
from keras.datasets import cifar10
from keras import Sequential
from keras import layers
import keras
###Output
_____no_output_____
###Markdown
Load in Data. We are going to use the [CIFAR 10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset as an example. This dataset contains pictures of many different kinds of objects.
###Code
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
fig, ax = plt.subplots()
ax.imshow(x_train[0])
###Output
_____no_output_____
###Markdown
I'm not sure what I'm looking at here. Let's hope our CNN does As before, we need to convert the labels into a binary representation and images into a floating point number
###Code
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
###Output
_____no_output_____
###Markdown
Illustrate Data Augmentation. A few things we can do to this picture to emulate taking it with a different camera, view, etc. are: rotate the image, darken it, or magnify it. Keras has a tool, [ImageDataGenerator](https://keras.io/preprocessing/image/), that helps with this process.
###Code
gen = ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True,
rotation_range=45, shear_range=20, validation_split=0.1)
fig, axs = plt.subplots(2, 2)
for ax in axs.flatten():
ax.imshow(gen.random_transform(x_train[0]))
###Output
_____no_output_____
###Markdown
Hopefully, this is the right kind of noise to help the model generalize better Fitting without augmentationJust to serve as a baseline. We will borrow the architecture from the [Keras example for CIFAR](https://keras.io/examples/cifar10_cnn/).
###Code
def make_model():
return Sequential([
layers.Conv2D(32, (3, 3), padding='same', activation='relu',
input_shape=x_train.shape[1:]),
layers.Conv2D(32, (3, 3), padding='same', activation='relu',
input_shape=x_train.shape[1:]),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Dropout(0.25),
layers.Conv2D(64, (3, 3), padding='same', activation='relu',
input_shape=x_train.shape[1:]),
layers.Conv2D(64, (3, 3), padding='same', activation='relu',
input_shape=x_train.shape[1:]),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Dropout(0.25),
layers.Flatten(),
layers.Dense(512, activation='relu'),
layers.Dropout(0.5),
layers.Dense(10, activation='softmax')
])
model = make_model()
model.compile('rmsprop', 'categorical_crossentropy', metrics=['acc'])
model.fit(x_train, y_train, batch_size=64, epochs=32)
scores = model.evaluate(x_test, y_test)
print('Accuracy: {}'.format(scores[1]))
###Output
_____no_output_____
###Markdown
Fitting a CNN with AugmentationYou now have a tool that will generate a new series of images given a current training set. Rather than generating all of the images in advance, Keras lets you generate them on-the-fly with the `fit_generator` function. Making a generator with [reasonable settings](https://keras.io/examples/cifar10_cnn/)
###Code
gen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
zca_epsilon=1e-06, # epsilon for ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
# randomly shift images horizontally (fraction of total width)
width_shift_range=0.1,
# randomly shift images vertically (fraction of total height)
height_shift_range=0.1,
shear_range=0., # set range for random shear
zoom_range=0., # set range for random zoom
channel_shift_range=0., # set range for random channel shifts
# set mode for filling points outside the input boundaries
fill_mode='nearest',
cval=0., # value used for fill_mode = "constant"
horizontal_flip=True, # randomly flip images
vertical_flip=False, # randomly flip images
# set rescaling factor (applied before any other transformation)
rescale=None,
# set function that will be applied on each input
preprocessing_function=None,
# image data format, either "channels_first" or "channels_last"
data_format=None,
# fraction of images reserved for validation (strictly between 0 and 1)
validation_split=0.)
model = make_model()
model.compile('rmsprop', 'categorical_crossentropy', metrics=['acc'])
gen.fit(x_train) # Need to fit the model for some of the random moves
model.fit_generator(gen.flow(x_train, y_train, batch_size=64),
steps_per_epoch=x_train.shape[0] // 64,
epochs=32)
scores = model.evaluate(x_test, y_test)
print('Accuracy: {}'.format(scores[1]))
###Output
_____no_output_____
|
notebooks/Complementar/Outros_01/T20/T20 - Code.ipynb
|
###Markdown
Introduction The mathematical-morphology methods originally developed for binary images also have important characteristics when applied to grayscale images. The operation of the methods is kept the same, but in this case the applications aim to reduce noise, highlight edges and emphasize components, among other results. The morphological gradient method stands out: it combines erosion and dilation to highlight the edges of an image, as follows:$$ g = (f\oplus b) - (f \ominus b) $$ Discussion of the methods The morphological gradient operation works like a high-pass filter, highlighting edges and removing homogeneous regions. The erosion operator shrinks the white regions while, at the same time, dilation enlarges them; subtracting these two effects yields the edges of the image, as shown below:
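Before the notebook cells below, which call `cv2.morphologyEx` directly, here is a small cross-check (a sketch, reading `house.tif` in grayscale) that computes the gradient explicitly as dilation minus erosion, exactly as in the formula above:

```python
import cv2
import numpy as np

# Sketch: morphological gradient computed explicitly as (dilation - erosion).
img_gray = cv2.imread('house.tif', cv2.IMREAD_GRAYSCALE)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))

dilated = cv2.dilate(img_gray, kernel)   # f (+) b
eroded = cv2.erode(img_gray, kernel)     # f (-) b
gradient_manual = cv2.subtract(dilated, eroded)

# Should match OpenCV's built-in morphological gradient (maximum difference of zero).
gradient_builtin = cv2.morphologyEx(img_gray, cv2.MORPH_GRADIENT, kernel)
print(np.abs(gradient_manual.astype(int) - gradient_builtin.astype(int)).max())
```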
###Code
import cv2
import numpy as np
import matplotlib.pyplot as plt
import pywt
###Output
_____no_output_____
###Markdown
- Open the image:
###Code
img = np.array(cv2.imread('house.tif'))
plt.figure(1)
plt.imshow(img)
#plt.axis("off")
plt.title("Imagem original")
plt.show()
###Output
_____no_output_____
###Markdown
Morphological Gradient: - Structuring Element 01:
###Code
sizeEE = (5,5)
kernel = cv2.getStructuringElement(0, sizeEE, (-1,-1))
print(kernel)
img_gradient = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)
plt.figure(2,figsize=(12,12))
plt.subplot(131)
plt.imshow(img)
plt.axis("off")
plt.title("Imagem original")
plt.subplot(132)
plt.imshow(img_gradient)
plt.axis("off")
plt.title("Imagem apรณs gradiente morfolรณgico")
plt.subplot(133)
plt.imshow(kernel, 'gray', interpolation='nearest')
plt.axis("off")
plt.grid()
plt.title("El. Estruturante")
plt.show()
###Output
_____no_output_____
###Markdown
- Structuring Element 02:
###Code
sizeEE = (5,5)
kernel = cv2.getStructuringElement(1, sizeEE, (-1,-1))
print(kernel)
img_gradient = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)
plt.figure(3,figsize=(10,9))
plt.subplot(131)
plt.imshow(img)
plt.axis("off")
plt.title("Imagem original")
plt.subplot(132)
plt.imshow(img_gradient)
plt.axis("off")
plt.title("Imagem apรณs gradiente morfolรณgico")
plt.subplot(133)
plt.imshow(kernel, 'gray', interpolation='nearest')
plt.axis("off")
plt.grid()
plt.title("El. Estruturante")
plt.show()
###Output
_____no_output_____
###Markdown
- Structuring Element 03:
###Code
sizeEE = (5,5)
kernel = cv2.getStructuringElement(2, sizeEE, (-1,-1))
print(kernel)
img_gradient = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)
plt.figure(3+1, figsize=(12,12))
plt.subplot(131)
plt.imshow(img)
plt.axis("off")
plt.title("Imagem original")
plt.subplot(132)
plt.imshow(img_gradient)
plt.axis("off")
plt.title("Imagem apรณs gradiente morfolรณgico")
plt.subplot(133)
plt.imshow(kernel, 'gray', interpolation='nearest')
plt.axis("off")
plt.grid()
plt.title("El. Estruturante")
plt.show()
###Output
_____no_output_____
|
Model/ModelBuild.ipynb
|
###Markdown
Installing the necessary packages to run the TensorFlow Object Detection API in Colab **Note**: it's best if you use a GPU for this; go to Edit -> Notebook settings and change the hardware accelerator to GPU.
###Code
!pip install tensorflow-gpu==1.15
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
###Output
_____no_output_____
###Markdown
Get the tensorflow object detection api files
###Code
!git clone https://github.com/tensorflow/models.git
###Output
_____no_output_____
###Markdown
Install the protobuf compilers so that we can train and convert our model
###Code
!apt-get install -qq protobuf-compiler python-tk
!pip install -q Cython contextlib2 pillow lxml matplotlib PyDrive
!pip install -q pycocotools
%cd /content/models/research
!protoc object_detection/protos/*.proto --python_out=.
import os
os.environ['PYTHONPATH'] += ':/content/models/research/:/content/models/research/slim/'
!python object_detection/builders/model_builder_test.py
###Output
_____no_output_____
###Markdown
Check how much execution time you have left, max is 12h
###Code
import time, psutil
Start = time.time()- psutil.boot_time()
Left= 12*3600 - Start
print('Time remaining for this session is: ', Left/3600)
!pwd
###Output
_____no_output_____
###Markdown
Model preparation Any model exported using the **export_inference_graph.py** tool can be loaded here simply by changing **PATH_TO_FROZEN_GRAPH** to point to a new .pb file. By default we use an "**SSD with Mobilenet**" model here. See the detection model zoo for a list of other models that can be run out-of-the-box with varying speeds and accuracies. + download model
###Code
cd /content//models/
!pwd
!curl -O http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_11_06_2017.tar.gz
###Output
_____no_output_____
###Markdown
+ load model from graph
###Code
!tar xvzf ssd_mobilenet_v1_coco_11_06_2017.tar.gz
%rm -rf checkpoints
%mkdir checkpoints
cp ssd_mobilenet_v1_coco_11_06_2017/model.ckpt.* checkpoints/
###Output
_____no_output_____
###Markdown
Download the dataset. You can put in your own link or upload a zip file with your data. We labeled our food data and uploaded it to Roboflow, an online system that takes your images and XML labels, builds a dataset, and can convert it to TFRecord, the format we will use. If you already have the data in TFRecord format, good; if you only have the bounding-box files and images, you can write your own conversion scripts (a minimal sketch is shown below) or use the same process as we did.
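For reference, a minimal sketch of writing a single image and an integer label into a TFRecord file could look like the following. The file names and the label value are placeholders, and a real Object Detection record also needs the image size, bounding boxes and class-name fields expected by the API:

```python
import tensorflow as tf

# Minimal sketch of the Example/Feature mechanics behind a TFRecord file.
def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

with tf.io.gfile.GFile('example.jpg', 'rb') as f:     # placeholder image path
    encoded_jpg = f.read()

example = tf.train.Example(features=tf.train.Features(feature={
    'image/encoded': _bytes_feature(encoded_jpg),
    'image/format': _bytes_feature(b'jpeg'),
    'image/class/label': _int64_feature(1),           # placeholder label
}))

with tf.io.TFRecordWriter('single_example.record') as writer:
    writer.write(example.SerializeToString())
```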
###Code
!curl -L [YOUR DATASET LINK HERE] > roboflow.zip; unzip roboflow.zip; rm roboflow.zip
mkdir tf_record
!mkdir annotations
!mv train/Foods_label_map.pbtxt annotations/label_map.pbtxt
!mv train/Foods.tfrecord tf_record/train.record
!mv test/Foods.tfrecord tf_record/test.record
###Output
_____no_output_____
###Markdown
Modify the config file under models/research/object_detection/samples/configs. Check the pbtxt file to get the number of classes.
###Code
!cp research/object_detection/samples/configs/ssd_mobilenet_v2_coco.config .
###Output
_____no_output_____
###Markdown
Modify it:
* We have 90 classes so change num_classes to 90
* Set fine_tune_checkpoint to checkpoints/model.ckpt
* Change:

      train_input_reader: {
        tf_record_input_reader {
          input_path: "tf_record/train.record"
        }
        label_map_path: "annotations/label_map.pbtxt"
      }
      eval_input_reader: {
        tf_record_input_reader {
          input_path: "tf_record/test.record"
        }
        label_map_path: "annotations/label_map.pbtxt"
        shuffle: false
        num_readers: 1
      }
###Code
!pwd
! rm -rf train
! rm -rf test
# Make directory for storing training progress
!mkdir train
# Make directory for storing validation results
!mkdir eval
###Output
_____no_output_____
###Markdown
Begin the training process. In the config file the model is set to run for 200K steps; however, since you probably won't have enough time, you can stop the training once you reach the loss value you want (loss values around 1 are ideal).
###Code
#Begin training
!python research/object_detection/legacy/train.py --logtostderr --train_dir=train --pipeline_config_path=ssd_mobilenet_v1_pets.config
!pwd
!mkdir exported
!mkdir exported/complete
###Output
_____no_output_____
###Markdown
Export your model to a frozen graph, paying close attention to the trained_checkpoint_prefix. The model creates checkpoints and saves the most recent ones in the train folder. Depending on how many steps you run, you need to specify the step of the checkpoint; in our case one of the checkpoints that we found acceptable was model.ckpt-15225*. The * means that in the train folder there were more files with that prefix; when we specify the checkpoint step here, the script takes all the necessary files for that step and exports your model as a frozen graph in protocol-buffer format.
###Code
!python research/object_detection/export_inference_graph.py \
--input_type=image_tensor \
--pipeline_config_path=ssd_mobilenet_v1_pets.config \
--trained_checkpoint_prefix=train/model.ckpt-15225 \
--output_directory=exported/complete
mkdir exported/tflite
###Output
_____no_output_____
###Markdown
After exporting our model to a frozen graph it can make predictions on images. However, since we want to integrate the model into our FoodieShoot application, we need to convert it to the TFLite format; in this format the model performs well without consuming too many of our mobile resources. Here we take that frozen graph and convert it to a TFLite-compatible frozen graph.
###Code
!python research/object_detection/export_tflite_ssd_graph.py \
--pipeline_config_path=ssd_mobilenet_v1_pets.config \
--trained_checkpoint_prefix=train/model.ckpt-15225 \
--output_directory=exported/tflite \
--add_postprocessing_op=true
cp annotations/label_map.pbtxt exported
!pwd
###Output
_____no_output_____
###Markdown
After exporting the model to a TFLite frozen graph, the TF Object Detection Android API still cannot use that graph directly; it has to be converted to a model.tflite file, which we do with toco. Since we have processed our data and scaled it to 300x300 pixels, the TFLite model must use that input size. In the Android API we will then scale images down to fit that size; this process may make prediction more difficult and slower, but after testing we found 300 to be the best value for all types of mobile devices.
###Code
!toco \
--graph_def_file="/content/models/exported/tflite/tflite_graph.pb" \
--output_file="/content/models/exported/tflite/detect.tflite" \
--input_shapes=1,300,300,3 \
--input_arrays=normalized_input_image_tensor \
--output_arrays='TFLite_Detection_PostProcess','TFLite_Detection_PostProcess:1','TFLite_Detection_PostProcess:2','TFLite_Detection_PostProcess:3' \
--inference_type=FLOAT \
--allow_custom_ops
###Output
_____no_output_____
###Markdown
Zip and download the files. We zip the models folder as well, for a future training run in case we ever want to reuse the checkpoints, but for Android you only need exported/tflite and the labels.
###Code
%cd /content
!zip -r Models_models.zip models
!zip -r Models_exported.zip /content/models/exported
###Output
_____no_output_____
###Markdown
If you want you can test an image
###Code
%cd /content
!mkdir img
%cd img
from google.colab import files
from os import path
uploaded = files.upload()
for name, data in uploaded.items():
with open('image1.jpg', 'wb') as f:
f.write(data)
f.close()
print('saved file ' + name)
cd /content/models/research/object_detection
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from object_detection.utils import ops as utils_ops
# This is needed to display the images.
%matplotlib inline
from utils import label_map_util
from utils import visualization_utils as vis_util
# What model to download.
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = '/content/models/exported/complete/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('/content/models/annotations', 'label_map.pbtxt')
NUM_CLASSES = 90
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = '/content/img/'
TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 2) ]
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
def run_inference_for_single_image(image, graph):
with graph.as_default():
with tf.Session() as sess:
# Get handles to input and output tensors
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
'num_detections', 'detection_boxes', 'detection_scores',
'detection_classes', 'detection_masks'
]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
tensor_name)
if 'detection_masks' in tensor_dict:
# The following processing is only for single image
detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image.shape[0], image.shape[1])
detection_masks_reframed = tf.cast(
tf.greater(detection_masks_reframed, 0.5), tf.uint8)
# Follow the convention by adding back the batch dimension
tensor_dict['detection_masks'] = tf.expand_dims(
detection_masks_reframed, 0)
image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
# Run inference
output_dict = sess.run(tensor_dict,
feed_dict={image_tensor: np.expand_dims(image, 0)})
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_classes'] = output_dict[
'detection_classes'][0].astype(np.uint8)
output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
output_dict['detection_scores'] = output_dict['detection_scores'][0]
if 'detection_masks' in output_dict:
output_dict['detection_masks'] = output_dict['detection_masks'][0]
return output_dict
for image_path in TEST_IMAGE_PATHS:
image = Image.open(image_path)
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = load_image_into_numpy_array(image)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
# Actual detection.
output_dict = run_inference_for_single_image(image_np, detection_graph)
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
output_dict['detection_boxes'],
output_dict['detection_classes'],
output_dict['detection_scores'],
category_index,
instance_masks=output_dict.get('detection_masks'),
use_normalized_coordinates=True,
line_thickness=8)
plt.figure(figsize=IMAGE_SIZE)
plt.imshow(image_np)
###Output
_____no_output_____
|
Improving Deep Neural networks- Hyperparameter Tuning - Regularization and Optimization/Gradient Checking.ipynb
|
###Markdown
Gradient CheckingWelcome to the final assignment for this week! In this assignment you will learn to implement and use gradient checking. You are part of a team working to make mobile payments available globally, and are asked to build a deep learning model to detect fraud--whenever someone makes a payment, you want to see if the payment might be fraudulent, such as if the user's account has been taken over by a hacker. But backpropagation is quite challenging to implement, and sometimes has bugs. Because this is a mission-critical application, your company's CEO wants to be really certain that your implementation of backpropagation is correct. Your CEO says, "Give me a proof that your backpropagation is actually working!" To give this reassurance, you are going to use "gradient checking".Let's do it!
###Code
# Packages
import numpy as np
from testCases import *
from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector
###Output
_____no_output_____
###Markdown
1) How does gradient checking work?Backpropagation computes the gradients $\frac{\partial J}{\partial \theta}$, where $\theta$ denotes the parameters of the model. $J$ is computed using forward propagation and your loss function.Because forward propagation is relatively easy to implement, you're confident you got that right, and so you're almost 100% sure that you're computing the cost $J$ correctly. Thus, you can use your code for computing $J$ to verify the code for computing $\frac{\partial J}{\partial \theta}$. Let's look back at the definition of a derivative (or gradient):$$ \frac{\partial J}{\partial \theta} = \lim_{\varepsilon \to 0} \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2 \varepsilon} \tag{1}$$If you're not familiar with the "$\displaystyle \lim_{\varepsilon \to 0}$" notation, it's just a way of saying "when $\varepsilon$ is really really small."We know the following:- $\frac{\partial J}{\partial \theta}$ is what you want to make sure you're computing correctly. - You can compute $J(\theta + \varepsilon)$ and $J(\theta - \varepsilon)$ (in the case that $\theta$ is a real number), since you're confident your implementation for $J$ is correct. Lets use equation (1) and a small value for $\varepsilon$ to convince your CEO that your code for computing $\frac{\partial J}{\partial \theta}$ is correct! 2) 1-dimensional gradient checkingConsider a 1D linear function $J(\theta) = \theta x$. The model contains only a single real-valued parameter $\theta$, and takes $x$ as input.You will implement code to compute $J(.)$ and its derivative $\frac{\partial J}{\partial \theta}$. You will then use gradient checking to make sure your derivative computation for $J$ is correct. **Figure 1** : **1D linear model** The diagram above shows the key computation steps: First start with $x$, then evaluate the function $J(x)$ ("forward propagation"). Then compute the derivative $\frac{\partial J}{\partial \theta}$ ("backward propagation"). **Exercise**: implement "forward propagation" and "backward propagation" for this simple function. I.e., compute both $J(.)$ ("forward propagation") and its derivative with respect to $\theta$ ("backward propagation"), in two separate functions.
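As a quick standalone illustration of equation (1) (not part of the graded functions), the centered difference recovers the derivative of a simple function such as $J(\theta) = \theta^2$, whose exact derivative is $2\theta$:

```python
# Sketch: centered-difference approximation of dJ/dtheta for J(theta) = theta**2.
J = lambda theta: theta ** 2
theta, eps = 3.0, 1e-7

gradapprox = (J(theta + eps) - J(theta - eps)) / (2 * eps)
print(gradapprox)                    # close to the exact derivative 2 * theta = 6
print(abs(gradapprox - 2 * theta))   # tiny approximation error
```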
###Code
# GRADED FUNCTION: forward_propagation
def forward_propagation(x, theta):
"""
Implement the linear forward propagation (compute J) presented in Figure 1 (J(theta) = theta * x)
Arguments:
x -- a real-valued input
theta -- our parameter, a real number as well
Returns:
J -- the value of function J, computed using the formula J(theta) = theta * x
"""
### START CODE HERE ### (approx. 1 line)
J = np.dot(theta, x)
### END CODE HERE ###
return J
x, theta = 2, 4
J = forward_propagation(x, theta)
print ("J = " + str(J))
###Output
J = 8
###Markdown
**Expected Output**: ** J ** 8 **Exercise**: Now, implement the backward propagation step (derivative computation) of Figure 1. That is, compute the derivative of $J(\theta) = \theta x$ with respect to $\theta$. To save you from doing the calculus, you should get $dtheta = \frac { \partial J }{ \partial \theta} = x$.
###Code
# GRADED FUNCTION: backward_propagation
def backward_propagation(x, theta):
"""
Computes the derivative of J with respect to theta (see Figure 1).
Arguments:
x -- a real-valued input
theta -- our parameter, a real number as well
Returns:
dtheta -- the gradient of the cost with respect to theta
"""
### START CODE HERE ### (approx. 1 line)
dtheta = x
### END CODE HERE ###
return dtheta
x, theta = 2, 4
dtheta = backward_propagation(x, theta)
print ("dtheta = " + str(dtheta))
###Output
dtheta = 2
###Markdown
**Expected Output**: ** dtheta ** 2 **Exercise**: To show that the `backward_propagation()` function is correctly computing the gradient $\frac{\partial J}{\partial \theta}$, let's implement gradient checking.**Instructions**:- First compute "gradapprox" using the formula above (1) and a small value of $\varepsilon$. Here are the Steps to follow: 1. $\theta^{+} = \theta + \varepsilon$ 2. $\theta^{-} = \theta - \varepsilon$ 3. $J^{+} = J(\theta^{+})$ 4. $J^{-} = J(\theta^{-})$ 5. $gradapprox = \frac{J^{+} - J^{-}}{2 \varepsilon}$- Then compute the gradient using backward propagation, and store the result in a variable "grad"- Finally, compute the relative difference between "gradapprox" and the "grad" using the following formula:$$ difference = \frac {\mid\mid grad - gradapprox \mid\mid_2}{\mid\mid grad \mid\mid_2 + \mid\mid gradapprox \mid\mid_2} \tag{2}$$You will need 3 Steps to compute this formula: - 1'. compute the numerator using np.linalg.norm(...) - 2'. compute the denominator. You will need to call np.linalg.norm(...) twice. - 3'. divide them.- If this difference is small (say less than $10^{-7}$), you can be quite confident that you have computed your gradient correctly. Otherwise, there may be a mistake in the gradient computation.
###Code
# GRADED FUNCTION: gradient_check
def gradient_check(x, theta, epsilon=1e-7):
"""
Implement the backward propagation presented in Figure 1.
Arguments:
x -- a real-valued input
theta -- our parameter, a real number as well
epsilon -- tiny shift to the input to compute approximated gradient with formula(1)
Returns:
difference -- difference (2) between the approximated gradient and the backward propagation gradient
"""
# Compute gradapprox using left side of formula (1). epsilon is small enough, you don't need to worry about the limit.
### START CODE HERE ### (approx. 5 lines)
thetaplus = theta + epsilon # Step 1
thetaminus = theta - epsilon # Step 2
J_plus = forward_propagation(x, thetaplus) # Step 3
J_minus = forward_propagation(x, thetaminus) # Step 4
gradapprox = (J_plus - J_minus) / (2 * epsilon) # Step 5
### END CODE HERE ###
# Check if gradapprox is close enough to the output of backward_propagation()
### START CODE HERE ### (approx. 1 line)
grad = backward_propagation(x, theta)
### END CODE HERE ###
### START CODE HERE ### (approx. 1 line)
numerator = np.linalg.norm(grad - gradapprox) # Step 1'
denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox) # Step 2'
difference = numerator / denominator # Step 3'
### END CODE HERE ###
if difference < 1e-7:
print("The gradient is correct!")
else:
print("The gradient is wrong!")
return difference
x, theta = 2, 4
difference = gradient_check(x, theta)
print("difference = " + str(difference))
###Output
The gradient is correct!
difference = 2.91933588329e-10
###Markdown
**Expected Output**:The gradient is correct! ** difference ** 2.9193358103083e-10 Congrats, the difference is smaller than the $10^{-7}$ threshold. So you can have high confidence that you've correctly computed the gradient in `backward_propagation()`. Now, in the more general case, your cost function $J$ has more than a single 1D input. When you are training a neural network, $\theta$ actually consists of multiple matrices $W^{[l]}$ and biases $b^{[l]}$! It is important to know how to do a gradient check with higher-dimensional inputs. Let's do it! 3) N-dimensional gradient checking The following figure describes the forward and backward propagation of your fraud detection model. **Figure 2** : **deep neural network***LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID*Let's look at your implementations for forward propagation and backward propagation.
###Code
def forward_propagation_n(X, Y, parameters):
"""
Implements the forward propagation (and computes the cost) presented in Figure 3.
Arguments:
X -- training set for m examples
Y -- labels for m examples
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
W1 -- weight matrix of shape (5, 4)
b1 -- bias vector of shape (5, 1)
W2 -- weight matrix of shape (3, 5)
b2 -- bias vector of shape (3, 1)
W3 -- weight matrix of shape (1, 3)
b3 -- bias vector of shape (1, 1)
Returns:
cost -- the cost function (logistic cost for one example)
"""
# retrieve parameters
m = X.shape[1]
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
W3 = parameters["W3"]
b3 = parameters["b3"]
# LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
Z1 = np.dot(W1, X) + b1
A1 = relu(Z1)
Z2 = np.dot(W2, A1) + b2
A2 = relu(Z2)
Z3 = np.dot(W3, A2) + b3
A3 = sigmoid(Z3)
# Cost
logprobs = np.multiply(-np.log(A3), Y) + np.multiply(-np.log(1 - A3), 1 - Y)
cost = 1. / m * np.sum(logprobs)
cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
return cost, cache
###Output
_____no_output_____
###Markdown
Now, run backward propagation.
###Code
def backward_propagation_n(X, Y, cache):
"""
Implement the backward propagation presented in figure 2.
Arguments:
X -- input datapoint, of shape (input size, 1)
Y -- true "label"
cache -- cache output from forward_propagation_n()
Returns:
gradients -- A dictionary with the gradients of the cost with respect to each parameter, activation and pre-activation variables.
"""
m = X.shape[1]
(Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
dZ3 = A3 - Y
dW3 = 1. / m * np.dot(dZ3, A2.T)
db3 = 1. / m * np.sum(dZ3, axis=1, keepdims=True)
dA2 = np.dot(W3.T, dZ3)
dZ2 = np.multiply(dA2, np.int64(A2 > 0))
dW2 = 1. / m * np.dot(dZ2, A1.T) * 2 # Should not multiply by 2
db2 = 1. / m * np.sum(dZ2, axis=1, keepdims=True)
dA1 = np.dot(W2.T, dZ2)
dZ1 = np.multiply(dA1, np.int64(A1 > 0))
dW1 = 1. / m * np.dot(dZ1, X.T)
db1 = 4. / m * np.sum(dZ1, axis=1, keepdims=True) # Should not multiply by 4
gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,
"dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
"dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}
return gradients
###Output
_____no_output_____
###Markdown
You obtained some results on the fraud detection test set but you are not 100% sure of your model. Nobody's perfect! Let's implement gradient checking to verify if your gradients are correct. **How does gradient checking work?**.As in 1) and 2), you want to compare "gradapprox" to the gradient computed by backpropagation. The formula is still:$$ \frac{\partial J}{\partial \theta} = \lim_{\varepsilon \to 0} \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2 \varepsilon} \tag{1}$$However, $\theta$ is not a scalar anymore. It is a dictionary called "parameters". We implemented a function "`dictionary_to_vector()`" for you. It converts the "parameters" dictionary into a vector called "values", obtained by reshaping all parameters (W1, b1, W2, b2, W3, b3) into vectors and concatenating them.The inverse function is "`vector_to_dictionary`" which outputs back the "parameters" dictionary. **Figure 2** : **dictionary_to_vector() and vector_to_dictionary()** You will need these functions in gradient_check_n()We have also converted the "gradients" dictionary into a vector "grad" using gradients_to_vector(). You don't need to worry about that.**Exercise**: Implement gradient_check_n().**Instructions**: Here is pseudo-code that will help you implement the gradient check.For each i in num_parameters:- To compute `J_plus[i]`: 1. Set $\theta^{+}$ to `np.copy(parameters_values)` 2. Set $\theta^{+}_i$ to $\theta^{+}_i + \varepsilon$ 3. Calculate $J^{+}_i$ using to `forward_propagation_n(x, y, vector_to_dictionary(`$\theta^{+}$ `))`. - To compute `J_minus[i]`: do the same thing with $\theta^{-}$- Compute $gradapprox[i] = \frac{J^{+}_i - J^{-}_i}{2 \varepsilon}$Thus, you get a vector gradapprox, where gradapprox[i] is an approximation of the gradient with respect to `parameter_values[i]`. You can now compare this gradapprox vector to the gradients vector from backpropagation. Just like for the 1D case (Steps 1', 2', 3'), compute: $$ difference = \frac {\| grad - gradapprox \|_2}{\| grad \|_2 + \| gradapprox \|_2 } \tag{3}$$
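To make the reshaping step concrete, here is a small standalone sketch (using toy arrays rather than the provided `dictionary_to_vector` helper) of flattening a dictionary of parameter arrays into one column vector and restoring the original shapes afterwards:

```python
import numpy as np

# Toy parameters with different shapes, flattened into one column vector.
params = {"W1": np.arange(6).reshape(2, 3), "b1": np.arange(2).reshape(2, 1)}

vector = np.concatenate([params[k].reshape(-1, 1) for k in ("W1", "b1")], axis=0)
print(vector.shape)  # (8, 1)

# Restoring the dictionary from the vector, using the known shapes.
restored = {"W1": vector[:6].reshape(2, 3), "b1": vector[6:].reshape(2, 1)}
print(all(np.allclose(restored[k], params[k]) for k in params))  # True
```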
###Code
# GRADED FUNCTION: gradient_check_n
def gradient_check_n(parameters, gradients, X, Y, epsilon=1e-7):
"""
Checks if backward_propagation_n computes correctly the gradient of the cost output by forward_propagation_n
Arguments:
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
grad -- output of backward_propagation_n, contains gradients of the cost with respect to the parameters.
x -- input datapoint, of shape (input size, 1)
y -- true "label"
epsilon -- tiny shift to the input to compute approximated gradient with formula(1)
Returns:
difference -- difference (2) between the approximated gradient and the backward propagation gradient
"""
# Set-up variables
parameters_values, _ = dictionary_to_vector(parameters)
grad = gradients_to_vector(gradients)
num_parameters = parameters_values.shape[0]
J_plus = np.zeros((num_parameters, 1))
J_minus = np.zeros((num_parameters, 1))
gradapprox = np.zeros((num_parameters, 1))
# Compute gradapprox
for i in range(num_parameters):
# Compute J_plus[i]. Inputs: "parameters_values, epsilon". Output = "J_plus[i]".
# "_" is used because the function you have to outputs two parameters but we only care about the first one
### START CODE HERE ### (approx. 3 lines)
thetaplus = np.copy(parameters_values) # Step 1
thetaplus[i][0] = thetaplus[i][0] + epsilon # Step 2
J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaplus)) # Step 3
### END CODE HERE ###
# Compute J_minus[i]. Inputs: "parameters_values, epsilon". Output = "J_minus[i]".
### START CODE HERE ### (approx. 3 lines)
thetaminus = np.copy(parameters_values) # Step 1
thetaminus[i][0] = thetaminus[i][0] - epsilon # Step 2
J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaminus)) # Step 3
### END CODE HERE ###
# Compute gradapprox[i]
### START CODE HERE ### (approx. 1 line)
gradapprox[i] = (J_plus[i] - J_minus[i]) / (2 * epsilon)
### END CODE HERE ###
# Compare gradapprox to backward propagation gradients by computing difference.
### START CODE HERE ### (approx. 1 line)
numerator = np.linalg.norm(grad - gradapprox) # Step 1'
denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox) # Step 2'
difference = numerator / denominator # Step 3'
### END CODE HERE ###
if difference > 1e-7:
print("\033[93m" + "There is a mistake in the backward propagation! difference = " + str(difference) + "\033[0m")
else:
print("\033[92m" + "Your backward propagation works perfectly fine! difference = " + str(difference) + "\033[0m")
return difference
X, Y, parameters = gradient_check_n_test_case()
cost, cache = forward_propagation_n(X, Y, parameters)
gradients = backward_propagation_n(X, Y, cache)
difference = gradient_check_n(parameters, gradients, X, Y)
###Output
[93mThere is a mistake in the backward propagation! difference = 0.285093156781[0m
|
1.2 R Matrix/de-DE/1.2.11 R - Create Matrix.ipynb
|
###Markdown
Day 1, Chapter 2: Matrix. Lesson 11: Creating a matrixWe have already covered vectors, which let us store indexed elements. A matrix additionally lets us create two-dimensional data structures of elements of the same data type. Before we start with matrices, here is a small tip for creating sequential series of numbers: we can use the *colon notation*, known from slicing, to create sequential vectors:
###Code
1:6
v <- 1:6
v
###Output
_____no_output_____
###Markdown
To create a matrix in R we use the `matrix()` function. We can pass it a vector:
###Code
matrix(v)
###Output
_____no_output_____
###Markdown
We obtained a two-dimensional matrix with 6 rows and one column. To set the number of rows, pass the `nrow` argument:
###Code
matrix(v,nrow=2)
matrix(v,byrow=FALSE,nrow=2)
matrix(v,byrow=TRUE,nrow=2)
###Output
_____no_output_____
###Markdown
We now have a 2-by-3 matrix, produced with the `nrow` argument. How do we control the order of the elements? We could fill the columns first (as just happened in the example) or the rows first. The `byrow` argument lets us specify this. Here are two examples:
###Code
matrix(1:16,byrow = FALSE,nrow=4)
matrix(1:16, byrow=TRUE, nrow=4)
###Output
_____no_output_____
###Markdown
Creating matrices from vectorsWe can combine several vectors and feed them into a matrix. As an example, consider the following two vectors of stock prices:
###Code
# Historical closing stock prices, calendar week 49 / 2019
# Google
GOOG <- c(1289.92,1295.28,1320.54,1328.13,1340.61)
GOOG
# Tesla
TSLA <- c(334.86,336.20,33.02,330.36,335.89)
TSLA
aktien <- c(GOOG,TSLA)
aktien
aktien.matrix <- matrix(aktien,byrow=TRUE,nrow=2)
aktien.matrix
tage <- c('Mo','Di','Mi','Do','Fr')
aktien.namen <- c('GOOG','TSLA')
# colnames(aktien.matrix) <- tage
# rownames(aktien.matrix) <- aktien.namen
colnames(aktien.matrix) <- c('Mo','Di','Mi','Do','Fr')
rownames(aktien.matrix) <- c('GOOG','TSLA')
###Output
_____no_output_____
###Markdown
Naming matricesNow that we have created our matrix, it makes sense to name the rows and columns for easier access. This works essentially like the `names()` function for vectors: we define `colnames()` (for the columns) and `rownames()` (for the rows). Let's now name our stock matrix:
###Code
aktien.matrix
###Output
_____no_output_____
|
Notebooks/CatsVSDogs/CatsVSDogs.ipynb
|
###Markdown
High Dimensional & Deep Learning : Image classification on the CatsVSDogs dataset. SummaryThis tutorial is highly inspired by the [blog](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html) post by François Chollet, the creator of [Keras](https://keras.io/). Objectives
* Use convolutional networks to build image classifiers on colour images
* Use pre-trained models (VGG/Inception) to improve the accuracy of the results
* Fine-tune pre-trained models

Libraries
###Code
# Utils
import sys
import os
import shutil
import time
import pickle
import numpy as np
# Deep Learning Libraries
import tensorflow as tf
import tensorflow.keras.preprocessing.image as kpi
import tensorflow.keras.layers as kl
import tensorflow.keras.optimizers as ko
import tensorflow.keras.backend as k
import tensorflow.keras.models as km
import tensorflow.keras.applications as ka
# Data visualization
from matplotlib import pyplot as plt
###Output
_____no_output_____
###Markdown
These code lines allow you to check whether your computer is using CPU or GPU resources. **Warning**: you won't be able to use the GPU if another notebook is open and still using it.
###Code
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
MODE = "GPU" if "GPU" in [k.device_type for k in device_lib.list_local_devices()] else "CPU"
print(MODE)
###Output
_____no_output_____
###Markdown
DatasetThe dataset used in this lab is the `CatsVSDogs` dataset from a [Kaggle contest](https://www.kaggle.com/c/dogs-vs-cats), which contains 25,000 images. That is a lot of images when you do not have much computing power. As our goal here is to understand the behaviour of the algorithms, not to achieve the best performance, we created two subsamples of this dataset, which are available in the *data* directory.
* First subsample: 100 cat images and 100 dog images for training; 40 cat images and 40 dog images for validation.
* Second subsample: 1000 cat images and 1000 dog images for training; 400 cat images and 400 dog images for validation.

Dataset organisation To use some of the Keras image generators that we will use later, we have to organise the dataset so that all data of the same class are within the same folder. Our data are therefore organised this way:
```
data_dir
└───subsample/
    ├───train/
    │   ├───cats/
    │   │       cat.0.jpg
    │   │       cat.1.jpg
    │   │       ...
    │   └───dogs/
    │           dog.0.jpg
    │           dog.1.jpg
    │           ...
    └───validation/
        ├───cats/
        │       cat.1000.jpg
        │       cat.1001.jpg
        │       ...
        └───dogs/
                dog.1000.jpg
                dog.1001.jpg
                ...
```
Parameter
###Code
data_dir = 'data/' # data path
# subsample directory path
N_train = 200 #2000
N_val = 80 #800
data_dir_sub = data_dir+'subsample_%d_Ntrain_%d_Nval' %(N_train, N_val)
###Output
_____no_output_____
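If you ever need to rebuild such a subsample yourself from the original Kaggle training folder, a sketch along the following lines (using the `data_dir_sub` variable defined above; the source path `data/kaggle_train/` is a placeholder, not part of this lab) copies the first images of each class into the layout described above:

```python
import os
import shutil

src = "data/kaggle_train/"              # placeholder: flat folder with cat.*.jpg / dog.*.jpg
n_train_per_class, n_val_per_class = 100, 40

for split, start, n in [("train", 0, n_train_per_class),
                        ("validation", n_train_per_class, n_val_per_class)]:
    for animal in ("cats", "dogs"):
        folder = os.path.join(data_dir_sub, split, animal)
        os.makedirs(folder, exist_ok=True)
        for i in range(start, start + n):
            name = "%s.%d.jpg" % (animal[:-1], i)    # cat.0.jpg, dog.0.jpg, ...
            shutil.copy(os.path.join(src, name), os.path.join(folder, name))
```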
###Markdown
Illustration of the dataThe `load_img` function loads an image as a PIL image.
###Code
img = kpi.load_img(data_dir_sub+'/train/cats/cat.1.jpg') # this is a PIL image
img
###Output
_____no_output_____
###Markdown
The `img_to_array` function generates a NumPy array from a PIL image.
###Code
x = kpi.img_to_array(img)
plt.imshow(x/255, interpolation='nearest')
plt.show()
print(x.shape)
###Output
_____no_output_____
###Markdown
**Q** What are the dimensions of the x array? What do these dimensions correspond to? Pre-processingThe Keras `ImageDataGenerator` class lets us apply different treatments to the images (transformations, normalisation). These transformations are produced on the fly, without saving a lot of modified images to disk, and they make the classifier more robust. All the possible transformations are listed in the documentation of the class.
###Code
kpi.ImageDataGenerator?
datagen = kpi.ImageDataGenerator(
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
###Output
_____no_output_____
###Markdown
In order to visualize the transformed images, we will use the `.flow()` method, which generates transformed images from an original image and saves them in the specified directory. In the following code we produce 8 of these transformed images.
###Code
img = kpi.load_img(data_dir_sub+"/train/cats/cat.1.jpg") # this is a PIL image
x = kpi.img_to_array(img)
x_ = np.expand_dims(x, axis=0)
if not(os.path.isdir(data_dir_sub+"/preprocessing_example")):
os.mkdir(data_dir_sub+"/preprocessing_example")
i = 0
for batch in datagen.flow(x_, batch_size=1,save_to_dir=data_dir_sub+"/preprocessing_example", save_prefix='cat', save_format='jpeg'):
i += 1
if i > 7:
break
###Output
_____no_output_____
###Markdown
Display transformed images
###Code
X_list=[]
for f in os.listdir(data_dir_sub+"/preprocessing_example"):
X_list.append(kpi.img_to_array(kpi.load_img(data_dir_sub+"/preprocessing_example/"+f)))
fig=plt.figure(figsize=(16,8))
fig.patch.set_alpha(0)
ax = fig.add_subplot(3,3,1)
ax.imshow(x/255, interpolation="nearest")
ax.set_title("Image original")
for i,xt in enumerate(X_list):
ax = fig.add_subplot(3,3,i+2)
ax.imshow(xt/255, interpolation="nearest")
ax.set_title("Random transformation %d" %(i+1))
plt.tight_layout()
plt.savefig("cats_transformation.png", dpi=100, bbox_to_anchor="tight", facecolor=fig.get_facecolor())
plt.show()
###Output
_____no_output_____
###Markdown
Image classification from scratch with a convolutional networkWe will now build a classifier with a custom convolutional network architecture. We first define the epochs and batch_size parameters.
* `epochs`: we start with a small number (5-10) in order to check that the computing time is reasonable.
* `batch_size`: when using a Keras generator, the batch size should be a divisor of the sample size, otherwise the algorithms produce very unstable results (a quick check is sketched below).
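Since unstable results usually come from a batch size that does not evenly divide the sample sizes, a one-line check such as this (run it after the next cell, once `batch_size` is defined) catches the problem early:

```python
# Sanity check: the batch size should divide both sample sizes evenly.
assert N_train % batch_size == 0 and N_val % batch_size == 0, \
    "Choose a batch_size that divides N_train=%d and N_val=%d" % (N_train, N_val)
```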
###Code
epochs = 10
batch_size=20
###Output
_____no_output_____
###Markdown
Data GenerationWe defined two `ImageDataGenerator` objects :* `train_datagen`: for learning, where different transformations are applied as above, in order to pass various examples to the model.* `valid_datagen`: for validation, where only rescaling is applied.**Question** Why do we apply different transformations for learning and validation? Images have various dimensions :
###Code
x_0 = kpi.img_to_array(kpi.load_img(data_dir_sub+"/train/cats/cat.0.jpg"))
x_1 = kpi.img_to_array(kpi.load_img(data_dir_sub+"/train/cats/cat.1.jpg"))
x_0.shape, x_1.shape
###Output
_____no_output_____
###Markdown
which is annoying because all images must have the same dimensions to be used in this network. The `flow_from_directory` method lets us specify an output size in which all transformed images will be produced.
###Code
img_width = 150
img_height = 150
# this is the augmentation configuration we will use for training
train_datagen = kpi.ImageDataGenerator(
rescale=1./255,
rotation_range=40)
# this is the augmentation configuration we will use for testing:
# only rescaling
valid_datagen = kpi.ImageDataGenerator(rescale=1./255)
# this is a generator that will read pictures found in
# subfolers of 'data/train', and indefinitely generate
# batches of augmented image data
train_generator = train_datagen.flow_from_directory(
data_dir_sub+"/train/", # this is the target directory
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary') # since we use binary_crossentropy loss, we need binary labels
# this is a similar generator, for validation data
validation_generator = valid_datagen.flow_from_directory(
data_dir_sub+"/validation/",
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
###Output
_____no_output_____
###Markdown
Model architectureThe model we define is composed of 3 convolution blocks, each of the form:
* A Conv2D layer with 3x3 filters (32, 32 and 64 filters for the three blocks respectively) and a `ReLU` activation function.
* A MaxPooling layer with a 2x2 window.

Followed by:
* A Flatten layer.
* A Dense layer with 64 neurons and a ReLU activation function.
* A Dropout layer with a 50% drop rate.
* A Dense layer with 1 neuron and a sigmoid activation function.
###Code
model_conv = km.Sequential()
model_conv.add(kl.Conv2D(32, (3, 3), input_shape=(img_width, img_height, 3), data_format="channels_last"))
model_conv.add(kl.Activation('relu'))
model_conv.add(kl.MaxPooling2D(pool_size=(2, 2)))
model_conv.add(kl.Conv2D(32, (3, 3)))
model_conv.add(kl.Activation('relu'))
model_conv.add(kl.MaxPooling2D(pool_size=(2, 2)))
model_conv.add(kl.Conv2D(64, (3, 3)))
model_conv.add(kl.Activation('relu'))
model_conv.add(kl.MaxPooling2D(pool_size=(2, 2)))
model_conv.add(kl.Flatten()) # this converts our 3D feature maps to 1D feature vectors
model_conv.add(kl.Dense(64))
model_conv.add(kl.Activation('relu'))
model_conv.add(kl.Dropout(0.5))
model_conv.add(kl.Dense(1))
model_conv.add(kl.Activation('sigmoid'))
model_conv.summary()
###Output
_____no_output_____
###Markdown
As our problem here is a two-class classification task, we will use the `binary_crossentropy` loss function.
###Code
model_conv.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Training The training can then be done with the `fit_generator` function instead of the `fit` function used in the MNIST notebook: it takes generator objects instead of the data arrays.
###Code
ts = time.time()
model_conv.fit_generator(train_generator, steps_per_epoch=N_train // batch_size, epochs=epochs,
validation_data=validation_generator,validation_steps=N_val // batch_size)
te = time.time()
t_learning_conv_simple_model = te-ts
print("Learning Time for %d epochs : %d seconds"%(epochs,t_learning_conv_simple_model))
###Output
_____no_output_____
###Markdown
Prediction
###Code
ts = time.time()
score_conv_val = model_conv.evaluate_generator(validation_generator, N_val /batch_size, verbose=1)
score_conv_train = model_conv.evaluate_generator(train_generator, N_train / batch_size, verbose=1)
te = time.time()
t_prediction_conv_simple_model = te-ts
print('Train accuracy:', score_conv_train[1])
print('Validation accuracy:', score_conv_val[1])
print("Time Prediction: %.2f seconds" %t_prediction_conv_simple_model )
###Output
_____no_output_____
###Markdown
**Q** Compare the accuracy and loss values for training and validation to the ones observed in the last epochs of training. What do you observe? Is this normal? **Q** What can you say about the performance of this model?**Exercise** Add more transformations to the training generator. Does this help? Pre-trained NetworkWe have seen above that the complexity of the data makes it difficult to quickly build an efficient classifier from scratch, even with an elaborate method such as a convolutional network. We will now see that this problem can easily be tackled by using **pre-trained models**. These are very complex models (see image below) that have been trained on a very large amount of image data in order to classify it. The figure below represents a *VGG16*. This model is composed of *5 convolutional blocks*, which build features on the images, followed by a last *fully-connected block*. This last block can be seen as a simple *MLP model* applied to the features built by the convolutional blocks. How can this model, designed to solve a different problem than ours, be helpful? Here is our two-stage strategy:
1. We send our data through the 5 convolutional blocks in order to build features. These blocks have been trained on a huge amount of data and can therefore build informative features.
2. We build our own MLP classifier, designed to solve our CatsVsDogs problem, and train it on the features built in the first step.

Network illustration Step 1 : Build features Download the weights of the 5 convolutional blocks.We will now download the weights of a VGG16 model trained on the [image-net](http://www.image-net.org) dataset, which is composed of millions of images in 1000 categories. The first time you use these weights they will be downloaded automatically and saved in your home directory under `"~/.keras/models"`. The *include_top* argument of the `VGG16` application specifies whether or not to include the last (fully-connected) block.
###Code
model_VGG16_without_top = ka.VGG16(include_top=False, weights='imagenet')
model_VGG16_without_top.summary()
###Output
_____no_output_____
###Markdown
Building featuresWe will now send our data to the loaded model in order to build our features.
###Code
datagen = kpi.ImageDataGenerator(rescale=1. / 255)
generator = datagen.flow_from_directory(
data_dir_sub+"/train",
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode=None, # this means our generator will only yield batches of data, no labels
shuffle=False)
features_train = model_VGG16_without_top.predict_generator(generator, N_train / batch_size, verbose = 1)
generator = datagen.flow_from_directory(
data_dir_sub+"/validation",
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode=None,
shuffle=False)
features_validation = model_VGG16_without_top.predict_generator(generator, N_val / batch_size, verbose = 1)
###Output
_____no_output_____
###Markdown
Step 2 : Building our classifier on top of featuresWe will now build a simple classifier that uses the previously built features to classify our data. Training**Exercise** Write this classifier
###Code
# %load solutions/classifier_pretrained_model.py
model_VGG_fcm = km.Sequential()
model_VGG_fcm.add(kl.Flatten(input_shape=features_train.shape[1:]))
model_VGG_fcm.add(kl.Dense(64, activation='relu'))
model_VGG_fcm.add(kl.Dropout(0.5))
model_VGG_fcm.add(kl.Dense(1, activation='sigmoid'))
model_VGG_fcm.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
model_VGG_fcm.summary()
train_labels = np.array([0] * int((N_train/2)) + [1] * int((N_train/2)))
validation_labels = np.array([0] * int((N_val/2)) + [1] * int((N_val/2)))
model_VGG_fcm.fit(features_train, train_labels,
epochs=epochs,
batch_size=batch_size,
validation_data=(features_validation, validation_labels))
t_learning_VGG_fcm = te-ts
###Output
_____no_output_____
###Markdown
We now save the weights of this classifier to be used later. Prediction
###Code
ts = time.time()
score_VGG_fcm_val = model_VGG_fcm.evaluate(features_validation, validation_labels)
score_VGG_fcm_train = model_VGG_fcm.evaluate(features_train, train_labels)
te = time.time()
t_prediction_VGG_fcm = te-ts
print('Train accuracy:', score_VGG_fcm_train[1])
print('Validation accuracy:', score_VGG_fcm_val[1])
print("Time Prediction: %.2f seconds" %t_prediction_VGG_fcm)
###Output
_____no_output_____
###Markdown
**Q** Comment the performance of this new model
###Code
model_VGG_fcm.save_weights(data_dir_sub+'/weights_model_VGG_fully_connected_model_%d_epochs_%d_batch_size.h5' %(epochs, batch_size))
###Output
_____no_output_____
###Markdown
Fine TuningWe have notably increased the performance of our model, and the new model is also really fast. We can keep trying to improve the results by modifying the small MLP classifier network we built, but to really improve the model it would be nice to also adapt the weights of the previous layers to our problem. This is possible and is called fine-tuning. In this part we therefore build a model composed of the 5 convolutional blocks of the VGG model (with its weights learned on ImageNet) and the classifier block we built (with the weights we learned previously). Model creation.We first download the model as before. However, since the model will now be trained on our images, we have to specify the input_shape of our data.
###Code
# build the VGG16 network
model_VGG16_without_top = ka.VGG16(include_top=False, weights='imagenet', input_shape=(img_height, img_width, 3))
print('Model loaded.')
###Output
_____no_output_____
###Markdown
We then build a classfier model like the one we built above and we load the learned weights.
###Code
# build a classifier model to put on top of the convolutional model
top_model = km.Sequential()
top_model.add(kl.Flatten(input_shape=model_VGG16_without_top.output_shape[1:]))
top_model.add(kl.Dense(64, activation='relu'))
top_model.add(kl.Dropout(0.5))
top_model.add(kl.Dense(1, activation='sigmoid'))
top_model.load_weights(data_dir_sub+'/weights_model_VGG_fully_connected_model_%d_epochs_%d_batch_size.h5' %(epochs, batch_size))
###Output
_____no_output_____
###Markdown
Finally we assemble these two models
###Code
# add the model on top of the convolutional base
model_VGG_LastConv_fcm = km.Model(inputs=model_VGG16_without_top.input, outputs=top_model(model_VGG16_without_top.output))
model_VGG_LastConv_fcm.summary()
###Output
_____no_output_____
###Markdown
Frozen blocksOur model is ready to be fine-tuned! However, as seen above, it contains a huge number of parameters that our computer may not handle. We will therefore start by fine-tuning only the last convolutional block. This is done by setting the `trainable` attribute of the layers that we do not want to be updated to `False` (a quick verification is sketched below).
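After running the freezing loop in the next cell, a short check like the following (a sketch reusing the existing `model_VGG_LastConv_fcm` object) verifies which layers will actually be updated during fine-tuning:

```python
# Only the last convolutional block and the classifier on top should report True.
for i, layer in enumerate(model_VGG_LastConv_fcm.layers):
    print(i, layer.name, layer.trainable)
```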
###Code
for layer in model_VGG_LastConv_fcm.layers[:15]:
layer.trainable = False
###Output
_____no_output_____
###Markdown
Generate Data
###Code
# prepare data augmentation configuration
train_datagen = kpi.ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = kpi.ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
data_dir_sub+"/train/",
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
data_dir_sub+"/validation/",
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='binary')
###Output
_____no_output_____
###Markdown
Training
###Code
model_VGG_LastConv_fcm.compile(loss='binary_crossentropy',
optimizer=ko.SGD(lr=1e-4, momentum=0.9),
metrics=['accuracy'])
# fine-tune the model
ts = time.time()
model_VGG_LastConv_fcm.fit_generator(
train_generator,
steps_per_epoch=N_train // batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=N_val // batch_size)
te = time.time()
t_learning_VGG_LastConv_fcm = te-ts
###Output
_____no_output_____
###Markdown
Prediction
###Code
ts = time.time()
score_VGG_LastConv_fcm_val = model_VGG_LastConv_fcm.evaluate_generator(validation_generator, N_val // batch_size, verbose=1)
score_VGG_LastConv_fcm_train = model_VGG_LastConv_fcm.evaluate_generator(train_generator, N_train // batch_size, verbose=1)
te = time.time()
t_prediction_VGG_LastConv_fcm = te-ts
print('Train accuracy:', score_VGG_LastConv_fcm_val[1])
print('Validation accuracy:', score_VGG_LastConv_fcm_train[1])
print("Time Prediction: %.2f seconds" %t_prediction_VGG_LastConv_fcm)
###Output
_____no_output_____
###Markdown
Prediction on Kaggle Dataset Let's now see how our trained model performs on the real Kaggle test dataset (data/test). **Exercise** Apply the model to this dataset and display the results on a sample to check that it performs well (one possible sketch is given in the cell below).
###Code
# %load solutions/test_kaggle.py
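# One possible solution (a sketch, not the official solution file): we assume the
# test images sit directly in data/test/ as in the Kaggle download, keep the
# 150x150 input size used for training, and predict with the fine-tuned model.
test_dir = data_dir + "test/"
test_files = sorted(os.listdir(test_dir))[:9]
fig = plt.figure(figsize=(12, 12))
for i, f in enumerate(test_files):
    img = kpi.load_img(test_dir + f, target_size=(img_width, img_height))
    x = kpi.img_to_array(img) / 255.
    proba = model_VGG_LastConv_fcm.predict(np.expand_dims(x, axis=0))[0, 0]
    label = "dog" if proba > 0.5 else "cat"  # classes follow the alphabetical cats/dogs order
    ax = fig.add_subplot(3, 3, i + 1)
    ax.imshow(x)
    ax.set_title("%s (p=%.2f)" % (label, proba))
    ax.axis("off")
plt.show()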
###Output
_____no_output_____
|
SEEDS_2021.ipynb
|
###Markdown
Loading librariesThe libraries needed for machine learning, graph plotting, and so on are loaded here.
###Code
import sys
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
###Output
_____no_output_____
###Markdown
Setting the constantsValues that may change the behaviour of the later parts of the program are defined in the following variables. Modify them as needed.
###Code
epochs = 10
batch_size = 1024
num_neuron = 1024
num_layer = 5
activation = "relu"
###Output
_____no_output_____
###Markdown
Downloading the data file
###Code
!if [[ ! -e 'FIB1.csv' ]]; then wget 'https://www-hasegawa.ist.osaka-u.ac.jp/~ykoizumi/FIB1.csv'; fi
###Output
_____no_output_____
###Markdown
Definition of the functions
###Code
def normalize(x, axis=None):
xmean = x.mean(axis=axis, keepdims=True)
xstd = np.std(x, axis=axis, keepdims=True)
zscore = (x - xmean) / xstd
return xmean, xstd, zscore
def denormalize(xmean, xstd, zscore):
x = zscore * xstd + xmean
return x
# Training
def train_model(x, y, model, epochs, batch_size):
# early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
# history = model.fit(x, y, epochs=epochs, batch_size=batch_size, validation_split = 0.1, verbose=1, callbacks=[early_stop])
# history = model.fit(x, y, epochs=epochs, batch_size=batch_size, validation_split = 0.1, verbose=1)
history = model.fit(x, y, epochs=epochs, batch_size=batch_size, verbose=1)
return model, history
# Model construction
def build_model(num_neuron, num_layer, activation = "relu"):
model = keras.Sequential()
model.add(layers.Dense(num_neuron, input_shape=(1,), activation=activation))
for i in range(num_layer - 1):
model.add(layers.Dense(num_neuron, activation=activation))
model.add(layers.Dense(1))
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss="mse", optimizer=optimizer, metrics=["mae", "mse"])
return model
def load_data(file_name):
data = pd.read_csv(file_name).sort_values("IP")
data = data[~data.duplicated(subset="IP")]
data = data.reset_index()
data["position"] = data.index.values
return np.array(data["IP"]), np.array(data["position"])
def plot_history(history):
figure_width = 10
figure_height = figure_width / 1.6180
marker_size = 10
line_width = 2
plt.rcParams["font.size"] = 16
plt.rcParams["xtick.direction"] = "in"
plt.rcParams["ytick.direction"] = "in"
plt.rcParams["axes.linewidth"] = 1.0
hist = pd.DataFrame(history.history)
hist["epoch"] = history.epoch
fig = plt.figure(figsize=(figure_width, figure_height))
ax = fig.add_subplot(1, 1, 1)
ax.plot(hist["epoch"], hist["mse"], label="Train Error")
# ax.plot(hist["epoch"], hist["val_mse"], label="Val Error")
ax.set_xlabel("Epoch")
ax.set_ylabel("Mean Square Error [$y^2$]")
ax.set_xlim(0,)
ax.set_ylim(0,)
# plt.legend(
# bbox_to_anchor=(0, 1),
# loc="upper left",
# frameon=False,
# borderaxespad=0,
# labelspacing=0.1,
# )
return
def plot_result(x_orig, y_orig, y_pred):
figure_width = 10
figure_height = figure_width / 1.6180
marker_size = 10
line_width = 2
plt.rcParams["font.size"] = 16
plt.rcParams["xtick.direction"] = "in"
plt.rcParams["ytick.direction"] = "in"
plt.rcParams["axes.linewidth"] = 1.0
fig = plt.figure(figsize=(figure_width, figure_height))
ax = fig.add_subplot(1, 1, 1)
ax.plot(x_orig, y_orig, "b-", label="Original position")
ax.plot(x_orig, y_pred, "r-", label="Learned index")
ax.set_xlim(0,)
ax.set_ylim(0,)
ax.set_ylabel("Position")
ax.set_xlabel("IP Prefix/IP Address")
plt.legend(
bbox_to_anchor=(0, 1),
loc="upper left",
frameon=False,
borderaxespad=0,
labelspacing=0.1,
)
return
def plot_subfigure(x_orig, y_orig, y_pred, div=(2, 3)):
figure_width = 20
figure_height = figure_width / 1.6180
marker_size = 10
line_width = 2
plt.rcParams["font.size"] = 12
plt.rcParams["xtick.direction"] = "in"
plt.rcParams["ytick.direction"] = "in"
plt.rcParams["axes.linewidth"] = 1.0
fig = plt.figure(figsize=(figure_width, figure_height))
plt.subplots_adjust(wspace=0.4, hspace=0.6)
num_figure = div[0] * div[1]
x_min = np.min(x_orig)
x_max = np.max(x_orig)
x_diff = x_max - x_min
x_delta = x_diff / num_figure
for i in range(num_figure):
left = (x_delta * i) + x_min
right = (x_delta * (i + 1)) + x_min
ind = (left <= x_orig) & (x_orig <= right)
x1 = x_orig[ind]
y1 = y_orig[ind]
y2 = y_pred[ind]
ax = fig.add_subplot(div[0], div[1], i + 1)
ax.plot(x1, y1, "b-", label="Original position")
ax.plot(x1, y2, "r-", label="Learned index")
# ax.set_xlim(0,)
# ax.set_ylim(0,)
ax.set_ylabel("Position")
ax.set_xlabel("IP Prefix/IP Address")
ax.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
ax.xaxis.set_major_formatter(ScalarFormatter(useMathText=True))
ax.ticklabel_format(style="sci", axis="both", scilimits=(0,0))
ax.legend(
loc="upper left",
frameon=False,
borderaxespad=0,
labelspacing=0.1,
)
return
###Output
_____no_output_____
###Markdown
Loading and normalizing the data Read the data from the specified file and extract the required values. The values are then normalized so that they are easier to handle for machine learning.
###Code
# Load data
x_np, y_np = load_data("FIB1.csv")
# Normalization
xmean, xstd, norm_x = normalize(x_np)
ymean, ystd, norm_y = normalize(y_np)
###Output
_____no_output_____
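###Markdown
A quick, purely illustrative sanity check of the two helper functions above: `denormalize` inverts `normalize`, so the original values are recovered from the z-scores.
###Code
check_mean, check_std, check_z = normalize(np.array([1.0, 2.0, 3.0, 4.0]))
print(check_z)                                      # z-scores with mean 0 and std 1
print(denormalize(check_mean, check_std, check_z))  # recovers [1. 2. 3. 4.]
###Output
_____no_output_____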
###Markdown
Building, training, and running the machine-learning model Build the machine-learning model, train it on the prepared data, and derive predictions with the trained model.
###Code
# Build model
index = build_model(num_neuron=num_neuron, num_layer=num_layer, activation=activation)
# Train model
index, history = train_model(x=norm_x, y=norm_y, model=index, epochs=epochs, batch_size=batch_size)
# Save weights and biases
index.save("weight-single-learned-index.h5")
# Load weights and biases
# index.load_weights("weight-single-learned-index.h5")
# Prediction result
y_pred = denormalize(ymean, ystd, index.predict(norm_x))
###Output
_____no_output_____
###Markdown
Plotting the results Displaying the training process: show how the error (mean squared error) changed over the course of training.
###Code
plot_history(history)
###Output
_____no_output_____
###Markdown
Display of the whole regression
###Code
plot_result(x_np, y_np, y_pred)
###Output
_____no_output_____
###Markdown
Detailed display of the regression
###Code
plot_subfigure(x_np, y_np, y_pred, div=(4, 4))
###Output
_____no_output_____
###Markdown
Displaying your own research results
###Code
def plot_user_graph(x, y, xlabel="", ylabel=""):
figure_width = 10
figure_height = figure_width / 1.6180
marker_size = 12
line_width = 2
plt.rcParams["font.size"] = 16
plt.rcParams["xtick.direction"] = "in"
plt.rcParams["ytick.direction"] = "in"
plt.rcParams["axes.linewidth"] = 1.0
fig = plt.figure(figsize=(figure_width, figure_height))
ax = fig.add_subplot(1, 1, 1)
ax.plot(x, y, "ro-", linewidth=line_width, markerfacecolor='w', markersize=marker_size, markeredgewidth=line_width)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
return
def plot_user_scatter(x, y, xlabel="", ylabel=""):
figure_width = 10
figure_height = figure_width / 1.6180
marker_size = 12
line_width = 2
plt.rcParams["font.size"] = 16
plt.rcParams["xtick.direction"] = "in"
plt.rcParams["ytick.direction"] = "in"
plt.rcParams["axes.linewidth"] = 1.0
fig = plt.figure(figsize=(figure_width, figure_height))
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x, y, s=120, c="w", edgecolors="r", marker="o", linewidths=line_width)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
return
def plot_user_bar(x, y, xlabel="", ylabel=""):
figure_width = 10
figure_height = figure_width / 1.6180
marker_size = 12
line_width = 2
plt.rcParams["font.size"] = 16
plt.rcParams["xtick.direction"] = "in"
plt.rcParams["ytick.direction"] = "in"
plt.rcParams["axes.linewidth"] = 1.0
fig = plt.figure(figsize=(figure_width, figure_height))
ax = fig.add_subplot(1, 1, 1)
ax.bar(x, y, width=0.8, color="w", edgecolor="r", linewidth=line_width)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
return
x = [0, 1, 2, 3, 4, 5] # input here
y = [0, 1, 4, 9, 7, 4] # input here
plot_user_graph(x=x, y=y, xlabel="Parameter", ylabel="Value") # line plot
plot_user_scatter(x=x, y=y, xlabel="Parameter", ylabel="Value") # scatter plot
plot_user_bar(x=x, y=y, xlabel="Parameter", ylabel="Value") # bar chart
###Output
_____no_output_____
|
dense_correspondence/training/training_tutorial.ipynb
|
###Markdown
Load the configuration for training
###Code
config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
'dataset', 'composite', 'caterpillar_upright.yaml')
config = utils.getDictFromYamlFilename(config_filename)
train_config_file = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
'training', 'training.yaml')
train_config = utils.getDictFromYamlFilename(train_config_file)
dataset = SpartanDataset(config=config)
logging_dir = "trained_models/tutorials"
num_iterations = 3500
d = 3 # the descriptor dimension
name = "caterpillar_%d" %(d)
train_config["training"]["logging_dir_name"] = name
train_config["training"]["logging_dir"] = logging_dir
train_config["dense_correspondence_network"]["descriptor_dimension"] = d
train_config["training"]["num_iterations"] = num_iterations
TRAIN = True
EVALUATE = True
###Output
Using SpartanDataset:
- in train mode
- number of scenes 11
- total images: 2851
###Markdown
Train the networkThis should take about ~12-15 minutes with a GTX 1080 Ti
###Code
# quick sanity check that PyTorch can run a small module on the GPU
import torch
class Test (torch.nn.Module):
def __init__(self):
super(Test, self).__init__()
self.layer = torch.nn.Linear(1, 10)
def forward (self, x):
return self.layer(x)
t = Test()
t.to(torch.device('cuda'))
t(torch.rand([20,1]).to(torch.device('cuda')))
# All of the saved data for this network will be located in the
# code/data/pdc/trained_models/tutorials/caterpillar_3 folder
if TRAIN:
print "training descriptor of dimension %d" %(d)
train = DenseCorrespondenceTraining(dataset=dataset, config=train_config)
train.run()
print "finished training descriptor of dimension %d" %(d)
###Output
training descriptor of dimension 3
using SINGLE_OBJECT_WITHIN_SCENE
logging_dir: /home/michelism/data/pdc/trained_models/tutorials/caterpillar_3
###Markdown
Evaluate the network quantitativelyThis should take ~5 minutes.
###Code
model_folder = os.path.join(logging_dir, name)
model_folder = utils.convert_data_relative_path_to_absolute_path(model_folder)
if EVALUATE:
DCE = DenseCorrespondenceEvaluation
num_image_pairs = 100
DCE.run_evaluation_on_network(model_folder, num_image_pairs=num_image_pairs)
###Output
_____no_output_____
###Markdown
Load the configuration for training
###Code
config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
'dataset', 'composite', 'caterpillar_upright.yaml')
config = utils.getDictFromYamlFilename(config_filename)
train_config_file = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
'training', 'training.yaml')
train_config = utils.getDictFromYamlFilename(train_config_file)
dataset = SpartanDataset(config=config)
logging_dir = "trained_models/tutorials"
num_iterations = 3500
d = 3 # the descriptor dimension
name = "caterpillar_%d" %(d)
train_config["training"]["logging_dir_name"] = name
train_config["training"]["logging_dir"] = logging_dir
train_config["dense_correspondence_network"]["descriptor_dimension"] = d
train_config["training"]["num_iterations"] = num_iterations
TRAIN = True
EVALUATE = True
###Output
Using SpartanDataset:
- in train mode
- number of scenes 11
- total images: 2851
###Markdown
Train the networkThis should take about ~12-15 minutes with a GTX 1080 Ti
###Code
# All of the saved data for this network will be located in the
# code/data/pdc/trained_models/tutorials/caterpillar_3 folder
if TRAIN:
print "training descriptor of dimension %d" %(d)
train = DenseCorrespondenceTraining(dataset=dataset, config=train_config)
train.run()
print "finished training descriptor of dimension %d" %(d)
###Output
training descriptor of dimension 3
using SINGLE_OBJECT_WITHIN_SCENE
logging_dir: /home/yili/data/pdc/trained_models/tutorials/caterpillar_3
###Markdown
Evaluate the network quantitativelyThis should take ~5 minutes.
###Code
model_folder = os.path.join(logging_dir, name)
model_folder = utils.convert_data_relative_path_to_absolute_path(model_folder)
if EVALUATE:
DCE = DenseCorrespondenceEvaluation
num_image_pairs = 100
DCE.run_evaluation_on_network(model_folder, num_image_pairs=num_image_pairs)
###Output
_____no_output_____
###Markdown
Load the configuration for training
###Code
config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
'dataset', 'composite', 'caterpillar_only_9.yaml')
config = utils.getDictFromYamlFilename(config_filename)
train_config_file = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
'training', 'training.yaml')
train_config = utils.getDictFromYamlFilename(train_config_file)
dataset = SpartanDataset(config=config)
logging_dir = "code/data_volume/pdc/trained_models/tutorials"
num_iterations = 3500
d = 3 # the descriptor dimension
name = "caterpillar_%d" %(d)
train_config["training"]["logging_dir_name"] = name
train_config["training"]["logging_dir"] = logging_dir
train_config["dense_correspondence_network"]["descriptor_dimension"] = d
train_config["training"]["num_iterations"] = num_iterations
TRAIN = True
EVALUATE = True
###Output
_____no_output_____
###Markdown
Train the networkThis should take about ~12-15 minutes with a GTX 1080 Ti
###Code
# All of the saved data for this network will be located in the
# code/data_volume/pdc/trained_models/tutorials/caterpillar_3 folder
if TRAIN:
print "training descriptor of dimension %d" %(d)
train = DenseCorrespondenceTraining(dataset=dataset, config=train_config)
train.run()
print "finished training descriptor of dimension %d" %(d)
###Output
_____no_output_____
###Markdown
Evaluate the network quantitativelyThis should take ~5 minutes.
###Code
model_folder = os.path.join(logging_dir, name)
model_folder = utils.convert_to_absolute_path(model_folder)
if EVALUATE:
DCE = DenseCorrespondenceEvaluation
num_image_pairs = 100
DCE.run_evaluation_on_network(model_folder, num_image_pairs=num_image_pairs)
###Output
_____no_output_____
###Markdown
Load the configuration for training
###Code
config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
'dataset', 'composite', 'caterpillar_upright.yaml')
config = utils.getDictFromYamlFilename(config_filename)
train_config_file = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
'training', 'training.yaml')
train_config = utils.getDictFromYamlFilename(train_config_file)
dataset = SpartanDataset(config=config)
logging_dir = "trained_models/tutorials"
num_iterations = 3500
d = 3 # the descriptor dimension
name = "caterpillar_%d" %(d)
train_config["training"]["logging_dir_name"] = name
train_config["training"]["logging_dir"] = logging_dir
train_config["dense_correspondence_network"]["descriptor_dimension"] = d
train_config["training"]["num_iterations"] = num_iterations
TRAIN = True
EVALUATE = True
###Output
_____no_output_____
###Markdown
Train the networkThis should take about ~12-15 minutes with a GTX 1080 Ti
###Code
# All of the saved data for this network will be located in the
# code/data/pdc/trained_models/tutorials/caterpillar_3 folder
if TRAIN:
print "training descriptor of dimension %d" %(d)
train = DenseCorrespondenceTraining(dataset=dataset, config=train_config)
train.run()
print "finished training descriptor of dimension %d" %(d)
###Output
_____no_output_____
###Markdown
Evaluate the network quantitativelyThis should take ~5 minutes.
###Code
model_folder = os.path.join(logging_dir, name)
model_folder = utils.convert_data_relative_path_to_absolute_path(model_folder)
if EVALUATE:
DCE = DenseCorrespondenceEvaluation
num_image_pairs = 100
DCE.run_evaluation_on_network(model_folder, num_image_pairs=num_image_pairs)
###Output
_____no_output_____
###Markdown
See `evaluation_quantitative_tutorial.ipynb` for a better place to display the plots.
###Code
print(model_folder)
###Output
_____no_output_____
###Markdown
Load the configuration for training
###Code
config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
'dataset', 'composite', 'rope_601_planar_sequence_only.yaml')
config = utils.getDictFromYamlFilename(config_filename)
train_config_file = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
'training', 'training.yaml')
train_config = utils.getDictFromYamlFilename(train_config_file)
dataset = SpartanDataset(config=config)
print dataset.get_image_mean(), dataset.get_image_std_dev()
logging_dir = "code/data_volume/pdc_synthetic_2/trained_models/tutorials"
num_iterations = 3500
d = 3 # the descriptor dimension
normalize = True
name = "rope_601_planar_sequence_3"
train_config["training"]["logging_dir_name"] = name
train_config["training"]["logging_dir"] = logging_dir
train_config["dense_correspondence_network"]["descriptor_dimension"] = d
train_config["dense_correspondence_network"]["normalize"] = normalize
train_config["training"]["num_iterations"] = num_iterations
TRAIN = True
EVALUATE = True
###Output
Using SpartanDataset:
- in train mode
- number of scenes 1
- total images: 3500
[0.5573105812072754, 0.37420374155044556, 0.37020164728164673] [0.24336038529872894, 0.2987397611141205, 0.31875079870224]
###Markdown
TRAIN
###Code
# All of the saved data for this network will be located in the
# code/data_volume/pdc/trained_models/tutorials/caterpillar_3 folder
if TRAIN:
print "training descriptor of dimension %d" %(d)
train = DenseCorrespondenceTraining(dataset=dataset, config=train_config)
train.run()
print "finished training descriptor of dimension %d" %(d)
###Output
INFO:root:Loading knots info for scene rope_1465_contorted_perlin
###Markdown
EVAL
###Code
logging_dir = "code/data_volume/pdc_synthetic_2/trained_models/tutorials"
# names = ["rope_1254_contorted_task_depth_3", "rope_784_contorted_task_depth_3", "rope_523_contorted_task_depth_3", ]
names = ["rope_523_task_loops_quartersize_3", "rope_523_task_loops_halfsize_3"]
#if True: #EVALUATE
for name in names:
model_folder = os.path.join(logging_dir, name)
model_folder = utils.convert_to_absolute_path(model_folder)
DCE = DenseCorrespondenceEvaluation
num_image_pairs = 100
DCE.run_evaluation_on_network(model_folder, num_image_pairs=num_image_pairs)
###Output
got /home/priya/code/data_volume/pdc_synthetic_2/trained_models/tutorials/rope_523_task_loops_quartersize_3
got output_dir
got train_output_dir /home/priya/code/data_volume/pdc_synthetic_2/trained_models/tutorials/rope_523_task_loops_quartersize_3/analysis/train
got test_output_dir /home/priya/code/data_volume/pdc_synthetic_2/trained_models/tutorials/rope_523_task_loops_quartersize_3/analysis/test
got cross_scene_output_dir /home/priya/code/data_volume/pdc_synthetic_2/trained_models/tutorials/rope_523_task_loops_quartersize_3/analysis/cross_scene
making necessary dirs
model_param_file 003501.pth
###Markdown
Load the configuration for training
###Code
config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
'dataset', 'composite', 'keypoint_rope_downsample.yaml')
config = utils.getDictFromYamlFilename(config_filename)
train_config_file = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
'training', 'training_one_dp.yaml')
train_config = utils.getDictFromYamlFilename(train_config_file)
dataset = SpartanDataset(config=config)
# logging_dir = "trained_models/keypoint"
# num_iterations = 3500
name = "rope_"+time.strftime("%Y_%m_%d_%H_%M_%S")
# d = 3
train_config["training"]["logging_dir_name"] = name
# train_config["training"]["logging_dir"] = logging_dir
# train_config["dense_correspondence_network"]["descriptor_dimension"] = d
# train_config["training"]["num_iterations"] = num_iterations
TRAIN = True
EVALUATE = True
###Output
Using SpartanDataset:
- in train mode
- number of scenes 10
- total images: 553
###Markdown
Train the networkThis should take about ~12-15 minutes with a GTX 1080 Ti
###Code
# testing installation
import torch
print(torch.__version__)
print(torch.cuda.is_available())
a = torch.ones(3,3).to('cuda')
b = torch.ones(3,3).to('cuda')
# All of the saved data for this network will be located in the
# code/data/pdc/trained_models/tutorials/caterpillar_3 folder
if TRAIN:
# print("training descriptor of dimension %d" %(d))
train = DenseCorrespondenceTraining(dataset=dataset, config=train_config)
train.run()
# print("finished training descriptor of dimension %d" %(d))
###Output
INFO:root:Loading pose data for scene rope_2021_11_03_16_24_22_downsample
INFO:root:Loading pose data for scene rope_2021_11_03_16_28_38_downsample
INFO:root:Loading pose data for scene rope_2021_11_03_16_33_57_downsample
INFO:root:Loading pose data for scene rope_2021_11_03_16_37_34_downsample
INFO:root:Loading pose data for scene rope_2021_11_03_16_40_03_downsample
INFO:root:Loading pose data for scene rope_2021_11_03_16_42_33_downsample
INFO:root:Loading pose data for scene rope_2021_11_03_16_46_06_downsample
INFO:root:Loading pose data for scene rope_2021_11_03_16_48_04_downsample
INFO:root:Loading pose data for scene rope_2021_11_03_16_50_50_downsample
INFO:root:Loading pose data for scene rope_2021_11_03_16_53_07_downsample
INFO:root:setting up tensorboard_logger
INFO:root:tensorboard logger started
###Markdown
Evaluate the network quantitativelyThis should take ~5 minutes.
###Code
model_folder = os.path.join(logging_dir, name)
model_folder = utils.convert_data_relative_path_to_absolute_path(model_folder)
if EVALUATE:
DCE = DenseCorrespondenceEvaluation
num_image_pairs = 100
DCE.run_evaluation_on_network(model_folder, num_image_pairs=num_image_pairs)
###Output
_____no_output_____
|
Home update/notebook/Nuove Case.ipynb
|
###Markdown
Idealista
###Code
Latex(r"\href{" + url_idealista + r"}{Link Nuove case}")
###Output
_____no_output_____
###Markdown
Immobiliare
###Code
for i, f in enumerate(files, start = 1):
section = f'Casa {i}'
url = mapping_url[f]
display(Latex(r"\subsection{" + section + r"}"))
display(Latex(r"\href{" + url + r"}{Link}"))
image = PIL.Image.open(f)
display(image)
if i < len(files):
display(Latex(r"\newpage"))
###Output
_____no_output_____
|
Companies_Funding.ipynb
|
###Markdown
Load Data to Postgres SQL
###Code
!pip install psycopg2
!pip install ipython-sql
%load_ext sql
DB_ENDPOINT = "localhost"
DB = 'Companies_Funding_DB'
DB_USER = 'postgres'
DB_PASSWORD = 'Ilovegoogle@88'
DB_PORT = '5432'
# URL-encode the password: the '@' it contains would otherwise break the connection URL
from urllib.parse import quote_plus
# postgresql://username:password@host:port/database
conn_string = "postgresql://{}:{}@{}:{}/{}" \
.format(DB_USER, quote_plus(DB_PASSWORD), DB_ENDPOINT, DB_PORT, DB)
print(conn_string)
# the connection string must be defined before it is handed to the %sql magic
%sql $conn_string
rds_connection_string = "postgres:{}@localhost:5432/Companies_Funding_DB".format(quote_plus(DB_PASSWORD))
engine = create_engine(f'postgresql://{rds_connection_string}')
engine.table_names()
companies_df.to_sql(name='funding', con=engine, if_exists='append', index=False)
pd.read_sql_query('select * from funding', con=engine).head()
###Output
_____no_output_____
|
mhw_pipeline/oi-sst-to-zarr-example.ipynb
|
###Markdown
OISST to zarr via xarray and dask[Tony Cannistra](https://www.github.com/acannistra), January 2020---This notebook turns the OISST data from NOAA into a `zarr` directory for upload to cloud storage. **Components**:* Download OISST Data * Prepare OISST data as xarray `mfdataset`* Chunk OISST data through time (due to memory constraints)* Append each OISST chunk to local `zarr` object. * [optional] Upload local `zarr` to Google Cloud Storage.
###Code
# imports, parameters / directories
import os, zarr, gcsfs
import xarray as xr
import matplotlib.pyplot as plt
from datetime import datetime
from dateutil.relativedelta import relativedelta
LOCAL_OISST = 'data/'
LOCAL_ZARR = '/tmp/oisst.zarr'
TIME_CHUNK_SIZE_YEARS = 3
_____no_output_____
###Markdown
Download OISST
###Code
#TODO
###Output
_____no_output_____
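###Markdown
A minimal sketch of what the missing download step could look like. The NCEI URL pattern below is an assumption (daily AVHRR OISST v2.1 netCDF files grouped in monthly folders) and should be verified before use.
###Code
import os
import urllib.request
from datetime import datetime, timedelta

BASE_URL = ("https://www.ncei.noaa.gov/data/"
            "sea-surface-temperature-optimum-interpolation/v2.1/access/avhrr/")  # assumed layout

def download_oisst_day(date, out_dir=LOCAL_OISST):
    """Fetch one daily OISST netCDF file if it is not already present locally."""
    fname = "oisst-avhrr-v02r01.{:%Y%m%d}.nc".format(date)
    dest = os.path.join(out_dir, fname)
    if not os.path.exists(dest):
        urllib.request.urlretrieve(BASE_URL + "{:%Y%m}/".format(date) + fname, dest)
    return dest

# example: fetch the first week of the record
# for i in range(7):
#     download_oisst_day(datetime(1981, 9, 1) + timedelta(days=i))
###Output
_____no_output_____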
###Markdown
Prepare OISST `xarray.Dataset`
###Code
oisst = xr.open_mfdataset(os.path.join(LOCAL_OISST, "*.nc"), combine='nested', concat_dim = 'time', parallel=True)
oisst
## Need to chunk the data uniformly
oisst = oisst.chunk({
'time': 100,
'lat' : 50,
'lon' : 500
})
oisst.sst.data
## Create zarr compression encoding
compressor = zarr.Blosc(cname='zstd', clevel=3, shuffle=2)
encoding = {vname: {'compressor': compressor} for vname in oisst.variables}
encoding
###Output
_____no_output_____
###Markdown
Create time-based slices of OISST and append to `zarr`
###Code
time_chunk_start = datetime.utcfromtimestamp(int(oisst.time.min().values.astype('O')//1e9))
max_observation_date = datetime.utcfromtimestamp(int(oisst.time.max().values.astype('O')//1e9))
year_delta = relativedelta(years=TIME_CHUNK_SIZE_YEARS)
day_offset = relativedelta(days=1)
first_write = True
while time_chunk_start < max_observation_date:
time_chunk_end = time_chunk_start + year_delta
print(time_chunk_start, time_chunk_end)
timechunk = oisst.sel(time = slice(
str(time_chunk_start),
str(time_chunk_end)
))
if first_write:
_action = timechunk.to_zarr(LOCAL_ZARR, compute=False, mode='w', encoding=encoding)
first_write = False
else:
_action = timechunk.to_zarr(LOCAL_ZARR, compute=False, mode='a', append_dim='time')
try:
_action.compute()
except:
print("ERROR: {}".format(time_chunk_start, time_chunk_end))
continue
time_chunk_start = time_chunk_end + day_offset
###Output
1981-09-01 00:00:00 1984-09-01 00:00:00
1984-09-02 00:00:00 1987-09-02 00:00:00
1987-09-03 00:00:00 1990-09-03 00:00:00
1990-09-04 00:00:00 1993-09-04 00:00:00
1993-09-05 00:00:00 1996-09-05 00:00:00
1996-09-06 00:00:00 1999-09-06 00:00:00
1999-09-07 00:00:00 2002-09-07 00:00:00
2002-09-08 00:00:00 2005-09-08 00:00:00
2005-09-09 00:00:00 2008-09-09 00:00:00
2008-09-10 00:00:00 2011-09-10 00:00:00
2011-09-11 00:00:00 2014-09-11 00:00:00
2014-09-12 00:00:00 2017-09-12 00:00:00
2017-09-13 00:00:00 2020-09-13 00:00:00
###Markdown
Examine Resulting `zarr`
###Code
oisst_z = xr.open_zarr(LOCAL_ZARR)
oisst_z.sst.data
mean_sst = oisst_z.sst.mean(dim='time').compute() # ~5 minutes
fig = plt.figure(figsize=(10,6))
mean_sst.plot()
mean_sst_slice = oisst_z.sel(lat=slice(25, 60), lon = slice(220, 250)).sst.mean(dim='time').compute() # faster (20s)
fig = plt.figure(figsize=(10,6))
mean_sst_slice.plot(cmap='Reds')
###Output
_____no_output_____
###Markdown
[optional] Upload to GCS*takes ~5m*
###Code
%%bash
source ~/.bashrc
gsutil -m cp -r /tmp/oisst.zarr/ gs://oisst
###Output
_____no_output_____
###Markdown
Assess functionality of GCP `zarr`
###Code
gcp_project_id = '170771369993'
fs = gcsfs.GCSFileSystem(project=gcp_project_id, token="/home/jovyan/gc-pangeo-storage.json")
oisst_gcp_z = xr.open_zarr(fs.get_mapper('oisst/oisst.zarr'))
oisst_gcp_z.sst.data
mean_sst_slice = oisst_gcp_z.sel(lat=slice(25, 60), lon = slice(220, 250)).sst.mean(dim='time').compute() # faster (20s)
fig = plt.figure(figsize=(10,6))
mean_sst_slice.plot(cmap='Reds')
###Output
_____no_output_____
|
VAERS_Side_Effect_Data_Preprocessing.ipynb
|
###Markdown
Downloading Data from Kaggle To download the dataset from Kaggle, you need the kaggle.json file that contains your username and API key. To obtain it, just follow this link: https://www.analyticsvidhya.com/blog/2021/06/how-to-load-kaggle-datasets-directly-into-google-colab/ and download it. Once you have it, copy '.env.example' and rename the copy to '.env'. Then place the username and API key from your kaggle.json file into the .env file. Now you can proceed with downloading the data and preprocessing it. At the end, you will obtain the processed dataset.
###Code
! pip install kaggle -q
! pip install python-dotenv -q
import dotenv
import os
import errno
import zipfile
dotenv.load_dotenv()
json_content = '{"username":' + f"\"{os.getenv('KAGGLE_USERNAME')}\"" + ',"key":' + f"\"{os.getenv('KAGGLE_KEY')}\"" + '}'
home = os.path.expanduser("~")
kaggle_path = os.path.join(home, '.kaggle')
try:
os.makedirs(kaggle_path)
with open(os.path.join(kaggle_path, 'kaggle.json'), 'w') as f:
f.write(json_content)
except OSError as e:
if e.errno != errno.EEXIST:
raise
! kaggle datasets download landfallmotto/covid19-vaccine-adverse-reactions-vaers-dataset
! mkdir data
with zipfile.ZipFile('covid19-vaccine-adverse-reactions-vaers-dataset.zip', 'r') as zip_ref:
zip_ref.extractall('data')
os.remove('covid19-vaccine-adverse-reactions-vaers-dataset.zip')
###Output
_____no_output_____
###Markdown
Data Preprocessing
###Code
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
#Importing dataset
df = pd.read_csv('data/vaers_jan_nov_2021.csv')
df.head()
# Check for null values
df.isna().sum()
#Removing all JENSSEN vaccine information
vaers = df.loc[df['VAX_MANU'].str.contains('PFIZER|MODERNA')]
len(vaers)
#Removing useless features
vaers = vaers[['VAERS_ID','SYMPTOM1','VAX_MANU', 'AGE_YRS', 'SEX',]]
len(vaers)
#Removing null and empty values drom dataset
vaers = vaers.dropna()
len(vaers)
#Removing duplicate ID fields
vaers = vaers.drop_duplicates(subset=['VAERS_ID'], keep=False)
len(vaers)
#Removing no adverse event rows
vaers = vaers[~vaers.SYMPTOM1.str.contains("No adverse event")]
len(vaers)
#Removing unknown sex/gender rows
vaers = vaers[~vaers.SEX.str.contains("U")]
len(vaers)
vaers['AGE_YRS'] = vaers['AGE_YRS'].apply(np.ceil)
vaers.head()
#Finding age outliers
Q1 = vaers['AGE_YRS'].quantile(0.25)
Q3 = vaers['AGE_YRS'].quantile(0.75)
IQR = Q3 - Q1
Lower_Fence = Q1 - (1.5 * IQR)
Upper_Fence = Q3 + (1.5 * IQR)
print(IQR)
print(Lower_Fence)
print(Upper_Fence)
#Removing outliers from the dataset
vaers = vaers[~((vaers['AGE_YRS'] < Lower_Fence) |(vaers['AGE_YRS'] > Upper_Fence))]
len(vaers)
# Changing the names of columns
vaers = vaers.rename(columns={'VAERS_ID':'ID', 'VAX_MANU':'Vaccine', 'AGE_YRS':'Age', 'SEX':'Sex', 'SYMPTOM1':'Side_Effect'})
vaers.head()
c = vaers.groupby(['Side_Effect'])['ID'].count().nlargest(100)
count = vaers['Vaccine'].value_counts()
count = vaers['Age'].value_counts()
with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also
print(c)
temp = vaers.copy()
vaers = temp.copy()
vaers
###Output
_____no_output_____
###Markdown
Feature Extraction
###Code
! pip install fuzzywuzzy -q
! pip install python-Levenshtein -q
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
# Side Effects based on most common side effects of Covid 19 according
# to https://www.cdc.gov/coronavirus/2019-ncov/vaccines/expect/after.html
# More side effects can be added to the 'side_effects' list, but doing so might affect
# the overall accuracy and performance
side_effects = ['Headache', 'Pain', 'Swelling']
print(side_effects)
# Function to perform string matching
def checker(wrong_options,correct_options):
names_array=[]
ratio_array=[]
for wrong_option in wrong_options:
if wrong_option in correct_options:
names_array.append(wrong_option)
ratio_array.append('100')
else:
x=process.extractOne(wrong_option,correct_options,scorer=fuzz.token_set_ratio, score_cutoff=90)
if (x != None):
names_array.append(x[0])
ratio_array.append(x[1])
else:
names_array.append(None)
ratio_array.append(None)
return names_array,ratio_array
# Convert symptoms table to list
str2Match = vaers['Side_Effect'].tolist()
strOptions = side_effects
# Perform string matching on symptoms column
name_match,ratio_match=checker(str2Match,strOptions)
# Replace the symptoms with the newly matched ones
# (align on vaers' index, which is no longer contiguous after the filtering above)
vaers['Side_Effect'] = pd.Series(name_match, index=vaers.index)
len(vaers)
###Output
_____no_output_____
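###Markdown
To make the matching step concrete, here is a purely illustrative call on a made-up symptom string: token_set_ratio treats "Pain" as a full token subset of the input, so it maps to the canonical label, while strings scoring below the 90 cutoff come back as None and are dropped with the NaN rows below.
###Code
# illustrative only; "Injection site pain" is a hypothetical input string
print(process.extractOne("Injection site pain", side_effects,
                         scorer=fuzz.token_set_ratio, score_cutoff=90))
# expected to return something like ('Pain', 100)
###Output
_____no_output_____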
###Markdown
Final DataFrame
###Code
#Removing NaN values
vaers = vaers.dropna()
vaers = vaers.reset_index(drop=True)
len(vaers)
#Confirm if any column has NaN value
vaers.isna().any()
# Extract the processed dataset, download it and then save it in your google drive under vaers folder
vaers.to_csv(r'data/processed_vaers_dataset.csv', index = False)
###Output
_____no_output_____
###Markdown
Random Forest Classification
###Code
! pip install tensorflow_decision_forests -q
! pip install wurlitzer -q
import tensorflow_decision_forests as tfdf
import os
import numpy as np
import pandas as pd
import tensorflow as tf
import math
try:
from wurlitzer import sys_pipes
except:
from colabtools.googlelog import CaptureLog as sys_pipes
from IPython.core.magic import register_line_magic
from IPython.display import Javascript
# Some of the model training logs can cover the full
# screen if not compressed to a smaller viewport.
# This magic allows setting a max height for a cell.
@register_line_magic
def set_cell_height(size):
display(
Javascript("google.colab.output.setIframeHeight(0, true, {maxHeight: " +
str(size) + "})"))
###Output
_____no_output_____
###Markdown
Data Preperation
###Code
# using One-Hot Encoder to generate binary values using get_dummies
vaers_rf = pd.get_dummies(vaers, columns=["Vaccine", "Sex"])
vaers_rf = vaers_rf.iloc[:,1:7]
vaers_rf = vaers_rf.astype({"Age": int, "Vaccine_MODERNA": int, "Vaccine_PFIZER\\BIONTECH": int, "Sex_F": int, "Sex_M": int})  # escape the literal backslash in the column name
vaers_rf
# Encode the categorical label into an integer.
#
# Details:
# This stage is necessary if your classification label is represented as a
# string. Note: Keras expected classification labels to be integers.
# Name of the label column.
label = "Side_Effect"
classes = vaers_rf[label].unique().tolist()
print(f"Label classes: {classes}")
vaers_rf[label] = vaers_rf[label].map(classes.index)
# Split the dataset into a training and a testing dataset.
def split_dataset(dataset, test_ratio=0.20):
"""Splits a panda dataframe in two."""
test_indices = np.random.rand(len(dataset)) < test_ratio
return dataset[~test_indices], dataset[test_indices]
train_ds_pd, test_ds_pd = split_dataset(vaers_rf)
print("{} examples in training, {} examples for testing.".format(
len(train_ds_pd), len(test_ds_pd)))
train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_ds_pd, label=label)
test_ds = tfdf.keras.pd_dataframe_to_tf_dataset(test_ds_pd, label=label)
###Output
_____no_output_____
###Markdown
Model Training
###Code
%set_cell_height 300
# Specify the model.
rf_model = tfdf.keras.RandomForestModel()
# Optionally, add evaluation metrics.
rf_model.compile(
metrics=["accuracy"])
# Train the model.
# "sys_pipes" is optional. It enables the display of the training logs.
with sys_pipes():
rf_model.fit(x=train_ds)
# Summary of the Model
%set_cell_height 300
rf_model.summary()
###Output
_____no_output_____
###Markdown
Model Evaluation
###Code
# Model Evaluation on Test dataset
evaluation = rf_model.evaluate(test_ds, return_dict=True)
print()
for name, value in evaluation.items():
print(f"{name}: {value:.4f}")
# General Evaluation of the model
rf_model.make_inspector().evaluation()
###Output
_____no_output_____
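###Markdown
For completeness, a hypothetical example of querying the trained random forest on a single new record (analogous to the LSTM example further below). The column names must match the one-hot layout built above, and the prediction returns one probability per class in the order of the `classes` list.
###Code
# hypothetical record: a 40-year-old female who received the Pfizer vaccine
new_record = pd.DataFrame([{
    "Age": 40,
    "Vaccine_MODERNA": 0,
    "Vaccine_PFIZER\\BIONTECH": 1,  # this column name contains a literal backslash
    "Sex_F": 1,
    "Sex_M": 0,
}])
new_ds = tfdf.keras.pd_dataframe_to_tf_dataset(new_record)
pred = rf_model.predict(new_ds)
print(pred, classes[np.argmax(pred)])
###Output
_____no_output_____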
###Markdown
Save Model and Graph Plotting
###Code
! mkdir model
# Save Model
rf_model.save("model/RFModel")
# Random Forest Tree plotter, each box represents a Tree Node
tfdf.model_plotter.plot_model_in_colab(rf_model, tree_idx=0, max_depth=3)
# Plotting Accuracy and Loss of the model
import matplotlib.pyplot as plt
logs = rf_model.make_inspector().training_logs()
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot([log.num_trees for log in logs], [log.evaluation.accuracy for log in logs])
plt.xlabel("Number of trees")
plt.ylabel("Accuracy (out-of-bag)")
plt.subplot(1, 2, 2)
plt.plot([log.num_trees for log in logs], [log.evaluation.loss for log in logs])
plt.xlabel("Number of trees")
plt.ylabel("Logloss (out-of-bag)")
plt.show()
###Output
_____no_output_____
###Markdown
LSTM Model Training
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
from keras.callbacks import EarlyStopping
from keras.layers import Dropout
import plotly.graph_objs as go
import cufflinks
from IPython.core.interactiveshell import InteractiveShell
import plotly.figure_factory as ff
InteractiveShell.ast_node_interactivity = 'all'
from plotly.offline import iplot
cufflinks.go_offline()
cufflinks.set_config_file(world_readable=True, theme='pearl')
###Output
_____no_output_____
###Markdown
Data Preperation
###Code
# Upper bound on the integer values fed to the Embedding layer (acts as its vocabulary size).
MAX_NB_SIDE = 5000
# This is fixed.
EMBEDDING_DIM = 100
# using One-Hot Encoder to generate binary values using get_dummies
vaers_lstm = pd.get_dummies(vaers, columns=["Vaccine", "Sex"])
vaers_lstm
X = vaers_lstm.iloc[:,2:7].values
print('Shape of data tensor:', X.shape)
Y = pd.get_dummies(vaers_lstm['Side_Effect']).values
print('Shape of label tensor:', Y.shape)
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.20, random_state = 42)
print(X_train.shape,Y_train.shape)
print(X_test.shape,Y_test.shape)
###Output
(15794, 5) (15794, 3)
(3949, 5) (3949, 3)
###Markdown
Model Training
###Code
model = Sequential()
model.add(Embedding(MAX_NB_SIDE, EMBEDDING_DIM, input_length=X.shape[1]))
model.add(SpatialDropout1D(0.2))
model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(3, activation='softmax'))  # softmax output for the 3 classes, matching categorical_crossentropy
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
epochs = 5
batch_size = 64
history = model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size,validation_split=0.1,callbacks=[EarlyStopping(monitor='val_loss', patience=3, min_delta=0.0001)])
###Output
_____no_output_____
###Markdown
Model Evaluation
###Code
accr = model.evaluate(X_test,Y_test)
print('Test set\n Loss: {:0.3f}\n Accuracy: {:0.3f}'.format(accr[0],accr[1]))
###Output
124/124 [==============================] - 0s 3ms/step - loss: 0.8932 - accuracy: 0.6280
Test set
Loss: 0.893
Accuracy: 0.628
###Markdown
Saving Model and Graph Plotting
###Code
! mkdir model
# Save Model
model.save("model/LSTMModel")
plt.title('Loss')
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show();
plt.title('Accuracy')
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='test')
plt.legend()
plt.show();
# Predict the symptom using new data
new_data = [['36', '1', '0', '1', '0']] # Age, MODERNA, PFIZER, FEMALE, MALE
new_data = np.asarray(new_data).astype(np.float32)
pred = model.predict(new_data)
labels = ['Headache', 'Pain', 'Swelling']
print(pred, labels[np.argmax(pred)])
###Output
[[0.14577474 0.3763443 0.08911598]] Pain
|
sequence_analysis_walkthrough/QIIME2_Merging_and_Processing.ipynb
|
###Markdown
> Pipeline to Merge Sequencing Libraries from Different Sequencing Runs that have been denoised by DADA2 and Finish with the Standard Processing Pipeline* Merge multiple libraries for the *SAME* sequencing region (i.e. ITS+ITS... or SSU+SSU...)* Complete remainder of processing pipeline: prepare Rep Seqs, Classify Seqs, Make Tree, Make Phyloseq Object*Informed by the "Fecal Microbiota Transplant Tutorial" for QIIME2* Commands to Install Dependencies || QIIME2 ||* conda create -n qiime2-2018.2 --file https://data.qiime2.org/distro/core/qiime2-2018.2-conda-linux-64.txt * source activate qiime2-2018.2 || Copyrighter rrn Database ||* The script will automatically install the curated GreenGenes rrn attribute database* https://github.com/fangly/AmpliCopyrighter || rpy2 (don't use conda version) ||* pip install rpy2 || phyloseq ||* conda install -c r-igraph * Rscript -e "source('http://bioconductor.org/biocLite.R');biocLite('phyloseq')" || R packages ||* ape (natively installed within the conda environment) Citations * Caporaso, J. G., Kuczynski, J., Stombaugh, J., Bittinger, K., Bushman, F. D., Costello, E. K., *et al.* (2010). QIIME allows analysis of high-throughput community sequencing data. Nature Methods, 7(5), 335-336.* McMurdie and Holmes (2013) phyloseq: An R Package for Reproducible Interactive Analysis and Graphics of Microbiome Census Data. PLoS ONE. 8(4):e61217* Paradis E., Claude J. & Strimmer K. 2004. APE: analyses of phylogenetics and evolution in R language. Bioinformatics 20: 289-290.* Angly, F. E., Dennis, P. G., Skarshewski, A., Vanwonterghem, I., Hugenholtz, P., & Tyson, G. W. (2014). CopyRighter: a rapid tool for improving the accuracy of microbial community profiles through lineage-specific gene copy number correction. Microbiome, 2(1), 11. Last Modified by R. Wilhelm on October 12th, 2017 Step 1: User Input
###Code
import os, re
# Provide the directory where files are located
#directory = '/home/user/PROJECT/16S/'
directory = ''
# Provide a list of all the FeatureTables you will merge
# Produced by QIIME2 in STEP 7 (i.e. DADA2 Denoising/Merging/FeatureTable)
#qva_files = ['SSU.table.Library.1.qza','SSU.table.Library.2.qza', ..., 'SSU.table.Library.n.qza']
qva_files = ['','','']
# Provide a list of all the Representative Sequences you will merge
# Also produced from STEP 7 (i.e. DADA2 Denoising/Merging/FeatureTable)
#seq_files = ['SSU.rep.seqs.Library.1.qza','SSU.rep.seqs.Library.2.qza', ..., 'SSU.rep.seqs.Library.n.qza']
seq_files = ['','','']
# Provide a concatenated metadatafile with matching column order
#metadata = 'SSU.metadata.combined.tsv'
metadata = ''
## Enter Minimum Support for Keeping QIIME Classification
# Note: Classifications that do not meet this criteria will simply be retained, but labeled 'putative'
min_support = 0.8
domain = 'bacteria' # 'bacteria' | 'fungi'
################################## IMPORTANT #####################################
## DO NOT give your samples names that begin with a digit. It will break this script ##
## at the import to phyloseq stage. It doesn't ruin anything, but makes it a pain. ##
########################################################################################
###Output
_____no_output_____
###Markdown
Step 2: Merge Feature Tables using QIIME
###Code
%mkdir $directory/output
combined_qva = [m+" "+n for m,n in zip(["--i-tables"]*len(qva_files),qva_files)]
os.system(' '.join([
"qiime feature-table merge",
' '.join(combined_qva),
"--o-merged-table "+directory+"/output/merged.table.final.qza"
]))
# Include "--p-overlap-method sum" if you have re-run samples with the same name
combined_seq = [m+" "+n for m,n in zip(["--i-data"]*len(seq_files),seq_files)]
os.system(' '.join([
"qiime feature-table merge-seqs",
' '.join(combined_seq),
"--o-merged-data "+directory+"/output/merged.rep.seqs.final.qza"
]))
###Output
_____no_output_____
###Markdown
Step 3: Create Summary of OTUs
###Code
os.system(' '.join([
"qiime feature-table summarize",
"--i-table "+directory+"/output/merged.table.final.qza",
"--o-visualization "+directory+"/output/merged.table.final.qzv",
"--m-sample-metadata-file "+directory+metadata
]))
os.system(' '.join([
"qiime feature-table tabulate-seqs",
"--i-data "+directory+"/output/merged.rep.seqs.final.qza",
"--o-visualization "+directory+"/output/merged.rep.seqs.final.qzv"
]))
###Output
_____no_output_____
###Markdown
Step 4: Make Phylogenetic Tree
###Code
if domain != "fungi":
# Generate Alignment with MAFFT
os.system(' '.join([
"qiime alignment mafft",
"--i-sequences "+directory+"/output/merged.rep.seqs.final.qza",
"--o-alignment "+directory+"/output/merged.rep.seqs.aligned.qza"
]))
# Mask Hypervariable parts of Alignment
os.system(' '.join([
"qiime alignment mask",
"--i-alignment "+directory+"/output/merged.rep.seqs.aligned.qza",
"--o-masked-alignment "+directory+"/output/merged.rep.seqs.aligned.masked.qza"
]))
# Generate Tree with FastTree
os.system(' '.join([
"qiime phylogeny fasttree",
"--i-alignment "+directory+"/output/merged.rep.seqs.aligned.masked.qza",
"--o-tree "+directory+"/output/merged.rep.seqs.tree.unrooted.qza"
]))
# Root Tree
os.system(' '.join([
"qiime phylogeny midpoint-root",
"--i-tree "+directory+"/output/merged.rep.seqs.tree.unrooted.qza",
"--o-rooted-tree "+directory+"/output/merged.rep.seqs.tree.final.qza"
]))
###Output
_____no_output_____
###Markdown
Step 5: Classify Seqs
###Code
# Classify
if domain == 'bacteria':
os.system(' '.join([
"qiime feature-classifier classify-sklearn",
"--i-classifier /home/db/GreenGenes/qiime2_13.8.99_515.806_nb.classifier.qza",
"--i-reads "+directory+"/output/merged.rep.seqs.final.qza",
"--o-classification "+directory+"/output/merged.taxonomy.final.qza"
]))
if domain == 'fungi':
os.system(' '.join([
"qiime feature-classifier classify-sklearn",
"--i-classifier /home/db/UNITE/qiime2_unite_ver7.99_20.11.2016_classifier.qza",
"--i-reads "+directory+"/output/merged.rep.seqs.final.qza",
"--o-classification "+directory+"/output/merged.taxonomy.final.qza"
]))
# Output Summary
os.system(' '.join([
"qiime metadata tabulate",
"--m-input-file "+directory+"/output/merged.taxonomy.final.qza",
"--o-visualization "+directory+"/output/merged.taxonomy.final.summary.qzv"
]))
###Output
_____no_output_____
###Markdown
Step 6: Prepare Data for Import to Phyloseq
###Code
## Make Function to Re-Format Taxonomy File to Contain Full Column Information
# and factor in the certainty of the taxonomic assignment
def format_taxonomy(tax_file, min_support):
output = open(re.sub(".tsv",".fixed.tsv",tax_file), "w")
output.write("\t".join(["OTU","Domain","Phylum","Class","Order","Family","Genus","Species"])+"\n")
with open(tax_file, "r") as f:
next(f) #skip header
for line in f:
line = line.strip()
line = line.split("\t")
read_id = line[0]
tax_string = line[1]
# Annotate those strings which do not meet minimum support
if float(line[2]) < float(min_support):
tax_string = re.sub("__","__putative ",tax_string)
# Remove All Underscore Garbage (gimmie aesthetics)
tax_string = re.sub("k__|p__|c__|o__|f__|g__|s__","",tax_string)
# Add in columns containing unclassified taxonomic information
# Predicated on maximum 7 ranks (Domain -> Species)
full_rank = tax_string.split(";")
last_classified = full_rank[len(full_rank)-1]
count = 1
while last_classified == " ":
last_classified = full_rank[len(full_rank)-count]
count = count + 1
for n in range(full_rank.index(last_classified)+1, 7, 1):
try:
full_rank[n] = "unclassified "+last_classified
except:
full_rank.append("unclassified "+last_classified)
            output.write(read_id+"\t"+'\t'.join(full_rank)+"\n")
    output.close()  # flush the reformatted taxonomy file before it is read back later
    return
#####################
## Export from QIIME2
## Final Output Names
fasta_file = directory+"/output/merged.rep.seqs.final.fasta"
tree_file = directory+"/output/merged.tree.final.nwk"
tax_file = directory+"/output/merged.taxonomy.final.tsv"
count_table = directory+"/output/merged.counts.final.biom"
# Export Classifications
os.system(' '.join([
"qiime tools export",
directory+"/output/merged.taxonomy.final.qza",
"--output-dir "+directory+"/output/"
]))
# Reformat Classifications to meet phyloseq format
format_taxonomy(directory+"/output/taxonomy.tsv", min_support)
# Export SV Table
os.system(' '.join([
"qiime tools export",
directory+"/output/merged.table.final.qza",
"--output-dir "+directory+"/output/"
]))
# Export SV Sequences
os.system(' '.join([
"qiime tools export",
directory+"/output/merged.rep.seqs.final.qza",
"--output-dir "+directory+"/output/"
]))
if domain == "bacteria":
# Export Tree
os.system(' '.join([
"qiime tools export",
directory+"/output/merged.rep.seqs.tree.final.qza",
"--output-dir "+directory+"/output/"
]))
%mv $directory/output/tree.nwk $tree_file
# Rename Exported Files
%mv $directory/output/dna-sequences.fasta $fasta_file
%mv $directory/output/feature-table.biom $count_table
%mv $directory/output/taxonomy.fixed.tsv $tax_file
###Output
_____no_output_____
###Markdown
Step 7: Get 16S rRNA Gene Copy Number (rrn)
###Code
## This step is based on the database contructed for the software 'copyrighter'
## The software itself lacked information about datastructure (and, the import of a biom from QIIME2 failed, likely because there are multiple versions of the biom format)
if domain == 'bacteria':
## Download copyrighter database
!git clone https://github.com/fangly/AmpliCopyrighter $directory/temp/
## There are multiple GreenGenes ID numbers for a given taxonomic string.
## However, the copyrighter database uses the same average rrn copy number.
## We will therefore just use the taxonomic strings, since QIIME2 does not output the ID numbers
!sed -e '1,1075178d; 1078115d' $directory/temp/data/201210/ssu_img40_gg201210.txt > $directory/output/copyrighter.tax.strings.tsv
## Create Dictionary of rrnDB
rrnDB = {}
with open(directory+"/output/copyrighter.tax.strings.tsv", "r") as f:
for line in f:
line = line.strip()
line = line.split("\t")
try:
rrnDB[line[0]] = line[1]
except:
pass
## Attribute rrn to readID from taxonomy.tsv
output = open(directory+"/output/merged.seqID.to.rrn.final.tsv","w")
output.write("Feature ID\trrn\n")
with open(directory+"/output/taxonomy.tsv", "r") as f:
missing = 0
total = 0
next(f) # Skip Header
for line in f:
line = line.strip()
line = line.split("\t")
seqID = line[0]
try:
rrn = rrnDB[line[1]]
except:
rrn = "NA"
missing = missing + 1
total = total + 1
output.write(seqID+"\t"+rrn+"\n")
print("\nPercent of OTUs Missing {:.1%}".format(float(missing)/total))
print("Don't Panic! The majority of missing OTUs could be low abundance.")
###Output
_____no_output_____
###Markdown
Step 8: Import into Phyloseq
###Code
## Setup R-Magic for Jupyter Notebooks
import rpy2
%load_ext rpy2.ipython
def fix_biom_conversion(file):
with open(file, 'r') as fin:
data = fin.read().splitlines(True)
with open(file, 'w') as fout:
fout.writelines(data[1:])
import pandas as pd
%R library(phyloseq)
%R library(ape)
#### IMPORT DATA to R
## For '.tsv' files, use Pandas to create a dataframe and then pipe that to R
## For '.biom' files, first convert using 'biom convert' on the command-line
## Had problems importing the count table with pandas, opted for using read.table in R
# Import Taxonomy File
tax_file = pd.read_csv(directory+"/output/merged.taxonomy.final.tsv", sep="\t")
%R -i tax_file
%R rownames(tax_file) = tax_file$OTU
%R tax_file$OTU <- NULL
%R tax_file <- tax_file[sort(row.names(tax_file)),] #read names must match the count_table
# Import Sample Data
sample_file = pd.read_csv(directory+metadata, sep="\t", keep_default_na=False)
%R -i sample_file
%R rownames(sample_file) = sample_file$X.SampleID
%R sample_file$X.SampleID <- NULL
%R sample_file$LinkerPrimerSequence <- NULL ## Clean-up some other stuff
# Import Count Data
os.system(' '.join([
"biom convert",
"-i",
directory+"/output/merged.counts.final.biom",
"-o",
directory+"/output/merged.counts.final.tsv",
"--to-tsv"
]))
# The biom converter adds a stupid line that messes with the table formatting
fix_biom_conversion(directory+"/output/merged.counts.final.tsv")
# Finally import
count_table = pd.read_csv(directory+"/output/merged.counts.final.tsv", sep="\t")
%R -i count_table
%R rownames(count_table) = count_table$X.OTU.ID
%R count_table$X.OTU.ID <- NULL
%R count_table <- count_table[sort(row.names(count_table)),] #read names must match the tax_table
# Convert to Phyloseq Objects
%R p_counts = otu_table(count_table, taxa_are_rows = TRUE)
%R p_samples = sample_data(sample_file)
%R p_tax = tax_table(tax_file)
%R taxa_names(p_tax) <- rownames(tax_file) # phyloseq throws out rownames
%R colnames(p_tax) <- colnames(tax_file) # phyloseq throws out colnames
# Merge Phyloseq Objects
%R p = phyloseq(p_counts, p_tax)
if domain == "bacteria":
# Import Phylogenetic Tree
tree_file = directory+"/output/merged.tree.final.nwk"
%R -i tree_file
%R p_tree <- read.tree(tree_file)
# Combine All Objects into One Phyloseq
%R p_final <- merge_phyloseq(p, p_samples, p_tree)
else:
# Combine All Objects into One Phyloseq
%R p_final <- merge_phyloseq(p, p_samples)
# Save Phyloseq Object as '.rds'
output = directory+"/output/p_merged.final.rds"
%R -i output
%R saveRDS(p_final, file = output)
# Confirm Output
%R print(p_final)
###Output
_____no_output_____
###Markdown
Step 9: Clean-up Intermediate Files and Final Outputs
###Code
# You'll have to delete the 'temp' directory manually.
# Remove Files
if domain == "bacteria":
%rm $directory/output/*tree.unrooted.qza
%rm $directory/output/*aligned.masked.qza
%rm $directory/output/*.biom
%rm $directory/output/taxonomy.tsv
%rm $directory/output/copyrighter.tax.strings.tsv
# Separate Final Files
%mkdir $directory/final/
%mv $directory/output/*.final.rds $directory/final/
%mv $directory/output/*.taxonomy.final.tsv $directory/final/
%mv $directory/output/*.counts.final.tsv $directory/final/
%mv $directory/output/*.final.fasta $directory/final/
%cp $directory$metadata $directory/final/
%mv $directory/output/merged.seqID.to.rrn.final.tsv $directory/final/
# Gzip and Move Intermediate Files
!pigz -p 10 $directory/output/*.qza
!pigz -p 10 $directory/output/*.qzv
%mv $directory/output/ $directory/intermediate_files
print("Your sequences have been successfully saved to directories 'final' and 'intermediate_files'")
###Output
_____no_output_____
###Markdown
> Pipeline to Merge Sequencing Libraries from Different Sequencing Runs that have been denoised by DADA2 and Finish with the Standard Processing Pipeline* Merge multiple libraries for the *SAME* sequencing region (i.e. ITS+ITS... or SSU+SSU...)* Complete remainder of processing pipeline: prepare Rep Seqs, Classify Seqs, Make Tree, Make Phyloseq Object*Informed by the "Fecal Microbiota Transplant Tutorial" for QIIME2* Commands to Install Dependencies || QIIME2 ||* conda create -n qiime2-2017.9 --file https://data.qiime2.org/distro/core/qiime2-2017.9-conda-linux-64.txt * source activate qiime2-2017.9 || rpy2 (don't use conda version) ||* pip install rpy2 || phyloseq ||* conda install -c r-igraph * Rscript -e "source('http://bioconductor.org/biocLite.R');biocLite('phyloseq')" || R packages ||* ape (natively installed within the conda environment) Last Modified by R. Wilhelm on October 12th, 2017 Step 1: User Input
###Code
import os, re
# Provide the directory where files are located
directory = '/home/roli/FORESTs_BHAVYA/Combined_Libraries/ITS/'
#directory = '/home/roli/FORESTs_BHAVYA/Combined_Libraries/16S/'
# Provide a list of all the FeatureTables you will merge
# Produced by QIIME2 in STEP 7 (i.e. DADA2 Denoising/Merging/FeatureTable)
qva_files = ['ITS.table.Honnedaga.qza','ITS.table.Woods.qza']
#qva_files = ['SSU.table.Honnedaga.qza','SSU.table.Woods.qza']
# Provide a list of all the Representative Sequences you will merge
# Also produced from STEP 7 (i.e. DADA2 Denoising/Merging/FeatureTable)
seq_files = ['ITS.rep.seqs.Honnedaga.qza','ITS.rep.seqs.Woods.qza']
#seq_files = ['SSU.rep.seqs.Honnedaga.qza','SSU.rep.seqs.Woods.qza']
# Provide a concatenated metadatafile with matching column order
metadata = 'ITS.metadata.combined.tsv'
## Enter Minimum Support for Keeping QIIME Classification
# Note: Classifications that do not meet this criterion will still be retained, but labeled 'putative'
min_support = 0.8
domain = 'fungi' # 'bacteria' | 'fungi'
###Output
_____no_output_____
###Markdown
Step 2: Merge Feature Tables using QIIME
###Code
%mkdir $directory/output
for n in range(0, len(qva_files), 1):
if n == 0:
os.system(' '.join([
"qiime feature-table merge",
"--i-table1 "+directory+"/"+qva_files[n],
"--i-table2 "+directory+"/"+qva_files[n+1],
"--o-merged-table "+directory+"/output/merged.table.final.qza"
]))
os.system(' '.join([
"qiime feature-table merge-seq-data",
"--i-data1 "+directory+"/"+seq_files[n],
"--i-data2 "+directory+"/"+seq_files[n+1],
"--o-merged-data "+directory+"/output/merged.rep.seqs.final.qza"
]))
if n > 0 and n + 1 < len(qva_files):
os.system(' '.join([
"qiime feature-table merge",
"--i-table1 "+directory+"/output/merged.table.final.qza",
"--i-table2 "+directory+"/"+qva_files[n+1],
"--o-merged-table "+directory+"/output/merged.table.final.qza"
]))
os.system(' '.join([
"qiime feature-table merge-seq-data",
"--i-data1 "+directory+"/output/merged.rep.seqs.final.qza",
"--i-data2 "+directory+"/"+seq_files[n+1],
"--o-merged-data "+directory+"/output/merged.rep.seqs.final.qza"
]))
###Output
_____no_output_____
###Markdown
Step 3: Create Summary of OTUs
###Code
os.system(' '.join([
"qiime feature-table summarize",
"--i-table "+directory+"/output/merged.table.final.qza",
"--o-visualization "+directory+"/output/merged.table.final.qzv",
"--m-sample-metadata-file "+directory+metadata
]))
os.system(' '.join([
"qiime feature-table tabulate-seqs",
"--i-data "+directory+"/output/merged.rep.seqs.final.qza",
"--o-visualization "+directory+"/output/merged.rep.seqs.final.qzv"
]))
###Output
_____no_output_____
###Markdown
Step 4: Make Phylogenetic Tree
###Code
if domain != "fungi":
# Generate Alignment with MAFFT
os.system(' '.join([
"qiime alignment mafft",
"--i-sequences "+directory+"/output/merged.rep.seqs.final.qza",
"--o-alignment "+directory+"/output/merged.rep.seqs.aligned.qza"
]))
# Mask Hypervariable parts of Alignment
os.system(' '.join([
"qiime alignment mask",
"--i-alignment "+directory+"/output/merged.rep.seqs.aligned.qza",
"--o-masked-alignment "+directory+"/output/merged.rep.seqs.aligned.masked.qza"
]))
# Generate Tree with FastTree
os.system(' '.join([
"qiime phylogeny fasttree",
"--i-alignment "+directory+"/output/merged.rep.seqs.aligned.masked.qza",
"--o-tree "+directory+"/output/merged.rep.seqs.tree.unrooted.qza"
]))
# Root Tree
os.system(' '.join([
"qiime phylogeny midpoint-root",
"--i-tree "+directory+"/output/merged.rep.seqs.tree.unrooted.qza",
"--o-rooted-tree "+directory+"/output/merged.rep.seqs.tree.final.qza"
]))
###Output
_____no_output_____
###Markdown
Step 5: Classify Seqs
###Code
# Classify
if domain == 'bacteria':
os.system(' '.join([
"qiime feature-classifier classify-sklearn",
"--i-classifier /home/db/GreenGenes/qiime2_13.8.99_515.806_nb.classifier.qza",
"--i-reads "+directory+"/output/merged.rep.seqs.final.qza",
"--o-classification "+directory+"/output/merged.taxonomy.final.qza"
]))
if domain == 'fungi':
os.system(' '.join([
"qiime feature-classifier classify-sklearn",
"--i-classifier /home/db/UNITE/qiime2_unite_ver7.99_20.11.2016_classifier.qza",
"--i-reads "+directory+"/output/merged.rep.seqs.final.qza",
"--o-classification "+directory+"/output/merged.taxonomy.final.qza"
]))
# Output Summary
os.system(' '.join([
"qiime metadata tabulate",
"--m-input-file "+directory+"/output/merged.taxonomy.final.qza",
"--o-visualization "+directory+"/output/merged.taxonomy.final.summary.qzv"
]))
###Output
_____no_output_____
###Markdown
Step 6: Prepare Data for Import to Phyloseq
###Code
## Make Function to Re-Format Taxonomy File to Contain Full Column Information
# and factor in the certainty of the taxonomic assignment
def format_taxonomy(tax_file, min_support):
rank_dict = {'k__':"Domain"}  # note: currently unused
output = open(re.sub(".tsv",".fixed.tsv",tax_file), "w")
output.write("\t".join(["Domain","Phylum","Class","Order","Family","Genus","Species"])+"\n")
with open(tax_file, "r") as f:
next(f) #skip header
for line in f:
line = line.strip()
line = line.split("\t")
read_id = line[0]
tax_string = line[1]
# Annotate those strings which do not meet minimum support
if float(line[2]) < float(min_support):
tax_string = re.sub("__","__putative ",tax_string)
# Manage cases where the taxonomic string is empty
tax_string = re.sub("k__;|p__;|c__;|o__;|f__;|g__;","unclassified;",tax_string)
tax_string = re.sub("s__","unclassified",tax_string) #note: this makes all species unclassified.
# Remove All Underscore Garbage (gimmie aesthetics)
tax_string = re.sub("k__|p__|c__|o__|f__|g__|s__","",tax_string)
# Add in columns containing unclassified taxonomic information
# Predicated on maximum 7 ranks (Domain -> Species)
full_rank = tax_string.split(";")
last_classified = full_rank[len(full_rank)-1]
for n in range(len(full_rank), 7, 1):
full_rank.append("unclassifed "+last_classified)
output.write(read_id+"\t"+'\t'.join(full_rank)+"\n")
return()
#####################
## Export from QIIME2
## Final Output Names
fasta_file = directory+"/output/merged.rep.seqs.final.fasta"
tree_file = directory+"/output/merged.tree.final.nwk"
tax_file = directory+"/output/merged.taxonomy.final.tsv"
count_table = directory+"/output/merged.counts.final.biom"
# Export Classifications
os.system(' '.join([
"qiime tools export",
directory+"/output/merged.taxonomy.final.qza",
"--output-dir "+directory+"/output/"
]))
# Reformat Classifications to meet phyloseq format
format_taxonomy(directory+"/output/taxonomy.tsv", min_support)
# Export SV Table
os.system(' '.join([
"qiime tools export",
directory+"/output/merged.table.final.qza",
"--output-dir "+directory+"/output/"
]))
# Export SV Sequences
os.system(' '.join([
"qiime tools export",
directory+"/output/merged.rep.seqs.final.qza",
"--output-dir "+directory+"/output/"
]))
if domain == "bacteria":
# Export Tree
os.system(' '.join([
"qiime tools export",
directory+"/output/merged.rep.seqs.tree.final.qza",
"--output-dir "+directory+"/output/"
]))
%mv $directory/output/tree.nwk $tree_file
# Rename Exported Files
%mv $directory/output/dna-sequences.fasta $fasta_file
%mv $directory/output/feature-table.biom $count_table
%mv $directory/output/taxonomy.fixed.tsv $tax_file
###Output
_____no_output_____
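###Markdown
As a quick, purely illustrative check (this cell is an addition; the string below is a hypothetical GreenGenes-style label, not data from this project), here is what the clean-up regexes inside `format_taxonomy` do to a partially classified taxonomy string:
###Code
example = "k__Bacteria;p__Proteobacteria;c__;o__;f__;g__;s__"
example = re.sub("k__;|p__;|c__;|o__;|f__;|g__;","unclassified;", example) # empty ranks become 'unclassified'
example = re.sub("s__","unclassified", example) # species rank always becomes 'unclassified'
example = re.sub("k__|p__|c__|o__|f__|g__|s__","", example) # strip the remaining prefixes
print(example)
###Output
_____no_output_____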
###Markdown
Step 7: Import into Phyloseq
###Code
## Setup R-Magic for Jupyter Notebooks
import rpy2
%load_ext rpy2.ipython
def fix_biom_conversion(file):
with open(file, 'r') as fin:
data = fin.read().splitlines(True)
with open(file, 'w') as fout:
fout.writelines(data[1:])
import pandas as pd
%R library(phyloseq)
%R library(ape)
#### IMPORT DATA to R
## For '.tsv' files, use Pandas to create a dataframe and then pipe that to R
## For '.biom' files, first convert using 'biom convert' on the command-line
## Had problems importing the count table with pandas, opted for using read.table in R
# Import Taxonomy File
tax_file = pd.read_csv(directory+"/output/merged.taxonomy.final.tsv", sep="\t")
%R -i tax_file
%R tax_file <- tax_file[sort(row.names(tax_file)),] #read names must match the count_table
# Import Sample Data
sample_file = pd.read_csv(directory+metadata, sep="\t")
%R -i sample_file
%R rownames(sample_file) = sample_file$X.SampleID
%R sample_file$X.SampleID <- NULL
%R sample_file$LinkerPrimerSequence <- NULL ## Clean-up some other stuff
# Import Count Data
os.system(' '.join([
"biom convert",
"-i",
directory+"/output/merged.counts.final.biom",
"-o",
directory+"/output/merged.counts.final.tsv",
"--to-tsv"
]))
# The biom converter adds a stupid line that messes with the table formatting
fix_biom_conversion(directory+"/output/merged.counts.final.tsv")
# Finally import
count_table = pd.read_csv(directory+"/output/merged.counts.final.tsv", sep="\t")
%R -i count_table
%R rownames(count_table) = count_table$X.OTU.ID
%R count_table$X.OTU.ID <- NULL
%R count_table <- count_table[sort(row.names(count_table)),] #read names must match the tax_table
# Convert to Phyloseq Objects
%R p_counts = otu_table(count_table, taxa_are_rows = TRUE)
%R p_samples = sample_data(sample_file)
%R p_tax = tax_table(tax_file)
%R taxa_names(p_tax) <- rownames(tax_file) # phyloseq throws out rownames
%R colnames(p_tax) <- colnames(tax_file) # phyloseq throws out colnames
# Merge Phyloseq Objects
%R p = phyloseq(p_counts, p_tax)
if domain == "bacteria":
# Import Phylogenetic Tree
tree_file = directory+"/output/merged.tree.final.nwk"
%R -i tree_file
%R p_tree <- read.tree(tree_file)
# Combine All Objects into One Phyloseq
%R p_final <- merge_phyloseq(p, p_samples, p_tree)
else:
# Combine All Objects into One Phyloseq
%R p_final <- merge_phyloseq(p, p_samples)
# Save Phyloseq Object as '.rds'
output = directory+"/output/p_merged.final.rds"
%R -i output
%R saveRDS(p_final, file = output)
# Confirm Output
%R print(p_final)
###Output
_____no_output_____
###Markdown
Step 8: Clean-up Intermediate Files and Final Outputs
###Code
# Remove Files
if domain == "bacteria":
%rm $directory/output/*tree.unrooted.qza
%rm $directory/output/*aligned.masked.qza
%rm $directory/output/*.biom
%rm $directory/output/taxonomy.tsv
# Separate Final Files
%mkdir $directory/final/
%mv $directory/output/*.final.rds $directory/final/
%mv $directory/output/*.taxonomy.final.tsv $directory/final/
%mv $directory/output/*.counts.final.tsv $directory/final/
%mv $directory/output/*.final.fasta $directory/final/
%cp $directory$metadata $directory/final/
# Gzip and Move Intermediate Files
!pigz -p 10 $directory/output/*.qza
!pigz -p 10 $directory/output/*.qzv
%mv $directory/output/ $directory/intermediate_files
print("Your sequences have been successfully saved to directories 'final' and 'intermediate_files'")
###Output
Your sequences have been successfully saved to directories 'final' and 'intermediate_files'
|
_doc/notebooks/lectures/wines_multi.ipynb
|
###Markdown
Multi-class classification. We want to predict a wine's quality score with a multi-class classifier.
###Code
%matplotlib inline
from papierstat.datasets import load_wines_dataset
df = load_wines_dataset()
X = df.drop(['quality', 'color'], axis=1)
y = df['quality']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y)
from sklearn.linear_model import LogisticRegression
clr = LogisticRegression()
clr.fit(X_train, y_train)
import numpy
numpy.mean(clr.predict(X_test).ravel() == y_test.ravel()) * 100
###Output
_____no_output_____
###Markdown
Let's look at the confusion matrix.
###Code
from sklearn.metrics import confusion_matrix
import pandas
pandas.DataFrame(confusion_matrix(y_test, clr.predict(X_test)))
###Output
_____no_output_____
###Markdown
Let's display it differently, with the class names.
###Code
conf = confusion_matrix(y_test, clr.predict(X_test))
dfconf = pandas.DataFrame(conf)
labels = list(clr.classes_)
if len(labels) < dfconf.shape[1]:
labels += [9] # Class 9 is rarely represented, so it is sometimes absent from the training set.
elif len(labels) > dfconf.shape[1]:
labels = labels[:dfconf.shape[1]] # or the other way around
dfconf.columns = labels
dfconf.index = labels
dfconf
###Output
_____no_output_____
###Markdown
Not great. Let's apply the [OneVsRestClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html) strategy.
###Code
from sklearn.multiclass import OneVsRestClassifier
clr = OneVsRestClassifier(LogisticRegression())
clr.fit(X_train, y_train)
numpy.mean(clr.predict(X_test).ravel() == y_test.ravel()) * 100
###Output
_____no_output_____
###Markdown
The multi-class logistic regression model is equivalent to the *OneVsRest* strategy. Let's try the other one.
###Code
from sklearn.multiclass import OneVsOneClassifier
clr = OneVsOneClassifier(LogisticRegression())
clr.fit(X_train, y_train)
numpy.mean(clr.predict(X_test).ravel() == y_test.ravel()) * 100
conf = confusion_matrix(y_test, clr.predict(X_test))
dfconf = pandas.DataFrame(conf)
labels = list(clr.classes_)
if len(labels) < dfconf.shape[1]:
labels += [9] # Class 9 is rarely represented, so it is sometimes absent from the training set.
elif len(labels) > dfconf.shape[1]:
labels = labels[:dfconf.shape[1]] # or the other way around
dfconf.columns = labels
dfconf.index = labels
dfconf
###Output
_____no_output_____
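###Markdown
As a small added note, *OneVsOne* fits one binary classifier per pair of classes, i.e. k(k-1)/2 estimators, which we can check on the fitted model:
###Code
len(clr.estimators_), len(clr.classes_) * (len(clr.classes_) - 1) // 2
###Output
_____no_output_____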
###Markdown
Roughly the same, and probably not significantly different. Let's try a decision tree.
###Code
from sklearn.tree import DecisionTreeClassifier
clr = DecisionTreeClassifier()
clr.fit(X_train, y_train)
numpy.mean(clr.predict(X_test).ravel() == y_test.ravel()) * 100
###Output
_____no_output_____
###Markdown
And with [OneVsRestClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html):
###Code
clr = OneVsRestClassifier(DecisionTreeClassifier())
clr.fit(X_train, y_train)
numpy.mean(clr.predict(X_test).ravel() == y_test.ravel()) * 100
###Output
_____no_output_____
###Markdown
And with [OneVsOneClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsOneClassifier.html):
###Code
clr = OneVsOneClassifier(DecisionTreeClassifier())
clr.fit(X_train, y_train)
numpy.mean(clr.predict(X_test).ravel() == y_test.ravel()) * 100
###Output
_____no_output_____
###Markdown
Better.
###Code
from sklearn.ensemble import RandomForestClassifier
clr = RandomForestClassifier()
clr.fit(X_train, y_train)
numpy.mean(clr.predict(X_test).ravel() == y_test.ravel()) * 100
clr = OneVsRestClassifier(RandomForestClassifier())
clr.fit(X_train, y_train)
numpy.mean(clr.predict(X_test).ravel() == y_test.ravel()) * 100
###Output
_____no_output_____
###Markdown
Close; a cross-validation would be needed to settle it.
###Code
from sklearn.neural_network import MLPClassifier
clr = MLPClassifier(hidden_layer_sizes=30, max_iter=600)
clr.fit(X_train, y_train)
numpy.mean(clr.predict(X_test).ravel() == y_test.ravel()) * 100
clr = OneVsRestClassifier(MLPClassifier(hidden_layer_sizes=30, max_iter=600))
clr.fit(X_train, y_train)
numpy.mean(clr.predict(X_test).ravel() == y_test.ravel()) * 100
###Output
_____no_output_____
|
titanic dataset-5.ipynb
|
###Markdown
Paras Laul Project Name: Titanic Dataset The main aim of this project is to predict the survival of passengers based on various features, which we will discuss as we go ahead. The lifecycle of a Data Science project: - Data Analysis - Feature Engineering - Feature Selection - Model Building - Model Deployment Data Analysis
###Code
## Data Analysis Phase
## Main aim is to understand more about the data
#Importing libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df=pd.read_csv("titanic.csv") #Reading dataset using pandas library
## print the top 10 records
df.head(10)
## print shape of dataset with rows and columns
df.shape
print("of passengers in original data:" +str(len(df.index)))
df.columns
###Output
_____no_output_____
###Markdown
Data Analysis
###Code
df.Survived.value_counts() #unique values in target variable
sns.countplot(x='Survived',data=df)
df[df.Sex=='male'].Survived.value_counts()
df[df.Sex=='female'].Survived.value_counts()
df[['Sex','Survived']].groupby('Sex',as_index=False).mean()
sns.countplot(x='Survived',hue='Sex',data=df)
df[['SibSp','Survived']].groupby('SibSp',as_index=False).mean()
df[['Pclass','Survived']].groupby('Pclass',as_index=False).mean()
sns.countplot(x='Survived',hue='Pclass',data=df)
df[['Parch','Survived']].groupby('Parch',as_index=False).mean()
sns.countplot(x='Survived',hue='Parch',data=df)
df.Age.agg(['max','min','mean','median'])
sns.countplot(x='Survived',hue='Age',data=df)
sns.countplot(x='Survived',hue='Embarked',data=df)
plt.hist(df['Age'])
plt.show()
df.Fare.agg(['max','min','mean','median'])
df.sort_values(by='Fare', ascending=False).head() # inspect the highest fares
df[(df.Fare==0)].Survived.value_counts()
df.Pclass.value_counts()
df.Embarked.value_counts()
df[df.Embarked=='S'].Survived.value_counts()
###Output
_____no_output_____
###Markdown
Data wrangling Checking and removing the missing data.
###Code
df.isnull() #checking the null values(true->null,false->not null)
df.isnull().sum()
###Output
_____no_output_____
###Markdown
The Age, Cabin and Embarked columns have 177, 687 and 2 missing values, respectively.
###Code
sns.heatmap(df.isnull()) #heatmap for missing values
###Output
_____no_output_____
###Markdown
Feature engineering (Feature Scaling) We will be performing all the below steps in Feature Engineering: - Missing values - Temporal variables - Categorical variables: remove rare labels - Standardise the values of the variables to the same range
###Code
#removing Cabin column
df=df.drop(['Cabin'],axis=1)
df.Embarked.value_counts()
df=df.fillna({'Age':df.Age.median(),
'Embarked':'S'}) #filling missing Age values with the median and missing Embarked values with the most frequent value i.e. S
df.isnull().sum()
###Output
_____no_output_____
###Markdown
No missing values
###Code
sns.heatmap(df.isnull()) #heatmap
###Output
_____no_output_____
###Markdown
Converting string (categorical) columns to dummy/indicator variables
###Code
df1 = pd.get_dummies(df[['Sex', 'Embarked']])
df1
df = df[['Sex', 'Pclass', 'Fare','Embarked', 'Survived','Age']]
df
df2 = pd.concat((df, df1), axis='columns')
df2
df2.drop(['Sex', 'Embarked'], axis=1, inplace=True)
df2
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
df2['Pclass'] = scaler.fit_transform(df2[['Pclass']])
df2['Fare'] = scaler.fit_transform(df2[['Fare']])
df2['Age'] = scaler.fit_transform(df2[['Age']])
df2
###Output
_____no_output_____
###Markdown
Feature Selection : Select K Best Select features according to the k highest scores.
###Code
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
X = df2.drop("Survived",axis=1)
y = df2["Survived"]
mdlsel = SelectKBest(chi2, k=5)
mdlsel.fit(X,y)
ix = mdlsel.get_support()
data2 = pd.DataFrame(mdlsel.transform(X), columns = X.columns.values[ix]) # keep only the k best-scoring features
data2.head(n=5)
###Output
_____no_output_____
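###Markdown
To make the selection easier to interpret (a small addition to the original notebook), the fitted selector exposes the chi2 score of each feature, so we can see which ones ranked highest:
###Code
pd.Series(mdlsel.scores_, index=X.columns).sort_values(ascending=False)
###Output
_____no_output_____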
###Markdown
Select From Model for Logistic Regression
###Code
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LogisticRegression
X = df2.drop("Survived",axis=1)
y = df2["Survived"]
# Linear Model
linmdl = LogisticRegression()
linmdl.fit(X,y)
mdl = SelectFromModel(linmdl,prefit=True)
ix = mdl.get_support()
data3 = pd.DataFrame(mdl.transform(X), columns = X.columns.values[ix])
data3.head(n=5)
###Output
_____no_output_____
###Markdown
Recursive Feature Selection Given an external estimator that assigns weights to features (e.g., the coefficients of a linear model), the goal of recursive feature elimination (RFE) is to select features by recursively considering smaller and smaller sets of features.
###Code
#last feature selection
from sklearn.feature_selection import RFE
mdl = RFE(linmdl,n_features_to_select=5)
mdl.fit(X,y)
ix = mdl.get_support()
data4 = pd.DataFrame(mdl.transform(X), columns = X.columns.values[ix])
data4.head(n=5)
df2
###Output
_____no_output_____
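###Markdown
As an added illustration, the fitted RFE object also exposes a ranking (1 means the feature was kept), which shows the order in which features were eliminated:
###Code
pd.Series(mdl.ranking_, index=X.columns).sort_values()
###Output
_____no_output_____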
###Markdown
Splitting the data into Training and Testing
###Code
from sklearn.model_selection import train_test_split
X=df2.drop(['Survived'],axis=1)
y=df2['Survived']
X
y
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2)
###Output
_____no_output_____
###Markdown
Model Building Logistic Regression
###Code
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2)
from sklearn.linear_model import LogisticRegression
lg=LogisticRegression()
lg.fit(X_train,y_train)
y_pred=lg.predict(X_test)
from sklearn.metrics import accuracy_score
accuracy_score(y_test,y_pred) #accuracy score for Logistic Regression
###Output
_____no_output_____
###Markdown
Decision Tree
###Code
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
dc=DecisionTreeClassifier()
dc.fit(X_train,y_train)
y_p=dc.predict(X_test)
accuracy_score(y_test,y_p)
###Output
_____no_output_____
###Markdown
Random Forest
###Code
from sklearn.ensemble import RandomForestClassifier
rf=RandomForestClassifier()
rf.fit(X_train,y_train)
y_pd=rf.predict(X_test) # predict with the random forest, not the decision tree
accuracy_score(y_test,y_pd)
###Output
_____no_output_____
###Markdown
Support Vector Machine
###Code
from sklearn.svm import SVC
sv=SVC()
sv.fit(X_train,y_train)
y_pp=sv.predict(X_test)
accuracy_score(y_pp,y_test)
###Output
_____no_output_____
###Markdown
Hyperparameter Tuning
###Code
# Create first pipeline for base without reducing features.
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
pipe = Pipeline([('classifier' , RandomForestClassifier())])
# pipe = Pipeline([('classifier', RandomForestClassifier())])
# Create param grid.
param_grid = [
{'classifier' : [LogisticRegression()],
'classifier__penalty' : ['l1', 'l2'],
'classifier__C' : np.logspace(-4, 4, 20),
'classifier__solver' : ['liblinear']},
{'classifier' : [RandomForestClassifier()],
'classifier__n_estimators' : range(1, 100),
'classifier__max_depth': range(5,15),
'classifier__criterion':['entropy', 'gini']}
]
# Create grid search object
clf = GridSearchCV(pipe, param_grid = param_grid, cv = 5, verbose=True,n_jobs=-1)
# Fit on data
best_clf = clf.fit(X_train, y_train)
clf.best_params_
clf.best_score_,clf.best_estimator_
y_pred=clf.predict(X_test)
from sklearn.metrics import accuracy_score
accuracy_score(y_test,y_pred)
###Output
_____no_output_____
###Markdown
Case of overfitting as training accuracy and test accuracy differ by a large amount
###Code
from sklearn.ensemble import RandomForestClassifier
random_forest = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
max_depth=None, max_features='auto', max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=400, n_jobs=None,
oob_score=False, random_state=None, verbose=0,
warm_start=False)
random_forest.fit(X_train, y_train)
Y_pred_rf = random_forest.predict(X_test)
random_forest.score(X_train,y_train)
acc_random_forest = round(random_forest.score(X_train, y_train) * 100, 2)
acc_random_forest
accuracy_score(Y_pred_rf,y_test)
###Output
_____no_output_____
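###Markdown
One way to quantify the overfitting (this cell is an addition, not part of the original analysis) is to compare the training accuracy with a cross-validated estimate; a large gap confirms the model is memorizing the training set:
###Code
from sklearn.model_selection import cross_val_score
cv_acc = cross_val_score(random_forest, X_train, y_train, cv=5).mean()
print("train accuracy:", random_forest.score(X_train, y_train))
print("5-fold CV accuracy:", cv_acc)
###Output
_____no_output_____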
###Markdown
Hyperparameter tuning of RandomForest
###Code
from sklearn.model_selection import GridSearchCV
model = RandomForestClassifier()
n_estim=range(100,1000,100)
## Search grid for optimal parameters
param_grid = {"n_estimators" :n_estim,"max_depth":range(1,35),'criterion':['entropy', 'gini']}
model_rf = GridSearchCV(model,param_grid = param_grid, cv=5, scoring="accuracy", n_jobs= None ,verbose = 1)
model_rf.fit(X_train,y_train)
# Best score
print(model_rf.best_score_)
#best estimator
model_rf.best_estimator_
###Output
_____no_output_____
###Markdown
Hyperparameter tuning of Decision Tree
###Code
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
params = {'max_leaf_nodes': list(range(2, 100)), 'min_samples_split': [2,3,4,5],'criterion':['gini','entropy'],'max_depth':[4,5,6,7,8,9,10,11,12,15,20,30,40,50,70,90,120,150]}
grid_search_cv = GridSearchCV(DecisionTreeClassifier(random_state=42), params, verbose=1, cv=3)
grid_search_cv.fit(X_train, y_train)
grid_search_cv.best_score_
###Output
_____no_output_____
|
paradojaInspeccion.ipynb
|
###Markdown
The Inspection Paradox. Based on the presentation by Allen Downey https://www.youtube.com/watch?v=cXWTHfvycyM
###Code
import sys
IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
!pip install empiricaldist
# Set the random seed so we get the same results every time
import numpy as np
np.random.seed(17)
import matplotlib.pyplot as plt
# Helper function to tidy up the plot.
def decorate(**options):
"""Decorate the current axes."""
ax = plt.gca()
ax.set(**options)
handles, labels = ax.get_legend_handles_labels()
if handles:
plt.legend()
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Number of children. Importing data from the form.
###Code
import pandas as pd
sheet_id = '1LE-PiXfSyzeMejdT4dwY7daE-RWMJZEmwEoG6HcA2qo'
url = f'https://docs.google.com/spreadsheets/d/{sheet_id}/export?format=csv'
df = pd.read_csv(url)
df.columns = ['Horario', 'Numero de hijos']
df.head()
###Output
_____no_output_____
###Markdown
Get the column
###Code
cantidadHijos = df['Numero de hijos'].copy()
###Output
_____no_output_____
###Markdown
Plot the distribution
###Code
from empiricaldist import Pmf
pmf = Pmf.from_seq(cantidadHijos, normalize=False)
pmf.bar(label='Curso SC')
decorate(xlabel='Number of children',
ylabel='Count',
title='Distribution of number of children')
###Output
_____no_output_____
###Markdown
In case someone is joking (drop absurd values)
###Code
cantidadHijos[cantidadHijos > 15] = np.nan
###Output
_____no_output_____
###Markdown
The mean of the data is meant to estimate the average number of children per family in Paraguay. But does it?
###Code
cantidadHijos.mean()
###Output
_____no_output_____
###Markdown
Class size. Here is the data summarizing the distribution of undergraduate class sizes at Purdue University in 2013-14.
###Code
cantidadAlumnos = [(1, 1),
(2, 9),
(10, 19),
(20, 29),
(30, 39),
(40, 49),
(50, 99),
(100, 300)]
frecuencia = [138, 635, 1788, 1979, 796, 354, 487, 333]
###Output
_____no_output_____
###Markdown
I generate a sample from this distribution, assuming a uniform distribution within each range and an upper bound of 300.
###Code
def generate_sample(sizes, counts):
"""Generate a sample from a distribution.
sizes: sequence of (low, high) pairs
counts: sequence of integers
returns: NumPy array
"""
t = []
for (low, high), count in zip(sizes, counts):
print(low, high, count)
sample = np.random.randint(low, high+1, count)
t.extend(sample)
return np.array(t)
###Output
_____no_output_____
###Markdown
The "unbiased" sample is the one as seen by the university: every class has the same probability of being in the sample.
###Code
sinSesgo = generate_sample(cantidadAlumnos, frecuencia)
###Output
1 1 138
2 9 635
10 19 1788
20 29 1979
30 39 796
40 49 354
50 99 487
100 300 333
###Markdown
To generate a biased sample, we use the values themselves as weights and resample with replacement.
###Code
def resample_weighted(sample, weights):
"""Generate a biased sample.
sample: NumPy array
weights: NumPy array
returns: NumPy array
"""
n = len(sample)
p = weights / np.sum(weights)
return np.random.choice(sample, n, p=p)
sesgada = resample_weighted(sinSesgo, sinSesgo)
###Output
_____no_output_____
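###Markdown
As a quick sanity check (added here, not in the original notebook), the size-biased mean should be close to E[X^2]/E[X] computed from the unbiased sample:
###Code
print(np.mean(sesgada)) # size-biased mean
print(np.mean(sinSesgo**2) / np.mean(sinSesgo)) # E[X^2]/E[X] from the unbiased sample
###Output
_____no_output_____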
###Markdown
To plot the distribution, I use KDE to estimate the density function and then evaluate it over the given sequence "xs".
###Code
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
def kdeplot(sample, xs, label=None, **options):
"""Use KDE to plot the density function.
sample: NumPy array
xs: NumPy array
label: string
"""
density = gaussian_kde(sample, **options).evaluate(xs)
plt.plot(xs, density, label=label)
decorate(ylabel='Pdf')
###Output
_____no_output_____
###Markdown
The following plot shows the class-size distribution as seen by the Dean and as seen by a sample of students.
###Code
xs = np.arange(1, 300)
kdeplot(sinSesgo, xs, 'Registrado en la universidad')
decorate(xlabel='Cantidad de Alumnos',
title='Distribucion')
plt.savefig('class_size0.png', dpi=150)
xs = np.arange(1, 300)
kdeplot(sinSesgo, xs, 'Registrado en la universidad')
kdeplot(sesgada, xs, 'Reportado en una encuesta')
decorate(xlabel='Cantidad de Alumnos',
title='Distribution')
plt.savefig('class_size1.png', dpi=150)
###Output
_____no_output_____
###Markdown
Here are the means of the unbiased and biased distributions.
###Code
np.mean(sinSesgo)
np.mean(sesgada)
muestra= np.random.choice(sesgada, 500)
rePonderada= resample_weighted(muestra, 1/muestra)
xs = np.arange(1, 300)
kdeplot(sinSesgo, xs, 'Registrado en la universidad')
kdeplot(muestra, xs, 'Reportado en una encuesta')
kdeplot(rePonderada, xs, 'Reponderado de la encuesta')
decorate(xlabel='Cantidad de Alumnos',
title='Distribucion')
###Output
_____no_output_____
|
nb/data_preprocessing.ipynb
|
###Markdown
Data preprocessing. In this notebook the data stored in `coinmarketcap.csv` is preprocessed.
###Code
# import modules
import os
import pandas as pd
###Output
_____no_output_____
###Markdown
Constants
###Code
# directory of this projects root, jupyter must be started accordingly
ROOT_DIR = os.path.abspath(os.path.join(os.getcwd(), ".."))
# directory for the cache
CACHE_DIR = os.path.join(ROOT_DIR, "cache")
# resulting csv file holding **all** data
DATA_CSV = os.path.join(ROOT_DIR, "coinmarketcap.csv")
###Output
_____no_output_____
###Markdown
Load and preprocess data Load `coinmarketcap.csv`
###Code
def loadCsv(path):
"""
Load CSV specified by `path` as pandas dataframe.
"""
return pd.read_csv(path)
###Output
_____no_output_____
###Markdown
Require a currency to have at least `minSamples`
###Code
def filterMinSamples(df, minSamples):
"""
Filter dataframe, remove currencies not having
at least `minSamples`
"""
grouped = df.groupby(["slug"]).size()
sampleFilter = grouped[grouped >= minSamples]
return df[df.slug.isin(sampleFilter.index)]
###Output
_____no_output_____
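###Markdown
A tiny, hypothetical example (toy data, not the project dataset) of what the filter keeps: currency 'a' has two samples and survives a `minSamples=2` threshold, while 'b' is dropped.
###Code
toy = pd.DataFrame({"slug": ["a", "a", "b"], "volume": [1.0, 2.0, 3.0]})
filterMinSamples(toy, minSamples=2) # keeps only the rows of currency 'a'
###Output
_____no_output_____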
###Markdown
Require a currency to have at least a volume of `minVolume` and a market capitalisation of `minMarketCap`
###Code
def filterMinVolumeAndMinMarketCap(df, minVolume, minMarketCap):
"""
Filter dataframe, remove currencies not having
`minVolume` and `minMarketCap`
"""
names = df[(df.volume >= minVolume) &
(df["marketcap"] >= minMarketCap)].slug.unique()
return df[df.slug.isin(names)]
###Output
_____no_output_____
###Markdown
Fill missing samples. Here we look for missing dates/samples for each currency and interpolate.
###Code
def fillMissingSamples(df):
"""
Fill missing samples in dataframe.
Each currency is checked to see whether its time series
is complete (i.e. whether any dates are missing).
If the series is not complete, the missing values are
interpolated.
"""
# count filled samples
cnt = 0
grouped = df.groupby(["slug"], sort=False)
groups = []
# for each currency, check its time series
for slug, group in grouped:
# assure we have no duplicates
assert(len(group.index) == len(group.index.unique()))
name = group.name.unique()[0]
# convert dates to datetime, may have missing dates
index = pd.to_datetime(group["date"], format="%Y%m%d")
# set index to datetime time serie
group.set_index(index, inplace=True)
# drop the date row
group = group.drop("date", axis=1)
# get the first and last date
head = group.iloc[0].head(1).name
tail = group.iloc[-1].head(1).name
# create a datetime index holding continous dates
# there are no missing dates in this index
newIndex = pd.date_range(head, tail)
# apply index to currency
group = group.reindex(newIndex)
# convert index of datetime to string representation
date = group.index.strftime("%Y%m%d")
# insert continous 'date' column
group.insert(0, "date", date)
# check if values are missing
if group.isnull().values.any():
group.slug = slug
group.name = name
# update counter
cnt += len(group[group.isnull().any(axis=1)])
# fill missing values
group.interpolate(inplace=True)
# here there should not be any missing values
assert(not group.isnull().values.any())
# drop the index, so we have the same format
# as the original dataframe
group = group.reset_index(drop=True)
groups.append(group)
print("Samples filled: {}".format(cnt))
# concatenate all groups together to a new data frame
return pd.concat(groups)
###Output
_____no_output_____
###Markdown
Put all together into a nice function
###Code
def loadCoinMarketCap(
# require at least a year
minSamples=365,
# require a volume of at least 1 million
minVolume=1000*1000,
# require a market capitalisation of at least 1 million
minMarketCap=1000*1000,
# by default, fill missing sample
fillMissingDates=True,
):
df = pd.read_csv(DATA_CSV)
df = filterMinSamples(df, minSamples)
df = filterMinVolumeAndMinMarketCap(df, minVolume, minMarketCap)
# fill missing values
if fillMissingDates:
df = fillMissingSamples(df)
# use date as index
index = pd.to_datetime(df["date"], format="%Y%m%d")
df.set_index(index, inplace=True)
df = df.drop("date", axis=1)
print("Loaded {} currencies, {} samples.".format(
len(df.slug.unique()), len(df)))
return df
###Output
_____no_output_____
###Markdown
Run the code
###Code
df = loadCoinMarketCap()
df.head()
###Output
Samples filled: 3116
Loaded 239 currencies, 259774 samples.
|
0.16/_downloads/plot_rap_music.ipynb
|
###Markdown
Compute Rap-Music on evoked data. Compute a Recursively Applied and Projected MUltiple Signal Classification (RAP-MUSIC) [1]_ on evoked data.References----------.. [1] J.C. Mosher and R.M. Leahy. 1999. Source localization using recursively applied and projected (RAP) MUSIC. Trans. Sig. Proc. 47, 2 (February 1999), 332-340. DOI=10.1109/78.740118 https://doi.org/10.1109/78.740118
###Code
# Author: Yousra Bekhti <[email protected]>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
from mne.beamformer import rap_music
from mne.viz import plot_dipole_locations, plot_dipole_amplitudes
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
# Read the evoked response and crop it
condition = 'Right Auditory'
evoked = mne.read_evokeds(evoked_fname, condition=condition,
baseline=(None, 0))
evoked.crop(tmin=0.05, tmax=0.15) # select N100
evoked.pick_types(meg=True, eeg=False)
# Read the forward solution
forward = mne.read_forward_solution(fwd_fname)
# Read noise covariance matrix
noise_cov = mne.read_cov(cov_fname)
dipoles, residual = rap_music(evoked, forward, noise_cov, n_dipoles=2,
return_residual=True, verbose=True)
trans = forward['mri_head_t']
plot_dipole_locations(dipoles, trans, 'sample', subjects_dir=subjects_dir)
plot_dipole_amplitudes(dipoles)
# Plot the evoked data and the residual.
evoked.plot(ylim=dict(grad=[-300, 300], mag=[-800, 800], eeg=[-6, 8]),
time_unit='s')
residual.plot(ylim=dict(grad=[-300, 300], mag=[-800, 800], eeg=[-6, 8]),
time_unit='s')
###Output
_____no_output_____
|
.ipynb_checkpoints/axionDM3_notebook_512_egy-checkpoint.ipynb
|
###Markdown
Set Axion Mass
###Code
axion_mass = 1e-22 *1.783e-36 #kg
# Set units for soliton parameters
s_mass_unit = '' #Accepted units: 'kg', 'solar_masses', 'M_solar_masses', and '' for dimensionless units as used in [1]
s_position_unit = '' #Accepted units: 'm', 'km', 'pc', 'kpc', 'Mpc', 'ly', and '' for dimensionless units as used in [1]
s_velocity_unit = '' #Accepted units: 'm/s', 'km/s', 'km/h', and '' for dimensionless units as used in [1]
###Output
_____no_output_____
###Markdown
Set Simulation Parameters
###Code
# Set number of threads to target
num_threads = multiprocessing.cpu_count()
print(num_threads)
# Set box size and resolution
length = 6 #1 code unit is ~38 kpc x (1e-22/m_a)^0.5
length_units = '' #Accepted units: 'm', 'km', 'pc', 'kpc', 'Mpc', 'ly', and '' for dimensionless units as used in [1]
resol=64 #Note that for resol 128, the largest stable soliton mass is ~ 50 in code units
# Set duration of simulation in given units
duration = 6.0 #1 code unit is ~70 Gyr (no rescaling with m_a)
duration_units = '' #Accepted units: 's', 'yr', 'kyr', 'Myr', and '' for dimensionless units as used in [1]
start_time = 0.0 #Should be given in the same units as duration.
#central_mass = 2000.0 #1 code unit is ~2.3e6 M_sol (1e-22/m_a)^1.5
central_mass = 0. #Give this parameter in the SAME units as the soliton mass unit. i.e. units must match with s_mass_unit
# Set options for what to save, where to save, and in what format to save it
#Data to save
save_rho = False
save_psi = False
save_plane = True
save_energies = True
save_stability = False
save_line = True
#Formats to save to
hdf5 = False
npz = False
npy = True
save_number = 10 # Choose number of 'frames' to save. Note that, depending on resolution, this could require significant disk space.
save_path = 'TestOutput' # Set output directory
save_options = [save_rho,save_psi,save_plane,save_energies,save_stability,save_line]
###Output
4
###Markdown
Set Initial Conditions:
###Code
m = 22 #1 code unit is ~2.3e6 M_sol (1e-22/m_a)^1.5
r = 1 #1 code unit is ~38 kpc x (1e-22/m_a)^0.5
#Soliton parameters are mass, position, velocity and phase (radians)
soliton4 = [m, [r,0,0], [0,1.8,0], 0]
soliton5 = [m, [-r,0,0], [0,-1.8,0], 0]
solitons = [soliton4, soliton5]
#Note that the output files are always named according to the mass and radius of the first soliton in this list
###Output
_____no_output_____
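###Markdown
For reference (an added note, using the conversion factor quoted in the comments above, roughly 2.3e6 solar masses per code mass unit for m_a = 1e-22 eV), the solitons defined here correspond approximately to:
###Code
m_sol_per_code_unit = 2.3e6 # assumed from the comment above, for m_a = 1e-22 eV
print("each soliton ~ {:.1e} solar masses".format(m * m_sol_per_code_unit))
###Output
_____no_output_____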
###Markdown
Run:
###Code
#evolve_jit=numba.jit(axionDM3.evolve)
#evolve_jit(central_mass, num_threads, length, length_units, resol, duration, duration_units, save_number, save_options, save_path, npz, npy, txt, hdf5, s_mass_unit, s_position_unit, s_velocity_unit, solitons)
axionDM3.evolve(central_mass, num_threads, length, length_units, resol, duration, duration_units, save_number, save_options, save_path, npz, npy, hdf5, s_mass_unit, s_position_unit, s_velocity_unit, solitons, start_time)
###Output
Complete.
###Markdown
Visualisations:
###Code
output_animated = 3
# 0 for all contours plotted on a single graph, 1 for an animation in time, 2 for energy lists
#over time, 3 for line along axis of symmetry, 4 for Egp plane over time
save_plots = 1
# 0 to display contours without saving, 1 to save as well.
with open('{}{}'.format(save_path, '/timestamp.txt'), 'r') as timestamp:
ts = timestamp.read()
loc = save_path + '/' + ts
if output_animated == 2:
egylist = np.load('{}{}'.format(loc, '/egylist.npy')).tolist()
egpcmlist = np.load('{}{}'.format(loc, '/egpcmlist.npy')).tolist()
egpsilist = np.load('{}{}'.format(loc, '/egpsilist.npy')).tolist()
ekandqlist = np.load('{}{}'.format(loc, '/ekandqlist.npy')).tolist()
masslist = np.load('{}{}'.format(loc, '/masslist.npy')).tolist()
plt.plot(egylist,label='Total')
plt.plot(egpcmlist,label='$E_{GP}$ (central potential)')
plt.plot(egpsilist,label='$E_{GP}$ (self-interaction)')
plt.plot(ekandqlist,label='$E_{K}+E_{Q}$')
plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.3), frameon=False, ncol=2)
if save_plots == 1:
plt.savefig('./Visualisations/energy_diagram.eps', format='eps', dpi=1000)
plt.show()
if output_animated == 0:
for x in np.arange(0,save_number+1,1):
if x == 0:
plt.contour(np.load('{}{}{}{}'.format(loc, '/plane_#',x,'.npy')),colors='k')
if x in np.arange(1,save_number+1,1):
plt.contour(np.load('{}{}{}{}'.format(loc, '/plane_#',x,'.npy')))
if save_plots == 1:
plt.savefig('{}'.format('./Visualisations/Static_contours.eps'), format='eps', dpi=1000)
if output_animated in (1,3,4):
import warnings
warnings.filterwarnings("ignore")
plt.ioff()
fig,ax = plt.subplots(figsize=(20, 10))
plt.axes().set_aspect('equal')
data = []
for x in np.arange(0,save_number+1,1):
if output_animated == 1:
data.append(np.load('{}{}{}{}'.format(loc, '/plane_#', x, '.npy')))
if output_animated == 3:
data.append(np.load('{}{}{}{}'.format(loc, '/line_#', x, '.npy')))
if output_animated == 4:
data.append(np.load('{}{}{}{}'.format(loc, '/egp_plane_#', x ,'.npy')))
def animate(i):
plt.clf()
if output_animated in (1, 4):
plot = plt.contour(data[i])
plt.axes().set_aspect('equal')
plt.axes().get_xaxis().set_ticks([])
plt.axes().get_yaxis().set_ticks([])
if output_animated == 1:
plt.title('Mass Density - Plane')
if output_animated == 4:
plt.title('Gravitational Potential Energy Density')
if output_animated == 3:
plt.clf()
plot = plt.plot(data[i])
plt.title('Mass Density - Line')
plt.axes().set_ylim(0,max(data[0]))
interval = 0.15 #in seconds
ani = matplotlib.animation.FuncAnimation(fig,animate,save_number+1,interval=interval*1e+3,blit=False)
from IPython.display import HTML
animated_plot = HTML(ani.to_jshtml())
if save_plots == 1:
save_html = animated_plot.data
if output_animated == 1:
with open('./Visualisations/plane_animation.html', 'w') as f:
f.write(save_html)
if output_animated == 3:
with open('./Visualisations/line_animation.html', 'w') as f:
f.write(save_html)
if output_animated == 4:
with open('./Visualisations/Egp_animation.html', 'w') as f:
f.write(save_html)
display(animated_plot)
plt.close()
###Output
_____no_output_____
|
05-HSV Color Space, Balloons.ipynb
|
###Markdown
HSV Color Space, Balloons Import resources and display image
###Code
# import numpy as np
# import matplotlib.pyplot as plt
# import cv2
####################################
import numpy as np
import matplotlib.pyplot as plt
import cv2
# %matplotlib inline
# # Read in the image
# image = cv2.imread('images/water_balloons.jpg')
# # Change color to RGB (from BGR)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# plt.imshow(image)
####################################
%matplotlib inline
image = cv2.imread('images/water_balloons.jpg')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
###Output
_____no_output_____
###Markdown
Plot color channels
###Code
# # RGB channels
# r = image[:,:,0]
# g = image[:,:,1]
# b = image[:,:,2]
# f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20,10))
# ax1.set_title('Red')
# ax1.imshow(r, cmap='gray')
# ax2.set_title('Green')
# ax2.imshow(g, cmap='gray')
# ax3.set_title('Blue')
# ax3.imshow(b, cmap='gray')
################################################################
# RGB Channels
r = image[:,:,0]
g = image[:,:,1]
b = image[:,:,2]
f, (ax1,ax2,ax3) = plt.subplots(1,3, figsize=(20,10))
ax1.set_title('Red')
ax1.imshow(r, cmap = 'gray')
ax2.set_title('Green')
ax2.imshow(g, cmap = 'gray')
ax3.set_title('Blue')
ax3.imshow(b, cmap = 'gray')
# regions with a lot of red have higher pixel values (appear brighter) in the red channel
# # Convert from RGB to HSV
# hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
# # HSV channels
# h = hsv[:,:,0]
# s = hsv[:,:,1]
# v = hsv[:,:,2]
# f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20,10))
# ax1.set_title('Hue')
# ax1.imshow(h, cmap='gray')
# ax2.set_title('Saturation')
# ax2.imshow(s, cmap='gray')
# ax3.set_title('Value')
# ax3.imshow(v, cmap='gray')
##############################################################
hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
h = hsv[:,:,0]
s = hsv[:,:,1]
v = hsv[:,:,2]
f, (ax1,ax2,ax3) = plt.subplots(1,3, figsize = (20,10))
ax1.set_title('Hue')
ax1.imshow(h, cmap = 'gray')
ax2.set_title('Saturation')
ax2.imshow(s, cmap = 'gray')
ax3.set_title('Value')
ax3.imshow(v, cmap = 'gray')
###Output
_____no_output_____
###Markdown
Define pink and hue selection thresholds
###Code
# # Define our color selection criteria in HSV values
# lower_hue = np.array([160,0,0])
# upper_hue = np.array([180,255,255])
####################################################
# range of Hue is only between 0-180
lower_hue = np.array([160,0,0])
upper_hue = np.array([180,255,255])
# # Define our color selection criteria in RGB values
# lower_pink = np.array([180,0,100])
# upper_pink = np.array([255,255,230])
###################################################
lower_pink = np.array([180,0,100])
upper_pink = np.array([255,255,230])
###Output
_____no_output_____
###Markdown
Mask the image
###Code
# # Define the masked area in RGB space
# mask_rgb = cv2.inRange(image, lower_pink, upper_pink)
# # mask the image
# masked_image = np.copy(image)
# masked_image[mask_rgb==0] = [0,0,0]
# # Vizualize the mask
# plt.imshow(masked_image)
#########################################################
mask_rgb = cv2.inRange(image, lower_pink, upper_pink)
masked_image_rgb = np.copy(image)
masked_image_rgb[mask_rgb == 0] = [0,0,0]
plt.imshow(masked_image_rgb)
# # Now try HSV!
# # Define the masked area in HSV space
# mask_hsv = cv2.inRange(hsv, lower_hue, upper_hue)
# # mask the image
# masked_image = np.copy(image)
# masked_image[mask_hsv==0] = [0,0,0]
# # Vizualize the mask
# plt.imshow(masked_image)
#####################################################
mask_hsv = cv2.inRange(hsv, lower_hue, upper_hue)
masked_image_hsv = np.copy(image)
masked_image_hsv[mask_hsv == 0] = [0,0,0]
plt.imshow(masked_image_hsv)
f, (ax1,ax2,ax3) = plt.subplots(1,3,figsize = (20,10))
ax1.set_title('pink_RGB')
ax1.imshow(masked_image_rgb)
ax2.set_title('The Original')
ax2.imshow(image)
ax3.set_title('pink_HUE')
ax3.imshow(masked_image_hsv)
###Output
_____no_output_____
|
tutorials/module4-model/m4a_approximate_continuous_normalizing_flows.ipynb
|
###Markdown
Density Estimation with FFJORDs Free-form Jacobian of Reversible Dynamics (FFJORD) is a continuous normalizing flow (CNF) variants proposed in [Grathwohl et al., 2018](https://arxiv.org/pdf/1810.01367.pdf). The core novelty is proposing utilization of stochastic trace estimators to improve scalability of the Jacobian trace computation ($O(1)$ calls to autograd instead of $O(D)$).
###Code
from torchdyn.core import NeuralODE
from torchdyn.models import CNF
from torchdyn.nn import DataControl, DepthCat, Augmenter
from torchdyn.datasets import *
from torchdyn.utils import *
# quick run for automated notebook validation
dry_run = False
###Output
_____no_output_____
###Markdown
This time, we use a fun dataset: the `DiffEqML` logo.
###Code
data = ToyDataset()
n_samples = 1 << 14
n_gaussians = 7
X, yn = data.generate(n_samples, 'diffeqml', noise=5e-2)
X = (X - X.mean())/X.std()
import matplotlib.pyplot as plt
plt.figure(figsize=(3, 3))
plt.scatter(X[:,0], X[:,1], c='olive', alpha=0.3, s=1)
import torch
import torch.utils.data as data
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
X_train = torch.Tensor(X).to(device)
train = data.TensorDataset(X_train)
trainloader = data.DataLoader(train, batch_size=1024, shuffle=True)
###Output
_____no_output_____
###Markdown
The FFJORD model In `torchdyn`, we decouple CNFs from the Jacobian trace estimators they use. This allows us to easily implement variants without alternations to the original module. Indeed, we can simply define the Hutchinson stochastic estimator separately as follows
###Code
def hutch_trace(x_out, x_in, noise=None, **kwargs):
"""Hutchinson's trace Jacobian estimator, O(1) call to autograd"""
jvp = torch.autograd.grad(x_out, x_in, noise, create_graph=True)[0]
trJ = torch.einsum('bi,bi->b', jvp, noise)
return trJ
###Output
_____no_output_____
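###Markdown
For comparison (an added sketch, not part of the original notebook), an exact Jacobian trace needs one autograd call per input dimension, i.e. O(D) calls, which is precisely the cost the Hutchinson estimator above avoids:
###Code
def autograd_trace(x_out, x_in, **kwargs):
    """Exact Jacobian trace, O(D) calls to autograd (illustration only)"""
    trJ = 0.
    for i in range(x_in.shape[1]):
        # gradient of x_out[:, i] w.r.t. x_in, restricted to column i, is the i-th diagonal entry per batch element
        trJ = trJ + torch.autograd.grad(x_out[:, i].sum(), x_in, create_graph=True)[0][:, i]
    return trJ
###Output
_____no_output_____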
###Markdown
And then instantiate a CNF as before.
###Code
import torch.nn as nn

f = nn.Sequential(
nn.Linear(2, 64),
nn.Softplus(),
nn.Linear(64, 64),
nn.Softplus(),
nn.Linear(64, 64),
nn.Softplus(),
nn.Linear(64, 2),
)
from torch.distributions import MultivariateNormal, Uniform, TransformedDistribution, SigmoidTransform, Categorical
prior = MultivariateNormal(torch.zeros(2).to(device), torch.eye(2).to(device))
# stochastic estimators require a definition of a distribution where "noise" vectors are sampled from
noise_dist = MultivariateNormal(torch.zeros(2).to(device), torch.eye(2).to(device))
# cnf wraps the net as with other energy models
cnf = CNF(f, trace_estimator=hutch_trace, noise_dist=noise_dist)
nde = NeuralODE(cnf, solver='dopri5', s_span=torch.linspace(0, 1, 2), sensitivity='adjoint', atol=1e-4, rtol=1e-4)
###Output
_____no_output_____
###Markdown
Augmenter takes care of setting up the additional scalar dimension for the divergence dynamics.The `DEFunc` wrapper (implicitly defined when passing `f` to the NeuralDE) will ensure compatibility of depth-concatenation and data-control with the divergence dimension.Utilizing additional augmented dimensions is also compatible, as only the first will be used for the jacobian trace.
###Code
model = nn.Sequential(Augmenter(augment_idx=1, augment_dims=1),
nde).to(device)
###Output
_____no_output_____
###Markdown
Standard Learner. It is often useful to visualize samples during normalizing flow training, in order to identify issues quickly and stop runs that are not promising. For an example of how to log images using `PyTorch Lightning` and `Wandb`, refer to torchdyn's `benchmark` notebooks.
###Code
import pytorch_lightning as pl
class Learner(pl.LightningModule):
def __init__(self, model:nn.Module):
super().__init__()
self.model = model
self.iters = 0
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
self.iters += 1
x = batch[0]
xtrJ = self.model(x)
logprob = prior.log_prob(xtrJ[:,1:]).to(x) - xtrJ[:,0] # logp(z_S) = logp(z_0) - \int_0^S trJ
loss = -torch.mean(logprob)
nde.nfe = 0
return {'loss': loss}
def configure_optimizers(self):
return torch.optim.AdamW(self.model.parameters(), lr=2e-3, weight_decay=1e-5)
def train_dataloader(self):
return trainloader
learn = Learner(model)
trainer = pl.Trainer(max_epochs=600)
trainer.fit(learn);
###Output
GPU available: True, used: False
TPU available: False, using: 0 TPU cores
| Name | Type | Params
-------------------------------------
0 | model | Sequential | 8 K
###Markdown
Visualizing the Samples Sampling from CNFs is easy: we query the prior latent normal and then pass the samples through the `z -> x` CNF flow. To reverse the flow, we flip `s_span`:
###Code
sample = prior.sample(torch.Size([1 << 14]))
# integrating from 1 to 0
model[1].s_span = torch.linspace(1, 0, 2)
new_x = model(sample).cpu().detach()
###Output
_____no_output_____
###Markdown
We then plot, samples, flows and density like so:
###Code
plt.figure(figsize=(12, 4))
plt.subplot(121)
plt.scatter(new_x[:,1], new_x[:,2], s=2.3, alpha=0.2, linewidths=0.1, c='blue', edgecolors='black')
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.subplot(122)
plt.scatter(X[:,0], X[:,1], s=3.3, alpha=0.2, c='red', linewidths=0.1, edgecolors='black')
plt.xlim(-2, 2)
plt.ylim(-2, 2)
###Output
_____no_output_____
###Markdown
We plot the flows from prior to data distribution:
###Code
traj = model[1].trajectory(Augmenter(1, 1)(sample.to(device)), s_span=torch.linspace(1,0,100)).detach().cpu() ; sample = sample.cpu()
traj = traj[:, :, 1:] # scrapping first dimension := jacobian trace
n = 2000
plt.figure(figsize=(6,6))
plt.scatter(sample[:n,0], sample[:n,1], s=10, alpha=0.8, c='black')
plt.scatter(traj[:,:n,0], traj[:,:n,1], s=0.2, alpha=0.2, c='olive')
plt.scatter(traj[-1,:n,0], traj[-1,:n,1], s=4, alpha=1, c='blue')
plt.legend(['Prior sample z(S)', 'Flow', 'z(0)'])
###Output
_____no_output_____
|
docs/source/metadata_tutorial.ipynb
|
###Markdown
=====================Working with Metadata=====================LibCST handles node metadata in a somewhat unusual manner in order to maintain the immutability of the tree. See :doc:`Metadata ` for the complete documentation. Providing Metadata==================While it's possible to write visitors that gather metadata from a tree ad hoc, using the provider interface gives you the advantage of being able to use dependency declaration to automatically run your providers in other visitors and type safety. For most cases, you'll want to extend :class:`~libcst.BatchableMetadataProvider` as providers that extend from that class can be resolved more efficiently in batches.Here's an example of a simple metadata provider that marks :class:`~libcst.Name` nodes that are function parameters:
###Code
import sys
sys.path.append("../../")
import libcst as cst
class IsParamProvider(cst.BatchableMetadataProvider[bool]):
"""
Marks Name nodes found as a parameter to a function.
"""
def __init__(self) -> None:
super().__init__()
self.is_param = False
def visit_Param(self, node: cst.Param) -> None:
# Mark the child Name node as a parameter
self.set_metadata(node.name, True)
def visit_Name(self, node: cst.Name) -> None:
# Mark all other Name nodes as not parameters
if not self.get_metadata(type(self), node, False):
self.set_metadata(node, False)
###Output
_____no_output_____
###Markdown
Line and Column Metadata------------------------LibCST ships with two built-in providers for line and column metadata. See :ref:`Position Metadata` for more information.Accessing Metadata==================Once you have a provider, the metadata interface gives you two primary ways of working with your providers. The first is using the resolve methods provided by :class:`~libcst.MetadataWrapper` and the second is through declaring metadata dependencies on a :class:`~libcst.CSTTransformer` or :class:`~libcst.CSTVisitor`.Using the :class:`~libcst.MetadataWrapper`------------------------------------------The metadata wrapper class provides a way to associate metadata with a module as well as a simple interface to run providers. Here's an example of using a wrapper with the provider we just wrote:
###Code
module = cst.parse_module("x")
wrapper = cst.MetadataWrapper(module)
isparam = wrapper.resolve(IsParamProvider)
x_name_node = wrapper.module.body[0].body[0].value
print(isparam[x_name_node]) # should print False
###Output
_____no_output_____
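###Markdown
The same resolve interface works for the built-in providers as well; for example (a small illustrative addition):
###Code
from libcst.metadata import PositionProvider
positions = wrapper.resolve(PositionProvider)
print(positions[x_name_node].start.line, positions[x_name_node].start.column)
###Output
_____no_output_____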
###Markdown
Using Dependency Declaration----------------------------The visitors that ship with LibCST can declare metadata providers as dependencies that will be run automatically when visited by a wrapper. Here is a visitor that prints all names that are function parameters.
###Code
from libcst.metadata import PositionProvider
class ParamPrinter(cst.CSTVisitor):
METADATA_DEPENDENCIES = (IsParamProvider, PositionProvider,)
def visit_Name(self, node: cst.Name) -> None:
# Only print out names that are parameters
if self.get_metadata(IsParamProvider, node):
pos = self.get_metadata(PositionProvider, node).start
print(f"{node.value} found at line {pos.line}, column {pos.column}")
module = cst.parse_module("def foo(x):\n y = 1\n return x + y")
wrapper = cst.MetadataWrapper(module)
result = wrapper.visit(ParamPrinter())
###Output
_____no_output_____
###Markdown
=====================Working with Metadata=====================LibCST handles node metadata in a somewhat unusual manner in order to maintain the immutability of the tree. See :doc:`Metadata ` for the complete documentation. Providing Metadata==================While it's possible to write visitors that gather metadata from a tree ad hoc, using the provider interface gives you the advantage of being able to use dependency declaration to automatically run your providers in other visitors and type safety. For most cases, you'll want to extend :class:`~libcst.BatchableMetadataProvider` as providers that extend from that class can be resolved more efficiently in batches.Here's an example of a simple metadata provider that marks :class:`~libcst.Name` nodes that are function parameters:
###Code
import sys
sys.path.append("../../")
import libcst as cst
class IsParamProvider(cst.BatchableMetadataProvider[bool]):
"""
Marks Name nodes found as a parameter to a function.
"""
def __init__(self) -> None:
super().__init__()
self.is_param = False
def visit_Param(self, node: cst.Param) -> None:
# Mark the child Name node as a parameter
self.set_metadata(node.name, True)
def visit_Name(self, node: cst.Name) -> None:
# Mark all other Name nodes as not parameters
if not self.get_metadata(type(self), node, False):
self.set_metadata(node, False)
###Output
_____no_output_____
###Markdown
Line and Column Metadata------------------------LibCST ships with two built-in providers for line and column metadata. See :ref:`Position Metadata` for more information.Accessing Metadata==================Once you have a provider, the metadata interface gives you two primary ways of working with your providers. The first is using the resolve methods provided by :class:`~libcst.MetadataWrapper` and the second is through declaring metadata dependencies on a :class:`~libcst.CSTTransformer` or :class:`~libcst.CSTVisitor`.Using the :class:`~libcst.MetadataWrapper`------------------------------------------The metadata wrapper class provides a way to associate metadata with a module as well as a simple interface to run providers. Here's an example of using a wrapper with the provider we just wrote:
###Code
module = cst.parse_module("x")
wrapper = cst.MetadataWrapper(module)
isparam = wrapper.resolve(IsParamProvider)
x_name_node = wrapper.module.body[0].body[0].value
print(isparam[x_name_node]) # should print False
###Output
_____no_output_____
###Markdown
Using Dependency Declaration----------------------------The visitors that ship with LibCST can declare metadata providers as dependencies that will be run automatically when visited by a wrapper. Here is a visitor that prints all names that are function parameters.
###Code
from libcst.metadata import SyntacticPositionProvider
class ParamPrinter(cst.CSTVisitor):
METADATA_DEPENDENCIES = (IsParamProvider, SyntacticPositionProvider,)
def visit_Name(self, node: cst.Name) -> None:
# Only print out names that are parameters
if self.get_metadata(IsParamProvider, node):
pos = self.get_metadata(SyntacticPositionProvider, node).start
print(f"{node.value} found at line {pos.line}, column {pos.column}")
module = cst.parse_module("def foo(x):\n y = 1\n return x + y")
wrapper = cst.MetadataWrapper(module)
result = wrapper.visit(ParamPrinter())
###Output
_____no_output_____
###Markdown
=====================Working with Metadata=====================LibCST handles node metadata in a somewhat unusual manner in order to maintain the immutability of the tree. See :doc:`Metadata ` for the complete documentation. Providing Metadata==================While it's possible to write visitors that gather metadata from a tree ad hoc, using the provider interface gives you the advantage of being able to use dependency declaration to automatically run your providers in other visitors and type safety. For most cases, you'll want to extend :class:`~libcst.BatchableMetadataProvider` as providers that extend from that class can be resolved more efficiently in batches.Here's an example of a simple metadata provider that marks :class:`~libcst.Name` nodes that are function parameters:
###Code
import libcst as cst
class IsParamProvider(cst.BatchableMetadataProvider[bool]):
"""
Marks Name nodes found as a parameter to a function.
"""
def __init__(self):
super().__init__()
self.is_param = False
def visit_Param(self, node: cst.Param) -> None:
# Mark the child Name node as a parameter
self.set_metadata(node.name, True)
def visit_Name(self, node: cst.Name) -> None:
# Mark all other Name nodes as not parameters
if not self.get_metadata(type(self), node, False):
self.set_metadata(node, False)
###Output
_____no_output_____
###Markdown
Line and Column Metadata------------------------LibCST ships with two built-in providers for line and column metadata. See :ref:`Position Metadata` for more information.Accessing Metadata==================Once you have a provider, the metadata interface gives you two primary ways of working with your providers. The first is using the resolve methods provided by :class:`~libcst.MetadataWrapper` and the second is through declaring metadata dependencies on a :class:`~libcst.CSTTransformer` or :class:`~libcst.CSTVisitor`.Using the :class:`~libcst.MetadataWrapper`------------------------------------------The metadata wrapper class provides a way to associate metadata with a module as well as a simple interface to run providers. Here's an example of using a wrapper with the provider we just wrote:
###Code
module = cst.parse_module("x")
wrapper = cst.MetadataWrapper(module)
isparam = wrapper.resolve(IsParamProvider)
x_name_node = wrapper.module.body[0].body[0].value
print(isparam[x_name_node]) # should print False
###Output
_____no_output_____
###Markdown
Using Dependency Declaration----------------------------The visitors that ship with LibCST can declare metadata providers as dependencies that will be run automatically when visited by a wrapper. Here is a visitor that prints all names that are function parameters.
###Code
class ParamPrinter(cst.CSTVisitor):
METADATA_DEPENDENCIES = (IsParamProvider, cst.SyntacticPositionProvider,)
def visit_Name(self, node: cst.Name) -> None:
# Only print out names that are parameters
if self.get_metadata(IsParamProvider, node):
pos = self.get_metadata(cst.SyntacticPositionProvider, node).start
print(f"{node.value} found at line {pos.line}, column {pos.column}")
module = cst.parse_module("def foo(x):\n y = 1\n return x + y")
wrapper = cst.MetadataWrapper(module)
result = wrapper.visit(ParamPrinter())
###Output
_____no_output_____
###Markdown
=====================Working with Metadata=====================LibCST handles node metadata in a somewhat unusual manner in order to maintain the immutability of the tree. See :doc:`Metadata ` for the complete documentation. Providing Metadata==================While it's possible to write visitors that gather metadata from a tree ad hoc, using the provider interface gives you the advantage of being able to use dependency declaration to automatically run your providers in other visitors and type safety. For most cases, you'll want to extend :class:`~libcst.BatchableMetadataProvider` as providers that extend from that class can be resolved more efficiently in batches.Here's an example of a simple metadata provider that marks :class:`~libcst.Name` nodes that are function parameters:
###Code
import sys
sys.path.append("../../")
import libcst as cst
class IsParamProvider(cst.BatchableMetadataProvider[bool]):
"""
Marks Name nodes found as a parameter to a function.
"""
def __init__(self) -> None:
super().__init__()
self.is_param = False
def visit_Param(self, node: cst.Param) -> None:
# Mark the child Name node as a parameter
self.set_metadata(node.name, True)
def visit_Name(self, node: cst.Name) -> None:
# Mark all other Name nodes as not parameters
if not self.get_metadata(type(self), node, False):
self.set_metadata(node, False)
###Output
_____no_output_____
###Markdown
Line and Column Metadata------------------------LibCST ships with two built-in providers for line and column metadata. See :ref:`Position Metadata` for more information.Accessing Metadata==================Once you have a provider, the metadata interface gives you two primary ways of working with your providers. The first is using the resolve methods provided by :class:`~libcst.MetadataWrapper` and the second is through declaring metadata dependencies on a :class:`~libcst.CSTTransformer` or :class:`~libcst.CSTVisitor`.Using the :class:`~libcst.MetadataWrapper`------------------------------------------The metadata wrapper class provides a way to associate metadata with a module as well as a simple interface to run providers. Here's an example of using a wrapper with the provider we just wrote:
###Code
module = cst.parse_module("x")
wrapper = cst.MetadataWrapper(module)
isparam = wrapper.resolve(IsParamProvider)
x_name_node = wrapper.module.body[0].body[0].value
print(isparam[x_name_node]) # should print False
###Output
_____no_output_____
###Markdown
Using Dependency Declaration----------------------------The visitors that ship with LibCST can declare metadata providers as dependencies that will be run automatically when visited by a wrapper. Here is a visitor that prints all names that are function parameters.
###Code
from libcst.metadata import PositionProvider
class ParamPrinter(cst.CSTVisitor):
METADATA_DEPENDENCIES = (IsParamProvider, PositionProvider,)
def visit_Name(self, node: cst.Name) -> None:
# Only print out names that are parameters
if self.get_metadata(IsParamProvider, node):
pos = self.get_metadata(PositionProvider, node).start
print(f"{node.value} found at line {pos.line}, column {pos.column}")
module = cst.parse_module("def foo(x):\n y = 1\n return x + y")
wrapper = cst.MetadataWrapper(module)
result = wrapper.visit(ParamPrinter()) # NB: wrapper.visit not module.visit
###Output
_____no_output_____
###Markdown
=====================Working with Metadata=====================LibCST handles node metadata in a somewhat unusual manner in order to maintain the immutability of the tree. See :doc:`Metadata ` for the complete documentation. Providing Metadata==================While it's possible to write visitors that gather metadata from a tree ad hoc, using the provider interface gives you the advantage of being able to use dependency declaration to automatically run your providers in other visitors and type safety. For most cases, you'll want to extend :class:`~libcst.BatchableMetadataProvider` as providers that extend from that class can be resolved more efficiently in batches.Here's an example of a simple metadata provider that marks :class:`~libcst.Name` nodes that are function parameters:
###Code
import sys
sys.path.append("../../")
import libcst as cst
class IsParamProvider(cst.BatchableMetadataProvider[bool]):
"""
Marks Name nodes found as a parameter to a function.
"""
def __init__(self):
super().__init__()
self.is_param = False
def visit_Param(self, node: cst.Param) -> None:
# Mark the child Name node as a parameter
self.set_metadata(node.name, True)
def visit_Name(self, node: cst.Name) -> None:
# Mark all other Name nodes as not parameters
if not self.get_metadata(type(self), node, False):
self.set_metadata(node, False)
###Output
_____no_output_____
###Markdown
Line and Column Metadata------------------------LibCST ships with two built-in providers for line and column metadata. See :ref:`Position Metadata` for more information.Accessing Metadata==================Once you have a provider, the metadata interface gives you two primary ways of working with your providers. The first is using the resolve methods provided by :class:`~libcst.MetadataWrapper` and the second is through declaring metadata dependencies on a :class:`~libcst.CSTTransformer` or :class:`~libcst.CSTVisitor`.Using the :class:`~libcst.MetadataWrapper`------------------------------------------The metadata wrapper class provides a way to associate metadata with a module as well as a simple interface to run providers. Here's an example of using a wrapper with the provider we just wrote:
###Code
module = cst.parse_module("x")
wrapper = cst.MetadataWrapper(module)
isparam = wrapper.resolve(IsParamProvider)
x_name_node = wrapper.module.body[0].body[0].value
print(isparam[x_name_node]) # should print False
###Output
_____no_output_____
###Markdown
Using Dependency Declaration----------------------------The visitors that ship with LibCST can declare metadata providers as dependencies that will be run automatically when visited by a wrapper. Here is a visitor that prints all names that are function parameters.
###Code
from libcst.metadata import SyntacticPositionProvider
class ParamPrinter(cst.CSTVisitor):
METADATA_DEPENDENCIES = (IsParamProvider, SyntacticPositionProvider,)
def visit_Name(self, node: cst.Name) -> None:
# Only print out names that are parameters
if self.get_metadata(IsParamProvider, node):
pos = self.get_metadata(SyntacticPositionProvider, node).start
print(f"{node.value} found at line {pos.line}, column {pos.column}")
module = cst.parse_module("def foo(x):\n y = 1\n return x + y")
wrapper = cst.MetadataWrapper(module)
result = wrapper.visit(ParamPrinter())
###Output
_____no_output_____
tutorials/Bonus_Autoencoders/student/Bonus_Tutorial3.ipynb
###Markdown
--- Section 3: Applications of autoencoders Application 1 - Image noiseRemoving noise added to images is often showcased in dimensionality reduction techniques. The tutorial *W1D5 Dimensionality reduction* illustrated this capability with PCA.We first observe that autoencoders trained with noise-free images output noise-free images when receiving noisy images as input. However, the reconstructed images will be different from the original images (without noise) since the added noise maps to different coordinates in latent space.The ability to map noise-free and noisy versions to similar regions in latent space is known as *robustness* or *invariance* to noise. How can we build such functionality into the autoencoder? The solution is to train the autoencoder with both noise-free and noisy versions mapping to the noise-free version. A faster alternative is to re-train the autoencoder for a few epochs with noisy images. These short training sessions fine-tune the weights to map noisy images to their noise-free versions from similar latent space coordinates.Let's start by resetting to the reference state of the autoencoder.**Instructions:*** Please execute the cells below
###Code
reset_checkpoint(model, optimizer, checkpoint)
with torch.no_grad():
latent_test_ref = encoder(input_test)
###Output
_____no_output_____
###Markdown
Reconstructions before fine-tuningLet's verify that an autoencoder trained on clean images will output clean images from noisy inputs. We visualize this by plotting three rows:* Top row with noisy images inputs* Middle row with reconstructions of noisy images* Bottom row with reconstructions of the original images (noise-free)The bottom row helps identify samples with reconstruction issues before adding noise. This row shows the baseline reconstruction quality for these samples rather than the original images. (Why?)**Instructions:*** Please execute the cell(s) below
###Code
noise_factor = 0.4
input_train_noisy = (input_train
+ noise_factor * np.random.normal(size=input_train.shape))
input_train_noisy = np.clip(input_train_noisy, input_train.min(),
input_train.max(), dtype=np.float32)
input_test_noisy = (input_test
+ noise_factor * np.random.normal(size=input_test.shape))
input_test_noisy = np.clip(input_test_noisy, input_test.min(),
input_test.max(), dtype=np.float32)
with torch.no_grad():
output_test_noisy = model(input_test_noisy)
latent_test_noisy = encoder(input_test_noisy)
output_test = model(input_test)
plot_row([input_test_noisy[test_selected_idx],
output_test_noisy[test_selected_idx],
output_test[test_selected_idx]], image_shape=image_shape)
###Output
_____no_output_____
###Markdown
Latent space before fine-tuningWe investigate the origin of reconstruction errors by looking at how adding noise to input affects latent space coordinates. The decoder interprets significant coordinate changes as different digits.The function `plot_latent_ab` compares latent space coordinates for the same set of samples between two conditions. Here, we display coordinates for the ten samples from the previous cell before and after adding noise:* The left plot shows the coordinates of the original samples (noise-free)* The plot on the right shows the new coordinates after adding noise**Instructions:*** Please execute the cell below
###Code
plot_latent_ab(latent_test, latent_test_noisy, y_test, test_selected_idx,
title_a='Before noise', title_b='After noise', s2=s2)
###Output
_____no_output_____
###Markdown
Fine-tuning the autoencoder with noisy imagesLet's re-train the autoencoder with noisy images on the input and original (noise-free) images on the output, and regenerate the previous plots.We now see that both noisy and noise-free images match similar locations in latent space. The network denoises the input with a latent-space representation that is more robust to noise.**Instructions:*** Please execute the cell(s) below
###Code
n_epochs = 3
batch_size = 32
model.train()
runSGD(model, input_train_noisy, input_test_noisy,
out_train=input_train, out_test=input_test,
n_epochs=n_epochs, batch_size=batch_size)
with torch.no_grad():
output_test_noisy = model(input_test_noisy)
latent_test_noisy = encoder(input_test_noisy)
output_test = model(input_test)
plot_row([input_test_noisy[test_selected_idx],
output_test_noisy[test_selected_idx],
output_test[test_selected_idx]], image_shape=image_shape)
plot_latent_ab(latent_test, latent_test_noisy, y_test, test_selected_idx,
title_a='Before fine-tuning',
title_b='After fine-tuning', s2=s2)
###Output
_____no_output_____
###Markdown
Global latent space shiftThe new latent space representation is more robust to noise and may result in a better internal representation of the dataset. We verify this by inspecting the latent space with clean images before and after fine-tuning with noisy images.Fine-tuning the network with noisy images causes a *domain shift* in the dataset, i.e., a change in the distribution of images since the dataset was initially composed of noise-free images. Depending on the task and the extent of changes during re-train, (number of epochs, optimizer characteristics, etc.), the new latent space representation may become less well adapted to the original data as a side-effect. How could we address *domain shift* and improve both noisy and noise-free images?**Instructions:*** Please execute the cell(s) below
###Code
with torch.no_grad():
latent_test = encoder(input_test)
plot_latent_ab(latent_test_ref, latent_test, y_test, test_subset_idx,
title_a='Before fine-tuning',
title_b='After fine-tuning', s2=s2)
###Output
_____no_output_____
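###Markdown
One possible way to address the *domain shift* question above (a sketch added here, not part of the original tutorial): fine-tune on a mixture of clean and noisy inputs, with both mapping to the clean targets, so the representation stays adapted to noise-free images as well. The code below reuses the variables defined earlier in this notebook and runs a single epoch only as an illustration.
###Code
# Sketch (assumption, not in the original tutorial): mix clean and noisy
# inputs so that both map to the clean targets during fine-tuning
mixed_train_in = torch.cat([input_train, torch.as_tensor(input_train_noisy)])
mixed_train_out = torch.cat([input_train, input_train])
mixed_test_in = torch.cat([input_test, torch.as_tensor(input_test_noisy)])
mixed_test_out = torch.cat([input_test, input_test])
runSGD(model, mixed_train_in, mixed_test_in,
out_train=mixed_train_out, out_test=mixed_test_out,
n_epochs=1, batch_size=32)
###Output
_____no_output_____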
###Markdown
Application 2 - Image occlusionWe now investigate the effects of image occlusion. Drawing from the previous exercise, we expect the autoencoder to reconstruct complete images since the train set does not contain occluded images (right?).We visualize this by plotting three rows:* Top row with occluded images* Middle row with reconstructions of occluded images* Bottom row with reconstructions of the original imagesSimilarly, we investigate the source of this issue by looking at the representation of partial images in latent space and how it adjusts after fine-tuning.**Instructions:*** Please execute the cell(s) below
###Code
reset_checkpoint(model, optimizer, checkpoint)
with torch.no_grad():
latent_test_ref = encoder(input_test)
###Output
_____no_output_____
###Markdown
Before fine-tuning**Instructions:*** Please execute the cell(s) below
###Code
input_train_mask = image_occlusion(input_train, image_shape=image_shape)
input_test_mask = image_occlusion(input_test, image_shape=image_shape)
with torch.no_grad():
output_test_mask = model(input_test_mask)
latent_test_mask = encoder(input_test_mask)
output_test = model(input_test)
plot_row([input_test_mask[test_selected_idx],
output_test_mask[test_selected_idx],
output_test[test_selected_idx]], image_shape=image_shape)
plot_latent_ab(latent_test, latent_test_mask, y_test, test_selected_idx,
title_a='Before occlusion', title_b='After occlusion', s2=s2)
###Output
_____no_output_____
###Markdown
After fine-tuning
###Code
n_epochs = 3
batch_size = 32
model.train()
runSGD(model, input_train_mask, input_test_mask,
out_train=input_train, out_test=input_test,
n_epochs=n_epochs, batch_size=batch_size)
with torch.no_grad():
output_test_mask = model(input_test_mask)
latent_test_mask = encoder(input_test_mask)
output_test = model(input_test)
plot_row([input_test_mask[test_selected_idx],
output_test_mask[test_selected_idx],
output_test[test_selected_idx]], image_shape=image_shape)
plot_latent_ab(latent_test, latent_test_mask, y_test, test_selected_idx,
title_a='Before fine-tuning',
title_b='After fine-tuning', s2=s2)
with torch.no_grad():
latent_test = encoder(input_test)
plot_latent_ab(latent_test_ref, latent_test, y_test, test_subset_idx,
title_a='Before fine-tuning',
title_b='After fine-tuning', s2=s2)
###Output
_____no_output_____
###Markdown
Application 3 - Image rotationFinally, we look at the effect of image rotation in latent space coordinates. This task is arguably more challenging since it may require a complete re-write of image reconstruction.We visualize this by plotting three rows:* Top row with rotated images* Middle row with reconstructions of rotated images* Bottom row with reconstructions of the original imagesWe investigate the source of this issue by looking at the representation of rotated images in latent space and how it adjusts after fine-tuning.**Instructions:*** Please execute the cell(s) below
###Code
reset_checkpoint(model, optimizer, checkpoint)
with torch.no_grad():
latent_test_ref = encoder(input_test)
###Output
_____no_output_____
###Markdown
Before fine-tuning**Instructions:*** Please execute the cell(s) below
###Code
input_train_rotation = image_rotation(input_train, 90, image_shape=image_shape)
input_test_rotation = image_rotation(input_test, 90, image_shape=image_shape)
with torch.no_grad():
output_test_rotation = model(input_test_rotation)
latent_test_rotation = encoder(input_test_rotation)
output_test = model(input_test)
plot_row([input_test_rotation[test_selected_idx],
output_test_rotation[test_selected_idx],
output_test[test_selected_idx]], image_shape=image_shape)
plot_latent_ab(latent_test, latent_test_rotation, y_test, test_selected_idx,
title_a='Before rotation', title_b='After rotation', s2=s2)
###Output
_____no_output_____
###Markdown
After fine-tuning**Instructions:*** Please execute the cell(s) below
###Code
n_epochs = 5
batch_size = 32
model.train()
runSGD(model, input_train_rotation, input_test_rotation,
out_train=input_train, out_test=input_test,
n_epochs=n_epochs, batch_size=batch_size)
with torch.no_grad():
output_test_rotation = model(input_test_rotation)
latent_test_rotation = encoder(input_test_rotation)
output_test = model(input_test)
plot_row([input_test_rotation[test_selected_idx],
output_test_rotation[test_selected_idx],
output_test[test_selected_idx]], image_shape=image_shape)
plot_latent_ab(latent_test, latent_test_rotation, y_test, test_selected_idx,
title_a='Before fine-tuning',
title_b='After fine-tuning', s2=s2)
with torch.no_grad():
latent_test = encoder(input_test)
plot_latent_ab(latent_test_ref, latent_test, y_test, test_subset_idx,
title_a='Before fine-tuning',
title_b='After fine-tuning', s2=s2)
###Output
_____no_output_____
###Markdown
Application 4 - What would digit "6" look like if we had never seen it before?Before we start melting our brains with such an impossible task, let's just ask the autoencoder to do it!We train the autoencoder from scratch without digit class `6` and visualize reconstructions from digit `6`.**Instructions:*** Please execute the cell(s) below
###Code
model = AutoencoderClass(s2=s2)
optimizer = optim.Adam(model.parameters())
encoder = model.encoder
decoder = model.decoder
missing = 6
my_input_train = input_train[y_train != missing]
my_input_test = input_test[y_test != missing]
my_y_test = y_test[y_test != missing]
n_epochs = 3
batch_size = 32
runSGD(model, my_input_train, my_input_test,
n_epochs=n_epochs, batch_size=batch_size)
with torch.no_grad():
output_test = model(input_test)
my_latent_test = encoder(my_input_test)
plot_row([input_test[y_test == 6], output_test[y_test == 6]],
image_shape=image_shape)
plot_latent_generative(my_latent_test, my_y_test, decoder,
image_shape=image_shape, s2=s2)
###Output
_____no_output_____
###Markdown
Exercise 1: Removing the most dominant digit classesDigit classes `0` and `1` are dominant in the sense that these occupy large areas of the decoder grid, compared to other digit classes that occupy very little generative space.How will latent space change when removing the two most dominant digit classes? Will latent space re-distribute evenly among remaining classes or choose another two dominant classes?**Instructions:*** Please execute the cell(s) below* The intersection of two boolean arrays by condition is specified as `x[(cond_a)&(cond_b)]`
###Code
model = AutoencoderClass(s2=s2)
optimizer = optim.Adam(model.parameters())
encoder = model.encoder
decoder = model.decoder
missing_a = 1
missing_b = 0
#################################################
## TODO for students:
#################################################
# input train data
# my_input_train = ...
# input test data
# my_input_test = ...
# model
# my_y_test = ...
# Uncomment to test your code
# print(my_input_train.shape)
# print(my_input_test.shape)
# print(my_y_test.shape)
###Output
_____no_output_____
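###Markdown
A toy illustration (added sketch, not the exercise solution) of the boolean indexing hint above: combining two conditions with `&` keeps only the rows where both hold.
###Code
# Toy example (assumed, generic names): keep samples whose label is neither 0 nor 1
toy_x = torch.arange(12).reshape(6, 2).float()
toy_y = torch.tensor([0, 1, 2, 3, 0, 1])
kept = toy_x[(toy_y != 0) & (toy_y != 1)]
print(kept.shape) # torch.Size([2, 2])
###Output
_____no_output_____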
###Markdown
**SAMPLE OUTPUT**```torch.Size([47335, 784])torch.Size([7885, 784])torch.Size([7885])``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/Bonus_Autoencoders/solutions/Bonus_Tutorial3_Solution_22e2c431.py)
###Code
n_epochs = 3
batch_size = 32
runSGD(model, my_input_train, my_input_test,
n_epochs=n_epochs, batch_size=batch_size)
with torch.no_grad():
output_test = model(input_test)
my_latent_test = encoder(my_input_test)
plot_row([input_test[y_test == missing_a], output_test[y_test == missing_a]],
image_shape=image_shape)
plot_row([input_test[y_test == missing_b], output_test[y_test == missing_b]],
image_shape=image_shape)
plot_latent_generative(my_latent_test, my_y_test, decoder,
image_shape=image_shape, s2=s2)
###Output
_____no_output_____
###Markdown
--- Section 4: ANNs? Same but different!"Same same but different" is an expression used in some parts of Asia to express differences between supposedly similar subjects. In this exercise, we investigate a fundamental difference in how fully-connected ANNs process visual information compared to human vision.The previous exercises showed the ANN autoencoder performing cognitive tasks with relative ease. However, there is a crucial aspect of ANN processing already encoded in the vectorization of images. This network architecture completely ignores the relative position of pixels. To illustrate this, we show that learning proceeds just as well with shuffled pixel locations.First, we obtain a reversible shuffle map stored in `shuffle_image_idx` used to shuffle image pixels randomly. The unshuffled image set `input_shuffle` is recovered as follows:```input_shuffle[:, shuffle_rev_image_idx]```Below, we set up the reversible shuffle map and visualize a few images with shuffled and unshuffled pixels, followed by their noisy versions.**Instructions:*** Please execute the cell(s) below
###Code
# create forward and reverse indexes for pixel shuffling
shuffle_image_idx = np.arange(input_size)
shuffle_rev_image_idx = np.empty_like(shuffle_image_idx)
# shuffle pixel location
np.random.shuffle(shuffle_image_idx)
# store reverse locations
for pos_idx, pos in enumerate(shuffle_image_idx):
shuffle_rev_image_idx[pos] = pos_idx
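# note: since shuffle_image_idx is a permutation, this loop is equivalent to
# shuffle_rev_image_idx = np.argsort(shuffle_image_idx)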
# shuffle train and test sets
input_train_shuffle = input_train[:, shuffle_image_idx]
input_test_shuffle = input_test[:, shuffle_image_idx]
input_train_shuffle_noisy = input_train_noisy[:, shuffle_image_idx]
input_test_shuffle_noisy = input_test_noisy[:, shuffle_image_idx]
# show samples with shuffled pixels
plot_row([input_test_shuffle,
input_test_shuffle[:, shuffle_rev_image_idx]],
image_shape=image_shape)
# show noisy samples with shuffled pixels
plot_row([input_train_shuffle_noisy[test_selected_idx],
input_train_shuffle_noisy[:, shuffle_rev_image_idx][test_selected_idx]],
image_shape=image_shape)
###Output
_____no_output_____
###Markdown
We initialize and train the network in the denoising task with shuffled pixels.**Instructions:*** Please execute the cell below
###Code
model = AutoencoderClass(s2=s2)
encoder = model.encoder
decoder = model.decoder
n_epochs = 3
batch_size = 32
# train the model to denoise shuffled images
runSGD(model, input_train_shuffle_noisy, input_test_shuffle_noisy,
out_train=input_train_shuffle, out_test=input_test_shuffle,
n_epochs=n_epochs, batch_size=batch_size)
###Output
_____no_output_____
###Markdown
Finally, visualize reconstructions and latent space representation with the trained model.We visualize reconstructions by plotting three rows:* Top row with shuffled noisy images* Middle row with reconstructions of shuffled denoised images* Bottom row with unshuffled reconstructions of denoised imagesWe obtain the same organization in the encoder map as before. Sharing similar internal representations confirms that the network ignores the relative position of pixels. The decoder grid is different from before since it generates shuffled images.**Instructions:*** Please execute the cell below
###Code
with torch.no_grad():
latent_test_shuffle_noisy = encoder(input_test_shuffle_noisy)
output_test_shuffle_noisy = model(input_test_shuffle_noisy)
plot_row([input_test_shuffle_noisy[test_selected_idx],
output_test_shuffle_noisy[test_selected_idx],
output_test_shuffle_noisy[:, shuffle_rev_image_idx][test_selected_idx]],
image_shape=image_shape)
plot_latent_generative(latent_test_shuffle_noisy, y_test, decoder,
image_shape=image_shape, s2=s2)
###Output
_____no_output_____
###Markdown
--- SummaryHooray! You have finished the last Tutorial of NMA 2020!We hope you've enjoyed these tutorials and learned about the usefulness of autoencoders to model rich and non-linear representations of data. We hope you may find them useful in your research, perhaps to model certain aspects of cognition or even extend them to biologically plausible architectures - autoencoders of spiking neurons, anyone?These are the key take away messages from these tutorials:**Autoencoders trained in *learning by doing* tasks such as compression/decompression, removing noise, etc. can uncover rich lower-dimensional structure embedded in structured images and other cognitively relevant data.****The data domain seen during training imprints a "cognitive bias" - you only see what you expect to see, which can only be similar to what you saw before.**Such bias is related to the concept [*What you see is all there is*](https://en.wikipedia.org/wiki/Thinking,_Fast_and_Slow) coined by Daniel Kahneman in psychology.For additional applications of autoencoders to neuroscience, check the spike sorting application in the outro video, and also see [here](https://www.nature.com/articles/s41592-018-0109-9) how to replicate the input-output relationship of real networks of neurons with autoencoders.
###Code
# @title Video 2: Wrap-up
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="BV1ph411Z7uh", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="ziiZK9P6AXQ", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Tutorial 3: Autoencoders applications**Bonus Day: Autoencoders****By Neuromatch Academy**__Content creators:__ Marco Brigham and the [CCNSS](https://www.ccnss.org/) team (2014-2018)__Content reviewers:__ Itzel Olivos, Karen Schroeder, Karolina Stosio, Kshitij Dwivedi, Spiros Chavlis, Michael Waskom --- Tutorial Objectives Autoencoder applicationsHow do autoencoders with rich internal representations perform on the MNIST cognitive task?How do autoencoders perceive unseen digit classes? How does ANN image encoding differ from human vision?We are equipped with tools and techniques to answer these questions, and hopefully, many others you may encounter in your research!  In this tutorial, you will:- Analyze how autoencoders perceive transformed data (added noise, occluded parts, and rotations), and how that evolves with short re-train sessions- Use autoencoders to visualize unseen digit classes- Understand visual encoding for fully connected ANN autoencoders
###Code
# @title Video 1: Applications
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="BV12v411q7nS", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="_bzW_jkH6l0", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
--- SetupPlease execute the cell(s) below to initialize the notebook environment.
###Code
# Imports
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy import ndimage
import torch
from torch import nn, optim
from sklearn.datasets import fetch_openml
# @title Figure settings
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/NMA2020/nma.mplstyle")
# @title Helper functions
def downloadMNIST():
"""
Download MNIST dataset and transform it to torch.Tensor
Args:
None
Returns:
x_train : training images (torch.Tensor) (60000, 28, 28)
x_test : test images (torch.Tensor) (10000, 28, 28)
y_train : training labels (torch.Tensor) (60000, )
y_test : test labels (torch.Tensor) (10000, )
"""
X, y = fetch_openml('mnist_784', version=1, return_X_y=True, as_frame=False)
# Trunk the data
n_train = 60000
n_test = 10000
train_idx = np.arange(0, n_train)
test_idx = np.arange(n_train, n_train + n_test)
x_train, y_train = X[train_idx], y[train_idx]
x_test, y_test = X[test_idx], y[test_idx]
# Transform np.ndarrays to torch.Tensor
x_train = torch.from_numpy(np.reshape(x_train,
(len(x_train),
28, 28)).astype(np.float32))
x_test = torch.from_numpy(np.reshape(x_test,
(len(x_test),
28, 28)).astype(np.float32))
y_train = torch.from_numpy(y_train.astype(int))
y_test = torch.from_numpy(y_test.astype(int))
return (x_train, y_train, x_test, y_test)
def init_weights_kaiming_uniform(layer):
"""
Initializes weights from linear PyTorch layer
with kaiming uniform distribution.
Args:
layer (torch.Module)
Pytorch layer
Returns:
Nothing.
"""
# check for linear PyTorch layer
if isinstance(layer, nn.Linear):
# initialize weights with kaiming uniform distribution
nn.init.kaiming_uniform_(layer.weight.data)
def init_weights_kaiming_normal(layer):
"""
Initializes weights from linear PyTorch layer
with kaiming normal distribution.
Args:
layer (torch.Module)
Pytorch layer
Returns:
Nothing.
"""
# check for linear PyTorch layer
if isinstance(layer, nn.Linear):
# initialize weights with kaiming normal distribution
nn.init.kaiming_normal_(layer.weight.data)
def get_layer_weights(layer):
"""
Retrieves learnable parameters from PyTorch layer.
Args:
layer (torch.Module)
Pytorch layer
Returns:
list with learnable parameters
"""
# initialize output list
weights = []
# check whether layer has learnable parameters
if layer.parameters():
# copy numpy array representation of each set of learnable parameters
for item in layer.parameters():
weights.append(item.detach().numpy())
return weights
def eval_mse(y_pred, y_true):
"""
Evaluates mean square error (MSE) between y_pred and y_true
Args:
y_pred (torch.Tensor)
prediction samples
y_true (torch.Tensor)
ground truth samples
Returns:
MSE(y_pred, y_true)
"""
with torch.no_grad():
criterion = nn.MSELoss()
loss = criterion(y_pred, y_true)
return float(loss)
def eval_bce(y_pred, y_true):
"""
Evaluates binary cross-entropy (BCE) between y_pred and y_true
Args:
y_pred (torch.Tensor)
prediction samples
y_true (torch.Tensor)
ground truth samples
Returns:
BCE(y_pred, y_true)
"""
with torch.no_grad():
criterion = nn.BCELoss()
loss = criterion(y_pred, y_true)
return float(loss)
def plot_row(images, show_n=10, image_shape=None):
"""
Plots rows of images from list of iterables (iterables: list, numpy array
or torch.Tensor). Also accepts single iterable.
Randomly selects images in each list element if item count > show_n.
Args:
images (iterable or list of iterables)
single iterable with images, or list of iterables
show_n (integer)
maximum number of images per row
image_shape (tuple or list)
original shape of image if vectorized form
Returns:
Nothing.
"""
if not isinstance(images, (list, tuple)):
images = [images]
for items_idx, items in enumerate(images):
items = np.array(items)
if items.ndim == 1:
items = np.expand_dims(items, axis=0)
if len(items) > show_n:
selected = np.random.choice(len(items), show_n, replace=False)
items = items[selected]
if image_shape is not None:
items = items.reshape([-1] + list(image_shape))
plt.figure(figsize=(len(items) * 1.5, 2))
for image_idx, image in enumerate(items):
plt.subplot(1, len(items), image_idx + 1)
plt.imshow(image, cmap='gray', vmin=image.min(), vmax=image.max())
plt.axis('off')
plt.tight_layout()
def to_s2(u):
"""
Projects 3D coordinates to spherical coordinates (theta, phi) surface of
unit sphere S2.
theta: [0, pi]
phi: [-pi, pi]
Args:
u (list, numpy array or torch.Tensor of floats)
3D coordinates
Returns:
Spherical coordinates (theta, phi) on surface of unit sphere S2.
"""
x, y, z = (u[:, 0], u[:, 1], u[:, 2])
r = np.sqrt(x**2 + y**2 + z**2)
theta = np.arccos(z / r)
phi = np.arctan2(x, y)
return np.array([theta, phi]).T
def to_u3(s):
"""
Converts from 2D coordinates on surface of unit sphere S2 to 3D coordinates
(on surface of S2), i.e. (theta, phi) ---> (1, theta, phi).
Args:
s (list, numpy array or torch.Tensor of floats)
2D coordinates on unit sphere S_2
Returns:
3D coordinates on surface of unit sphere S_2
"""
theta, phi = (s[:, 0], s[:, 1])
x = np.sin(theta) * np.sin(phi)
y = np.sin(theta) * np.cos(phi)
z = np.cos(theta)
return np.array([x, y, z]).T
def xy_lim(x):
"""
Return arguments for plt.xlim and plt.ylim calculated from minimum
and maximum of x.
Args:
x (list, numpy array or torch.Tensor of floats)
data to be plotted
Returns:
xlim and ylim lists for plt.xlim and plt.ylim.
"""
x_min = np.min(x, axis=0)
x_max = np.max(x, axis=0)
x_min = x_min - np.abs(x_max - x_min) * 0.05 - np.finfo(float).eps
x_max = x_max + np.abs(x_max - x_min) * 0.05 + np.finfo(float).eps
return [x_min[0], x_max[0]], [x_min[1], x_max[1]]
def plot_generative(x, decoder_fn, image_shape, n_row=16, s2=False):
"""
Plots images reconstructed by decoder_fn from a 2D grid in
latent space that is determined by minimum and maximum values in x.
Args:
x (list, numpy array or torch.Tensor of floats)
2D or 3D coordinates in latent space
decoder_fn (function)
function returning vectorized images from 2D latent space coordinates
image_shape (tuple or list)
original shape of image
n_row (integer)
number of rows in grid
s2 (boolean)
convert 3D coordinates (x, y, z) to spherical coordinates (theta, phi)
Returns:
Nothing.
"""
if s2:
x = to_s2(np.array(x))
xlim, ylim = xy_lim(np.array(x))
dx = (xlim[1] - xlim[0]) / n_row
grid = [np.linspace(ylim[0] + dx / 2, ylim[1] - dx / 2, n_row),
np.linspace(xlim[0] + dx / 2, xlim[1] - dx / 2, n_row)]
canvas = np.zeros((image_shape[0] * n_row, image_shape[1] * n_row))
cmap = plt.get_cmap('gray')
for j, latent_y in enumerate(grid[0][::-1]):
for i, latent_x in enumerate(grid[1]):
latent = np.array([[latent_x, latent_y]], dtype=np.float32)
if s2:
latent = to_u3(latent)
with torch.no_grad():
x_decoded = decoder_fn(torch.from_numpy(latent))
x_decoded = x_decoded.reshape(image_shape)
canvas[j * image_shape[0]: (j + 1) * image_shape[0],
i * image_shape[1]: (i + 1) * image_shape[1]] = x_decoded
plt.imshow(canvas, cmap=cmap, vmin=canvas.min(), vmax=canvas.max())
plt.axis('off')
def plot_latent(x, y, show_n=500, s2=False, fontdict=None, xy_labels=None):
"""
Plots digit class of each sample in 2D latent space coordinates.
Args:
x (list, numpy array or torch.Tensor of floats)
2D coordinates in latent space
y (list, numpy array or torch.Tensor of floats)
digit class of each sample
show_n (integer)
maximum number of samples to plot
s2 (boolean)
convert 3D coordinates (x, y, z) to spherical coordinates (theta, phi)
fontdict (dictionary)
style option for plt.text
xy_labels (list)
optional list with [xlabel, ylabel]
Returns:
Nothing.
"""
if fontdict is None:
fontdict = {'weight': 'bold', 'size': 12}
if s2:
x = to_s2(np.array(x))
cmap = plt.get_cmap('tab10')
if len(x) > show_n:
selected = np.random.choice(len(x), show_n, replace=False)
x = x[selected]
y = y[selected]
for my_x, my_y in zip(x, y):
plt.text(my_x[0], my_x[1], str(int(my_y)),
color=cmap(int(my_y) / 10.),
fontdict=fontdict,
horizontalalignment='center',
verticalalignment='center',
alpha=0.8)
xlim, ylim = xy_lim(np.array(x))
plt.xlim(xlim)
plt.ylim(ylim)
if s2:
if xy_labels is None:
xy_labels = [r'$\varphi$', r'$\theta$']
plt.xticks(np.arange(0, np.pi + np.pi / 6, np.pi / 6),
['0', '$\pi/6$', '$\pi/3$', '$\pi/2$',
'$2\pi/3$', '$5\pi/6$', '$\pi$'])
plt.yticks(np.arange(-np.pi, np.pi + np.pi / 3, np.pi / 3),
['$-\pi$', '$-2\pi/3$', '$-\pi/3$', '0',
'$\pi/3$', '$2\pi/3$', '$\pi$'])
if xy_labels is None:
xy_labels = ['$Z_1$', '$Z_2$']
plt.xlabel(xy_labels[0])
plt.ylabel(xy_labels[1])
def plot_latent_generative(x, y, decoder_fn, image_shape, s2=False,
title=None, xy_labels=None):
"""
Two horizontal subplots generated with encoder map and decoder grid.
Args:
x (list, numpy array or torch.Tensor of floats)
2D coordinates in latent space
y (list, numpy array or torch.Tensor of floats)
digit class of each sample
decoder_fn (function)
function returning vectorized images from 2D latent space coordinates
image_shape (tuple or list)
original shape of image
s2 (boolean)
convert 3D coordinates (x, y, z) to spherical coordinates (theta, phi)
title (string)
plot title
xy_labels (list)
optional list with [xlabel, ylabel]
Returns:
Nothing.
"""
fig = plt.figure(figsize=(12, 6))
if title is not None:
fig.suptitle(title, y=1.05)
ax = fig.add_subplot(121)
ax.set_title('Encoder map', y=1.05)
plot_latent(x, y, s2=s2, xy_labels=xy_labels)
ax = fig.add_subplot(122)
ax.set_title('Decoder grid', y=1.05)
plot_generative(x, decoder_fn, image_shape, s2=s2)
plt.tight_layout()
plt.show()
def plot_latent_ab(x1, x2, y, selected_idx=None,
title_a='Before', title_b='After', show_n=500, s2=False):
"""
Two horizontal subplots with encoder maps.
Args:
x1 (list, numpy array or torch.Tensor of floats)
2D coordinates in latent space (left plot)
x2 (list, numpy array or torch.Tensor of floats)
2D coordinates in latent space (right plot)
y (list, numpy array or torch.Tensor of floats)
digit class of each sample
selected_idx (list of integers)
indexes of elements to be plotted
show_n (integer)
maximum number of samples in each plot
s2 (boolean)
convert 3D coordinates (x, y, z) to spherical coordinates (theta, phi)
Returns:
Nothing.
"""
fontdict = {'weight': 'bold', 'size': 12}
if len(x1) > show_n:
if selected_idx is None:
selected_idx = np.random.choice(len(x1), show_n, replace=False)
x1 = x1[selected_idx]
x2 = x2[selected_idx]
y = y[selected_idx]
data = np.concatenate([x1, x2])
if s2:
xlim, ylim = xy_lim(to_s2(data))
else:
xlim, ylim = xy_lim(data)
plt.figure(figsize=(12, 6))
ax = plt.subplot(121)
ax.set_title(title_a, y=1.05)
plot_latent(x1, y, fontdict=fontdict, s2=s2)
plt.xlim(xlim)
plt.ylim(ylim)
ax = plt.subplot(122)
ax.set_title(title_b, y=1.05)
plot_latent(x2, y, fontdict=fontdict, s2=s2)
plt.xlim(xlim)
plt.ylim(ylim)
plt.tight_layout()
def runSGD(net, input_train, input_test, out_train=None, out_test=None,
optimizer=None, criterion='bce', n_epochs=10, batch_size=32,
verbose=False):
"""
Trains autoencoder network with stochastic gradient descent with
optimizer and loss criterion. Train samples are shuffled, and loss is
displayed at the end of each epoch for both MSE and BCE. Plots training loss
at each minibatch (maximum of 500 randomly selected values).
Args:
net (torch network)
ANN network (nn.Module)
input_train (torch.Tensor)
vectorized input images from train set
input_test (torch.Tensor)
vectorized input images from test set
out_train (torch.Tensor)
optional target images from train set
out_test (torch.Tensor)
optional target images from test set
optimizer (torch optimizer)
optional optimizer for SGD (defaults to Adam if None)
criterion (string)
train loss: 'bce' or 'mse'
n_epochs (integer)
number of full iterations of training data
batch_size (integer)
number of elements in mini-batches
verbose (boolean)
whether to print final loss
Returns:
Nothing.
"""
if out_train is not None and out_test is not None:
different_output = True
else:
different_output = False
# Initialize loss function
if criterion == 'mse':
loss_fn = nn.MSELoss()
elif criterion == 'bce':
loss_fn = nn.BCELoss()
else:
print('Please specify either "mse" or "bce" for loss criterion')
# Initialize SGD optimizer
if optimizer is None:
optimizer = optim.Adam(net.parameters())
# Placeholder for loss
track_loss = []
print('Epoch', '\t', 'Loss train', '\t', 'Loss test')
for i in range(n_epochs):
shuffle_idx = np.random.permutation(len(input_train))
batches = torch.split(input_train[shuffle_idx], batch_size)
if different_output:
batches_out = torch.split(out_train[shuffle_idx], batch_size)
for batch_idx, batch in enumerate(batches):
output_train = net(batch)
if different_output:
loss = loss_fn(output_train, batches_out[batch_idx])
else:
loss = loss_fn(output_train, batch)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Keep track of loss at each epoch
track_loss += [float(loss)]
loss_epoch = f'{i+1}/{n_epochs}'
with torch.no_grad():
output_train = net(input_train)
if different_output:
loss_train = loss_fn(output_train, out_train)
else:
loss_train = loss_fn(output_train, input_train)
loss_epoch += f'\t {loss_train:.4f}'
output_test = net(input_test)
if different_output:
loss_test = loss_fn(output_test, out_test)
else:
loss_test = loss_fn(output_test, input_test)
loss_epoch += f'\t\t {loss_test:.4f}'
print(loss_epoch)
if verbose:
# Print loss
if different_output:
loss_mse = f'\nMSE\t {eval_mse(output_train, out_train):0.4f}'
loss_mse += f'\t\t {eval_mse(output_test, out_test):0.4f}'
else:
loss_mse = f'\nMSE\t {eval_mse(output_train, input_train):0.4f}'
loss_mse += f'\t\t {eval_mse(output_test, input_test):0.4f}'
print(loss_mse)
if different_output:
loss_bce = f'BCE\t {eval_bce(output_train, out_train):0.4f}'
loss_bce += f'\t\t {eval_bce(output_test, out_test):0.4f}'
else:
loss_bce = f'BCE\t {eval_bce(output_train, input_train):0.4f}'
loss_bce += f'\t\t {eval_bce(output_test, input_test):0.4f}'
print(loss_bce)
# Plot loss
step = int(np.ceil(len(track_loss)/500))
x_range = np.arange(0, len(track_loss), step)
plt.figure()
plt.plot(x_range, track_loss[::step], 'C0')
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.xlim([0, None])
plt.ylim([0, None])
plt.show()
def image_occlusion(x, image_shape):
"""
Randomly selects one quadrant of each image and sets it to zeros.
Args:
x (torch.Tensor of floats)
vectorized images
image_shape (tuple or list)
original shape of image
Returns:
torch.Tensor.
"""
selection = np.random.choice(4, len(x))
my_x = np.array(x).copy()
my_x = my_x.reshape(-1, image_shape[0], image_shape[1])
my_x[selection == 0, :int(image_shape[0] / 2), :int(image_shape[1] / 2)] = 0
my_x[selection == 1, int(image_shape[0] / 2):, :int(image_shape[1] / 2)] = 0
my_x[selection == 2, :int(image_shape[0] / 2), int(image_shape[1] / 2):] = 0
my_x[selection == 3, int(image_shape[0] / 2):, int(image_shape[1] / 2):] = 0
my_x = my_x.reshape(x.shape)
return torch.from_numpy(my_x)
def image_rotation(x, deg, image_shape):
"""
Randomly rotates images by +- deg degrees.
Args:
x (torch.Tensor of floats)
vectorized images
deg (integer)
rotation range
image_shape (tuple or list)
original shape of image
Returns:
torch.Tensor.
"""
my_x = np.array(x).copy()
my_x = my_x.reshape(-1, image_shape[0], image_shape[1])
for idx, item in enumerate(my_x):
my_deg = deg * 2 * np.random.random() - deg
my_x[idx] = ndimage.rotate(my_x[idx], my_deg,
reshape=False, prefilter=False)
my_x = my_x.reshape(x.shape)
return torch.from_numpy(my_x)
class AutoencoderClass(nn.Module):
"""
Deep autoencoder network object (nn.Module) with optional L2 normalization
of activations in bottleneck layer.
Args:
input_size (integer)
size of input samples
s2 (boolean)
whether to L2 normalize activations in the bottleneck layer
Returns:
Autoencoder object inherited from nn.Module class.
"""
def __init__(self, input_size=784, s2=False):
super().__init__()
self.input_size = input_size
self.s2 = s2
if s2:
self.encoding_size = 3
else:
self.encoding_size = 2
self.enc1 = nn.Linear(self.input_size, int(self.input_size / 2))
self.enc1_f = nn.PReLU()
self.enc2 = nn.Linear(int(self.input_size / 2), self.encoding_size * 32)
self.enc2_f = nn.PReLU()
self.enc3 = nn.Linear(self.encoding_size * 32, self.encoding_size)
self.enc3_f = nn.PReLU()
self.dec1 = nn.Linear(self.encoding_size, self.encoding_size * 32)
self.dec1_f = nn.PReLU()
self.dec2 = nn.Linear(self.encoding_size * 32, int(self.input_size / 2))
self.dec2_f = nn.PReLU()
self.dec3 = nn.Linear(int(self.input_size / 2), self.input_size)
self.dec3_f = nn.Sigmoid()
def encoder(self, x):
"""
Encoder component.
"""
x = self.enc1_f(self.enc1(x))
x = self.enc2_f(self.enc2(x))
x = self.enc3_f(self.enc3(x))
if self.s2:
x = nn.functional.normalize(x, p=2, dim=1)
return x
def decoder(self, x):
"""
Decoder component.
"""
x = self.dec1_f(self.dec1(x))
x = self.dec2_f(self.dec2(x))
x = self.dec3_f(self.dec3(x))
return x
def forward(self, x):
"""
Forward pass.
"""
x = self.encoder(x)
x = self.decoder(x)
return x
def save_checkpoint(net, optimizer, filename):
"""
Saves a PyTorch checkpoint.
Args:
net (torch network)
ANN network (nn.Module)
optimizer (torch optimizer)
optimizer for SGD
filename (string)
filename (without extension)
Returns:
Nothing.
"""
torch.save({'model_state_dict': net.state_dict(),
'optimizer_state_dict': optimizer.state_dict()},
filename+'.pt')
def load_checkpoint(url, filename):
"""
Loads a PyTorch checkpoint from URL if local file is not present.
Args:
url (string)
URL location of PyTorch checkpoint
filename (string)
filename (without extension)
Returns:
PyTorch checkpoint of saved model.
"""
if not os.path.isfile(filename+'.pt'):
os.system(f"wget {url}.pt")
return torch.load(filename+'.pt')
def reset_checkpoint(net, optimizer, checkpoint):
"""
Resets PyTorch model to checkpoint.
Args:
net (torch network)
ANN network (nn.Module)
optimizer (torch optimizer)
optimizer for SGD
checkpoint (torch checkpoint)
checkpoint of saved model
Returns:
Nothing.
"""
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
###Output
_____no_output_____
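###Markdown
A quick check (added sketch, not part of the original notebook) that the spherical projection helpers round-trip: projecting a random unit vector to (theta, phi) with `to_s2` and mapping back with `to_u3` recovers the original 3D coordinates.
###Code
# sketch: to_u3(to_s2(u)) should recover u for unit-norm rows
u = np.random.randn(1, 3).astype(np.float32)
u = u / np.linalg.norm(u)
print(np.allclose(to_u3(to_s2(u)), u, atol=1e-5)) # expected: True
###Output
_____no_output_____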
###Markdown
--- Section 1: Download and prepare MNIST datasetWe use the helper function `downloadMNIST` to download the dataset and transform it into `torch.Tensor` and assign train and test sets to (`x_train`, `y_train`) and (`x_test`, `y_test`).The variable `input_size` stores the length of *vectorized* versions of the images `input_train` and `input_test` for training and test images.**Instructions:*** Please execute the cell below
###Code
# Download MNIST
x_train, y_train, x_test, y_test = downloadMNIST()
x_train = x_train / 255
x_test = x_test / 255
image_shape = x_train.shape[1:]
input_size = np.prod(image_shape)
input_train = x_train.reshape([-1, input_size])
input_test = x_test.reshape([-1, input_size])
test_selected_idx = np.random.choice(len(x_test), 10, replace=False)
train_selected_idx = np.random.choice(len(x_train), 10, replace=False)
test_subset_idx = np.random.choice(len(x_test), 500, replace=False)
print(f'shape image \t\t {image_shape}')
print(f'shape input_train \t {input_train.shape}')
print(f'shape input_test \t {input_test.shape}')
###Output
_____no_output_____
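###Markdown
A quick sanity check (added sketch): each row of `input_train` is a flattened 28x28 image, so reshaping with `image_shape` recovers the 2D image.
###Code
# sketch: undo the vectorization of a single sample
img0 = input_train[0].reshape(image_shape)
print(img0.shape) # torch.Size([28, 28])
###Output
_____no_output_____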
###Markdown
--- Section 2: Download a pre-trained modelThe class `AutoencoderClass` implements the autoencoder architectures introduced in the previous tutorial. The design of this class follows the object-oriented programming (OOP) style from tutorial W3D4. Setting the boolean parameter `s2=True` specifies the model with projection onto the $S_2$ sphere.We trained both models for `n_epochs=25` and saved the weights to avoid a lengthy initial training period - these will be our reference model states.Experiments are run from identical initial conditions by resetting the autoencoder to the reference state at the beginning of each exercise. The mechanism for loading and storing models from PyTorch is the following:```model = nn.Sequential(...)ormodel = AutoencoderClass()torch.save({'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict()}, filename_path)checkpoint = torch.load(filename_path)model.load_state_dict(checkpoint['model_state_dict'])optimizer.load_state_dict(checkpoint['optimizer_state_dict'])```See additional [PyTorch instructions](https://pytorch.org/tutorials/recipes/recipes/saving_and_loading_a_general_checkpoint.html), and when to use `model.eval()` and `model.train()` for more complex models.We provide the functions `save_checkpoint`, `load_checkpoint`, and `reset_checkpoint` to implement the steps above and download pre-trained weights from the GitHub repo.If downloading from GitHub fails, please uncomment the 3rd cell below to train the model for `n_epochs=10` and save it locally.**Instructions:*** Please execute the cell(s) below
###Code
root = 'https://github.com/mpbrigham/colaboratory-figures/raw/master/nma/autoencoders'
filename = 'ae_6h_prelu_bce_adam_25e_32b'
url = os.path.join(root, filename)
s2 = True
if s2:
filename += '_s2'
url += '_s2'
model = AutoencoderClass(s2=s2)
optimizer = optim.Adam(model.parameters())
encoder = model.encoder
decoder = model.decoder
checkpoint = load_checkpoint(url, filename)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# Please uncomment and execute this cell if download of
# pre-trained weights fail
# model = AutoencoderClass(s2=s2)
# encoder = model.encoder
# decoder = model.decoder
# n_epochs = 10
# batch_size = 128
# runSGD(model, input_train, input_test,
# n_epochs=n_epochs, batch_size=batch_size)
# save_checkpoint(model, optimizer, filename)
# checkpoint = load_checkpoint(url, filename)
with torch.no_grad():
output_test = model(input_test)
latent_test = encoder(input_test)
plot_row([input_test[test_selected_idx], output_test[test_selected_idx]],
image_shape=image_shape)
plot_latent_generative(latent_test, y_test, decoder,
image_shape=image_shape, s2=s2)
###Output
_____no_output_____
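###Markdown
As mentioned above, for more complex models it can matter when to call `model.eval()` and `model.train()`. Here is a minimal sketch (added, not part of the original notebook) of the standard PyTorch pattern around inference.
###Code
# minimal sketch: switch to evaluation mode for inference, then back to training
model.eval() # affects layers such as dropout and batchnorm (none in this model)
with torch.no_grad(): _ = model(input_test)
model.train() # restore training mode before any further fine-tuning
###Output
_____no_output_____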
###Markdown
--- Section 3: Applications of autoencoders Application 1 - Image noiseRemoving noise added to images is often showcased in dimensionality reduction techniques. The tutorial *W1D5 Dimensionality reduction* illustrated this capability with PCA.We first observe that autoencoders trained with noise-free images output noise-free images when receiving noisy images as input. However, the reconstructed images will be different from the original images (without noise) since the added noise maps to different coordinates in latent space.The ability to map noise-free and noisy versions to similar regions in latent space is known as *robustness* or *invariance* to noise. How can we build such functionality into the autoencoder? The solution is to train the autoencoder with both noise-free and noisy versions mapping to the noise-free version. A faster alternative is to re-train the autoencoder for a few epochs with noisy images. These short training sessions fine-tune the weights to map noisy images to their noise-free versions from similar latent space coordinates.Let's start by resetting to the reference state of the autoencoder.**Instructions:*** Please execute the cells below
###Code
reset_checkpoint(model, optimizer, checkpoint)
with torch.no_grad():
latent_test_ref = encoder(input_test)
###Output
_____no_output_____
###Markdown
Reconstructions before fine-tuningLet's verify that an autoencoder trained on clean images will output clean images from noisy inputs. We visualize this by plotting three rows:* Top row with noisy images inputs* Middle row with reconstructions of noisy images* Bottom row with reconstructions of the original images (noise-free)The bottom row helps identify samples with reconstruction issues before adding noise. This row shows the baseline reconstruction quality for these samples rather than the original images. (Why?)**Instructions:*** Please execute the cell(s) below
###Code
noise_factor = 0.4
input_train_noisy = (input_train
+ noise_factor * np.random.normal(size=input_train.shape))
input_train_noisy = np.clip(input_train_noisy, input_train.min(),
input_train.max(), dtype=np.float32)
input_test_noisy = (input_test
+ noise_factor * np.random.normal(size=input_test.shape))
input_test_noisy = np.clip(input_test_noisy, input_test.min(),
input_test.max(), dtype=np.float32)
with torch.no_grad():
output_test_noisy = model(input_test_noisy)
latent_test_noisy = encoder(input_test_noisy)
output_test = model(input_test)
plot_row([input_test_noisy[test_selected_idx],
output_test_noisy[test_selected_idx],
output_test[test_selected_idx]], image_shape=image_shape)
###Output
_____no_output_____
###Markdown
Latent space before fine-tuningWe investigate the origin of reconstruction errors by looking at how adding noise to input affects latent space coordinates. The decoder interprets significant coordinate changes as different digits.The function `plot_latent_ab` compares latent space coordinates for the same set of samples between two conditions. Here, we display coordinates for the ten samples from the previous cell before and after adding noise:* The left plot shows the coordinates of the original samples (noise-free)* The plot on the right shows the new coordinates after adding noise**Instructions:*** Please execute the cell below
###Code
plot_latent_ab(latent_test, latent_test_noisy, y_test, test_selected_idx,
title_a='Before noise', title_b='After noise', s2=s2)
###Output
_____no_output_____
###Markdown
Fine-tuning the autoencoder with noisy imagesLet's re-train the autoencoder with noisy images on the input and original (noise-free) images on the output, and regenerate the previous plots.We now see that both noisy and noise-free images match similar locations in latent space. The network denoises the input with a latent-space representation that is more robust to noise.**Instructions:*** Please execute the cell(s) below
###Code
n_epochs = 3
batch_size = 32
model.train()
runSGD(model, input_train_noisy, input_test_noisy,
out_train=input_train, out_test=input_test,
n_epochs=n_epochs, batch_size=batch_size)
with torch.no_grad():
output_test_noisy = model(input_test_noisy)
latent_test_noisy = encoder(input_test_noisy)
output_test = model(input_test)
plot_row([input_test_noisy[test_selected_idx],
output_test_noisy[test_selected_idx],
output_test[test_selected_idx]], image_shape=image_shape)
plot_latent_ab(latent_test, latent_test_noisy, y_test, test_selected_idx,
title_a='Before fine-tuning',
title_b='After fine-tuning', s2=s2)
###Output
_____no_output_____
###Markdown
Global latent space shiftThe new latent space representation is more robust to noise and may result in a better internal representation of the dataset. We verify this by inspecting the latent space with clean images before and after fine-tuning with noisy images.Fine-tuning the network with noisy images causes a *domain shift* in the dataset, i.e., a change in the distribution of images since the dataset was initially composed of noise-free images. Depending on the task and the extent of changes during re-training (number of epochs, optimizer characteristics, etc.), the new latent space representation may become less well adapted to the original data as a side-effect. How could we address *domain shift* and improve reconstructions of both noisy and noise-free images?**Instructions:*** Please execute the cell(s) below
###Code
with torch.no_grad():
latent_test = encoder(input_test)
plot_latent_ab(latent_test_ref, latent_test, y_test, test_subset_idx,
title_a='Before fine-tuning',
title_b='After fine-tuning', s2=s2)
###Output
_____no_output_____
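One possible answer to the question above, written as a minimal sketch rather than the tutorial's official solution: fine-tune on a mixed set in which both clean and noisy inputs map to clean targets, so the original (noise-free) domain keeps being seen during re-training. The variable names below reuse those already defined in the cells above.

```
# build a mixed fine-tuning set: clean and noisy inputs, clean targets for both
mixed_in_train = torch.cat([input_train, input_train_noisy])
mixed_out_train = torch.cat([input_train, input_train])
mixed_in_test = torch.cat([input_test, input_test_noisy])
mixed_out_test = torch.cat([input_test, input_test])

# fine-tune so that clean and noisy inputs both reconstruct the clean image
runSGD(model, mixed_in_train, mixed_in_test,
       out_train=mixed_out_train, out_test=mixed_out_test,
       n_epochs=3, batch_size=32)
```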
###Markdown
Application 2 - Image occlusionWe now investigate the effects of image occlusion. Drawing from the previous exercise, we expect the autoencoder to reconstruct complete images since the train set does not contain occluded images (right?).We visualize this by plotting three rows:* Top row with occluded images* Middle row with reconstructions of occluded images* Bottom row with reconstructions of the original imagesSimilarly, we investigate the source of this issue by looking at the representation of partial images in latent space and how it adjusts after fine-tuning.**Instructions:*** Please execute the cell(s) below
###Code
reset_checkpoint(model, optimizer, checkpoint)
with torch.no_grad():
latent_test_ref = encoder(input_test)
###Output
_____no_output_____
###Markdown
Before fine-tuning**Instructions:*** Please execute the cell(s) below
###Code
input_train_mask = image_occlusion(input_train, image_shape=image_shape)
input_test_mask = image_occlusion(input_test, image_shape=image_shape)
with torch.no_grad():
output_test_mask = model(input_test_mask)
latent_test_mask = encoder(input_test_mask)
output_test = model(input_test)
plot_row([input_test_mask[test_selected_idx],
output_test_mask[test_selected_idx],
output_test[test_selected_idx]], image_shape=image_shape)
plot_latent_ab(latent_test, latent_test_mask, y_test, test_selected_idx,
title_a='Before occlusion', title_b='After occlusion', s2=s2)
###Output
_____no_output_____
###Markdown
After fine-tuning
###Code
n_epochs = 3
batch_size = 32
model.train()
runSGD(model, input_train_mask, input_test_mask,
out_train=input_train, out_test=input_test,
n_epochs=n_epochs, batch_size=batch_size)
with torch.no_grad():
output_test_mask = model(input_test_mask)
latent_test_mask = encoder(input_test_mask)
output_test = model(input_test)
plot_row([input_test_mask[test_selected_idx],
output_test_mask[test_selected_idx],
output_test[test_selected_idx]], image_shape=image_shape)
plot_latent_ab(latent_test, latent_test_mask, y_test, test_selected_idx,
title_a='Before fine-tuning',
title_b='After fine-tuning', s2=s2)
with torch.no_grad():
latent_test = encoder(input_test)
plot_latent_ab(latent_test_ref, latent_test, y_test, test_subset_idx,
title_a='Before fine-tuning',
title_b='After fine-tuning', s2=s2)
###Output
_____no_output_____
###Markdown
Application 3 - Image rotationFinally, we look at the effect of image rotation in latent space coordinates. This task is arguably more challenging since it may require a complete re-write of image reconstruction.We visualize this by plotting three rows:* Top row with rotated images* Middle row with reconstructions of rotated images* Bottom row with reconstructions of the original imagesWe investigate the source of this issue by looking at the representation of rotated images in latent space and how it adjusts after fine-tuning.**Instructions:*** Please execute the cell(s) below
###Code
reset_checkpoint(model, optimizer, checkpoint)
with torch.no_grad():
latent_test_ref = encoder(input_test)
###Output
_____no_output_____
###Markdown
Before fine-tuning**Instructions:*** Please execute the cell(s) below
###Code
input_train_rotation = image_rotation(input_train, 90, image_shape=image_shape)
input_test_rotation = image_rotation(input_test, 90, image_shape=image_shape)
with torch.no_grad():
output_test_rotation = model(input_test_rotation)
latent_test_rotation = encoder(input_test_rotation)
output_test = model(input_test)
plot_row([input_test_rotation[test_selected_idx],
output_test_rotation[test_selected_idx],
output_test[test_selected_idx]], image_shape=image_shape)
plot_latent_ab(latent_test, latent_test_rotation, y_test, test_selected_idx,
title_a='Before rotation', title_b='After rotation', s2=s2)
###Output
_____no_output_____
###Markdown
After fine-tuning**Instructions:*** Please execute the cell(s) below
###Code
n_epochs = 5
batch_size = 32
model.train()
runSGD(model, input_train_rotation, input_test_rotation,
out_train=input_train, out_test=input_test,
n_epochs=n_epochs, batch_size=batch_size)
with torch.no_grad():
output_test_rotation = model(input_test_rotation)
latent_test_rotation = encoder(input_test_rotation)
output_test = model(input_test)
plot_row([input_test_rotation[test_selected_idx],
output_test_rotation[test_selected_idx],
output_test[test_selected_idx]], image_shape=image_shape)
plot_latent_ab(latent_test, latent_test_rotation, y_test, test_selected_idx,
title_a='Before fine-tuning',
title_b='After fine-tuning', s2=s2)
with torch.no_grad():
latent_test = encoder(input_test)
plot_latent_ab(latent_test_ref, latent_test, y_test, test_subset_idx,
title_a='Before fine-tuning',
title_b='After fine-tuning', s2=s2)
###Output
_____no_output_____
###Markdown
Application 4 - What would digit "6" look like if we had never seen it before?Before we start melting our brains with such an impossible task, let's just ask the autoencoder to do it!We train the autoencoder from scratch without digit class `6` and visualize reconstructions from digit `6`.**Instructions:*** Please execute the cell(s) below
###Code
model = AutoencoderClass(s2=s2)
optimizer = optim.Adam(model.parameters())
encoder = model.encoder
decoder = model.decoder
missing = 6
my_input_train = input_train[y_train != missing]
my_input_test = input_test[y_test != missing]
my_y_test = y_test[y_test != missing]
n_epochs = 3
batch_size = 32
runSGD(model, my_input_train, my_input_test,
n_epochs=n_epochs, batch_size=batch_size)
with torch.no_grad():
output_test = model(input_test)
my_latent_test = encoder(my_input_test)
plot_row([input_test[y_test == 6], output_test[y_test == 6]],
image_shape=image_shape)
plot_latent_generative(my_latent_test, my_y_test, decoder,
image_shape=image_shape, s2=s2)
###Output
_____no_output_____
###Markdown
Exercise 1: Removing the most dominant digit classesDigit classes `0` and `1` are dominant in the sense that these occupy large areas of the decoder grid, compared to other digit classes that occupy very little generative space.How will latent space change when removing the two most dominant digit classes? Will latent space re-distribute evenly among remaining classes or choose another two dominant classes?**Instructions:*** Please execute the cell(s) below* The intersection of two boolean arrays by condition is specified as `x[(cond_a)&(cond_b)]`
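A toy illustration of the boolean indexing pattern from the hint above, on synthetic arrays (this is not the exercise solution):

```
import numpy as np

labels = np.array([0, 1, 2, 3, 1, 0, 4])
values = np.arange(len(labels))

# keep only samples whose label is neither 0 nor 1
mask = (labels != 0) & (labels != 1)
print(values[mask])  # -> [2 3 6]
```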
###Code
model = AutoencoderClass(s2=s2)
optimizer = optim.Adam(model.parameters())
encoder = model.encoder
decoder = model.decoder
missing_a = 1
missing_b = 0
#################################################
## TODO for students:
#################################################
# input train data
# my_input_train = ...
# input test data
# my_input_test = ...
# model
# my_y_test = ...
# Uncomment to test your code
# print(my_input_train.shape)
# print(my_input_test.shape)
# print(my_y_test.shape)
###Output
_____no_output_____
###Markdown
**SAMPLE OUTPUT**```torch.Size([47335, 784])torch.Size([7885, 784])torch.Size([7885])``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/Bonus_Autoencoders/solutions/Bonus_Tutorial3_Solution_22e2c431.py)
###Code
n_epochs = 3
batch_size = 32
runSGD(model, my_input_train, my_input_test,
n_epochs=n_epochs, batch_size=batch_size)
with torch.no_grad():
output_test = model(input_test)
my_latent_test = encoder(my_input_test)
plot_row([input_test[y_test == missing_a], output_test[y_test == missing_a]],
image_shape=image_shape)
plot_row([input_test[y_test == missing_b], output_test[y_test == missing_b]],
image_shape=image_shape)
plot_latent_generative(my_latent_test, my_y_test, decoder,
image_shape=image_shape, s2=s2)
###Output
_____no_output_____
###Markdown
--- Section 4: ANNs? Same but different!"Same same but different" is an expression used in some parts of Asia to express differences between supposedly similar subjects. In this exercise, we investigate a fundamental difference in how fully-connected ANNs process visual information compared to human vision.The previous exercises showed the ANN autoencoder performing cognitive tasks with relative ease. However, there is a crucial aspect of ANN processing already encoded in the vectorization of images. This network architecture completely ignores the relative position of pixels. To illustrate this, we show that learning proceeds just as well with shuffled pixel locations.We obtain a reversible shuffle map stored in `shuffle_image_idx`, used to shuffle image pixels randomly.  The unshuffled image set `input_shuffle` is recovered as follows:```input_shuffle[:, shuffle_rev_image_idx]```We then set up the reversible shuffle map and visualize a few images with shuffled and unshuffled pixels, followed by their noisy versions.**Instructions:*** Please execute the cell(s) below
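As a side note, the reverse map described above is simply the inverse permutation, which `np.argsort` computes in one call; a small sketch, independent of the tutorial variables:

```
import numpy as np

perm = np.random.permutation(8)  # forward shuffle map
inv = np.argsort(perm)           # inverse map, equivalent to the explicit loop in the cell below
assert np.all(perm[inv] == np.arange(8))
assert np.all(inv[perm] == np.arange(8))
```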
###Code
# create forward and reverse indexes for pixel shuffling
shuffle_image_idx = np.arange(input_size)
shuffle_rev_image_idx = np.empty_like(shuffle_image_idx)
# shuffle pixel location
np.random.shuffle(shuffle_image_idx)
# store reverse locations
for pos_idx, pos in enumerate(shuffle_image_idx):
shuffle_rev_image_idx[pos] = pos_idx
# shuffle train and test sets
input_train_shuffle = input_train[:, shuffle_image_idx]
input_test_shuffle = input_test[:, shuffle_image_idx]
input_train_shuffle_noisy = input_train_noisy[:, shuffle_image_idx]
input_test_shuffle_noisy = input_test_noisy[:, shuffle_image_idx]
# show samples with shuffled pixels
plot_row([input_test_shuffle,
input_test_shuffle[:, shuffle_rev_image_idx]],
image_shape=image_shape)
# show noisy samples with shuffled pixels
plot_row([input_train_shuffle_noisy[test_selected_idx],
input_train_shuffle_noisy[:, shuffle_rev_image_idx][test_selected_idx]],
image_shape=image_shape)
###Output
_____no_output_____
###Markdown
We initialize and train the network in the denoising task with shuffled pixels.**Instructions:*** Please execute the cell below
###Code
model = AutoencoderClass(s2=s2)
encoder = model.encoder
decoder = model.decoder
n_epochs = 3
batch_size = 32
# train the model to denoise shuffled images
runSGD(model, input_train_shuffle_noisy, input_test_shuffle_noisy,
out_train=input_train_shuffle, out_test=input_test_shuffle,
n_epochs=n_epochs, batch_size=batch_size)
###Output
_____no_output_____
###Markdown
Finally, visualize reconstructions and latent space representation with the trained model.We visualize reconstructions by plotting three rows:* Top row with shuffled noisy images* Middle row with reconstructions of shuffled denoised images* Bottom row with unshuffled reconstructions of denoised imagesWe obtain the same organization in the encoder map as before. Sharing similar internal representations confirms that the network ignores the relative position of pixels. The decoder grid is different from before since it generates shuffled images.**Instructions:*** Please execute the cell below
###Code
with torch.no_grad():
latent_test_shuffle_noisy = encoder(input_test_shuffle_noisy)
output_test_shuffle_noisy = model(input_test_shuffle_noisy)
plot_row([input_test_shuffle_noisy[test_selected_idx],
output_test_shuffle_noisy[test_selected_idx],
output_test_shuffle_noisy[:, shuffle_rev_image_idx][test_selected_idx]],
image_shape=image_shape)
plot_latent_generative(latent_test_shuffle_noisy, y_test, decoder,
image_shape=image_shape, s2=s2)
###Output
_____no_output_____
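A short sketch of why a fully connected layer is blind to pixel ordering (an illustration added here, not part of the original tutorial): permuting the input features and the corresponding weight columns of an `nn.Linear` layer leaves its output unchanged, so a fixed pixel shuffle only amounts to a relabeling of first-layer weights.

```
import torch
from torch import nn

torch.manual_seed(0)
layer = nn.Linear(6, 3)
x = torch.randn(2, 6)
perm = torch.randperm(6)

layer_perm = nn.Linear(6, 3)
with torch.no_grad():
    layer_perm.weight.copy_(layer.weight[:, perm])  # reorder weight columns to match the shuffle
    layer_perm.bias.copy_(layer.bias)

# identical outputs: the layer has no notion of where a pixel sits
print(torch.allclose(layer(x), layer_perm(x[:, perm]), atol=1e-6))  # True
```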
###Markdown
--- SummaryHooray! You have finished the last Tutorial of NMA 2020!We hope you've enjoyed these tutorials and learned about the usefulness of autoencoders to model rich and non-linear representations of data. We hope you may find them useful in your research, perhaps to model certain aspects of cognition or even extend them to biologically plausible architectures - autoencoders of spiking neurons, anyone?These are the key take away messages from these tutorials:**Autoencoders trained in *learning by doing* tasks such as compression/decompression, removing noise, etc. can uncover rich lower-dimensional structure embedded in structured images and other cognitively relevant data.****The data domain seen during training imprints a "cognitive bias" - you only see what you expect to see, which can only be similar to what you saw before.**Such bias is related to the concept [*What you see is all there is*](https://en.wikipedia.org/wiki/Thinking,_Fast_and_Slow) coined by Daniel Kahneman in psychology.For additional applications of autoencoders to neuroscience, check the spike sorting application in the outro video, and also see [here](https://www.nature.com/articles/s41592-018-0109-9) how to replicate the input-output relationship of real networks of neurons with autoencoders.
###Code
# @title Video 2: Wrap-up
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="BV1ph411Z7uh", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="ziiZK9P6AXQ", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Tutorial 3: Autoencoders applications**Bonus Day: Autoencoders****By Neuromatch Academy**__Content creators:__ Marco Brigham and the [CCNSS](https://www.ccnss.org/) team (2014-2018)__Content reviewers:__ Itzel Olivos, Karen Schroeder, Karolina Stosio, Kshitij Dwivedi, Spiros Chavlis, Michael Waskom **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial Objectives Autoencoder applicationsHow do autoencoders with rich internal representations perform on the MNIST cognitive task?How do autoencoders perceive unseen digit classes? How does ANN image encoding differ from human vision?We are equipped with tools and techniques to answer these questions, and hopefully, many others you may encounter in your research!  In this tutorial, you will:- Analyze how autoencoders perceive transformed data (added noise, occluded parts, and rotations), and how that evolves with short re-train sessions- Use autoencoders to visualize unseen digit classes- Understand visual encoding for fully connected ANN autoencoders
###Code
# @title Video 1: Applications
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="BV12v411q7nS", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="_bzW_jkH6l0", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
--- SetupPlease execute the cell(s) below to initialize the notebook environment.
###Code
# Imports
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy import ndimage
import torch
from torch import nn, optim
from sklearn.datasets import fetch_openml
# @title Figure settings
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/NMA2020/nma.mplstyle")
# @title Helper functions
def downloadMNIST():
"""
Download MNIST dataset and transform it to torch.Tensor
Args:
None
Returns:
x_train : training images (torch.Tensor) (60000, 28, 28)
x_test : test images (torch.Tensor) (10000, 28, 28)
y_train : training labels (torch.Tensor) (60000, )
y_test : test labels (torch.Tensor) (10000, )
"""
X, y = fetch_openml('mnist_784', version=1, return_X_y=True, as_frame=False)
# Trunk the data
n_train = 60000
n_test = 10000
train_idx = np.arange(0, n_train)
test_idx = np.arange(n_train, n_train + n_test)
x_train, y_train = X[train_idx], y[train_idx]
x_test, y_test = X[test_idx], y[test_idx]
# Transform np.ndarrays to torch.Tensor
x_train = torch.from_numpy(np.reshape(x_train,
(len(x_train),
28, 28)).astype(np.float32))
x_test = torch.from_numpy(np.reshape(x_test,
(len(x_test),
28, 28)).astype(np.float32))
y_train = torch.from_numpy(y_train.astype(int))
y_test = torch.from_numpy(y_test.astype(int))
return (x_train, y_train, x_test, y_test)
def init_weights_kaiming_uniform(layer):
"""
Initializes weights from linear PyTorch layer
with kaiming uniform distribution.
Args:
layer (torch.Module)
Pytorch layer
Returns:
Nothing.
"""
# check for linear PyTorch layer
if isinstance(layer, nn.Linear):
# initialize weights with kaiming uniform distribution
nn.init.kaiming_uniform_(layer.weight.data)
def init_weights_kaiming_normal(layer):
"""
Initializes weights from linear PyTorch layer
with kaiming normal distribution.
Args:
layer (torch.Module)
Pytorch layer
Returns:
Nothing.
"""
# check for linear PyTorch layer
if isinstance(layer, nn.Linear):
# initialize weights with kaiming normal distribution
nn.init.kaiming_normal_(layer.weight.data)
def get_layer_weights(layer):
"""
Retrieves learnable parameters from PyTorch layer.
Args:
layer (torch.Module)
Pytorch layer
Returns:
list with learnable parameters
"""
# initialize output list
weights = []
# check whether layer has learnable parameters
if layer.parameters():
# copy numpy array representation of each set of learnable parameters
for item in layer.parameters():
weights.append(item.detach().numpy())
return weights
def eval_mse(y_pred, y_true):
"""
Evaluates mean square error (MSE) between y_pred and y_true
Args:
y_pred (torch.Tensor)
prediction samples
y_true (torch.Tensor)
ground truth samples
Returns:
MSE(y_pred, y_true)
"""
with torch.no_grad():
criterion = nn.MSELoss()
loss = criterion(y_pred, y_true)
return float(loss)
def eval_bce(y_pred, y_true):
"""
Evaluates binary cross-entropy (BCE) between y_pred and y_true
Args:
y_pred (torch.Tensor)
prediction samples
y_true (torch.Tensor)
ground truth samples
Returns:
BCE(y_pred, y_true)
"""
with torch.no_grad():
criterion = nn.BCELoss()
loss = criterion(y_pred, y_true)
return float(loss)
def plot_row(images, show_n=10, image_shape=None):
"""
Plots rows of images from list of iterables (iterables: list, numpy array
or torch.Tensor). Also accepts single iterable.
Randomly selects images in each list element if item count > show_n.
Args:
images (iterable or list of iterables)
single iterable with images, or list of iterables
show_n (integer)
maximum number of images per row
image_shape (tuple or list)
original shape of image if vectorized form
Returns:
Nothing.
"""
if not isinstance(images, (list, tuple)):
images = [images]
for items_idx, items in enumerate(images):
items = np.array(items)
if items.ndim == 1:
items = np.expand_dims(items, axis=0)
if len(items) > show_n:
selected = np.random.choice(len(items), show_n, replace=False)
items = items[selected]
if image_shape is not None:
items = items.reshape([-1] + list(image_shape))
plt.figure(figsize=(len(items) * 1.5, 2))
for image_idx, image in enumerate(items):
plt.subplot(1, len(items), image_idx + 1)
plt.imshow(image, cmap='gray', vmin=image.min(), vmax=image.max())
plt.axis('off')
plt.tight_layout()
def to_s2(u):
"""
Projects 3D coordinates to spherical coordinates (theta, phi) surface of
unit sphere S2.
theta: [0, pi]
phi: [-pi, pi]
Args:
u (list, numpy array or torch.Tensor of floats)
3D coordinates
Returns:
Spherical coordinates (theta, phi) on surface of unit sphere S2.
"""
x, y, z = (u[:, 0], u[:, 1], u[:, 2])
r = np.sqrt(x**2 + y**2 + z**2)
theta = np.arccos(z / r)
phi = np.arctan2(x, y)
return np.array([theta, phi]).T
def to_u3(s):
"""
Converts from 2D coordinates on surface of unit sphere S2 to 3D coordinates
(on surface of S2), i.e. (theta, phi) ---> (1, theta, phi).
Args:
s (list, numpy array or torch.Tensor of floats)
2D coordinates on unit sphere S_2
Returns:
3D coordinates on surface of unit sphere S_2
"""
theta, phi = (s[:, 0], s[:, 1])
x = np.sin(theta) * np.sin(phi)
y = np.sin(theta) * np.cos(phi)
z = np.cos(theta)
return np.array([x, y, z]).T
def xy_lim(x):
"""
Return arguments for plt.xlim and plt.ylim calculated from minimum
and maximum of x.
Args:
x (list, numpy array or torch.Tensor of floats)
data to be plotted
Returns:
Nothing.
"""
x_min = np.min(x, axis=0)
x_max = np.max(x, axis=0)
x_min = x_min - np.abs(x_max - x_min) * 0.05 - np.finfo(float).eps
x_max = x_max + np.abs(x_max - x_min) * 0.05 + np.finfo(float).eps
return [x_min[0], x_max[0]], [x_min[1], x_max[1]]
def plot_generative(x, decoder_fn, image_shape, n_row=16, s2=False):
"""
Plots images reconstructed by decoder_fn from a 2D grid in
latent space that is determined by minimum and maximum values in x.
Args:
x (list, numpy array or torch.Tensor of floats)
2D or 3D coordinates in latent space
decoder_fn (function)
function returning vectorized images from 2D latent space coordinates
image_shape (tuple or list)
original shape of image
n_row (integer)
number of rows in grid
s2 (boolean)
convert 3D coordinates (x, y, z) to spherical coordinates (theta, phi)
Returns:
Nothing.
"""
if s2:
x = to_s2(np.array(x))
xlim, ylim = xy_lim(np.array(x))
dx = (xlim[1] - xlim[0]) / n_row
grid = [np.linspace(ylim[0] + dx / 2, ylim[1] - dx / 2, n_row),
np.linspace(xlim[0] + dx / 2, xlim[1] - dx / 2, n_row)]
canvas = np.zeros((image_shape[0] * n_row, image_shape[1] * n_row))
cmap = plt.get_cmap('gray')
for j, latent_y in enumerate(grid[0][::-1]):
for i, latent_x in enumerate(grid[1]):
latent = np.array([[latent_x, latent_y]], dtype=np.float32)
if s2:
latent = to_u3(latent)
with torch.no_grad():
x_decoded = decoder_fn(torch.from_numpy(latent))
x_decoded = x_decoded.reshape(image_shape)
canvas[j * image_shape[0]: (j + 1) * image_shape[0],
i * image_shape[1]: (i + 1) * image_shape[1]] = x_decoded
plt.imshow(canvas, cmap=cmap, vmin=canvas.min(), vmax=canvas.max())
plt.axis('off')
def plot_latent(x, y, show_n=500, s2=False, fontdict=None, xy_labels=None):
"""
Plots digit class of each sample in 2D latent space coordinates.
Args:
x (list, numpy array or torch.Tensor of floats)
2D coordinates in latent space
y (list, numpy array or torch.Tensor of floats)
digit class of each sample
show_n (integer)
  maximum number of samples to plot
s2 (boolean)
convert 3D coordinates (x, y, z) to spherical coordinates (theta, phi)
fontdict (dictionary)
style option for plt.text
xy_labels (list)
optional list with [xlabel, ylabel]
Returns:
Nothing.
"""
if fontdict is None:
fontdict = {'weight': 'bold', 'size': 12}
if s2:
x = to_s2(np.array(x))
cmap = plt.get_cmap('tab10')
if len(x) > show_n:
selected = np.random.choice(len(x), show_n, replace=False)
x = x[selected]
y = y[selected]
for my_x, my_y in zip(x, y):
plt.text(my_x[0], my_x[1], str(int(my_y)),
color=cmap(int(my_y) / 10.),
fontdict=fontdict,
horizontalalignment='center',
verticalalignment='center',
alpha=0.8)
xlim, ylim = xy_lim(np.array(x))
plt.xlim(xlim)
plt.ylim(ylim)
if s2:
if xy_labels is None:
xy_labels = [r'$\varphi$', r'$\theta$']
plt.xticks(np.arange(0, np.pi + np.pi / 6, np.pi / 6),
['0', '$\pi/6$', '$\pi/3$', '$\pi/2$',
'$2\pi/3$', '$5\pi/6$', '$\pi$'])
plt.yticks(np.arange(-np.pi, np.pi + np.pi / 3, np.pi / 3),
['$-\pi$', '$-2\pi/3$', '$-\pi/3$', '0',
'$\pi/3$', '$2\pi/3$', '$\pi$'])
if xy_labels is None:
xy_labels = ['$Z_1$', '$Z_2$']
plt.xlabel(xy_labels[0])
plt.ylabel(xy_labels[1])
def plot_latent_generative(x, y, decoder_fn, image_shape, s2=False,
title=None, xy_labels=None):
"""
Two horizontal subplots generated with encoder map and decoder grid.
Args:
x (list, numpy array or torch.Tensor of floats)
2D coordinates in latent space
y (list, numpy array or torch.Tensor of floats)
digit class of each sample
decoder_fn (function)
function returning vectorized images from 2D latent space coordinates
image_shape (tuple or list)
original shape of image
s2 (boolean)
convert 3D coordinates (x, y, z) to spherical coordinates (theta, phi)
title (string)
plot title
xy_labels (list)
optional list with [xlabel, ylabel]
Returns:
Nothing.
"""
fig = plt.figure(figsize=(12, 6))
if title is not None:
fig.suptitle(title, y=1.05)
ax = fig.add_subplot(121)
ax.set_title('Encoder map', y=1.05)
plot_latent(x, y, s2=s2, xy_labels=xy_labels)
ax = fig.add_subplot(122)
ax.set_title('Decoder grid', y=1.05)
plot_generative(x, decoder_fn, image_shape, s2=s2)
plt.tight_layout()
plt.show()
def plot_latent_ab(x1, x2, y, selected_idx=None,
title_a='Before', title_b='After', show_n=500, s2=False):
"""
Two horizontal subplots with encoder maps.
Args:
x1 (list, numpy array or torch.Tensor of floats)
2D coordinates in latent space (left plot)
x2 (list, numpy array or torch.Tensor of floats)
2D coordinates in latent space (right plot)
y (list, numpy array or torch.Tensor of floats)
digit class of each sample
selected_idx (list of integers)
indexes of elements to be plotted
show_n (integer)
maximum number of samples in each plot
s2 (boolean)
convert 3D coordinates (x, y, z) to spherical coordinates (theta, phi)
Returns:
Nothing.
"""
fontdict = {'weight': 'bold', 'size': 12}
if len(x1) > show_n:
if selected_idx is None:
selected_idx = np.random.choice(len(x1), show_n, replace=False)
x1 = x1[selected_idx]
x2 = x2[selected_idx]
y = y[selected_idx]
data = np.concatenate([x1, x2])
if s2:
xlim, ylim = xy_lim(to_s2(data))
else:
xlim, ylim = xy_lim(data)
plt.figure(figsize=(12, 6))
ax = plt.subplot(121)
ax.set_title(title_a, y=1.05)
plot_latent(x1, y, fontdict=fontdict, s2=s2)
plt.xlim(xlim)
plt.ylim(ylim)
ax = plt.subplot(122)
ax.set_title(title_b, y=1.05)
plot_latent(x2, y, fontdict=fontdict, s2=s2)
plt.xlim(xlim)
plt.ylim(ylim)
plt.tight_layout()
def runSGD(net, input_train, input_test, out_train=None, out_test=None,
optimizer=None, criterion='bce', n_epochs=10, batch_size=32,
verbose=False):
"""
Trains autoencoder network with stochastic gradient descent with
optimizer and loss criterion. Train samples are shuffled, and loss is
displayed at the end of each epoch for both MSE and BCE. Plots training loss
at each minibatch (maximum of 500 randomly selected values).
Args:
net (torch network)
ANN network (nn.Module)
input_train (torch.Tensor)
vectorized input images from train set
input_test (torch.Tensor)
vectorized input images from test set
criterion (string)
train loss: 'bce' or 'mse'
out_train (torch.Tensor)
optional target images from train set
out_test (torch.Tensor)
optional target images from test set
optimizer (torch optimizer)
  optimizer for SGD (defaults to Adam if None)
n_epochs (integer)
number of full iterations of training data
batch_size (integer)
number of element in mini-batches
verbose (boolean)
whether to print final loss
Returns:
Nothing.
"""
if out_train is not None and out_test is not None:
different_output = True
else:
different_output = False
# Initialize loss function
if criterion == 'mse':
loss_fn = nn.MSELoss()
elif criterion == 'bce':
loss_fn = nn.BCELoss()
else:
print('Please specify either "mse" or "bce" for loss criterion')
# Initialize SGD optimizer
if optimizer is None:
optimizer = optim.Adam(net.parameters())
# Placeholder for loss
track_loss = []
print('Epoch', '\t', 'Loss train', '\t', 'Loss test')
for i in range(n_epochs):
shuffle_idx = np.random.permutation(len(input_train))
batches = torch.split(input_train[shuffle_idx], batch_size)
if different_output:
batches_out = torch.split(out_train[shuffle_idx], batch_size)
for batch_idx, batch in enumerate(batches):
output_train = net(batch)
if different_output:
loss = loss_fn(output_train, batches_out[batch_idx])
else:
loss = loss_fn(output_train, batch)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Keep track of loss at each epoch
track_loss += [float(loss)]
loss_epoch = f'{i+1}/{n_epochs}'
with torch.no_grad():
output_train = net(input_train)
if different_output:
loss_train = loss_fn(output_train, out_train)
else:
loss_train = loss_fn(output_train, input_train)
loss_epoch += f'\t {loss_train:.4f}'
output_test = net(input_test)
if different_output:
loss_test = loss_fn(output_test, out_test)
else:
loss_test = loss_fn(output_test, input_test)
loss_epoch += f'\t\t {loss_test:.4f}'
print(loss_epoch)
if verbose:
# Print loss
if different_output:
loss_mse = f'\nMSE\t {eval_mse(output_train, out_train):0.4f}'
loss_mse += f'\t\t {eval_mse(output_test, out_test):0.4f}'
else:
loss_mse = f'\nMSE\t {eval_mse(output_train, input_train):0.4f}'
loss_mse += f'\t\t {eval_mse(output_test, input_test):0.4f}'
print(loss_mse)
if different_output:
loss_bce = f'BCE\t {eval_bce(output_train, out_train):0.4f}'
loss_bce += f'\t\t {eval_bce(output_test, out_test):0.4f}'
else:
loss_bce = f'BCE\t {eval_bce(output_train, input_train):0.4f}'
loss_bce += f'\t\t {eval_bce(output_test, input_test):0.4f}'
print(loss_bce)
# Plot loss
step = int(np.ceil(len(track_loss)/500))
x_range = np.arange(0, len(track_loss), step)
plt.figure()
plt.plot(x_range, track_loss[::step], 'C0')
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.xlim([0, None])
plt.ylim([0, None])
plt.show()
def image_occlusion(x, image_shape):
"""
Randomly selects one quadrant of each image and sets it to zeros.
Args:
x (torch.Tensor of floats)
vectorized images
image_shape (tuple or list)
original shape of image
Returns:
torch.Tensor.
"""
selection = np.random.choice(4, len(x))
my_x = np.array(x).copy()
my_x = my_x.reshape(-1, image_shape[0], image_shape[1])
my_x[selection == 0, :int(image_shape[0] / 2), :int(image_shape[1] / 2)] = 0
my_x[selection == 1, int(image_shape[0] / 2):, :int(image_shape[1] / 2)] = 0
my_x[selection == 2, :int(image_shape[0] / 2), int(image_shape[1] / 2):] = 0
my_x[selection == 3, int(image_shape[0] / 2):, int(image_shape[1] / 2):] = 0
my_x = my_x.reshape(x.shape)
return torch.from_numpy(my_x)
def image_rotation(x, deg, image_shape):
"""
Randomly rotates images by +- deg degrees.
Args:
x (torch.Tensor of floats)
vectorized images
deg (integer)
rotation range
image_shape (tuple or list)
original shape of image
Returns:
torch.Tensor.
"""
my_x = np.array(x).copy()
my_x = my_x.reshape(-1, image_shape[0], image_shape[1])
for idx, item in enumerate(my_x):
my_deg = deg * 2 * np.random.random() - deg
my_x[idx] = ndimage.rotate(my_x[idx], my_deg,
reshape=False, prefilter=False)
my_x = my_x.reshape(x.shape)
return torch.from_numpy(my_x)
class AutoencoderClass(nn.Module):
"""
Deep autoencoder network object (nn.Module) with optional L2 normalization
of activations in bottleneck layer.
Args:
input_size (integer)
size of input samples
s2 (boolean)
whether to L2 normalize activations in bottleneck layer
Returns:
Autoencoder object inherited from nn.Module class.
"""
def __init__(self, input_size=784, s2=False):
super().__init__()
self.input_size = input_size
self.s2 = s2
if s2:
self.encoding_size = 3
else:
self.encoding_size = 2
self.enc1 = nn.Linear(self.input_size, int(self.input_size / 2))
self.enc1_f = nn.PReLU()
self.enc2 = nn.Linear(int(self.input_size / 2), self.encoding_size * 32)
self.enc2_f = nn.PReLU()
self.enc3 = nn.Linear(self.encoding_size * 32, self.encoding_size)
self.enc3_f = nn.PReLU()
self.dec1 = nn.Linear(self.encoding_size, self.encoding_size * 32)
self.dec1_f = nn.PReLU()
self.dec2 = nn.Linear(self.encoding_size * 32, int(self.input_size / 2))
self.dec2_f = nn.PReLU()
self.dec3 = nn.Linear(int(self.input_size / 2), self.input_size)
self.dec3_f = nn.Sigmoid()
def encoder(self, x):
"""
Encoder component.
"""
x = self.enc1_f(self.enc1(x))
x = self.enc2_f(self.enc2(x))
x = self.enc3_f(self.enc3(x))
if self.s2:
x = nn.functional.normalize(x, p=2, dim=1)
return x
def decoder(self, x):
"""
Decoder component.
"""
x = self.dec1_f(self.dec1(x))
x = self.dec2_f(self.dec2(x))
x = self.dec3_f(self.dec3(x))
return x
def forward(self, x):
"""
Forward pass.
"""
x = self.encoder(x)
x = self.decoder(x)
return x
def save_checkpoint(net, optimizer, filename):
"""
Saves a PyTorch checkpoint.
Args:
net (torch network)
ANN network (nn.Module)
optimizer (torch optimizer)
optimizer for SGD
filename (string)
filename (without extension)
Returns:
Nothing.
"""
torch.save({'model_state_dict': net.state_dict(),
'optimizer_state_dict': optimizer.state_dict()},
filename+'.pt')
def load_checkpoint(url, filename):
"""
Loads a PyTorch checkpoint from URL if local file not present.
Args:
url (string)
URL location of PyTorch checkpoint
filename (string)
filename (without extension)
Returns:
PyTorch checkpoint of saved model.
"""
if not os.path.isfile(filename+'.pt'):
os.system(f"wget {url}.pt")
return torch.load(filename+'.pt')
def reset_checkpoint(net, optimizer, checkpoint):
"""
Resets PyTorch model to checkpoint.
Args:
net (torch network)
ANN network (nn.Module)
optimizer (torch optimizer)
optimizer for SGD
checkpoint (torch checkpoint)
checkpoint of saved model
Returns:
Nothing.
"""
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
###Output
_____no_output_____
###Markdown
--- Section 1: Download and prepare MNIST datasetWe use the helper function `downloadMNIST` to download the dataset and transform it into `torch.Tensor` and assign train and test sets to (`x_train`, `y_train`) and (`x_test`, `y_test`).The variable `input_size` stores the length of *vectorized* versions of the images `input_train` and `input_test` for training and test images.**Instructions:*** Please execute the cell below
###Code
# Download MNIST
x_train, y_train, x_test, y_test = downloadMNIST()
x_train = x_train / 255
x_test = x_test / 255
image_shape = x_train.shape[1:]
input_size = np.prod(image_shape)
input_train = x_train.reshape([-1, input_size])
input_test = x_test.reshape([-1, input_size])
test_selected_idx = np.random.choice(len(x_test), 10, replace=False)
train_selected_idx = np.random.choice(len(x_train), 10, replace=False)
test_subset_idx = np.random.choice(len(x_test), 500, replace=False)
print(f'shape image \t\t {image_shape}')
print(f'shape input_train \t {input_train.shape}')
print(f'shape input_test \t {input_test.shape}')
###Output
_____no_output_____
###Markdown
--- Section 2: Download a pre-trained modelThe class `AutoencoderClass` implements the autoencoder architectures introduced in the previous tutorial. The design of this class follows the object-oriented programming (OOP) style from tutorial W3D4. Setting the boolean parameter `s2=True` specifies the model with projection onto the $S_2$ sphere.We trained both models for `n_epochs=25` and saved the weights to avoid a lengthy initial training period - these will be our reference model states.Experiments are run from identical initial conditions by resetting the autoencoder to the reference state at the beginning of each exercise. The mechanism for loading and storing models from PyTorch is the following:```model = nn.Sequential(...)ormodel = AutoencoderClass()torch.save({'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict()}, filename_path)checkpoint = torch.load(filename_path)model.load_state_dict(checkpoint['model_state_dict'])optimizer.load_state_dict(checkpoint['optimizer_state_dict'])```See additional [PyTorch instructions](https://pytorch.org/tutorials/recipes/recipes/saving_and_loading_a_general_checkpoint.html), and when to use `model.eval()` and `model.train()` for more complex models.We provide the functions `save_checkpoint`, `load_checkpoint`, and `reset_checkpoint` to implement the steps above and download pre-trained weights from the GitHub repo.If downloading from GitHub fails, please uncomment the 3rd cell below to train the model for `n_epochs=10` and save it locally.**Instructions:*** Please execute the cell(s) below
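For readability, here is the same save/load pattern written out as a sketch (the file name `my_checkpoint.pt` is only illustrative; `AutoencoderClass` is the helper class defined in the Setup section):

```
my_model = AutoencoderClass(s2=True)              # helper class from the Setup section
my_optimizer = optim.Adam(my_model.parameters())

# save model and optimizer state in a single checkpoint file
torch.save({'model_state_dict': my_model.state_dict(),
            'optimizer_state_dict': my_optimizer.state_dict()},
           'my_checkpoint.pt')                    # illustrative file name

# restore both from the checkpoint
my_checkpoint = torch.load('my_checkpoint.pt')
my_model.load_state_dict(my_checkpoint['model_state_dict'])
my_optimizer.load_state_dict(my_checkpoint['optimizer_state_dict'])
```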
###Code
root = 'https://github.com/mpbrigham/colaboratory-figures/raw/master/nma/autoencoders'
filename = 'ae_6h_prelu_bce_adam_25e_32b'
url = os.path.join(root, filename)
s2 = True
if s2:
filename += '_s2'
url += '_s2'
model = AutoencoderClass(s2=s2)
optimizer = optim.Adam(model.parameters())
encoder = model.encoder
decoder = model.decoder
checkpoint = load_checkpoint(url, filename)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# Please uncomment and execute this cell if download of
# pre-trained weights fail
# model = AutoencoderClass(s2=s2)
# encoder = model.encoder
# decoder = model.decoder
# n_epochs = 10
# batch_size = 128
# runSGD(model, input_train, input_test,
# n_epochs=n_epochs, batch_size=batch_size)
# save_checkpoint(model, optimizer, filename)
# checkpoint = load_checkpoint(url, filename)
with torch.no_grad():
output_test = model(input_test)
latent_test = encoder(input_test)
plot_row([input_test[test_selected_idx], output_test[test_selected_idx]],
image_shape=image_shape)
plot_latent_generative(latent_test, y_test, decoder,
image_shape=image_shape, s2=s2)
###Output
_____no_output_____
| flight_price.ipynb |
###Markdown
Flight Price Prediction---
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
###Output
_____no_output_____
###Markdown
Importing dataset1. Since the data is in the form of an Excel file, we use pandas read_excel to load it2. After loading, it is important to check the complete information of the data as it can reveal hidden issues such as null values in a column or a row3. Check whether any null values are present. If so, one of the following can be done, 1. Imputing data using the Imputation method in sklearn 2. Filling NaN values with mean, median or mode using the fillna() method4. Describe the data --> which gives a statistical summary
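A small sketch of the two missing-value strategies mentioned above, on toy values (the notebook itself simply drops rows with `dropna`):

```
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer

toy = pd.DataFrame({"Price": [3897.0, np.nan, 13882.0, 6218.0]})  # made-up values

# Option 1: pandas fillna with a statistic
filled = toy["Price"].fillna(toy["Price"].median())

# Option 2: sklearn SimpleImputer (convenient inside pipelines)
imputer = SimpleImputer(strategy="median")
imputed = imputer.fit_transform(toy[["Price"]])

print(filled.values)
print(imputed.ravel())
```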
###Code
train_data = pd.read_excel(r"F:\Projects\fight_price_prediction\Data_Train.xlsx")
pd.set_option('display.max_columns', None)
train_data.head()
train_data.info()
train_data["Duration"].value_counts()
train_data.dropna(inplace = True)
train_data.isnull().sum()
###Output
_____no_output_____
###Markdown
--- EDA From the description we can see that Date_of_Journey is an object data type,\Therefore, we have to convert this datatype into a timestamp so as to use this column properly for prediction.For this we require pandas **to_datetime** to convert the object data type to datetime dtype.**.dt.day method will extract only the day of that date**\**.dt.month method will extract only the month of that date**
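A quick illustration of the `.dt` accessors described above, on a toy series (dates are made up):

```
import pandas as pd

s = pd.to_datetime(pd.Series(["24/03/2019", "09/05/2019"]), format="%d/%m/%Y")
print(s.dt.day.tolist())    # [24, 9]
print(s.dt.month.tolist())  # [3, 5]
```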
###Code
train_data["Journey_day"] = pd.to_datetime(train_data.Date_of_Journey, format="%d/%m/%Y").dt.day
train_data["Journey_month"] = pd.to_datetime(train_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
train_data.head()
# Since we have converted Date_of_Journey column into integers, Now we can drop as it is of no use.
train_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Departure time is when a plane leaves the gate.
# Similar to Date_of_Journey we can extract values from Dep_Time
# Extracting Hours
train_data["Dep_hour"] = pd.to_datetime(train_data["Dep_Time"]).dt.hour
# Extracting Minutes
train_data["Dep_min"] = pd.to_datetime(train_data["Dep_Time"]).dt.minute
# Now we can drop Dep_Time as it is of no use
train_data.drop(["Dep_Time"], axis = 1, inplace = True)
train_data.head()
# Arrival time is when the plane pulls up to the gate.
# Similar to Date_of_Journey we can extract values from Arrival_Time
# Extracting Hours
train_data["Arrival_hour"] = pd.to_datetime(train_data.Arrival_Time).dt.hour
# Extracting Minutes
train_data["Arrival_min"] = pd.to_datetime(train_data.Arrival_Time).dt.minute
# Now we can drop Arrival_Time as it is of no use
train_data.drop(["Arrival_Time"], axis = 1, inplace = True)
train_data.head()
# Time taken by plane to reach destination is called Duration
# It is the difference between Departure Time and Arrival time
# Assigning and converting Duration column into list
duration = list(train_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding duration_hours and duration_mins list to train_data dataframe
train_data["Duration_hours"] = duration_hours
train_data["Duration_mins"] = duration_mins
train_data.drop(["Duration"], axis = 1, inplace = True)
train_data.head()
###Output
_____no_output_____
###Markdown
--- Handling Categorical DataOne can find many ways to handle categorical data. Some of them are,1. **Nominal data** --> data are not in any order --> **OneHotEncoder** is used in this case2. **Ordinal data** --> data are in order --> **LabelEncoder** is used in this case
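A minimal sketch of the two encoder types named above, on toy columns. The notebook itself uses `pd.get_dummies` for nominal data and an explicit `replace()` mapping for ordinal data; note that sklearn's `LabelEncoder` assigns codes in alphabetical order, which is why an explicit mapping is often preferable for ordered categories.

```
import pandas as pd
from sklearn.preprocessing import LabelEncoder

# Nominal: one-hot encoding (no implied order)
airlines = pd.Series(["IndiGo", "Air India", "IndiGo"], name="Airline")
print(pd.get_dummies(airlines, drop_first=True))

# Ordinal: label encoding -- but LabelEncoder sorts labels alphabetically,
# so an explicit mapping (as done below with replace) preserves the true order of stops
stops = ["non-stop", "1 stop", "2 stops", "1 stop"]
print(LabelEncoder().fit_transform(stops))  # alphabetical codes: [2 0 1 0]
```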
###Code
train_data["Airline"].value_counts()
# From graph we can see that Jet Airways Business have the highest Price.
# Apart from the first Airline almost all are having similar median
# Airline vs Price
sns.catplot(y = "Price", x = "Airline", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 6, aspect = 3)
plt.show()
# As Airline is Nominal Categorical data we will perform OneHotEncoding
Airline = train_data[["Airline"]]
Airline = pd.get_dummies(Airline, drop_first= True)
Airline.head()
train_data["Source"].value_counts()
# Source vs Price
sns.catplot(y = "Price", x = "Source", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 4, aspect = 3)
plt.show()
# As Source is Nominal Categorical data we will perform OneHotEncoding
Source = train_data[["Source"]]
Source = pd.get_dummies(Source, drop_first= True)
Source.head()
train_data["Destination"].value_counts()
# As Destination is Nominal Categorical data we will perform OneHotEncoding
Destination = train_data[["Destination"]]
Destination = pd.get_dummies(Destination, drop_first = True)
Destination.head()
train_data["Route"]
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
train_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
train_data["Total_Stops"].value_counts()
# As this is case of Ordinal Categorical type we perform LabelEncoder
# Here Values are assigned with corresponding keys
train_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
train_data.head()
# Concatenate dataframe --> train_data + Airline + Source + Destination
data_train = pd.concat([train_data, Airline, Source, Destination], axis = 1)
data_train.head()
data_train.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
data_train.head()
data_train.shape
###Output
_____no_output_____
###Markdown
--- Test set
###Code
test_data = pd.read_excel(r"F:\Projects\fight_price_prediction\Test_set.xlsx")
test_data.head()
# Preprocessing
print("Test data Info")
print("-"*75)
print(test_data.info())
print()
print()
print("Null values :")
print("-"*75)
test_data.dropna(inplace = True)
print(test_data.isnull().sum())
# EDA
# Date_of_Journey
test_data["Journey_day"] = pd.to_datetime(test_data.Date_of_Journey, format="%d/%m/%Y").dt.day
test_data["Journey_month"] = pd.to_datetime(test_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
test_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Dep_Time
test_data["Dep_hour"] = pd.to_datetime(test_data["Dep_Time"]).dt.hour
test_data["Dep_min"] = pd.to_datetime(test_data["Dep_Time"]).dt.minute
test_data.drop(["Dep_Time"], axis = 1, inplace = True)
# Arrival_Time
test_data["Arrival_hour"] = pd.to_datetime(test_data.Arrival_Time).dt.hour
test_data["Arrival_min"] = pd.to_datetime(test_data.Arrival_Time).dt.minute
test_data.drop(["Arrival_Time"], axis = 1, inplace = True)
# Duration
duration = list(test_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding Duration column to test set
test_data["Duration_hours"] = duration_hours
test_data["Duration_mins"] = duration_mins
test_data.drop(["Duration"], axis = 1, inplace = True)
# Categorical data
print("Airline")
print("-"*75)
print(test_data["Airline"].value_counts())
Airline = pd.get_dummies(test_data["Airline"], drop_first= True)
print()
print("Source")
print("-"*75)
print(test_data["Source"].value_counts())
Source = pd.get_dummies(test_data["Source"], drop_first= True)
print()
print("Destination")
print("-"*75)
print(test_data["Destination"].value_counts())
Destination = pd.get_dummies(test_data["Destination"], drop_first = True)
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
test_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
# Replacing Total_Stops
test_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
# Concatenate dataframe --> test_data + Airline + Source + Destination
data_test = pd.concat([test_data, Airline, Source, Destination], axis = 1)
data_test.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
print()
print()
print("Shape of test data : ", data_test.shape)
data_test.head()
###Output
_____no_output_____
###Markdown
--- Feature SelectionFinding the features that contribute most and have a good relation with the target variable.Following are some of the feature selection methods,1. **heatmap**2. **feature_importance_**3. **SelectKBest**
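`SelectKBest` is listed above but not used later in the notebook; a minimal sketch, assuming `X` and `y` are defined as in the cells below:

```
from sklearn.feature_selection import SelectKBest, f_regression

# keep the 10 features with the strongest univariate relation to the target
selector = SelectKBest(score_func=f_regression, k=10)
X_best = selector.fit_transform(X, y)      # X, y as defined in the cells below
print(X.columns[selector.get_support()])   # names of the selected features
```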
###Code
data_train.shape
data_train.columns
X = data_train.loc[:, ['Total_Stops', 'Journey_day', 'Journey_month', 'Dep_hour',
'Dep_min', 'Arrival_hour', 'Arrival_min', 'Duration_hours',
'Duration_mins', 'Airline_Air India', 'Airline_GoAir', 'Airline_IndiGo',
'Airline_Jet Airways', 'Airline_Jet Airways Business',
'Airline_Multiple carriers',
'Airline_Multiple carriers Premium economy', 'Airline_SpiceJet',
'Airline_Trujet', 'Airline_Vistara', 'Airline_Vistara Premium economy',
'Source_Chennai', 'Source_Delhi', 'Source_Kolkata', 'Source_Mumbai',
'Destination_Cochin', 'Destination_Delhi', 'Destination_Hyderabad',
'Destination_Kolkata', 'Destination_New Delhi']]
X.head()
y = data_train.iloc[:, 1]
y.head()
# Finds correlation between Independent and dependent attributes
plt.figure(figsize = (18,18))
sns.heatmap(train_data.corr(), annot = True, cmap = "RdYlGn")
plt.show()
# Important feature using ExtraTreesRegressor
from sklearn.ensemble import ExtraTreesRegressor
selection = ExtraTreesRegressor()
selection.fit(X, y)
print(selection.feature_importances_)
#plot graph of feature importances for better visualization
plt.figure(figsize = (12,8))
feat_importances = pd.Series(selection.feature_importances_, index=X.columns)
feat_importances.nlargest(20).plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
--- Fitting model using Random Forest1. Split dataset into train and test set in order to predict w.r.t X_test2. If needed do scaling of data * Scaling is not done in Random forest3. Import model4. Fit the data5. Predict w.r.t X_test6. In regression check **RMSE** Score7. Plot graph
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
from sklearn.ensemble import RandomForestRegressor
reg_rf = RandomForestRegressor()
reg_rf.fit(X_train, y_train)
y_pred = reg_rf.predict(X_test)
reg_rf.score(X_train, y_train)
reg_rf.score(X_test, y_test)
sns.distplot(y_test-y_pred)
plt.show()
plt.scatter(y_test, y_pred, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# RMSE/(max(DV)-min(DV))
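# Note: dividing the RMSE by the target's range gives a normalized error (a fraction of
# the observed price spread); 2090.5509 below is an RMSE value from one particular run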
2090.5509/(max(y)-min(y))
metrics.r2_score(y_test, y_pred)
###Output
_____no_output_____
###Markdown
--- Hyperparameter Tuning* Choose one of the following methods for hyperparameter tuning 1. **RandomizedSearchCV** --> Fast 2. **GridSearchCV*** Assign hyperparameters in the form of a dictionary* Fit the model* Check best parameters and best score
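A minimal sketch of the `GridSearchCV` alternative mentioned above (a deliberately small grid, since it fits every combination and is slower than `RandomizedSearchCV`); the fit call is left commented out because it can take a while:

```
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor

param_grid = {"n_estimators": [100, 300],
              "max_depth": [10, 20]}

grid_search = GridSearchCV(RandomForestRegressor(random_state=42),
                           param_grid=param_grid,
                           scoring="neg_mean_squared_error",
                           cv=3, verbose=2, n_jobs=-1)
# grid_search.fit(X_train, y_train)   # X_train / y_train from the earlier split
# print(grid_search.best_params_)
```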
###Code
from sklearn.model_selection import RandomizedSearchCV
#Randomized Search CV
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 100, stop = 1200, num = 12)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(5, 30, num = 6)]
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10, 15, 100]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 5, 10]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf}
# Random search of parameters, using 5 fold cross validation,
# search across 100 different combinations
rf_random = RandomizedSearchCV(estimator = reg_rf, param_distributions = random_grid,scoring='neg_mean_squared_error', n_iter = 10, cv = 5, verbose=2, random_state=42, n_jobs = 1)
rf_random.fit(X_train,y_train)
rf_random.best_params_
prediction = rf_random.predict(X_test)
plt.figure(figsize = (8,8))
sns.distplot(y_test-prediction)
plt.show()
plt.figure(figsize = (8,8))
plt.scatter(y_test, prediction, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
print('MAE:', metrics.mean_absolute_error(y_test, prediction))
print('MSE:', metrics.mean_squared_error(y_test, prediction))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, prediction)))
###Output
MAE: 1165.606162629916
MSE: 4062650.6911608884
RMSE: 2015.6018186042818
###Markdown
--- Save the model to reuse it again
###Code
import pickle
# open a file where you want to store the data
file = open('flight_rf.pkl', 'wb')
# dump information to that file
pickle.dump(reg_rf, file)
model = open('flight_rf.pkl', 'rb')
forest = pickle.load(model)
y_prediction = forest.predict(X_test)
metrics.r2_score(y_test, y_prediction)
###Output
_____no_output_____
###Markdown
Flight Price Prediction---
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
###Output
_____no_output_____
###Markdown
Importing dataset
###Code
train_data = pd.read_excel("Data_Train.xlsx")
pd.set_option('display.max_columns', None)
train_data.head()
train_data.info()
train_data["Duration"].value_counts()
train_data.dropna(inplace = True)
train_data.isnull().sum()
###Output
_____no_output_____
###Markdown
--- EDA From the description we can see that Date_of_Journey is an object data type,\Therefore, we have to convert this datatype into a timestamp so as to use this column properly for prediction.For this we require pandas **to_datetime** to convert the object data type to datetime dtype.**.dt.day method will extract only the day of that date**\**.dt.month method will extract only the month of that date**
###Code
train_data["Journey_day"] = pd.to_datetime(train_data.Date_of_Journey, format="%d/%m/%Y").dt.day
train_data["Journey_month"] = pd.to_datetime(train_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
train_data.head()
# Since we have converted Date_of_Journey column into integers, Now we can drop as it is of no use.
train_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Departure time is when a plane leaves the gate.
# Similar to Date_of_Journey we can extract values from Dep_Time
# Extracting Hours
train_data["Dep_hour"] = pd.to_datetime(train_data["Dep_Time"]).dt.hour
# Extracting Minutes
train_data["Dep_min"] = pd.to_datetime(train_data["Dep_Time"]).dt.minute
# Now we can drop Dep_Time as it is of no use
train_data.drop(["Dep_Time"], axis = 1, inplace = True)
train_data.head()
# Arrival time is when the plane pulls up to the gate.
# Similar to Date_of_Journey we can extract values from Arrival_Time
# Extracting Hours
train_data["Arrival_hour"] = pd.to_datetime(train_data.Arrival_Time).dt.hour
# Extracting Minutes
train_data["Arrival_min"] = pd.to_datetime(train_data.Arrival_Time).dt.minute
# Now we can drop Arrival_Time as it is of no use
train_data.drop(["Arrival_Time"], axis = 1, inplace = True)
train_data.head()
# Time taken by plane to reach destination is called Duration
# It is the difference between Departure Time and Arrival time
# Assigning and converting Duration column into list
duration = list(train_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding duration_hours and duration_mins list to train_data dataframe
train_data["Duration_hours"] = duration_hours
train_data["Duration_mins"] = duration_mins
train_data.drop(["Duration"], axis = 1, inplace = True)
train_data.head()
###Output
_____no_output_____
###Markdown
--- Handling Categorical Data1. **Nominal data** --> data are not in any order --> **OneHotEncoder** is used in this case2. **Ordinal data** --> data are in order --> **LabelEncoder** is used in this case
###Code
train_data["Airline"].value_counts()
# From graph we can see that Jet Airways Business have the highest Price.
# Apart from the first Airline almost all are having similar median
# Airline vs Price
sns.catplot(y = "Price", x = "Airline", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 6, aspect = 3)
plt.show()
# As Airline is Nominal Categorical data we will perform OneHotEncoding
Airline = train_data[["Airline"]]
Airline = pd.get_dummies(Airline, drop_first= True)
Airline.head()
train_data["Source"].value_counts()
# Source vs Price
sns.catplot(y = "Price", x = "Source", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 4, aspect = 3)
plt.show()
# As Source is Nominal Categorical data we will perform OneHotEncoding
Source = train_data[["Source"]]
Source = pd.get_dummies(Source, drop_first= True)
Source.head()
train_data["Destination"].value_counts()
# As Destination is Nominal Categorical data we will perform OneHotEncoding
Destination = train_data[["Destination"]]
Destination = pd.get_dummies(Destination, drop_first = True)
Destination.head()
train_data["Route"]
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
train_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
train_data["Total_Stops"].value_counts()
# As this is case of Ordinal Categorical type we perform LabelEncoder
# Here Values are assigned with corresponding keys
train_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
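# Hedged note: replace() above scans every column of train_data for those strings; an
# equivalent, more targeted sketch would map only the Total_Stops column, e.g.
#   train_data["Total_Stops"] = train_data["Total_Stops"].map(
#       {"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4})
# It is left commented out because the DataFrame-wide replace has already been applied.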
train_data.head()
# Concatenate dataframe --> train_data + Airline + Source + Destination
data_train = pd.concat([train_data, Airline, Source, Destination], axis = 1)
data_train.head()
data_train.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
data_train.head()
data_train.shape
###Output
_____no_output_____
###Markdown
--- Test set
###Code
test_data = pd.read_excel("Test_set.xlsx")
test_data.head()
# Preprocessing
print("Test data Info")
print("-"*75)
print(test_data.info())
print()
print()
print("Null values :")
print("-"*75)
test_data.dropna(inplace = True)
print(test_data.isnull().sum())
# EDA
# Date_of_Journey
test_data["Journey_day"] = pd.to_datetime(test_data.Date_of_Journey, format="%d/%m/%Y").dt.day
test_data["Journey_month"] = pd.to_datetime(test_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
test_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Dep_Time
test_data["Dep_hour"] = pd.to_datetime(test_data["Dep_Time"]).dt.hour
test_data["Dep_min"] = pd.to_datetime(test_data["Dep_Time"]).dt.minute
test_data.drop(["Dep_Time"], axis = 1, inplace = True)
# Arrival_Time
test_data["Arrival_hour"] = pd.to_datetime(test_data.Arrival_Time).dt.hour
test_data["Arrival_min"] = pd.to_datetime(test_data.Arrival_Time).dt.minute
test_data.drop(["Arrival_Time"], axis = 1, inplace = True)
# Duration
duration = list(test_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding Duration column to test set
test_data["Duration_hours"] = duration_hours
test_data["Duration_mins"] = duration_mins
test_data.drop(["Duration"], axis = 1, inplace = True)
# Categorical data
print("Airline")
print("-"*75)
print(test_data["Airline"].value_counts())
Airline = pd.get_dummies(test_data["Airline"], drop_first= True)
print()
print("Source")
print("-"*75)
print(test_data["Source"].value_counts())
Source = pd.get_dummies(test_data["Source"], drop_first= True)
print()
print("Destination")
print("-"*75)
print(test_data["Destination"].value_counts())
Destination = pd.get_dummies(test_data["Destination"], drop_first = True)
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
test_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
# Replacing Total_Stops
test_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
# Concatenate dataframe --> test_data + Airline + Source + Destination
data_test = pd.concat([test_data, Airline, Source, Destination], axis = 1)
data_test.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
print()
print()
print("Shape of test data : ", data_test.shape)
data_test.head()
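# Hedged sketch: pd.get_dummies on the test file can produce a different column set than on
# the training file (a category present in one file but not the other). One common fix,
# assuming data_train from the training section is still in scope, is to reindex the test
# frame onto the training feature columns:
#   data_test = data_test.reindex(columns = data_train.drop("Price", axis = 1).columns, fill_value = 0)
# Left commented out so the notebook's original data_test is not altered here.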
###Output
_____no_output_____
###Markdown
--- Feature Selection
###Code
data_train.shape
data_train.columns
X = data_train.loc[:, ['Total_Stops', 'Journey_day', 'Journey_month', 'Dep_hour',
'Dep_min', 'Arrival_hour', 'Arrival_min', 'Duration_hours',
'Duration_mins', 'Airline_Air India', 'Airline_GoAir', 'Airline_IndiGo',
'Airline_Jet Airways', 'Airline_Jet Airways Business',
'Airline_Multiple carriers',
'Airline_Multiple carriers Premium economy', 'Airline_SpiceJet',
'Airline_Trujet', 'Airline_Vistara', 'Airline_Vistara Premium economy',
'Source_Chennai', 'Source_Delhi', 'Source_Kolkata', 'Source_Mumbai',
'Destination_Cochin', 'Destination_Delhi', 'Destination_Hyderabad',
'Destination_Kolkata', 'Destination_New Delhi']]
X.head()
y = data_train.iloc[:, 1]
y.head()
# Finds correlation between Independent and dependent attributes
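# Note: the heatmap below is drawn from train_data (the label-encoded frame that still holds
# Price), so only its numeric columns appear, not the one-hot columns of data_train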
plt.figure(figsize = (18,18))
sns.heatmap(train_data.corr(), annot = True, cmap = "RdYlGn")
plt.show()
# Important feature using ExtraTreesRegressor
from sklearn.ensemble import ExtraTreesRegressor
selection = ExtraTreesRegressor()
selection.fit(X, y)
print(selection.feature_importances_)
#plot graph of feature importances for better visualization
plt.figure(figsize = (12,8))
feat_importances = pd.Series(selection.feature_importances_, index=X.columns)
feat_importances.nlargest(20).plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
--- Fitting model using Random Forest
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
from sklearn.ensemble import RandomForestRegressor
reg_rf = RandomForestRegressor()
reg_rf.fit(X_train, y_train)
y_pred = reg_rf.predict(X_test)
reg_rf.score(X_train, y_train)
reg_rf.score(X_test, y_test)
sns.distplot(y_test-y_pred)
plt.show()
plt.scatter(y_test, y_pred, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# RMSE/(max(DV)-min(DV))
2090.5509/(max(y)-min(y))
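# Hedged sketch: the 2090.5509 above is a hard-coded RMSE from an earlier run; the same
# normalised error can be recomputed from this run's predictions:
print("normalised RMSE:", np.sqrt(metrics.mean_squared_error(y_test, y_pred)) / (y.max() - y.min()))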
metrics.r2_score(y_test, y_pred)
###Output
_____no_output_____
###Markdown
--- Hyperparameter Tuning
###Code
from sklearn.model_selection import RandomizedSearchCV
#Randomized Search CV
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 100, stop = 1200, num = 12)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(5, 30, num = 6)]
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10, 15, 100]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 5, 10]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf}
# Random search of parameters, using 5 fold cross validation,
# search across 10 different parameter combinations (n_iter = 10)
rf_random = RandomizedSearchCV(estimator = reg_rf, param_distributions = random_grid,scoring='neg_mean_squared_error', n_iter = 10, cv = 5, verbose=2, random_state=42, n_jobs = 1)
rf_random.fit(X_train,y_train)
rf_random.best_params_
prediction = rf_random.predict(X_test)
plt.figure(figsize = (8,8))
sns.distplot(y_test-prediction)
plt.show()
plt.figure(figsize = (8,8))
plt.scatter(y_test, prediction, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
print('MAE:', metrics.mean_absolute_error(y_test, prediction))
print('MSE:', metrics.mean_squared_error(y_test, prediction))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, prediction)))
###Output
MAE: 1165.606162629916
MSE: 4062650.6911608884
RMSE: 2015.6018186042818
###Markdown
--- Save the model to reuse it again
###Code
import pickle
# open a file where you want to store the data
file = open('flight_rf.pkl', 'wb')
# dump information to that file
pickle.dump(reg_rf, file)
model = open('flight_rf.pkl','rb')
forest = pickle.load(model)
y_prediction = forest.predict(X_test)
metrics.r2_score(y_test, y_prediction)
###Output
_____no_output_____
###Markdown
Flight Price Prediction---
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
!pip install xlrd
!pip install openpyxl
###Output
Requirement already satisfied: openpyxl in c:\users\this pc\appdata\local\programs\python\python38\lib\site-packages (3.0.7)
Requirement already satisfied: et-xmlfile in c:\users\this pc\appdata\local\programs\python\python38\lib\site-packages (from openpyxl) (1.1.0)
###Markdown
Importing dataset1. Since the data is in the form of an Excel file, we have to use pandas read_excel to load the data2. After loading, it is important to check the complete information of the data, as it can reveal much of the hidden information such as null values in a column or a row3. Check whether any null values are present. If they are, the following can be done, 1. Imputing data using the Imputation method in sklearn 2. Filling NaN values with the mean, median and mode using the fillna() method4. Describe data --> which can give a statistical analysis
###Code
train_data = pd.read_excel("Data_Train.xlsx", engine='openpyxl')
train_data.head()
train_data.info()
train_data["Duration"].value_counts()
train_data.dropna(inplace = True)
train_data.isnull().sum()
train_data.shape
###Output
_____no_output_____
###Markdown
--- EDA From the description we can see that Date_of_Journey is an object data type,\Therefore, we have to convert this data type into a timestamp so as to use this column properly for prediction. For this we require pandas **to_datetime** to convert the object data type to datetime dtype.**.dt.day method will extract only the day of that date**\**.dt.month method will extract only the month of that date**
###Code
train_data["Journey_day"] = pd.to_datetime(train_data.Date_of_Journey, format="%d/%m/%Y").dt.day
train_data["Journey_month"] = pd.to_datetime(train_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
train_data.head()
# Since we have converted the Date_of_Journey column into integers, we can now drop it as it is of no use.
train_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Departure time is when a plane leaves the gate.
# Similar to Date_of_Journey we can extract values from Dep_Time
# Extracting Hours
train_data["Dep_hour"] = pd.to_datetime(train_data["Dep_Time"]).dt.hour
# Extracting Minutes
train_data["Dep_min"] = pd.to_datetime(train_data["Dep_Time"]).dt.minute
# Now we can drop Dep_Time as it is of no use
train_data.drop(["Dep_Time"], axis = 1, inplace = True)
train_data.head()
# Arrival time is when the plane pulls up to the gate.
# Similar to Date_of_Journey we can extract values from Arrival_Time
# Extracting Hours
train_data["Arrival_hour"] = pd.to_datetime(train_data.Arrival_Time).dt.hour
# Extracting Minutes
train_data["Arrival_min"] = pd.to_datetime(train_data.Arrival_Time).dt.minute
# Now we can drop Arrival_Time as it is of no use
train_data.drop(["Arrival_Time"], axis = 1, inplace = True)
train_data.head()
# Time taken by plane to reach destination is called Duration
# It is the difference between Departure Time and Arrival Time
# Assigning and converting Duration column into list
duration = list(train_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding duration_hours and duration_mins list to train_data dataframe
train_data["Duration_hours"] = duration_hours
train_data["Duration_mins"] = duration_mins
train_data.drop(["Duration"], axis = 1, inplace = True)
train_data.head()
###Output
_____no_output_____
###Markdown
--- Handling Categorical DataOne can find many ways to handle categorical data. Some of the categorical data types are,1. **Nominal data** --> data are not in any order --> **OneHotEncoder** is used in this case2. **Ordinal data** --> data are in order --> **LabelEncoder** is used in this case
###Code
train_data["Airline"].value_counts()
# From the graph we can see that Jet Airways Business has the highest Price.
# Apart from the first Airline, almost all have a similar median
# Airline vs Price
sns.catplot(y = "Price", x = "Airline", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 6, aspect = 3)
plt.show()
# As Airline is Nominal Categorical data we will perform OneHotEncoding
Airline = train_data[["Airline"]]
Airline = pd.get_dummies(Airline, drop_first= True)
Airline.head()
train_data["Source"].value_counts()
# Source vs Price
sns.catplot(y = "Price", x = "Source", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 4, aspect = 3)
plt.show()
# As Source is Nominal Categorical data we will perform OneHotEncoding
Source = train_data[["Source"]]
Source = pd.get_dummies(Source, drop_first= True)
Source.head()
train_data["Destination"].value_counts()
# Destination vs Price
sns.catplot(y = "Price", x = "Destination", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 4, aspect = 3)
plt.show()
# As Destination is Nominal Categorical data we will perform OneHotEncoding
Destination = train_data[["Destination"]]
Destination = pd.get_dummies(Destination, drop_first = True)
Destination.head()
train_data["Route"]
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
train_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
train_data["Total_Stops"].value_counts()
# As this is case of Ordinal Categorical type we perform LabelEncoder
# Here Values are assigned with corresponding keys
train_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
train_data.head()
# Concatenate dataframe --> train_data + Airline + Source + Destination
data_train = pd.concat([train_data, Airline, Source, Destination], axis = 1)
sns.barplot(x=data_train['Total_Stops'], y = data_train['Price'])
data_train.head()
data_train.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
data_train.head()
data_train.shape
###Output
_____no_output_____
###Markdown
--- Test set
###Code
test_data = pd.read_excel(r"Test_set.xlsx", engine = "openpyxl")
test_data.head()
# Preprocessing
print("Test data Info")
print("-"*75)
print(test_data.info())
print()
print()
print("Null values :")
print("-"*75)
test_data.dropna(inplace = True)
print(test_data.isnull().sum())
# EDA
# Date_of_Journey
test_data["Journey_day"] = pd.to_datetime(test_data.Date_of_Journey, format="%d/%m/%Y").dt.day
test_data["Journey_month"] = pd.to_datetime(test_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
test_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Dep_Time
test_data["Dep_hour"] = pd.to_datetime(test_data["Dep_Time"]).dt.hour
test_data["Dep_min"] = pd.to_datetime(test_data["Dep_Time"]).dt.minute
test_data.drop(["Dep_Time"], axis = 1, inplace = True)
# Arrival_Time
test_data["Arrival_hour"] = pd.to_datetime(test_data.Arrival_Time).dt.hour
test_data["Arrival_min"] = pd.to_datetime(test_data.Arrival_Time).dt.minute
test_data.drop(["Arrival_Time"], axis = 1, inplace = True)
# Duration
duration = list(test_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding Duration column to test set
test_data["Duration_hours"] = duration_hours
test_data["Duration_mins"] = duration_mins
test_data.drop(["Duration"], axis = 1, inplace = True)
# Categorical data
print("Airline")
print("-"*75)
print(test_data["Airline"].value_counts())
Airline = pd.get_dummies(test_data["Airline"], drop_first= True)
print()
print("Source")
print("-"*75)
print(test_data["Source"].value_counts())
Source = pd.get_dummies(test_data["Source"], drop_first= True)
print()
print("Destination")
print("-"*75)
print(test_data["Destination"].value_counts())
Destination = pd.get_dummies(test_data["Destination"], drop_first = True)
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
test_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
# Replacing Total_Stops
test_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
# Concatenate dataframe --> test_data + Airline + Source + Destination
data_test = pd.concat([test_data, Airline, Source, Destination], axis = 1)
data_test.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
print()
print()
print("Shape of test data : ", data_test.shape)
data_test.head()
###Output
_____no_output_____
###Markdown
--- Feature SelectionFinding out the best features, i.e. those which contribute most and have a good relation with the target variable.Following are some of the feature selection methods,1. **heatmap**2. **feature_importance_**3. **SelectKBest**
###Code
data_train.shape
data_train.columns
X = data_train.loc[:, ['Total_Stops', 'Journey_day', 'Journey_month', 'Dep_hour',
'Dep_min', 'Arrival_hour', 'Arrival_min', 'Duration_hours',
'Duration_mins', 'Airline_Air India', 'Airline_GoAir', 'Airline_IndiGo',
'Airline_Jet Airways', 'Airline_Jet Airways Business',
'Airline_Multiple carriers',
'Airline_Multiple carriers Premium economy', 'Airline_SpiceJet',
'Airline_Trujet', 'Airline_Vistara', 'Airline_Vistara Premium economy',
'Source_Chennai', 'Source_Delhi', 'Source_Kolkata', 'Source_Mumbai',
'Destination_Cochin', 'Destination_Delhi', 'Destination_Hyderabad',
'Destination_Kolkata', 'Destination_New Delhi']]
X.head()
y = data_train.iloc[:, 1]
y.head()
# Finds correlation between Independent and dependent attributes
plt.figure(figsize = (18,18))
sns.heatmap(train_data.corr(), annot = True, cmap = "RdYlGn")
plt.show()
# Important feature using ExtraTreesRegressor
from sklearn.ensemble import ExtraTreesRegressor
selection = ExtraTreesRegressor()
selection.fit(X, y)
print(selection.feature_importances_)
#plot graph of feature importances for better visualization
plt.figure(figsize = (12,8))
feat_importances = pd.Series(selection.feature_importances_, index=X.columns)
feat_importances.nlargest(20).plot(kind='barh')
plt.show()
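# Hedged sketch of the third method listed above (SelectKBest): score each feature against
# the target with the univariate f_regression test. Read-only; nothing below depends on it.
from sklearn.feature_selection import SelectKBest, f_regression
skb = SelectKBest(score_func = f_regression, k = 10).fit(X, y)
print(pd.Series(skb.scores_, index = X.columns).nlargest(10))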
###Output
_____no_output_____
###Markdown
--- Fitting model using Random Forest1. Split the dataset into train and test sets in order to predict w.r.t. X_test2. If needed, do scaling of data * Scaling is not required for Random Forest3. Import model4. Fit the data5. Predict w.r.t. X_test6. In regression check the **RMSE** score7. Plot graph
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
from sklearn.ensemble import RandomForestRegressor
reg_rf = RandomForestRegressor()
reg_rf.fit(X_train, y_train)
y_pred = reg_rf.predict(X_test)
reg_rf.score(X_train, y_train)
reg_rf.score(X_test, y_test)
sns.distplot(y_test-y_pred)
plt.show()
plt.scatter(y_test, y_pred, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# RMSE/(max(DV)-min(DV))
2090.5509/(max(y)-min(y))
metrics.r2_score(y_test, y_pred)
###Output
_____no_output_____
###Markdown
--- Hyperparameter Tuning* Choose one of the following methods for hyperparameter tuning 1. **RandomizedSearchCV** --> Fast 2. **GridSearchCV*** Assign hyperparameters in the form of a dictionary* Fit the model* Check the best parameters and best score
###Code
from sklearn.model_selection import RandomizedSearchCV
#Randomized Search CV
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 100, stop = 1200, num = 12)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(5, 30, num = 6)]
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10, 15, 100]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 5, 10]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf}
# Random search of parameters, using 5 fold cross validation,
# search across 10 different parameter combinations (n_iter = 10)
rf_random = RandomizedSearchCV(estimator = reg_rf, param_distributions = random_grid,scoring='neg_mean_squared_error', n_iter = 10, cv = 5, verbose=2, random_state=42, n_jobs = 1)
rf_random.fit(X_train,y_train)
rf_random.best_params_
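# Hedged sketch of the second option listed above (GridSearchCV): exhaustively search a
# small grid around the values RandomizedSearchCV just reported. Commented out only because
# it refits the forest many times; the grid values here are illustrative.
#   from sklearn.model_selection import GridSearchCV
#   param_grid = {"n_estimators": [400, 700, 1000], "max_depth": [15, 20, 25]}
#   rf_grid = GridSearchCV(estimator = reg_rf, param_grid = param_grid,
#                          scoring = "neg_mean_squared_error", cv = 5, verbose = 2, n_jobs = 1)
#   rf_grid.fit(X_train, y_train)
#   rf_grid.best_params_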
prediction = rf_random.predict(X_test)
plt.figure(figsize = (8,8))
sns.distplot(y_test-prediction)
plt.show()
plt.figure(figsize = (8,8))
plt.scatter(y_test, prediction, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
print('MAE:', metrics.mean_absolute_error(y_test, prediction))
print('MSE:', metrics.mean_squared_error(y_test, prediction))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, prediction)))
###Output
MAE: 1162.89870035855
MSE: 4039324.8061176543
RMSE: 2009.8071564500049
###Markdown
--- Save the model to reuse it again
###Code
import pickle
# open a file where you want to store the data
file = open('flight_rf.pkl', 'wb')
# dump information to that file
pickle.dump(rf_random, file)
model = open('flight_rf.pkl','rb')
forest = pickle.load(model)
y_prediction = forest.predict(X_test)
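# Hedged usage sketch: the reloaded model can also score a single feature row directly
print("Predicted fare for the first test row:", forest.predict(X_test.iloc[[0]])[0])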
metrics.r2_score(y_test, y_prediction)
###Output
_____no_output_____
###Markdown
Flight Price Prediction---
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
###Output
_____no_output_____
###Markdown
Importing dataset1. Since the data is in the form of an Excel file, we have to use pandas read_excel to load the data2. After loading, it is important to check the complete information of the data, as it can reveal much of the hidden information such as null values in a column or a row3. Check whether any null values are present. If they are, the following can be done, 1. Imputing data using the Imputation method in sklearn 2. Filling NaN values with the mean, median and mode using the fillna() method4. Describe data --> which can give a statistical analysis
###Code
train_data = pd.read_excel(r"C:\Users\balay\OneDrive\Bureau\Projects\Flight-Price-Prediction-master\train.xlsx")
pd.set_option('display.max_columns', None)
train_data.head()
train_data.info()
train_data["Duration"].value_counts()
train_data.dropna(inplace = True)
train_data.isnull().sum()
###Output
_____no_output_____
###Markdown
--- EDA From the description we can see that Date_of_Journey is an object data type,\Therefore, we have to convert this data type into a timestamp so as to use this column properly for prediction. For this we require pandas **to_datetime** to convert the object data type to datetime dtype.**.dt.day method will extract only the day of that date**\**.dt.month method will extract only the month of that date**
###Code
train_data["Journey_day"] = pd.to_datetime(train_data.Date_of_Journey, format="%d/%m/%Y").dt.day
train_data["Journey_month"] = pd.to_datetime(train_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
train_data.head()
# Since we have converted the Date_of_Journey column into integers, we can now drop it as it is of no use.
train_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Departure time is when a plane leaves the gate.
# Similar to Date_of_Journey we can extract values from Dep_Time
# Extracting Hours
train_data["Dep_hour"] = pd.to_datetime(train_data["Dep_Time"]).dt.hour
# Extracting Minutes
train_data["Dep_min"] = pd.to_datetime(train_data["Dep_Time"]).dt.minute
# Now we can drop Dep_Time as it is of no use
train_data.drop(["Dep_Time"], axis = 1, inplace = True)
train_data.head()
# Arrival time is when the plane pulls up to the gate.
# Similar to Date_of_Journey we can extract values from Arrival_Time
# Extracting Hours
train_data["Arrival_hour"] = pd.to_datetime(train_data.Arrival_Time).dt.hour
# Extracting Minutes
train_data["Arrival_min"] = pd.to_datetime(train_data.Arrival_Time).dt.minute
# Now we can drop Arrival_Time as it is of no use
train_data.drop(["Arrival_Time"], axis = 1, inplace = True)
train_data.head()
# Time taken by plane to reach destination is called Duration
# It is the difference between Departure Time and Arrival Time
# Assigning and converting Duration column into list
duration = list(train_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding duration_hours and duration_mins list to train_data dataframe
train_data["Duration_hours"] = duration_hours
train_data["Duration_mins"] = duration_mins
train_data.drop(["Duration"], axis = 1, inplace = True)
train_data.head()
###Output
_____no_output_____
###Markdown
--- Handling Categorical DataOne can find many ways to handle categorical data. Some of the categorical data types are,1. **Nominal data** --> data are not in any order --> **OneHotEncoder** is used in this case2. **Ordinal data** --> data are in order --> **LabelEncoder** is used in this case
###Code
train_data["Airline"].value_counts()
# From the graph we can see that Jet Airways Business has the highest Price.
# Apart from the first Airline, almost all have a similar median
# Airline vs Price
sns.catplot(y = "Price", x = "Airline", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 6, aspect = 3)
plt.show()
# As Airline is Nominal Categorical data we will perform OneHotEncoding
Airline = train_data[["Airline"]]
Airline = pd.get_dummies(Airline, drop_first= True)
Airline.head()
train_data["Source"].value_counts()
# Source vs Price
sns.catplot(y = "Price", x = "Source", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 4, aspect = 3)
plt.show()
# As Source is Nominal Categorical data we will perform OneHotEncoding
Source = train_data[["Source"]]
Source = pd.get_dummies(Source, drop_first= True)
Source.head()
train_data["Destination"].value_counts()
# As Destination is Nominal Categorical data we will perform OneHotEncoding
Destination = train_data[["Destination"]]
Destination = pd.get_dummies(Destination, drop_first = True)
Destination.head()
train_data["Route"]
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
train_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
train_data["Total_Stops"].value_counts()
# As this is case of Ordinal Categorical type we perform LabelEncoder
# Here Values are assigned with corresponding keys
train_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
train_data.head()
# Concatenate dataframe --> train_data + Airline + Source + Destination
data_train = pd.concat([train_data, Airline, Source, Destination], axis = 1)
data_train.head()
data_train.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
data_train.head()
data_train.shape
###Output
_____no_output_____
###Markdown
--- Test set
###Code
test_data = pd.read_excel(r"C:\Users\balay\OneDrive\Bureau\Projects\Flight-Price-Prediction-master\test.xlsx")
test_data.head()
# Preprocessing
print("Test data Info")
print("-"*75)
print(test_data.info())
print()
print()
print("Null values :")
print("-"*75)
test_data.dropna(inplace = True)
print(test_data.isnull().sum())
# EDA
# Date_of_Journey
test_data["Journey_day"] = pd.to_datetime(test_data.Date_of_Journey, format="%d/%m/%Y").dt.day
test_data["Journey_month"] = pd.to_datetime(test_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
test_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Dep_Time
test_data["Dep_hour"] = pd.to_datetime(test_data["Dep_Time"]).dt.hour
test_data["Dep_min"] = pd.to_datetime(test_data["Dep_Time"]).dt.minute
test_data.drop(["Dep_Time"], axis = 1, inplace = True)
# Arrival_Time
test_data["Arrival_hour"] = pd.to_datetime(test_data.Arrival_Time).dt.hour
test_data["Arrival_min"] = pd.to_datetime(test_data.Arrival_Time).dt.minute
test_data.drop(["Arrival_Time"], axis = 1, inplace = True)
# Duration
duration = list(test_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding Duration column to test set
test_data["Duration_hours"] = duration_hours
test_data["Duration_mins"] = duration_mins
test_data.drop(["Duration"], axis = 1, inplace = True)
# Categorical data
print("Airline")
print("-"*75)
print(test_data["Airline"].value_counts())
Airline = pd.get_dummies(test_data["Airline"], drop_first= True)
print()
print("Source")
print("-"*75)
print(test_data["Source"].value_counts())
Source = pd.get_dummies(test_data["Source"], drop_first= True)
print()
print("Destination")
print("-"*75)
print(test_data["Destination"].value_counts())
Destination = pd.get_dummies(test_data["Destination"], drop_first = True)
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
test_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
# Replacing Total_Stops
test_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
# Concatenate dataframe --> test_data + Airline + Source + Destination
data_test = pd.concat([test_data, Airline, Source, Destination], axis = 1)
data_test.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
print()
print()
print("Shape of test data : ", data_test.shape)
data_test.head()
###Output
_____no_output_____
###Markdown
--- Feature SelectionFinding out the best features, i.e. those which contribute most and have a good relation with the target variable.Following are some of the feature selection methods,1. **heatmap**2. **feature_importance_**3. **SelectKBest**
###Code
data_train.shape
data_train.columns
X = data_train.loc[:, ['Total_Stops', 'Journey_day', 'Journey_month', 'Dep_hour',
'Dep_min', 'Arrival_hour', 'Arrival_min', 'Duration_hours',
'Duration_mins', 'Airline_Air India', 'Airline_GoAir', 'Airline_IndiGo',
'Airline_Jet Airways', 'Airline_Jet Airways Business',
'Airline_Multiple carriers',
'Airline_Multiple carriers Premium economy', 'Airline_SpiceJet',
'Airline_Trujet', 'Airline_Vistara', 'Airline_Vistara Premium economy',
'Source_Chennai', 'Source_Delhi', 'Source_Kolkata', 'Source_Mumbai',
'Destination_Cochin', 'Destination_Delhi', 'Destination_Hyderabad',
'Destination_Kolkata', 'Destination_New Delhi']]
X.head()
y = data_train.iloc[:, 1]
y.head()
# Finds correlation between Independent and dependent attributes
plt.figure(figsize = (18,18))
sns.heatmap(train_data.corr(), annot = True, cmap = "RdYlGn")
plt.show()
# Important feature using ExtraTreesRegressor
from sklearn.ensemble import ExtraTreesRegressor
selection = ExtraTreesRegressor()
selection.fit(X, y)
print(selection.feature_importances_)
#plot graph of feature importances for better visualization
plt.figure(figsize = (12,8))
feat_importances = pd.Series(selection.feature_importances_, index=X.columns)
feat_importances.nlargest(20).plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
--- Fitting model using Random Forest1. Split the dataset into train and test sets in order to predict w.r.t. X_test2. If needed, do scaling of data * Scaling is not required for Random Forest3. Import model4. Fit the data5. Predict w.r.t. X_test6. In regression check the **RMSE** score7. Plot graph
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 40)
from sklearn.ensemble import RandomForestRegressor
reg_rf = RandomForestRegressor()
reg_rf.fit(X_train, y_train)
y_pred = reg_rf.predict(X_test)
reg_rf.score(X_train, y_train)
reg_rf.score(X_test, y_test)
sns.distplot(y_test-y_pred)
plt.show()
plt.scatter(y_test, y_pred, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# RMSE/(max(DV)-min(DV))
2090.5509/(max(y)-min(y))
metrics.r2_score(y_test, y_pred)
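# Hedged sketch: a quick 5-fold cross-validated R^2 for the untuned forest, as a sanity
# check alongside the single train/test split above. Read-only; reg_rf is re-cloned inside.
from sklearn.model_selection import cross_val_score
print("CV R^2 scores:", cross_val_score(reg_rf, X, y, cv = 5, scoring = "r2"))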
###Output
_____no_output_____
###Markdown
--- Hyperparameter Tuning* Choose one of the following methods for hyperparameter tuning 1. **RandomizedSearchCV** --> Fast 2. **GridSearchCV*** Assign hyperparameters in the form of a dictionary* Fit the model* Check the best parameters and best score
###Code
from sklearn.model_selection import RandomizedSearchCV
#Randomized Search CV
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 100, stop = 1200, num = 12)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(5, 30, num = 6)]
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10, 15, 100]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 5, 10]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf}
# Random search of parameters, using 5 fold cross validation,
# search across 10 different parameter combinations (n_iter = 10)
rf_random = RandomizedSearchCV(estimator = reg_rf, param_distributions = random_grid,scoring='neg_mean_squared_error', n_iter = 10, cv = 5, verbose=2, random_state=42, n_jobs = 1)
rf_random.fit(X_train,y_train)
rf_random.best_params_
prediction = rf_random.predict(X_test)
plt.figure(figsize = (8,8))
sns.distplot(y_test-prediction)
plt.show()
plt.figure(figsize = (8,8))
plt.scatter(y_test, prediction, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
print('MAE:', metrics.mean_absolute_error(y_test, prediction))
print('MSE:', metrics.mean_squared_error(y_test, prediction))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, prediction)))
###Output
MAE: 1190.0600768861957
MSE: 4292463.491558876
RMSE: 2071.8261248374283
###Markdown
--- Save the model to reuse it again
###Code
import pickle
# open a file where you want to store the data
file = open('flight_rf.pkl', 'wb')
# dump information to that file
pickle.dump(rf_random, file)
model = open('flight_rf.pkl','rb')
forest = pickle.load(model)
y_prediction = forest.predict(X_test)
metrics.r2_score(y_test, y_prediction)
###Output
_____no_output_____
###Markdown
Flight Price Prediction---
###Code
from google.colab import drive
drive.mount('/content/drive')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
sns.set()
###Output
_____no_output_____
###Markdown
Importing dataset1. Since the data is in the form of an Excel file, we have to use pandas read_excel to load the data2. After loading, it is important to check the complete information of the data, as it can reveal much of the hidden information such as null values in a column or a row3. Check whether any null values are present. If they are, the following can be done, 1. Imputing data using the Imputation method in sklearn 2. Filling NaN values with the mean, median and mode using the fillna() method4. Describe data --> which can give a statistical analysis
###Code
df = pd.read_excel(r"/content/drive/MyDrive/Flight Fare Prediction/Data_Train.xlsx")
pd.set_option('display.max_columns', None)
df.head()
df.info()
df.dropna(inplace = True)
df.isnull().sum()
###Output
_____no_output_____
###Markdown
--- EDA From the description we can see that Date_of_Journey is an object data type,\Therefore, we have to convert this data type into a timestamp so as to use this column properly for prediction. For this we require pandas **to_datetime** to convert the object data type to datetime dtype.**.dt.day method will extract only the day of that date**\**.dt.month method will extract only the month of that date**
###Code
df["Journey_day"] = pd.to_datetime(df.Date_of_Journey, format="%d/%m/%Y").dt.day
df["Journey_month"] = pd.to_datetime(df["Date_of_Journey"], format = "%d/%m/%Y").dt.month
df.head()
# Since we have converted the Date_of_Journey column into integers, we can now drop it as it is of no use.
df.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Departure time is when a plane leaves the gate.
# Similar to Date_of_Journey we can extract values from Dep_Time
# Extracting Hours
df["Dep_hour"] = pd.to_datetime(df["Dep_Time"]).dt.hour
# Extracting Minutes
df["Dep_min"] = pd.to_datetime(df["Dep_Time"]).dt.minute
# Now we can drop Dep_Time as it is of no use
df.drop(["Dep_Time"], axis = 1, inplace = True)
df.head()
# Arrival time is when the plane pulls up to the gate.
# Similar to Date_of_Journey we can extract values from Arrival_Time
# Extracting Hours
df["Arrival_hour"] = pd.to_datetime(df.Arrival_Time).dt.hour
# Extracting Minutes
df["Arrival_min"] = pd.to_datetime(df.Arrival_Time).dt.minute
# Now we can drop Arrival_Time as it is of no use
df.drop(["Arrival_Time"], axis = 1, inplace = True)
df.head()
# Time taken by plane to reach destination is called Duration
# It is the difference between Departure Time and Arrival Time
# Assigning and converting Duration column into list
duration = list(df["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding duration_hours and duration_mins list to train_data dataframe
df["Duration_hours"] = duration_hours
df["Duration_mins"] = duration_mins
df.drop(["Duration"], axis = 1, inplace = True)
df.head()
#price outlier check
Q1=df['Price'].quantile(0.25)
Q3=df['Price'].quantile(0.75)
IQR=Q3-Q1
print(Q1)
print(Q3)
print(IQR)
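# Keep only fares inside [Q1 - 1.5*IQR, Q3 + 1.5*IQR]; rows outside that band are treated as price outliers and dropped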
df=df[~((df['Price']>Q3+1.5*IQR)|(df['Price']<Q1-1.5*IQR))]
###Output
_____no_output_____
###Markdown
--- Handling Categorical DataOne can find many ways to handle categorical data. Some of the categorical data types are,1. **Nominal data** --> data are not in any order --> **OneHotEncoder** is used in this case2. **Ordinal data** --> data are in order --> **LabelEncoder** is used in this case
###Code
df["Airline"].value_counts()
plt.figure(figsize=(12,6))
sns.countplot(df['Airline'])
plt.title('Count of Airlines', size=30)
plt.xticks(rotation=90)
plt.show()
# From the graph we can see that Jet Airways Business has the highest Price.
# Apart from the first Airline, almost all have a similar median
# Airline vs Price
# sns.catplot(y = "Price", x = "Airline", data = df.sort_values("Price", ascending = False), kind="boxen", height = 6, aspect = 3)
# plt.show()
#or
plt.figure(figsize=(12,6))
sns.boxenplot(df['Airline'], df['Price'], palette='Set3')
plt.title('Airlines vs Price', size=30)
plt.xticks(rotation=90)
plt.show()
# As Airline is Nominal Categorical data we will perform OneHotEncoding
Airline = df[["Airline"]]
Airline = pd.get_dummies(Airline, drop_first= True)
Airline.head()
df["Source"].value_counts()
plt.figure(figsize=(12,6))
sns.countplot(df['Source'], palette='Set2')
plt.title('Count of Source', size=30)
plt.xticks(rotation=90)
plt.show()
# Source vs Price
plt.figure(figsize=(12,6))
sns.boxenplot(df['Source'], df['Price'], palette='Set3')
plt.title('Source vs Price', size=30)
plt.xticks(rotation=90)
plt.show()
# As Source is Nominal Categorical data we will perform OneHotEncoding
Source = df[["Source"]]
Source = pd.get_dummies(Source, drop_first= True)
Source.head()
df["Destination"].value_counts()
plt.figure(figsize=(12,6))
sns.countplot(df['Destination'], palette='Set2')
plt.title('Count of Destination', size=30)
plt.xticks(rotation=90)
plt.show()
plt.figure(figsize=(12,6))
sns.boxenplot(df['Destination'], df['Price'], palette='Set3')
plt.title('Destination vs Price', size=30)
plt.xticks(rotation=90)
plt.show()
# As Destination is Nominal Categorical data we will perform OneHotEncoding
Destination = df[["Destination"]]
Destination = pd.get_dummies(Destination, drop_first = True)
Destination.head()
df["Route"]
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
df.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
df["Total_Stops"].value_counts()
# As this is case of Ordinal Categorical type we perform LabelEncoder
# Here Values are assigned with corresponding keys
df.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
df.head()
# Concatenate dataframe --> train_data + Airline + Source + Destination
data_train = pd.concat([df, Airline, Source, Destination], axis = 1)
data_train.head()
data_train.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
data_train.head()
data_train.shape
###Output
_____no_output_____
###Markdown
--- Feature SelectionFinding out the best features, i.e. those which contribute most and have a good relation with the target variable.Following are some of the feature selection methods,1. **heatmap**2. **feature_importance_**3. **SelectKBest**
###Code
data_train.shape
data_train.columns
X = data_train.loc[:, ['Total_Stops', 'Journey_day', 'Journey_month', 'Dep_hour',
'Dep_min', 'Arrival_hour', 'Arrival_min', 'Duration_hours',
'Duration_mins', 'Airline_Air India', 'Airline_GoAir', 'Airline_IndiGo',
'Airline_Jet Airways',
'Airline_Multiple carriers',
'Airline_Multiple carriers Premium economy', 'Airline_SpiceJet',
'Airline_Trujet', 'Airline_Vistara', 'Airline_Vistara Premium economy',
'Source_Chennai', 'Source_Delhi', 'Source_Kolkata', 'Source_Mumbai',
'Destination_Cochin', 'Destination_Delhi', 'Destination_Hyderabad',
'Destination_Kolkata', 'Destination_New Delhi']]
X.head()
y = data_train.iloc[:, 1]
y.head()
x = X
X=np.array(X)
y=np.array(y)
# Important feature using ExtraTreesRegressor
from sklearn.ensemble import ExtraTreesRegressor
selection = ExtraTreesRegressor()
selection.fit(X, y)
#plot graph of feature importances for better visualization
plt.figure(figsize = (12,8))
feat_importances = pd.Series(selection.feature_importances_, index=x.columns)
feat_importances.nlargest(20).plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
--- Fitting model using Random Forest1. Split the dataset into train and test sets in order to predict w.r.t. X_test2. If needed, do scaling of data * Scaling is not required for Random Forest3. Import model4. Fit the data5. Predict w.r.t. X_test6. In regression check the **RMSE** score7. Plot graph
###Code
from scipy.stats import chi2_contingency
from sklearn import preprocessing
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from xgboost import XGBRegressor
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 7)
from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error
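# Helper used below: fits the given regressor on the training split, prints the train R^2,
# the test R^2 / MAE / MSE / RMSE, and plots the distribution of the residuals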
def predict(ml_model):
print('Model is: {}'.format(ml_model))
model= ml_model.fit(X_train,y_train)
print("Training score: {}".format(model.score(X_train,y_train)))
predictions = model.predict(X_test)
print("Predictions are: {}".format(predictions))
print('\n')
r2score=r2_score(y_test,predictions)
print("r2 score is: {}".format(r2score))
print('MAE:{}'.format(mean_absolute_error(y_test,predictions)))
print('MSE:{}'.format(mean_squared_error(y_test,predictions)))
print('RMSE:{}'.format(np.sqrt(mean_squared_error(y_test,predictions))))
sns.distplot(y_test-predictions)
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingRegressor,RandomForestRegressor
predict(RandomForestRegressor())
predict(KNeighborsRegressor())
predict(DecisionTreeRegressor())
from sklearn.svm import SVR
predict(SVR())
predict(GradientBoostingRegressor())
predict(XGBRegressor())
###Output
Model is: XGBRegressor()
[12:18:44] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.
Training score: 0.7839373290588117
Predictions are: [ 9795.106 12220.209 4386.884 ... 7211.0806 3861.917 10528.835 ]
r2 score is: 0.7743877747415018
MAE:1471.9932010639827
MSE:3819256.5251765046
RMSE:1954.2918219080038
###Markdown
--- Hyperparameter Tuning* Choose one of the following methods for hyperparameter tuning 1. **RandomizedSearchCV** --> Fast 2. **GridSearchCV*** Assign hyperparameters in the form of a dictionary* Fit the model* Check the best parameters and best score
###Code
from sklearn.model_selection import RandomizedSearchCV
reg_rf = RandomForestRegressor()
#Randomized Search CV
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 100, stop = 1200, num = 12)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(5, 30, num = 6)]
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10, 15, 100]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 5, 10]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf}
# Random search of parameters, using 5 fold cross validation,
# search across 10 different parameter combinations (n_iter = 10)
rf_random = RandomizedSearchCV(estimator = reg_rf, param_distributions = random_grid,scoring='neg_mean_squared_error', n_iter = 10, cv = 5, verbose=2, random_state=42, n_jobs = 1)
rf_random.fit(X_train,y_train)
rf_random.best_params_
prediction = rf_random.predict(X_test)
plt.figure(figsize = (8,8))
sns.distplot(y_test-prediction)
plt.show()
plt.figure(figsize = (8,8))
plt.scatter(y_test, prediction, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, prediction))
print('MSE:', metrics.mean_squared_error(y_test, prediction))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, prediction)))
###Output
MAE: 1100.6526257768312
MSE: 2582077.497154234
RMSE: 1606.8844069049378
###Markdown
--- Save the model to reuse it again
###Code
import pickle
# open a file where you want to store the data
file = open('model.pkl', 'wb')
# dump information to that file
pickle.dump(rf_random, file)
model = open('/content/model.pkl','rb')
forest = pickle.load(model)
y_prediction = forest.predict(X_test)
metrics.r2_score(y_test, y_prediction)
###Output
_____no_output_____
###Markdown
Flight Price Prediction---
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
###Output
_____no_output_____
###Markdown
Importing dataset1. Since the data is in the form of an Excel file, we have to use pandas read_excel to load the data2. After loading, it is important to check the complete information of the data, as it can reveal much of the hidden information such as null values in a column or a row3. Check whether any null values are present. If they are, the following can be done, 1. Imputing data using the Imputation method in sklearn 2. Filling NaN values with the mean, median and mode using the fillna() method4. Describe data --> which can give a statistical analysis
###Code
train_data = pd.read_excel(r"C:\Users\Jagadeesh Himayan\Desktop\SC\FPP\Data_Train.xlsx")
pd.set_option('display.max_columns', None)
train_data.head()
train_data.info()
train_data["Duration"].value_counts()
train_data.dropna(inplace = True)
train_data.isnull().sum()
###Output
_____no_output_____
###Markdown
--- EDA From the description we can see that Date_of_Journey is an object data type,\Therefore, we have to convert this data type into a timestamp so as to use this column properly for prediction. For this we require pandas **to_datetime** to convert the object data type to datetime dtype.**.dt.day method will extract only the day of that date**\**.dt.month method will extract only the month of that date**
###Code
train_data["Journey_day"] = pd.to_datetime(train_data.Date_of_Journey, format="%d/%m/%Y").dt.day
train_data["Journey_month"] = pd.to_datetime(train_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
train_data.head()
###Output
_____no_output_____
###Markdown
Since we have converted the Date_of_Journey column into integers, we can now drop it as it is of no use.
###Code
train_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Departure time is when a plane leaves the gate.
# Similar to Date_of_Journey we can extract values from Dep_Time
# Extracting Hours
train_data["Dep_hour"] = pd.to_datetime(train_data["Dep_Time"]).dt.hour
# Extracting Minutes
train_data["Dep_min"] = pd.to_datetime(train_data["Dep_Time"]).dt.minute
# Now we can drop Dep_Time as it is of no use
train_data.drop(["Dep_Time"], axis = 1, inplace = True)
train_data.head()
# Arrival time is when the plane pulls up to the gate.
# Similar to Date_of_Journey we can extract values from Arrival_Time
# Extracting Hours
train_data["Arrival_hour"] = pd.to_datetime(train_data.Arrival_Time).dt.hour
# Extracting Minutes
train_data["Arrival_min"] = pd.to_datetime(train_data.Arrival_Time).dt.minute
# Now we can drop Arrival_Time as it is of no use
train_data.drop(["Arrival_Time"], axis = 1, inplace = True)
train_data.head()
# Time taken by plane to reach destination is called Duration
# It is the difference between Departure Time and Arrival Time
# Assigning and converting Duration column into list
duration = list(train_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding duration_hours and duration_mins list to train_data dataframe
train_data["Duration_hours"] = duration_hours
train_data["Duration_mins"] = duration_mins
train_data.drop(["Duration"], axis = 1, inplace = True)
train_data.head()
###Output
_____no_output_____
###Markdown
--- Handling Categorical DataOne can find many ways to handle categorical data. Some of the categorical data types are,1. **Nominal data** --> data are not in any order --> **OneHotEncoder** is used in this case2. **Ordinal data** --> data are in order --> **LabelEncoder** is used in this case
###Code
train_data["Airline"].value_counts()
# From the graph we can see that Jet Airways Business has the highest Price.
# Apart from the first Airline, almost all have a similar median
# Airline vs Price
sns.catplot(y = "Price", x = "Airline", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 6, aspect = 3)
plt.show()
# As Airline is Nominal Categorical data we will perform OneHotEncoding
Airline = train_data[["Airline"]]
Airline = pd.get_dummies(Airline, drop_first= True)
Airline.head()
train_data["Source"].value_counts()
# Source vs Price
sns.catplot(y = "Price", x = "Source", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 4, aspect = 3)
plt.show()
# As Source is Nominal Categorical data we will perform OneHotEncoding
Source = train_data[["Source"]]
Source = pd.get_dummies(Source, drop_first= True)
Source.head()
train_data["Destination"].value_counts()
# As Destination is Nominal Categorical data we will perform OneHotEncoding
Destination = train_data[["Destination"]]
Destination = pd.get_dummies(Destination, drop_first = True)
Destination.head()
train_data["Route"]
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
train_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
train_data["Total_Stops"].value_counts()
# As this is case of Ordinal Categorical type we perform LabelEncoder
# Here Values are assigned with corresponding keys
train_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
train_data.head()
# Concatenate dataframe --> train_data + Airline + Source + Destination
data_train = pd.concat([train_data, Airline, Source, Destination], axis = 1)
data_train.head()
data_train.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
data_train.head()
data_train.shape
###Output
_____no_output_____
###Markdown
--- Test set
###Code
test_data = pd.read_excel(r"C:\Users\Jagadeesh Himayan\Desktop\SC\FPP\Test_set.xlsx")
test_data.head()
# Preprocessing
print("Test data Info")
print("-"*75)
print(test_data.info())
print()
print()
print("Null values :")
print("-"*75)
test_data.dropna(inplace = True)
print(test_data.isnull().sum())
# EDA
# Date_of_Journey
test_data["Journey_day"] = pd.to_datetime(test_data.Date_of_Journey, format="%d/%m/%Y").dt.day
test_data["Journey_month"] = pd.to_datetime(test_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
test_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Dep_Time
test_data["Dep_hour"] = pd.to_datetime(test_data["Dep_Time"]).dt.hour
test_data["Dep_min"] = pd.to_datetime(test_data["Dep_Time"]).dt.minute
test_data.drop(["Dep_Time"], axis = 1, inplace = True)
# Arrival_Time
test_data["Arrival_hour"] = pd.to_datetime(test_data.Arrival_Time).dt.hour
test_data["Arrival_min"] = pd.to_datetime(test_data.Arrival_Time).dt.minute
test_data.drop(["Arrival_Time"], axis = 1, inplace = True)
# Duration
duration = list(test_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding Duration column to test set
test_data["Duration_hours"] = duration_hours
test_data["Duration_mins"] = duration_mins
test_data.drop(["Duration"], axis = 1, inplace = True)
# Categorical data
print("Airline")
print("-"*75)
print(test_data["Airline"].value_counts())
Airline = pd.get_dummies(test_data["Airline"], drop_first= True)
print()
print("Source")
print("-"*75)
print(test_data["Source"].value_counts())
Source = pd.get_dummies(test_data["Source"], drop_first= True)
print()
print("Destination")
print("-"*75)
print(test_data["Destination"].value_counts())
Destination = pd.get_dummies(test_data["Destination"], drop_first = True)
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
test_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
# Replacing Total_Stops
test_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
# Concatenate dataframe --> test_data + Airline + Source + Destination
data_test = pd.concat([test_data, Airline, Source, Destination], axis = 1)
data_test.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
print()
print()
print("Shape of test data : ", data_test.shape)
data_test.head()
###Output
_____no_output_____
###Markdown
--- Feature SelectionFinding out the features that contribute most and have a strong relation with the target variable. Following are some of the feature selection methods,1. **heatmap**2. **feature_importance_**3. **SelectKBest**
###Code
data_train.shape
data_train.columns
X = data_train.loc[:, ['Total_Stops', 'Journey_day', 'Journey_month', 'Dep_hour',
'Dep_min', 'Arrival_hour', 'Arrival_min', 'Duration_hours',
'Duration_mins', 'Airline_Air India', 'Airline_GoAir', 'Airline_IndiGo',
'Airline_Jet Airways', 'Airline_Jet Airways Business',
'Airline_Multiple carriers',
'Airline_Multiple carriers Premium economy', 'Airline_SpiceJet',
'Airline_Trujet', 'Airline_Vistara', 'Airline_Vistara Premium economy',
'Source_Chennai', 'Source_Delhi', 'Source_Kolkata', 'Source_Mumbai',
'Destination_Cochin', 'Destination_Delhi', 'Destination_Hyderabad',
'Destination_Kolkata', 'Destination_New Delhi']]
X.head()
y = data_train.iloc[:, 1]
y.head()
# Finds correlation between Independent and dependent attributes
plt.figure(figsize = (18,18))
sns.heatmap(train_data.corr(), annot = True, cmap = "RdYlGn")
plt.show()
# Important feature using ExtraTreesRegressor
from sklearn.ensemble import ExtraTreesRegressor
selection = ExtraTreesRegressor()
selection.fit(X, y)
print(selection.feature_importances_)
#plot graph of feature importances for better visualization
plt.figure(figsize = (12,8))
feat_importances = pd.Series(selection.feature_importances_, index=X.columns)
feat_importances.nlargest(20).plot(kind='barh')
plt.show()
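# The markdown above also lists SelectKBest as a feature-selection option, but it is not
# used in this notebook. A minimal, hedged sketch of how it could be applied to the same
# X and y (f_regression is one possible score function; k = 10 is an illustrative choice):
from sklearn.feature_selection import SelectKBest, f_regression
kbest = SelectKBest(score_func = f_regression, k = 10)
kbest.fit(X, y)
kbest_scores = pd.Series(kbest.scores_, index = X.columns).sort_values(ascending = False)
print(kbest_scores.head(10))   # features ranked by univariate F-score against Price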
###Output
_____no_output_____
###Markdown
--- Fitting model using Random Forest1. Split the dataset into train and test sets in order to predict w.r.t X_test2. If needed, scale the data * Scaling is not required for Random Forest3. Import the model4. Fit the data5. Predict w.r.t X_test6. For regression, check the **RMSE** score7. Plot graph
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
from sklearn.ensemble import RandomForestRegressor
reg_rf = RandomForestRegressor()
reg_rf.fit(X_train, y_train)
y_pred = reg_rf.predict(X_test)
reg_rf.score(X_train, y_train)
reg_rf.score(X_test, y_test)
sns.distplot(y_test-y_pred)
plt.show()
plt.scatter(y_test, y_pred, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# RMSE/(max(DV)-min(DV))
2090.5509/(max(y)-min(y))
metrics.r2_score(y_test, y_pred)
###Output
_____no_output_____
###Markdown
--- Hyperparameter Tuning* Choose one of the following methods for hyperparameter tuning 1. **RandomizedSearchCV** --> Fast 2. **GridSearchCV*** Assign hyperparameters in the form of a dictionary* Fit the model* Check the best parameters and best score
###Code
from sklearn.model_selection import RandomizedSearchCV
#Randomized Search CV
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 100, stop = 1200, num = 12)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(5, 30, num = 6)]
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10, 15, 100]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 5, 10]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf}
# Random search of parameters, using 5 fold cross validation,
# search across 10 different combinations (n_iter = 10)
rf_random = RandomizedSearchCV(estimator = reg_rf, param_distributions = random_grid,scoring='neg_mean_squared_error', n_iter = 10, cv = 5, verbose=2, random_state=42, n_jobs = 1)
rf_random.fit(X_train,y_train)
rf_random.best_params_
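# GridSearchCV is listed in the markdown as the second tuning option but is not run here.
# A hedged sketch of an exhaustive search over a small grid centred on plausible values
# from the randomized search (the grid below is illustrative, not tuned):
from sklearn.model_selection import GridSearchCV
small_grid = {'n_estimators': [500, 700, 900],
              'max_depth': [15, 20, 25],
              'min_samples_split': [5, 10, 15]}
rf_grid = GridSearchCV(estimator = RandomForestRegressor(), param_grid = small_grid,
                       scoring = 'neg_mean_squared_error', cv = 5, verbose = 2, n_jobs = 1)
# rf_grid.fit(X_train, y_train)   # 27 combinations x 5 folds = 135 fits, so this is slow
# print(rf_grid.best_params_, rf_grid.best_score_)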
prediction = rf_random.predict(X_test)
plt.figure(figsize = (8,8))
sns.distplot(y_test-prediction)
plt.show()
plt.figure(figsize = (8,8))
plt.scatter(y_test, prediction, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
print('MAE:', metrics.mean_absolute_error(y_test, prediction))
print('MSE:', metrics.mean_squared_error(y_test, prediction))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, prediction)))
###Output
MAE: 1166.293199169378
MSE: 4053297.8975700624
RMSE: 2013.2803822543106
###Markdown
--- Save the model to reuse it again
###Code
import pickle
# open a file, where you want to store the data
file = open('flight_rf.pkl', 'wb')
# dump information to that file
pickle.dump(reg_rf, file)
file.close()  # flush the pickle to disk before reopening it below
model = open('flight_rf.pkl','rb')
forest = pickle.load(model)
y_prediction = forest.predict(X_test)
metrics.r2_score(y_test, y_prediction)
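# A hedged alternative for persistence: joblib (installed alongside scikit-learn) is often
# preferred over raw pickle for estimators holding large NumPy arrays. The file name below
# is illustrative.
import joblib
joblib.dump(reg_rf, 'flight_rf.joblib')
loaded_rf = joblib.load('flight_rf.joblib')
print(metrics.r2_score(y_test, loaded_rf.predict(X_test)))   # should match the pickle-based score above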
###Output
_____no_output_____
###Markdown
Flight Price Prediction---
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
###Output
_____no_output_____
###Markdown
Importing dataset1. Since the data is in the form of an Excel file, we have to use pandas read_excel to load it2. After loading, it is important to check the complete information of the data, as it can indicate hidden information such as null values in a column or a row3. Check whether any null values are present. If they are, the following can be done, 1. Imputing data using the imputation methods in sklearn 2. Filling NaN values with the mean, median or mode using the fillna() method4. Describe the data --> which gives a statistical summary
###Code
train_data = pd.read_excel(r"E:\MachineLearning\EDA\Flight_Price\Data_Train.xlsx")
pd.set_option('display.max_columns', None)
train_data.head()
train_data.info()
train_data["Duration"].value_counts()
train_data.dropna(inplace = True)
train_data.isnull().sum()
###Output
_____no_output_____
###Markdown
--- EDA From the description we can see that Date_of_Journey is an object data type,\Therefore, we have to convert this datatype into a timestamp so as to use this column properly for prediction. For this we require pandas **to_datetime** to convert the object data type to datetime dtype.**.dt.day will extract only the day of that date**\**.dt.month will extract only the month of that date**
###Code
train_data["Journey_day"] = pd.to_datetime(train_data.Date_of_Journey, format="%d/%m/%Y").dt.day
train_data["Journey_month"] = pd.to_datetime(train_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
train_data.head()
# Since we have converted Date_of_Journey column into integers, Now we can drop as it is of no use.
train_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Departure time is when a plane leaves the gate.
# Similar to Date_of_Journey we can extract values from Dep_Time
# Extracting Hours
train_data["Dep_hour"] = pd.to_datetime(train_data["Dep_Time"]).dt.hour
# Extracting Minutes
train_data["Dep_min"] = pd.to_datetime(train_data["Dep_Time"]).dt.minute
# Now we can drop Dep_Time as it is of no use
train_data.drop(["Dep_Time"], axis = 1, inplace = True)
train_data.head()
# Arrival time is when the plane pulls up to the gate.
# Similar to Date_of_Journey we can extract values from Arrival_Time
# Extracting Hours
train_data["Arrival_hour"] = pd.to_datetime(train_data.Arrival_Time).dt.hour
# Extracting Minutes
train_data["Arrival_min"] = pd.to_datetime(train_data.Arrival_Time).dt.minute
# Now we can drop Arrival_Time as it is of no use
train_data.drop(["Arrival_Time"], axis = 1, inplace = True)
train_data.head()
# Time taken by plane to reach destination is called Duration
# It is the difference between Departure Time and Arrival Time
# Assigning and converting Duration column into list
duration = list(train_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding duration_hours and duration_mins list to train_data dataframe
train_data["Duration_hours"] = duration_hours
train_data["Duration_mins"] = duration_mins
train_data.drop(["Duration"], axis = 1, inplace = True)
train_data.head()
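# A hedged, vectorized alternative to the loop above: pandas' str.extract can split a
# "2h 50m"-style Duration string with one regular expression. Shown on a small illustrative
# Series rather than re-applied to train_data (the Duration column was already dropped).
_demo = pd.Series(["2h 50m", "19h", "45m"])
_parsed = _demo.str.extract(r"(?:(?P<hours>\d+)h)?\s*(?:(?P<mins>\d+)m)?").fillna(0).astype(int)
print(_parsed)   # columns: hours, mins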
###Output
_____no_output_____
###Markdown
--- Handling Categorical DataOne can find many ways to handle categorical data. The two main kinds of categorical data are,1. **Nominal data** --> data are not in any order --> **OneHotEncoder** is used in this case2. **Ordinal data** --> data are in order --> **LabelEncoder** is used in this case
###Code
train_data["Airline"].value_counts()
# From graph we can see that Jet Airways Business have the highest Price.
# Apart from the first Airline almost all are having similar median
# Airline vs Price
sns.catplot(y = "Price", x = "Airline", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 6, aspect = 3)
plt.show()
# As Airline is Nominal Categorical data we will perform OneHotEncoding
Airline = train_data[["Airline"]]
Airline = pd.get_dummies(Airline, drop_first= True)
Airline.head()
train_data["Source"].value_counts()
# Source vs Price
sns.catplot(y = "Price", x = "Source", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 4, aspect = 3)
plt.show()
# As Source is Nominal Categorical data we will perform OneHotEncoding
Source = train_data[["Source"]]
Source = pd.get_dummies(Source, drop_first= True)
Source.head()
train_data["Destination"].value_counts()
# As Destination is Nominal Categorical data we will perform OneHotEncoding
Destination = train_data[["Destination"]]
Destination = pd.get_dummies(Destination, drop_first = True)
Destination.head()
train_data["Route"]
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
train_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
train_data["Total_Stops"].value_counts()
# As this is case of Ordinal Categorical type we perform LabelEncoder
# Here Values are assigned with corresponding keys
train_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
train_data.head()
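# The markdown mentions LabelEncoder for ordinal data; note that LabelEncoder would order the
# labels alphabetically, which is not the stop order we want. A hedged sketch of an
# order-preserving equivalent with sklearn's OrdinalEncoder, shown on an illustrative frame
# because Total_Stops has already been mapped above:
from sklearn.preprocessing import OrdinalEncoder
_stops = pd.DataFrame({"Total_Stops": ["non-stop", "2 stops", "1 stop"]})
_encoder = OrdinalEncoder(categories = [["non-stop", "1 stop", "2 stops", "3 stops", "4 stops"]])
print(_encoder.fit_transform(_stops))   # [[0.], [2.], [1.]] -- same mapping as the manual replace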
# Concatenate dataframe --> train_data + Airline + Source + Destination
data_train = pd.concat([train_data, Airline, Source, Destination], axis = 1)
data_train.head()
data_train.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
data_train.head()
data_train.shape
###Output
_____no_output_____
###Markdown
--- Test set
###Code
test_data = pd.read_excel(r"E:\MachineLearning\EDA\Flight_Price\Test_set.xlsx")
test_data.head()
# Preprocessing
print("Test data Info")
print("-"*75)
print(test_data.info())
print()
print()
print("Null values :")
print("-"*75)
test_data.dropna(inplace = True)
print(test_data.isnull().sum())
# EDA
# Date_of_Journey
test_data["Journey_day"] = pd.to_datetime(test_data.Date_of_Journey, format="%d/%m/%Y").dt.day
test_data["Journey_month"] = pd.to_datetime(test_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
test_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Dep_Time
test_data["Dep_hour"] = pd.to_datetime(test_data["Dep_Time"]).dt.hour
test_data["Dep_min"] = pd.to_datetime(test_data["Dep_Time"]).dt.minute
test_data.drop(["Dep_Time"], axis = 1, inplace = True)
# Arrival_Time
test_data["Arrival_hour"] = pd.to_datetime(test_data.Arrival_Time).dt.hour
test_data["Arrival_min"] = pd.to_datetime(test_data.Arrival_Time).dt.minute
test_data.drop(["Arrival_Time"], axis = 1, inplace = True)
# Duration
duration = list(test_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding Duration column to test set
test_data["Duration_hours"] = duration_hours
test_data["Duration_mins"] = duration_mins
test_data.drop(["Duration"], axis = 1, inplace = True)
# Categorical data
print("Airline")
print("-"*75)
print(test_data["Airline"].value_counts())
Airline = pd.get_dummies(test_data["Airline"], drop_first= True)
print()
print("Source")
print("-"*75)
print(test_data["Source"].value_counts())
Source = pd.get_dummies(test_data["Source"], drop_first= True)
print()
print("Destination")
print("-"*75)
print(test_data["Destination"].value_counts())
Destination = pd.get_dummies(test_data["Destination"], drop_first = True)
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
test_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
# Replacing Total_Stops
test_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
# Concatenate dataframe --> test_data + Airline + Source + Destination
data_test = pd.concat([test_data, Airline, Source, Destination], axis = 1)
data_test.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
print()
print()
print("Shape of test data : ", data_test.shape)
data_test.head()
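# Hedged note: pd.get_dummies on the test set only creates dummy columns for the airline,
# source and destination categories that actually appear there, so data_test can end up with
# fewer (or differently ordered) columns than data_train. A minimal sketch that aligns the
# test frame to the training columns (Price is excluded because the test set has no target);
# data_test_aligned is an illustrative name, not used elsewhere in this notebook.
train_feature_cols = [col for col in data_train.columns if col != "Price"]
data_test_aligned = data_test.reindex(columns = train_feature_cols, fill_value = 0)
print("Aligned test shape:", data_test_aligned.shape)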
###Output
_____no_output_____
###Markdown
--- Feature SelectionFinding out the features that contribute most and have a strong relation with the target variable. Following are some of the feature selection methods,1. **heatmap**2. **feature_importance_**3. **SelectKBest**
###Code
data_train.shape
data_train.columns
X = data_train.loc[:, ['Total_Stops', 'Journey_day', 'Journey_month', 'Dep_hour',
'Dep_min', 'Arrival_hour', 'Arrival_min', 'Duration_hours',
'Duration_mins', 'Airline_Air India', 'Airline_GoAir', 'Airline_IndiGo',
'Airline_Jet Airways', 'Airline_Jet Airways Business',
'Airline_Multiple carriers',
'Airline_Multiple carriers Premium economy', 'Airline_SpiceJet',
'Airline_Trujet', 'Airline_Vistara', 'Airline_Vistara Premium economy',
'Source_Chennai', 'Source_Delhi', 'Source_Kolkata', 'Source_Mumbai',
'Destination_Cochin', 'Destination_Delhi', 'Destination_Hyderabad',
'Destination_Kolkata', 'Destination_New Delhi']]
X.head()
y = data_train.iloc[:, 1]
y.head()
# Finds correlation between Independent and dependent attributes
plt.figure(figsize = (18,18))
sns.heatmap(train_data.corr(), annot = True, cmap = "RdYlGn")
plt.show()
# Important feature using ExtraTreesRegressor
from sklearn.ensemble import ExtraTreesRegressor
selection = ExtraTreesRegressor()
selection.fit(X, y)
print(selection.feature_importances_)
#plot graph of feature importances for better visualization
plt.figure(figsize = (12,8))
feat_importances = pd.Series(selection.feature_importances_, index=X.columns)
feat_importances.nlargest(20).plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
--- Fitting model using Random Forest1. Split the dataset into train and test sets in order to predict w.r.t X_test2. If needed, scale the data * Scaling is not required for Random Forest3. Import the model4. Fit the data5. Predict w.r.t X_test6. For regression, check the **RMSE** score7. Plot graph
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
from sklearn.ensemble import RandomForestRegressor
reg_rf = RandomForestRegressor()
reg_rf.fit(X_train, y_train)
y_pred = reg_rf.predict(X_test)
reg_rf.score(X_train, y_train)
reg_rf.score(X_test, y_test)
sns.distplot(y_test-y_pred)
plt.show()
plt.scatter(y_test, y_pred, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# RMSE/(max(DV)-min(DV))
2090.5509/(max(y)-min(y))
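# The 2090.5509 above is a hard-coded RMSE from an earlier run; a hedged sketch that computes
# the same normalised error directly from the current predictions instead:
rmse = np.sqrt(metrics.mean_squared_error(y_test, y_pred))
print("RMSE / price range:", rmse / (y.max() - y.min()))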
metrics.r2_score(y_test, y_pred)
###Output
_____no_output_____
###Markdown
--- Hyperparameter Tuning* Choose one of the following methods for hyperparameter tuning 1. **RandomizedSearchCV** --> Fast 2. **GridSearchCV*** Assign hyperparameters in the form of a dictionary* Fit the model* Check the best parameters and best score
###Code
from sklearn.model_selection import RandomizedSearchCV
#Randomized Search CV
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 100, stop = 1200, num = 12)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(5, 30, num = 6)]
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10, 15, 100]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 5, 10]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf}
# Random search of parameters, using 5 fold cross validation,
# search across 10 different combinations (n_iter = 10)
rf_random = RandomizedSearchCV(estimator = reg_rf, param_distributions = random_grid,scoring='neg_mean_squared_error', n_iter = 10, cv = 5, verbose=2, random_state=42, n_jobs = 1)
rf_random.fit(X_train,y_train)
rf_random.best_params_
prediction = rf_random.predict(X_test)
plt.figure(figsize = (8,8))
sns.distplot(y_test-prediction)
plt.show()
plt.figure(figsize = (8,8))
plt.scatter(y_test, prediction, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
print('MAE:', metrics.mean_absolute_error(y_test, prediction))
print('MSE:', metrics.mean_squared_error(y_test, prediction))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, prediction)))
###Output
MAE: 1165.606162629916
MSE: 4062650.6911608884
RMSE: 2015.6018186042818
###Markdown
--- Save the model to reuse it again
###Code
import pickle
# open a file, where you want to store the data
file = open('flight_rf.pkl', 'wb')
# dump information to that file
pickle.dump(reg_rf, file)
file.close()  # flush the pickle to disk before reopening it
model = open('flight_rf.pkl','rb')
forest = pickle.load(model)
y_prediction = forest.predict(X_test)
metrics.r2_score(y_test, y_prediction)
###Output
_____no_output_____
###Markdown
Flight Price Prediction---
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import seaborn as sns
sns.set()
###Output
_____no_output_____
###Markdown
Importing dataset1. Since the data is in the form of an Excel file, we have to use pandas read_excel to load it2. After loading, it is important to check the complete information of the data, as it can indicate hidden information such as null values in a column or a row3. Check whether any null values are present. If they are, the following can be done, 1. Imputing data using the imputation methods in sklearn 2. Filling NaN values with the mean, median or mode using the fillna() method4. Describe the data --> which gives a statistical summary
###Code
train_data = pd.read_excel('Data_Train.xlsx', engine='openpyxl')
pd.set_option('display.max_columns', None)
train_data.head()
train_data.info()
train_data.shape
train_data["Duration"].value_counts()
train_data.dropna(inplace = True)
train_data.isnull().sum()
###Output
_____no_output_____
###Markdown
--- EDA From the description we can see that Date_of_Journey is an object data type,\Therefore, we have to convert this datatype into a timestamp so as to use this column properly for prediction. For this we require pandas **to_datetime** to convert the object data type to datetime dtype.**.dt.day will extract only the day of that date**\**.dt.month will extract only the month of that date**
###Code
train_data["Journey_day"] = pd.to_datetime(train_data.Date_of_Journey, format="%d/%m/%Y").dt.day
train_data["Journey_month"] = pd.to_datetime(train_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
train_data.head()
# Since we have converted Date_of_Journey column into integers, Now we can drop as it is of no use.
train_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Departure time is when a plane leaves the gate.
# Similar to Date_of_Journey we can extract values from Dep_Time
# Extracting Hours
train_data["Dep_hour"] = pd.to_datetime(train_data["Dep_Time"]).dt.hour
# Extracting Minutes
train_data["Dep_min"] = pd.to_datetime(train_data["Dep_Time"]).dt.minute
# Now we can drop Dep_Time as it is of no use
train_data.drop(["Dep_Time"], axis = 1, inplace = True)
train_data.head()
# Arrival time is when the plane pulls up to the gate.
# Similar to Date_of_Journey we can extract values from Arrival_Time
# Extracting Hours
train_data["Arrival_hour"] = pd.to_datetime(train_data.Arrival_Time).dt.hour
# Extracting Minutes
train_data["Arrival_min"] = pd.to_datetime(train_data.Arrival_Time).dt.minute
# Now we can drop Arrival_Time as it is of no use
train_data.drop(["Arrival_Time"], axis = 1, inplace = True)
train_data.head()
# Time taken by plane to reach destination is called Duration
# It is the difference between Departure Time and Arrival Time
# Assigning and converting Duration column into list
duration = list(train_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
"5h 25m".split(sep = "m")[0].split()[-1]
# Adding duration_hours and duration_mins list to train_data dataframe
train_data["Duration_hours"] = duration_hours
train_data["Duration_mins"] = duration_mins
train_data.drop(["Duration"], axis = 1, inplace = True)
train_data.head()
###Output
_____no_output_____
###Markdown
--- Handling Categorical DataOne can find many ways to handle categorical data. The two main kinds of categorical data are,1. **Nominal data** --> data are not in any order --> **OneHotEncoder** is used in this case2. **Ordinal data** --> data are in order --> **LabelEncoder** is used in this case
###Code
train_data["Airline"].value_counts()
# From graph we can see that Jet Airways Business have the highest Price.
# Apart from the first Airline almost all are having similar median
# Airline vs Price
sns.catplot(y = "Price", x = "Airline", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 6, aspect = 3)
plt.show()
# As Airline is Nominal Categorical data we will perform OneHotEncoding
Airline = train_data[["Airline"]]
Airline = pd.get_dummies(Airline, drop_first= True)
Airline.head()
train_data["Source"].value_counts()
# Source vs Price
sns.catplot(y = "Price", x = "Source", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 4, aspect = 3)
plt.show()
# As Source is Nominal Categorical data we will perform OneHotEncoding
Source = train_data[["Source"]]
Source = pd.get_dummies(Source, drop_first= True)
Source.head()
train_data["Destination"].value_counts()
# As Destination is Nominal Categorical data we will perform OneHotEncoding
Destination = train_data[["Destination"]]
Destination = pd.get_dummies(Destination, drop_first = True)
Destination.head()
train_data["Route"]
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
train_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
train_data["Total_Stops"].value_counts()
# As this is case of Ordinal Categorical type we perform LabelEncoder
# Here Values are assigned with corresponding keys
train_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
train_data.head()
# Concatenate dataframe --> train_data + Airline + Source + Destination
data_train = pd.concat([train_data, Airline, Source, Destination], axis = 1)
data_train.head()
data_train.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
data_train.head()
data_train.shape
###Output
_____no_output_____
###Markdown
--- Test set
###Code
test_data = pd.read_excel(r"Test_set.xlsx", engine='openpyxl')
test_data.head()
test_data.shape
# Preprocessing
print("Test data Info")
print("-"*75)
print(test_data.info())
print()
print()
print("Null values :")
print("-"*75)
test_data.dropna(inplace = True)
print(test_data.isnull().sum())
# EDA
# Date_of_Journey
test_data["Journey_day"] = pd.to_datetime(test_data.Date_of_Journey, format="%d/%m/%Y").dt.day
test_data["Journey_month"] = pd.to_datetime(test_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
test_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Dep_Time
test_data["Dep_hour"] = pd.to_datetime(test_data["Dep_Time"]).dt.hour
test_data["Dep_min"] = pd.to_datetime(test_data["Dep_Time"]).dt.minute
test_data.drop(["Dep_Time"], axis = 1, inplace = True)
# Arrival_Time
test_data["Arrival_hour"] = pd.to_datetime(test_data.Arrival_Time).dt.hour
test_data["Arrival_min"] = pd.to_datetime(test_data.Arrival_Time).dt.minute
test_data.drop(["Arrival_Time"], axis = 1, inplace = True)
# Duration
duration = list(test_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding Duration column to test set
test_data["Duration_hours"] = duration_hours
test_data["Duration_mins"] = duration_mins
test_data.drop(["Duration"], axis = 1, inplace = True)
# Categorical data
print("Airline")
print("-"*75)
print(test_data["Airline"].value_counts())
Airline = pd.get_dummies(test_data["Airline"], drop_first= True)
print()
print("Source")
print("-"*75)
print(test_data["Source"].value_counts())
Source = pd.get_dummies(test_data["Source"], drop_first= True)
print()
print("Destination")
print("-"*75)
print(test_data["Destination"].value_counts())
Destination = pd.get_dummies(test_data["Destination"], drop_first = True)
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
test_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
# Replacing Total_Stops
test_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
# Concatenate dataframe --> test_data + Airline + Source + Destination
data_test = pd.concat([test_data, Airline, Source, Destination], axis = 1)
data_test.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
print()
print()
print("Shape of test data : ", data_test.shape)
data_test.head()
###Output
_____no_output_____
###Markdown
--- Feature SelectionFinding out the features that contribute most and have a strong relation with the target variable. Following are some of the feature selection methods,1. **heatmap**2. **feature_importance_**3. **SelectKBest**
###Code
data_train.shape
data_train.columns
X = data_train.loc[:, ['Total_Stops', 'Journey_day', 'Journey_month', 'Dep_hour',
'Dep_min', 'Arrival_hour', 'Arrival_min', 'Duration_hours',
'Duration_mins', 'Airline_Air India', 'Airline_GoAir', 'Airline_IndiGo',
'Airline_Jet Airways', 'Airline_Jet Airways Business',
'Airline_Multiple carriers',
'Airline_Multiple carriers Premium economy', 'Airline_SpiceJet',
'Airline_Trujet', 'Airline_Vistara', 'Airline_Vistara Premium economy',
'Source_Chennai', 'Source_Delhi', 'Source_Kolkata', 'Source_Mumbai',
'Destination_Cochin', 'Destination_Delhi', 'Destination_Hyderabad',
'Destination_Kolkata', 'Destination_New Delhi']]
X.head()
y = data_train.iloc[:, 1]
y.head()
# Finds correlation between Independent and dependent attributes
plt.figure(figsize = (18,18))
sns.heatmap(train_data.corr(), annot = True, cmap = "RdYlGn")
plt.show()
# Important feature using ExtraTreesRegressor
from sklearn.ensemble import ExtraTreesRegressor
selection = ExtraTreesRegressor()
selection.fit(X, y)
print(selection.feature_importances_)
#plot graph of feature importances for better visualization
plt.figure(figsize = (12,8))
feat_importances = pd.Series(selection.feature_importances_, index=X.columns)
feat_importances.nlargest(20).plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
--- Fitting model using Random Forest1. Split the dataset into train and test sets in order to predict w.r.t X_test2. If needed, scale the data * Scaling is not required for Random Forest3. Import the model4. Fit the data5. Predict w.r.t X_test6. For regression, check the **RMSE** score7. Plot graph
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
from sklearn.ensemble import RandomForestRegressor
reg_rf = RandomForestRegressor()
reg_rf.fit(X_train, y_train)
y_pred = reg_rf.predict(X_test)
reg_rf.score(X_train, y_train)
reg_rf.score(X_test, y_test)
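# Hedged sketch: a single train/test split can be optimistic, so a k-fold cross-validated
# R^2 gives a steadier estimate of generalisation (5 folds is an illustrative choice):
from sklearn.model_selection import cross_val_score
cv_r2 = cross_val_score(RandomForestRegressor(), X, y, cv = 5, scoring = "r2")
print("Cross-validated R^2: mean = %.3f, std = %.3f" % (cv_r2.mean(), cv_r2.std()))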
sns.distplot(y_test-y_pred)
plt.show()
plt.scatter(y_test, y_pred, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# RMSE/(max(DV)-min(DV))
2090.5509/(max(y)-min(y))
metrics.r2_score(y_test, y_pred)
###Output
_____no_output_____
###Markdown
--- Hyperparameter Tuning* Choose one of the following methods for hyperparameter tuning 1. **RandomizedSearchCV** --> Fast 2. **GridSearchCV*** Assign hyperparameters in the form of a dictionary* Fit the model* Check the best parameters and best score
###Code
from sklearn.model_selection import RandomizedSearchCV
#Randomized Search CV
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 100, stop = 1200, num = 12)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(5, 30, num = 6)]
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10, 15, 100]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 5, 10]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf}
# Random search of parameters, using 5 fold cross validation,
# search across 10 different combinations (n_iter = 10)
rf_random = RandomizedSearchCV(estimator = reg_rf, param_distributions = random_grid,scoring='neg_mean_squared_error', n_iter = 10, cv = 5, verbose=2, random_state=42, n_jobs = 1)
rf_random.fit(X_train,y_train)
rf_random.best_params_
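# Hedged sketch: after the search finishes, a common next step is to refit a single forest
# with the parameters it found, so the final model is a plain RandomForestRegressor rather
# than the search wrapper (random_state = 42 is an illustrative addition):
best_rf = RandomForestRegressor(**rf_random.best_params_, random_state = 42)
best_rf.fit(X_train, y_train)
print("Refit R^2 on the test split:", best_rf.score(X_test, y_test))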
prediction = rf_random.predict(X_test)
plt.figure(figsize = (8,8))
sns.distplot(y_test-prediction)
plt.show()
plt.figure(figsize = (8,8))
plt.scatter(y_test, prediction, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
print('MAE:', metrics.mean_absolute_error(y_test, prediction))
print('MSE:', metrics.mean_squared_error(y_test, prediction))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, prediction)))
###Output
MAE: 1166.1077346954548
MSE: 4073186.164461353
RMSE: 2018.2136072431365
###Markdown
--- Save the model to reuse it again
###Code
import pickle
# open a file, where you want to store the data
file = open('fflight_rf.pkl', 'wb')
# dump information to that file
pickle.dump(rf_random, file)
file.close()  # flush the pickle to disk before reopening it below
model = open('fflight_rf.pkl','rb')
forest = pickle.load(model)
y_prediction = forest.predict(X_test)
metrics.r2_score(y_test, y_prediction)
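# Hedged note: the pickle above stores the whole RandomizedSearchCV object (rf_random).
# For reuse it is often leaner to persist only the winning estimator; a sketch with an
# illustrative file name:
with open('fflight_rf_best.pkl', 'wb') as best_file:
    pickle.dump(rf_random.best_estimator_, best_file)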
###Output
_____no_output_____
###Markdown
Flight Price Prediction---
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
###Output
_____no_output_____
###Markdown
Importing dataset1. Since data is in form of excel file we have to use pandas read_excel to load the data2. After loading it is important to check the complete information of data as it can indication many of the hidden infomation such as null values in a column or a row3. Check whether any null values are there or not. if it is present then following can be done, 1. Imputing data using Imputation method in sklearn 2. Filling NaN values with mean, median and mode using fillna() method4. Describe data --> which can give statistical analysis
###Code
train_data = pd.read_excel(r"E:\MachineLearning\EDA\Flight_Price\Data_Train.xlsx")
pd.set_option('display.max_columns', None)
train_data.head()
train_data.info()
train_data["Duration"].value_counts()
train_data.dropna(inplace = True)
train_data.isnull().sum()
###Output
_____no_output_____
###Markdown
--- EDA From description we can see that Date_of_Journey is a object data type,\Therefore, we have to convert this datatype into timestamp so as to use this column properly for predictionFor this we require pandas **to_datetime** to convert object data type to datetime dtype.**.dt.day method will extract only day of that date**\**.dt.month method will extract only month of that date**
###Code
train_data["Journey_day"] = pd.to_datetime(train_data.Date_of_Journey, format="%d/%m/%Y").dt.day
train_data["Journey_month"] = pd.to_datetime(train_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
train_data.head()
# Since we have converted Date_of_Journey column into integers, Now we can drop as it is of no use.
train_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Departure time is when a plane leaves the gate.
# Similar to Date_of_Journey we can extract values from Dep_Time
# Extracting Hours
train_data["Dep_hour"] = pd.to_datetime(train_data["Dep_Time"]).dt.hour
# Extracting Minutes
train_data["Dep_min"] = pd.to_datetime(train_data["Dep_Time"]).dt.minute
# Now we can drop Dep_Time as it is of no use
train_data.drop(["Dep_Time"], axis = 1, inplace = True)
train_data.head()
# Arrival time is when the plane pulls up to the gate.
# Similar to Date_of_Journey we can extract values from Arrival_Time
# Extracting Hours
train_data["Arrival_hour"] = pd.to_datetime(train_data.Arrival_Time).dt.hour
# Extracting Minutes
train_data["Arrival_min"] = pd.to_datetime(train_data.Arrival_Time).dt.minute
# Now we can drop Arrival_Time as it is of no use
train_data.drop(["Arrival_Time"], axis = 1, inplace = True)
train_data.head()
# Time taken by plane to reach destination is called Duration
# It is the differnce betwwen Departure Time and Arrival time
# Assigning and converting Duration column into list
duration = list(train_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding duration_hours and duration_mins list to train_data dataframe
train_data["Duration_hours"] = duration_hours
train_data["Duration_mins"] = duration_mins
train_data.drop(["Duration"], axis = 1, inplace = True)
train_data.head()
###Output
_____no_output_____
###Markdown
--- Handling Categorical DataOne can find many ways to handle categorical data. Some of them categorical data are,1. **Nominal data** --> data are not in any order --> **OneHotEncoder** is used in this case2. **Ordinal data** --> data are in order --> **LabelEncoder** is used in this case
###Code
train_data["Airline"].value_counts()
# From graph we can see that Jet Airways Business have the highest Price.
# Apart from the first Airline almost all are having similar median
# Airline vs Price
sns.catplot(y = "Price", x = "Airline", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 6, aspect = 3)
plt.show()
# As Airline is Nominal Categorical data we will perform OneHotEncoding
Airline = train_data[["Airline"]]
Airline = pd.get_dummies(Airline, drop_first= True)
Airline.head()
train_data["Source"].value_counts()
# Source vs Price
sns.catplot(y = "Price", x = "Source", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 4, aspect = 3)
plt.show()
# As Source is Nominal Categorical data we will perform OneHotEncoding
Source = train_data[["Source"]]
Source = pd.get_dummies(Source, drop_first= True)
Source.head()
train_data["Destination"].value_counts()
# As Destination is Nominal Categorical data we will perform OneHotEncoding
Destination = train_data[["Destination"]]
Destination = pd.get_dummies(Destination, drop_first = True)
Destination.head()
train_data["Route"]
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
train_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
train_data["Total_Stops"].value_counts()
# As this is case of Ordinal Categorical type we perform LabelEncoder
# Here Values are assigned with corresponding keys
train_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
train_data.head()
# Concatenate dataframe --> train_data + Airline + Source + Destination
data_train = pd.concat([train_data, Airline, Source, Destination], axis = 1)
data_train.head()
data_train.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
data_train.head()
data_train.shape
###Output
_____no_output_____
###Markdown
--- Test set
###Code
test_data = pd.read_excel(r"E:\MachineLearning\EDA\Flight_Price\Test_set.xlsx")
test_data.head()
# Preprocessing
print("Test data Info")
print("-"*75)
print(test_data.info())
print()
print()
print("Null values :")
print("-"*75)
test_data.dropna(inplace = True)
print(test_data.isnull().sum())
# EDA
# Date_of_Journey
test_data["Journey_day"] = pd.to_datetime(test_data.Date_of_Journey, format="%d/%m/%Y").dt.day
test_data["Journey_month"] = pd.to_datetime(test_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
test_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Dep_Time
test_data["Dep_hour"] = pd.to_datetime(test_data["Dep_Time"]).dt.hour
test_data["Dep_min"] = pd.to_datetime(test_data["Dep_Time"]).dt.minute
test_data.drop(["Dep_Time"], axis = 1, inplace = True)
# Arrival_Time
test_data["Arrival_hour"] = pd.to_datetime(test_data.Arrival_Time).dt.hour
test_data["Arrival_min"] = pd.to_datetime(test_data.Arrival_Time).dt.minute
test_data.drop(["Arrival_Time"], axis = 1, inplace = True)
# Duration
duration = list(test_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding Duration column to test set
test_data["Duration_hours"] = duration_hours
test_data["Duration_mins"] = duration_mins
test_data.drop(["Duration"], axis = 1, inplace = True)
# Categorical data
print("Airline")
print("-"*75)
print(test_data["Airline"].value_counts())
Airline = pd.get_dummies(test_data["Airline"], drop_first= True)
print()
print("Source")
print("-"*75)
print(test_data["Source"].value_counts())
Source = pd.get_dummies(test_data["Source"], drop_first= True)
print()
print("Destination")
print("-"*75)
print(test_data["Destination"].value_counts())
Destination = pd.get_dummies(test_data["Destination"], drop_first = True)
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
test_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
# Replacing Total_Stops
test_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
# Concatenate dataframe --> test_data + Airline + Source + Destination
data_test = pd.concat([test_data, Airline, Source, Destination], axis = 1)
data_test.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
print()
print()
print("Shape of test data : ", data_test.shape)
data_test.head()
###Output
_____no_output_____
###Markdown
--- Feature SelectionFinding out the best feature which will contribute and have good relation with target variable.Following are some of the feature selection methods,1. **heatmap**2. **feature_importance_**3. **SelectKBest**
###Code
data_train.shape
data_train.columns
X = data_train.loc[:, ['Total_Stops', 'Journey_day', 'Journey_month', 'Dep_hour',
'Dep_min', 'Arrival_hour', 'Arrival_min', 'Duration_hours',
'Duration_mins', 'Airline_Air India', 'Airline_GoAir', 'Airline_IndiGo',
'Airline_Jet Airways', 'Airline_Jet Airways Business',
'Airline_Multiple carriers',
'Airline_Multiple carriers Premium economy', 'Airline_SpiceJet',
'Airline_Trujet', 'Airline_Vistara', 'Airline_Vistara Premium economy',
'Source_Chennai', 'Source_Delhi', 'Source_Kolkata', 'Source_Mumbai',
'Destination_Cochin', 'Destination_Delhi', 'Destination_Hyderabad',
'Destination_Kolkata', 'Destination_New Delhi']]
X.head()
y = data_train.iloc[:, 1]
y.head()
# Finds correlation between Independent and dependent attributes
plt.figure(figsize = (18,18))
sns.heatmap(train_data.corr(), annot = True, cmap = "RdYlGn")
plt.show()
# Important feature using ExtraTreesRegressor
from sklearn.ensemble import ExtraTreesRegressor
selection = ExtraTreesRegressor()
selection.fit(X, y)
print(selection.feature_importances_)
#plot graph of feature importances for better visualization
plt.figure(figsize = (12,8))
feat_importances = pd.Series(selection.feature_importances_, index=X.columns)
feat_importances.nlargest(20).plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
--- Fitting model using Random Forest1. Split dataset into train and test set in order to prediction w.r.t X_test2. If needed do scaling of data * Scaling is not done in Random forest3. Import model4. Fit the data5. Predict w.r.t X_test6. In regression check **RSME** Score7. Plot graph
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
from sklearn.ensemble import RandomForestRegressor
reg_rf = RandomForestRegressor()
reg_rf.fit(X_train, y_train)
y_pred = reg_rf.predict(X_test)
reg_rf.score(X_train, y_train)
reg_rf.score(X_test, y_test)
sns.distplot(y_test-y_pred)
plt.show()
plt.scatter(y_test, y_pred, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# RMSE/(max(DV)-min(DV))
2090.5509/(max(y)-min(y))
metrics.r2_score(y_test, y_pred)
###Output
_____no_output_____
###Markdown
--- Hyperparameter Tuning* Choose following method for hyperparameter tuning 1. **RandomizedSearchCV** --> Fast 2. **GridSearchCV*** Assign hyperparameters in form of dictionery* Fit the model* Check best paramters and best score
###Code
from sklearn.model_selection import RandomizedSearchCV
#Randomized Search CV
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 100, stop = 1200, num = 12)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(5, 30, num = 6)]
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10, 15, 100]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 5, 10]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf}
# Random search of parameters, using 5 fold cross validation,
# search across 100 different combinations
rf_random = RandomizedSearchCV(estimator = reg_rf, param_distributions = random_grid,scoring='neg_mean_squared_error', n_iter = 10, cv = 5, verbose=2, random_state=42, n_jobs = 1)
rf_random.fit(X_train,y_train)
rf_random.best_params_
prediction = rf_random.predict(X_test)
plt.figure(figsize = (8,8))
sns.distplot(y_test-prediction)
plt.show()
plt.figure(figsize = (8,8))
plt.scatter(y_test, prediction, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
print('MAE:', metrics.mean_absolute_error(y_test, prediction))
print('MSE:', metrics.mean_squared_error(y_test, prediction))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, prediction)))
###Output
MAE: 1165.606162629916
MSE: 4062650.6911608884
RMSE: 2015.6018186042818
###Markdown
--- Save the model to reuse it again
###Code
import pickle
# open a file, where you ant to store the data
file = open('flight_rf.pkl', 'wb')
# dump information to that file
pickle.dump(reg_rf, file)
model = open('flight_price_rf.pkl','rb')
forest = pickle.load(model)
y_prediction = forest.predict(X_test)
metrics.r2_score(y_test, y_prediction)
###Output
_____no_output_____
###Markdown
Flight Price Prediction---
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
###Output
_____no_output_____
###Markdown
Importing dataset1. Since data is in form of excel file we have to use pandas read_excel to load the data2. After loading it is important to check the complete information of data as it can indication many of the hidden infomation such as null values in a column or a row3. Check whether any null values are there or not. if it is present then following can be done, 1. Imputing data using Imputation method in sklearn 2. Filling NaN values with mean, median and mode using fillna() method4. Describe data --> which can give statistical analysis
###Code
train_data = pd.read_excel(r"E:\MachineLearning\EDA\Flight_Price\Data_Train.xlsx")
pd.set_option('display.max_columns', None)
train_data.head()
train_data.info()
train_data["Duration"].value_counts()
train_data.dropna(inplace = True)
train_data.isnull().sum()
###Output
_____no_output_____
###Markdown
--- EDA From description we can see that Date_of_Journey is a object data type,\Therefore, we have to convert this datatype into timestamp so as to use this column properly for predictionFor this we require pandas **to_datetime** to convert object data type to datetime dtype.**.dt.day method will extract only day of that date**\**.dt.month method will extract only month of that date**
###Code
train_data["Journey_day"] = pd.to_datetime(train_data.Date_of_Journey, format="%d/%m/%Y").dt.day
train_data["Journey_month"] = pd.to_datetime(train_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
train_data.head()
# Since we have converted Date_of_Journey column into integers, Now we can drop as it is of no use.
train_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Departure time is when a plane leaves the gate.
# Similar to Date_of_Journey we can extract values from Dep_Time
# Extracting Hours
train_data["Dep_hour"] = pd.to_datetime(train_data["Dep_Time"]).dt.hour
# Extracting Minutes
train_data["Dep_min"] = pd.to_datetime(train_data["Dep_Time"]).dt.minute
# Now we can drop Dep_Time as it is of no use
train_data.drop(["Dep_Time"], axis = 1, inplace = True)
train_data.head()
# Arrival time is when the plane pulls up to the gate.
# Similar to Date_of_Journey we can extract values from Arrival_Time
# Extracting Hours
train_data["Arrival_hour"] = pd.to_datetime(train_data.Arrival_Time).dt.hour
# Extracting Minutes
train_data["Arrival_min"] = pd.to_datetime(train_data.Arrival_Time).dt.minute
# Now we can drop Arrival_Time as it is of no use
train_data.drop(["Arrival_Time"], axis = 1, inplace = True)
train_data.head()
# Time taken by plane to reach destination is called Duration
# It is the differnce betwwen Departure Time and Arrival time
# Assigning and converting Duration column into list
duration = list(train_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding duration_hours and duration_mins list to train_data dataframe
train_data["Duration_hours"] = duration_hours
train_data["Duration_mins"] = duration_mins
train_data.drop(["Duration"], axis = 1, inplace = True)
train_data.head()
###Output
_____no_output_____
###Markdown
--- Handling Categorical DataOne can find many ways to handle categorical data. Some of them categorical data are,1. **Nominal data** --> data are not in any order --> **OneHotEncoder** is used in this case2. **Ordinal data** --> data are in order --> **LabelEncoder** is used in this case
###Code
train_data["Airline"].value_counts()
# From graph we can see that Jet Airways Business have the highest Price.
# Apart from the first Airline almost all are having similar median
# Airline vs Price
sns.catplot(y = "Price", x = "Airline", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 6, aspect = 3)
plt.show()
# As Airline is Nominal Categorical data we will perform OneHotEncoding
Airline = train_data[["Airline"]]
Airline = pd.get_dummies(Airline, drop_first= True)
Airline.head()
train_data["Source"].value_counts()
# Source vs Price
sns.catplot(y = "Price", x = "Source", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 4, aspect = 3)
plt.show()
# As Source is Nominal Categorical data we will perform OneHotEncoding
Source = train_data[["Source"]]
Source = pd.get_dummies(Source, drop_first= True)
Source.head()
train_data["Destination"].value_counts()
# As Destination is Nominal Categorical data we will perform OneHotEncoding
Destination = train_data[["Destination"]]
Destination = pd.get_dummies(Destination, drop_first = True)
Destination.head()
train_data["Route"]
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
train_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
train_data["Total_Stops"].value_counts()
# As this is case of Ordinal Categorical type we perform LabelEncoder
# Here Values are assigned with corresponding keys
train_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
train_data.head()
# Concatenate dataframe --> train_data + Airline + Source + Destination
data_train = pd.concat([train_data, Airline, Source, Destination], axis = 1)
data_train.head()
data_train.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
data_train.head()
data_train.shape
###Output
_____no_output_____
###Markdown
--- Test set
###Code
test_data = pd.read_excel(r"E:\MachineLearning\EDA\Flight_Price\Test_set.xlsx")
test_data.head()
# Preprocessing
print("Test data Info")
print("-"*75)
print(test_data.info())
print()
print()
print("Null values :")
print("-"*75)
test_data.dropna(inplace = True)
print(test_data.isnull().sum())
# EDA
# Date_of_Journey
test_data["Journey_day"] = pd.to_datetime(test_data.Date_of_Journey, format="%d/%m/%Y").dt.day
test_data["Journey_month"] = pd.to_datetime(test_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
test_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Dep_Time
test_data["Dep_hour"] = pd.to_datetime(test_data["Dep_Time"]).dt.hour
test_data["Dep_min"] = pd.to_datetime(test_data["Dep_Time"]).dt.minute
test_data.drop(["Dep_Time"], axis = 1, inplace = True)
# Arrival_Time
test_data["Arrival_hour"] = pd.to_datetime(test_data.Arrival_Time).dt.hour
test_data["Arrival_min"] = pd.to_datetime(test_data.Arrival_Time).dt.minute
test_data.drop(["Arrival_Time"], axis = 1, inplace = True)
# Duration
duration = list(test_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding Duration column to test set
test_data["Duration_hours"] = duration_hours
test_data["Duration_mins"] = duration_mins
test_data.drop(["Duration"], axis = 1, inplace = True)
# Categorical data
print("Airline")
print("-"*75)
print(test_data["Airline"].value_counts())
Airline = pd.get_dummies(test_data["Airline"], drop_first= True)
print()
print("Source")
print("-"*75)
print(test_data["Source"].value_counts())
Source = pd.get_dummies(test_data["Source"], drop_first= True)
print()
print("Destination")
print("-"*75)
print(test_data["Destination"].value_counts())
Destination = pd.get_dummies(test_data["Destination"], drop_first = True)
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
test_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
# Replacing Total_Stops
test_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
# Concatenate dataframe --> test_data + Airline + Source + Destination
data_test = pd.concat([test_data, Airline, Source, Destination], axis = 1)
data_test.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
print()
print()
print("Shape of test data : ", data_test.shape)
data_test.head()
###Output
_____no_output_____
###Markdown
--- Feature SelectionFinding out the best feature which will contribute and have good relation with target variable.Following are some of the feature selection methods,1. **heatmap**2. **feature_importance_**3. **SelectKBest**
###Code
data_train.shape
data_train.columns
X = data_train.loc[:, ['Total_Stops', 'Journey_day', 'Journey_month', 'Dep_hour',
'Dep_min', 'Arrival_hour', 'Arrival_min', 'Duration_hours',
'Duration_mins', 'Airline_Air India', 'Airline_GoAir', 'Airline_IndiGo',
'Airline_Jet Airways', 'Airline_Jet Airways Business',
'Airline_Multiple carriers',
'Airline_Multiple carriers Premium economy', 'Airline_SpiceJet',
'Airline_Trujet', 'Airline_Vistara', 'Airline_Vistara Premium economy',
'Source_Chennai', 'Source_Delhi', 'Source_Kolkata', 'Source_Mumbai',
'Destination_Cochin', 'Destination_Delhi', 'Destination_Hyderabad',
'Destination_Kolkata', 'Destination_New Delhi']]
X.head()
y = data_train.iloc[:, 1]
y.head()
# Finds correlation between Independent and dependent attributes
plt.figure(figsize = (18,18))
sns.heatmap(train_data.corr(), annot = True, cmap = "RdYlGn")
plt.show()
# Important feature using ExtraTreesRegressor
from sklearn.ensemble import ExtraTreesRegressor
selection = ExtraTreesRegressor()
selection.fit(X, y)
print(selection.feature_importances_)
#plot graph of feature importances for better visualization
plt.figure(figsize = (12,8))
feat_importances = pd.Series(selection.feature_importances_, index=X.columns)
feat_importances.nlargest(20).plot(kind='barh')
plt.show()
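# SelectKBest is listed in the markdown above but never demonstrated; a minimal sketch (not in
# the original) ranking features by their univariate F-score for regression -- k = 10 is illustrative:
from sklearn.feature_selection import SelectKBest, f_regression
skb = SelectKBest(score_func = f_regression, k = 10).fit(X, y)
pd.Series(skb.scores_, index = X.columns).nlargest(10)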
###Output
_____no_output_____
###Markdown
--- Fitting model using Random Forest1. Split dataset into train and test set in order to predict w.r.t. X_test2. If needed do scaling of data * Scaling is not done in Random forest3. Import model4. Fit the data5. Predict w.r.t X_test6. In regression check **RMSE** Score7. Plot graph
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
from sklearn.ensemble import RandomForestRegressor
reg_rf = RandomForestRegressor()
reg_rf.fit(X_train, y_train)
y_pred = reg_rf.predict(X_test)
reg_rf.score(X_train, y_train)
reg_rf.score(X_test, y_test)
sns.distplot(y_test-y_pred)
plt.show()
plt.scatter(y_test, y_pred, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# Normalized RMSE: RMSE/(max(DV)-min(DV)), using the RMSE computed above rather than a hard-coded value
np.sqrt(metrics.mean_squared_error(y_test, y_pred))/(max(y)-min(y))
metrics.r2_score(y_test, y_pred)
###Output
_____no_output_____
###Markdown
--- Hyperparameter Tuning* Choose one of the following methods for hyperparameter tuning 1. **RandomizedSearchCV** --> Fast 2. **GridSearchCV*** Assign hyperparameters in form of a dictionary* Fit the model* Check best parameters and best score
###Code
from sklearn.model_selection import RandomizedSearchCV
#Randomized Search CV
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 100, stop = 1200, num = 12)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(5, 30, num = 6)]
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10, 15, 100]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 5, 10]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf}
# Random search of parameters, using 5 fold cross validation,
# search across 10 different combinations (n_iter = 10)
rf_random = RandomizedSearchCV(estimator = reg_rf, param_distributions = random_grid,scoring='neg_mean_squared_error', n_iter = 10, cv = 5, verbose=2, random_state=42, n_jobs = 1)
rf_random.fit(X_train,y_train)
rf_random.best_params_
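# GridSearchCV is the exhaustive alternative listed in the markdown above; a minimal sketch
# (not in the original) over a deliberately small, illustrative grid -- values are placeholders:
from sklearn.model_selection import GridSearchCV
small_grid = {'n_estimators': [300, 700],
              'max_depth': [10, 20],
              'min_samples_split': [2, 10]}
rf_grid = GridSearchCV(estimator = RandomForestRegressor(), param_grid = small_grid,
                       scoring = 'neg_mean_squared_error', cv = 3, verbose = 2, n_jobs = 1)
rf_grid.fit(X_train, y_train)
rf_grid.best_params_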
prediction = rf_random.predict(X_test)
plt.figure(figsize = (8,8))
sns.distplot(y_test-prediction)
plt.show()
plt.figure(figsize = (8,8))
plt.scatter(y_test, prediction, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
print('MAE:', metrics.mean_absolute_error(y_test, prediction))
print('MSE:', metrics.mean_squared_error(y_test, prediction))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, prediction)))
###Output
MAE: 1165.606162629916
MSE: 4062650.6911608884
RMSE: 2015.6018186042818
###Markdown
--- Save the model to reuse it again
###Code
import pickle
# open a file where you want to store the data
file = open('flight_rf.pkl', 'wb')
# dump information to that file and close it so the bytes are flushed to disk
pickle.dump(reg_rf, file)
file.close()
# load the model back from the same file we just saved
model = open('flight_rf.pkl', 'rb')
forest = pickle.load(model)
y_prediction = forest.predict(X_test)
metrics.r2_score(y_test, y_prediction)
###Output
_____no_output_____
###Markdown
Flight Price Prediction---
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
###Output
_____no_output_____
###Markdown
Importing dataset1. Check whether any null values are present or not. If they are present, the following can be done, 1. Imputing data using the Imputation method in sklearn 2. Filling NaN values with mean, median or mode using the fillna() method
###Code
train_data = pd.read_excel(r"/home/adarshsrivastava/Github/Flight_Fare_Prediction-/dataset/Data_Train.xlsx")
#pd.set_option('display.max_columns', None)
train_data.head()
train_data.info()
train_data["Duration"].value_counts()
train_data.dropna(inplace = True)
train_data.isnull().sum() #To check if there is any NaN value in any of the column
###Output
_____no_output_____
###Markdown
EDA From the description we can see that Date_of_Journey is an object data type,\Therefore, we have to convert this datatype into timestamp so as to use this column properly for predictionFor this we require pandas **to_datetime** to convert object data type to datetime dtype.**.dt.day method will extract only day of that date**\**.dt.month method will extract only month of that date**
###Code
train_data["Journey_day"] = pd.to_datetime(train_data.Date_of_Journey, format="%d/%m/%Y").dt.day
train_data["Journey_month"] = pd.to_datetime(train_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
train_data.head()
# Since we have converted Date_of_Journey column into integers, Now we can drop as it is of no use.
train_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Departure time is when a plane leaves the gate.
# Similar to Date_of_Journey we can extract values from Dep_Time
# Extracting Hours
train_data["Dep_hour"] = pd.to_datetime(train_data["Dep_Time"]).dt.hour
# Extracting Minutes
train_data["Dep_min"] = pd.to_datetime(train_data["Dep_Time"]).dt.minute
# Now we can drop Dep_Time as it is of no use
train_data.drop(["Dep_Time"], axis = 1, inplace = True)
train_data.head()
# Arrival time is when the plane pulls up to the gate.
# Similar to Date_of_Journey we can extract values from Arrival_Time
# Extracting Hours
train_data["Arrival_hour"] = pd.to_datetime(train_data.Arrival_Time).dt.hour
# Extracting Minutes
train_data["Arrival_min"] = pd.to_datetime(train_data.Arrival_Time).dt.minute
# Now we can drop Arrival_Time as it is of no use
train_data.drop(["Arrival_Time"], axis = 1, inplace = True)
train_data.head()
# Time taken by plane to reach destination is called Duration
# It is the difference between Departure Time and Arrival Time
# Assigning and converting Duration column into list
duration = list(train_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding duration_hours and duration_mins list to train_data dataframe
train_data["Duration_hours"] = duration_hours
train_data["Duration_mins"] = duration_mins
train_data.drop(["Duration"], axis = 1, inplace = True)
train_data.head()
###Output
_____no_output_____
###Markdown
--- Handling Categorical DataOne can find many ways to handle categorical data. The common kinds of categorical data are,1. **Nominal data** --> data are not in any order --> **OneHotEncoder** is used in this case2. **Ordinal data** --> data are in order --> **LabelEncoder** is used in this case
###Code
train_data["Airline"].value_counts()
# From the graph we can see that Jet Airways Business has the highest Price.
# Apart from the first Airline almost all have a similar median
# Airline vs Price
sns.catplot(y = "Price", x = "Airline", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 12, aspect = 2)
plt.show()
# As Airline is Nominal Categorical data we will perform OneHotEncoding
Airline = train_data[["Airline"]]
Airline = pd.get_dummies(Airline, drop_first= True)
Airline.head()
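# The markdown above names sklearn's OneHotEncoder for nominal features; this notebook uses
# pd.get_dummies instead. A minimal sketch (not in the original) of the sklearn route --
# sparse=False assumes the older sklearn API this notebook was written against:
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder(drop = 'first', sparse = False)
airline_ohe = ohe.fit_transform(train_data[["Airline"]]) # same dummy matrix as Airline above
airline_ohe.shape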
train_data["Source"].value_counts()
# Source vs Price
sns.catplot(y = "Price", x = "Source", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 6, aspect = 2)
plt.show()
# As Source is Nominal Categorical data we will perform OneHotEncoding
Source = train_data[["Source"]]
Source = pd.get_dummies(Source, drop_first= True)
Source.head()
train_data["Destination"].value_counts()
# As Destination is Nominal Categorical data we will perform OneHotEncoding
Destination = train_data[["Destination"]]
Destination = pd.get_dummies(Destination, drop_first = True)
Destination.head()
train_data["Route"]
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
train_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
train_data["Total_Stops"].value_counts()
# As this is case of Ordinal Categorical type we perform LabelEncoder
# Here Values are assigned with corresponding keys
train_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
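# Note (not in the original): LabelEncoder, mentioned above for ordinal data, would assign codes
# in lexical order ("1 stop" -> 0, ..., "non-stop" -> 4), scrambling the stop count; the explicit
# replace() mapping above keeps the intended ordering.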
train_data.head()
# Concatenate dataframe --> train_data + Airline + Source + Destination
data_train = pd.concat([train_data, Airline, Source, Destination], axis = 1)
data_train.head()
data_train.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
data_train.head()
data_train.shape
###Output
_____no_output_____
###Markdown
--- Test set
###Code
test_data = pd.read_excel(r"/home/adarshsrivastava/Github/Flight_Fare_Prediction-/dataset/Test_set.xlsx")
test_data.head()
# Preprocessing
print("Test data Info")
print("-"*75)
print(test_data.info())
print()
print()
print("Null values :")
print("-"*75)
test_data.dropna(inplace = True)
print(test_data.isnull().sum())
# EDA
# Date_of_Journey
test_data["Journey_day"] = pd.to_datetime(test_data.Date_of_Journey, format="%d/%m/%Y").dt.day
test_data["Journey_month"] = pd.to_datetime(test_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
test_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Dep_Time
test_data["Dep_hour"] = pd.to_datetime(test_data["Dep_Time"]).dt.hour
test_data["Dep_min"] = pd.to_datetime(test_data["Dep_Time"]).dt.minute
test_data.drop(["Dep_Time"], axis = 1, inplace = True)
# Arrival_Time
test_data["Arrival_hour"] = pd.to_datetime(test_data.Arrival_Time).dt.hour
test_data["Arrival_min"] = pd.to_datetime(test_data.Arrival_Time).dt.minute
test_data.drop(["Arrival_Time"], axis = 1, inplace = True)
# Duration
duration = list(test_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding Duration column to test set
test_data["Duration_hours"] = duration_hours
test_data["Duration_mins"] = duration_mins
test_data.drop(["Duration"], axis = 1, inplace = True)
# Categorical data
print("Airline")
print("-"*75)
print(test_data["Airline"].value_counts())
Airline = pd.get_dummies(test_data["Airline"], drop_first= True)
print()
print("Source")
print("-"*75)
print(test_data["Source"].value_counts())
Source = pd.get_dummies(test_data["Source"], drop_first= True)
print()
print("Destination")
print("-"*75)
print(test_data["Destination"].value_counts())
Destination = pd.get_dummies(test_data["Destination"], drop_first = True)
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
test_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
# Replacing Total_Stops
test_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
# Concatenate dataframe --> test_data + Airline + Source + Destination
data_test = pd.concat([test_data, Airline, Source, Destination], axis = 1)
data_test.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
print()
print()
print("Shape of test data : ", data_test.shape)
data_test.head()
###Output
_____no_output_____
###Markdown
--- Feature SelectionFinding out the best features, i.e. those that contribute most and have a strong relation with the target variable.Following are some of the feature selection methods,1. **heatmap**2. **feature_importance_**3. **SelectKBest**
###Code
data_train.shape
data_train.columns
X = data_train.loc[:, ['Total_Stops', 'Journey_day', 'Journey_month', 'Dep_hour',
'Dep_min', 'Arrival_hour', 'Arrival_min', 'Duration_hours',
'Duration_mins', 'Airline_Air India', 'Airline_GoAir', 'Airline_IndiGo',
'Airline_Jet Airways', 'Airline_Jet Airways Business',
'Airline_Multiple carriers',
'Airline_Multiple carriers Premium economy', 'Airline_SpiceJet',
'Airline_Trujet', 'Airline_Vistara', 'Airline_Vistara Premium economy',
'Source_Chennai', 'Source_Delhi', 'Source_Kolkata', 'Source_Mumbai',
'Destination_Cochin', 'Destination_Delhi', 'Destination_Hyderabad',
'Destination_Kolkata', 'Destination_New Delhi']]
X.head()
y = data_train.iloc[:,1]
y.head()
# Finds correlation between Independent and dependent attributes
plt.figure(figsize = (18,18))
sns.heatmap(train_data.corr(), annot = True)
plt.show()
# Important feature using ExtraTreesRegressor
from sklearn.ensemble import ExtraTreesRegressor
selection = ExtraTreesRegressor()
selection.fit(X, y)
print(selection.feature_importances_)
#plot graph of feature importances for better visualization
plt.figure(figsize = (12,8))
feat_importances = pd.Series(selection.feature_importances_, index=X.columns)
feat_importances.nlargest(20).plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
Fitting model using Random Forest1. Split dataset into train and test set in order to predict w.r.t. X_test2. If needed do scaling of data * Scaling is not done in Random forest3. Import model4. Fit the data5. Predict w.r.t X_test6. In regression check **RMSE** Score7. Plot graph
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor()
rf.fit(X_train, y_train)
y_pred=rf.predict(X_test)
rf.score(X_train,y_train)
rf.score(X_test,y_test)
sns.distplot(y_test-y_pred)
plt.show()
plt.scatter(y_test,y_pred)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
from sklearn import metrics
print("MAE:",metrics.mean_absolute_error(y_test,y_pred))
print("MSE:",metrics.mean_squared_error(y_test,y_pred))
rmse=np.sqrt(metrics.mean_squared_error(y_test,y_pred))
print("RMSE:",rmse)
rmse/(max(y)-min(y))
metrics.r2_score(y_test,y_pred)
import pickle
# open a file where you want to store the data
file = open('flight_fare_pred.pkl', 'wb')
# dump information to that file and close it so the bytes are flushed to disk
pickle.dump(rf, file)
file.close()
model = open('flight_fare_pred.pkl', 'rb')
forest = pickle.load(model)
y_prediction = forest.predict(X_test)
metrics.r2_score(y_test, y_prediction)
###Output
_____no_output_____
###Markdown
Flight Price Prediction---
###Code
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
###Output
_____no_output_____
###Markdown
Importing dataset1. Since the data is in the form of an Excel file we have to use pandas read_excel to load the data2. After loading it is important to check the complete information of the data as it can reveal much of the hidden information such as null values in a column or a row3. Check whether any null values are present or not. If they are present, the following can be done, 1. Imputing data using the Imputation method in sklearn 2. Filling NaN values with mean, median or mode using the fillna() method4. Describe the data --> which gives a statistical summary
###Code
train_data = pd.read_excel(r"E:\MachineLearning\EDA\Flight_Price\Data_Train.xlsx")
pd.set_option('display.max_columns', None)
train_data.head()
train_data.info()
train_data["Duration"].value_counts()
train_data.dropna(inplace = True)
train_data.isnull().sum()
###Output
_____no_output_____
###Markdown
--- EDA From the description we can see that Date_of_Journey is an object data type,\Therefore, we have to convert this datatype into timestamp so as to use this column properly for predictionFor this we require pandas **to_datetime** to convert object data type to datetime dtype.**.dt.day method will extract only day of that date**\**.dt.month method will extract only month of that date**
###Code
train_data["Journey_day"] = pd.to_datetime(train_data.Date_of_Journey, format="%d/%m/%Y").dt.day
train_data["Journey_month"] = pd.to_datetime(train_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
train_data.head()
# Since we have converted Date_of_Journey column into integers, Now we can drop as it is of no use.
train_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Departure time is when a plane leaves the gate.
# Similar to Date_of_Journey we can extract values from Dep_Time
# Extracting Hours
train_data["Dep_hour"] = pd.to_datetime(train_data["Dep_Time"]).dt.hour
# Extracting Minutes
train_data["Dep_min"] = pd.to_datetime(train_data["Dep_Time"]).dt.minute
# Now we can drop Dep_Time as it is of no use
train_data.drop(["Dep_Time"], axis = 1, inplace = True)
train_data.head()
# Arrival time is when the plane pulls up to the gate.
# Similar to Date_of_Journey we can extract values from Arrival_Time
# Extracting Hours
train_data["Arrival_hour"] = pd.to_datetime(train_data.Arrival_Time).dt.hour
# Extracting Minutes
train_data["Arrival_min"] = pd.to_datetime(train_data.Arrival_Time).dt.minute
# Now we can drop Arrival_Time as it is of no use
train_data.drop(["Arrival_Time"], axis = 1, inplace = True)
train_data.head()
# Time taken by plane to reach destination is called Duration
# It is the difference between Departure Time and Arrival Time
# Assigning and converting Duration column into list
duration = list(train_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding duration_hours and duration_mins list to train_data dataframe
train_data["Duration_hours"] = duration_hours
train_data["Duration_mins"] = duration_mins
train_data.drop(["Duration"], axis = 1, inplace = True)
train_data.head()
###Output
_____no_output_____
###Markdown
--- Handling Categorical DataOne can find many ways to handle categorical data. The common kinds of categorical data are,1. **Nominal data** --> data are not in any order --> **OneHotEncoder** is used in this case2. **Ordinal data** --> data are in order --> **LabelEncoder** is used in this case
###Code
train_data["Airline"].value_counts()
# From the graph we can see that Jet Airways Business has the highest Price.
# Apart from the first Airline almost all have a similar median
# Airline vs Price
sns.catplot(y = "Price", x = "Airline", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 6, aspect = 3)
plt.show()
# As Airline is Nominal Categorical data we will perform OneHotEncoding
Airline = train_data[["Airline"]]
Airline = pd.get_dummies(Airline, drop_first= True)
Airline.head()
train_data["Source"].value_counts()
# Source vs Price
sns.catplot(y = "Price", x = "Source", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 4, aspect = 3)
plt.show()
# As Source is Nominal Categorical data we will perform OneHotEncoding
Source = train_data[["Source"]]
Source = pd.get_dummies(Source, drop_first= True)
Source.head()
train_data["Destination"].value_counts()
# As Destination is Nominal Categorical data we will perform OneHotEncoding
Destination = train_data[["Destination"]]
Destination = pd.get_dummies(Destination, drop_first = True)
Destination.head()
train_data["Route"]
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
train_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
train_data["Total_Stops"].value_counts()
# As this is case of Ordinal Categorical type we perform LabelEncoder
# Here Values are assigned with corresponding keys
train_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
train_data.head()
# Concatenate dataframe --> train_data + Airline + Source + Destination
data_train = pd.concat([train_data, Airline, Source, Destination], axis = 1)
data_train.head()
data_train.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
data_train.head()
data_train.shape
###Output
_____no_output_____
###Markdown
--- Test set
###Code
test_data = pd.read_excel(r"E:\MachineLearning\EDA\Flight_Price\Test_set.xlsx")
test_data.head()
# Preprocessing
print("Test data Info")
print("-"*75)
print(test_data.info())
print()
print()
print("Null values :")
print("-"*75)
test_data.dropna(inplace = True)
print(test_data.isnull().sum())
# EDA
# Date_of_Journey
test_data["Journey_day"] = pd.to_datetime(test_data.Date_of_Journey, format="%d/%m/%Y").dt.day
test_data["Journey_month"] = pd.to_datetime(test_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
test_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Dep_Time
test_data["Dep_hour"] = pd.to_datetime(test_data["Dep_Time"]).dt.hour
test_data["Dep_min"] = pd.to_datetime(test_data["Dep_Time"]).dt.minute
test_data.drop(["Dep_Time"], axis = 1, inplace = True)
# Arrival_Time
test_data["Arrival_hour"] = pd.to_datetime(test_data.Arrival_Time).dt.hour
test_data["Arrival_min"] = pd.to_datetime(test_data.Arrival_Time).dt.minute
test_data.drop(["Arrival_Time"], axis = 1, inplace = True)
# Duration
duration = list(test_data["Duration"])
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding Duration column to test set
test_data["Duration_hours"] = duration_hours
test_data["Duration_mins"] = duration_mins
test_data.drop(["Duration"], axis = 1, inplace = True)
# Categorical data
print("Airline")
print("-"*75)
print(test_data["Airline"].value_counts())
Airline = pd.get_dummies(test_data["Airline"], drop_first= True)
print()
print("Source")
print("-"*75)
print(test_data["Source"].value_counts())
Source = pd.get_dummies(test_data["Source"], drop_first= True)
print()
print("Destination")
print("-"*75)
print(test_data["Destination"].value_counts())
Destination = pd.get_dummies(test_data["Destination"], drop_first = True)
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
test_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
# Replacing Total_Stops
test_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
# Concatenate dataframe --> test_data + Airline + Source + Destination
data_test = pd.concat([test_data, Airline, Source, Destination], axis = 1)
data_test.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
print()
print()
print("Shape of test data : ", data_test.shape)
data_test.head()
###Output
_____no_output_____
###Markdown
--- Feature SelectionFinding out the best features, i.e. those that contribute most and have a strong relation with the target variable.Following are some of the feature selection methods,1. **heatmap**2. **feature_importance_**3. **SelectKBest**
###Code
data_train.shape
data_train.columns
X = data_train.loc[:, ['Total_Stops', 'Journey_day', 'Journey_month', 'Dep_hour',
'Dep_min', 'Arrival_hour', 'Arrival_min', 'Duration_hours',
'Duration_mins', 'Airline_Air India', 'Airline_GoAir', 'Airline_IndiGo',
'Airline_Jet Airways', 'Airline_Jet Airways Business',
'Airline_Multiple carriers',
'Airline_Multiple carriers Premium economy', 'Airline_SpiceJet',
'Airline_Trujet', 'Airline_Vistara', 'Airline_Vistara Premium economy',
'Source_Chennai', 'Source_Delhi', 'Source_Kolkata', 'Source_Mumbai',
'Destination_Cochin', 'Destination_Delhi', 'Destination_Hyderabad',
'Destination_Kolkata', 'Destination_New Delhi']]
X.head()
y = data_train.iloc[:, 1]
y.head()
# Finds correlation between Independent and dependent attributes
plt.figure(figsize = (18,18))
sns.heatmap(train_data.corr(), annot = True, cmap = "RdYlGn")
plt.show()
# Important feature using ExtraTreesRegressor
from sklearn.ensemble import ExtraTreesRegressor
selection = ExtraTreesRegressor()
selection.fit(X, y)
print(selection.feature_importances_)
#plot graph of feature importances for better visualization
plt.figure(figsize = (12,8))
feat_importances = pd.Series(selection.feature_importances_, index=X.columns)
feat_importances.nlargest(20).plot(kind='barh')
plt.show()
###Output
_____no_output_____
###Markdown
--- Fitting model using Random Forest1. Split dataset into train and test set in order to predict w.r.t. X_test2. If needed do scaling of data * Scaling is not done in Random forest3. Import model4. Fit the data5. Predict w.r.t X_test6. In regression check **RMSE** Score7. Plot graph
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
from sklearn.ensemble import RandomForestRegressor
reg_rf = RandomForestRegressor()
reg_rf.fit(X_train, y_train)
y_pred = reg_rf.predict(X_test)
reg_rf.score(X_train, y_train)
reg_rf.score(X_test, y_test)
sns.distplot(y_test-y_pred)
plt.show()
plt.scatter(y_test, y_pred, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# Normalized RMSE: RMSE/(max(DV)-min(DV)), using the RMSE computed above rather than a hard-coded value
np.sqrt(metrics.mean_squared_error(y_test, y_pred))/(max(y)-min(y))
metrics.r2_score(y_test, y_pred)
###Output
_____no_output_____
###Markdown
--- Hyperparameter Tuning* Choose one of the following methods for hyperparameter tuning 1. **RandomizedSearchCV** --> Fast 2. **GridSearchCV*** Assign hyperparameters in form of a dictionary* Fit the model* Check best parameters and best score
###Code
from sklearn.model_selection import RandomizedSearchCV
#Randomized Search CV
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 100, stop = 1200, num = 12)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(5, 30, num = 6)]
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10, 15, 100]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 5, 10]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf}
# Random search of parameters, using 5 fold cross validation,
# search across 10 different combinations (n_iter = 10)
rf_random = RandomizedSearchCV(estimator = reg_rf, param_distributions = random_grid,scoring='neg_mean_squared_error', n_iter = 10, cv = 5, verbose=2, random_state=42, n_jobs = 1)
rf_random.fit(X_train,y_train)
rf_random.best_params_
prediction = rf_random.predict(X_test)
plt.figure(figsize = (8,8))
sns.distplot(y_test-prediction)
plt.show()
plt.figure(figsize = (8,8))
plt.scatter(y_test, prediction, alpha = 0.5)
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
print('MAE:', metrics.mean_absolute_error(y_test, prediction))
print('MSE:', metrics.mean_squared_error(y_test, prediction))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, prediction)))
###Output
MAE: 1165.606162629916
MSE: 4062650.6911608884
RMSE: 2015.6018186042818
###Markdown
--- Save the model to reuse it again
###Code
import pickle
# open a file where you want to store the data
file = open('flight_rf.pkl', 'wb')
# dump information to that file and close it so the bytes are flushed to disk
pickle.dump(reg_rf, file)
file.close()
# load the model back from the same file we just saved
model = open('flight_rf.pkl', 'rb')
forest = pickle.load(model)
y_prediction = forest.predict(X_test)
metrics.r2_score(y_test, y_prediction)
###Output
_____no_output_____
|
plot_star_back_free.ipynb
|
###Markdown
Setup
###Code
# Python 3 compatability
from __future__ import division, print_function
# system functions that are always useful to have
import time, sys, os
# basic numeric setup
import numpy as np
import math
from numpy import linalg
import scipy
from scipy import stats
# plotting
import matplotlib
from matplotlib import pyplot as plt
# fits data
from astropy.io import fits
# inline plotting
%matplotlib inline
# re-defining plotting defaults
from matplotlib import rcParams
rcParams.update({'xtick.major.pad': '7.0'})
rcParams.update({'xtick.major.size': '7.5'})
rcParams.update({'xtick.major.width': '1.5'})
rcParams.update({'xtick.minor.pad': '7.0'})
rcParams.update({'xtick.minor.size': '3.5'})
rcParams.update({'xtick.minor.width': '1.0'})
rcParams.update({'ytick.major.pad': '7.0'})
rcParams.update({'ytick.major.size': '7.5'})
rcParams.update({'ytick.major.width': '1.5'})
rcParams.update({'ytick.minor.pad': '7.0'})
rcParams.update({'ytick.minor.size': '3.5'})
rcParams.update({'ytick.minor.width': '1.0'})
rcParams.update({'axes.titlepad': '15.0'})
rcParams.update({'axes.labelpad': '15.0'})
rcParams.update({'font.size': 30})
###Output
_____no_output_____
###Markdown
Star with Free Background Load data.
###Code
nruns, nsizes, ntrials = 9, 5, 100000
sigclip = 5.
# true values
f, ferr, ftrials, imgsize = np.zeros((4, nruns, nsizes))
# extract data
flux, fluxerr, x, y, b = np.zeros((5, nruns, nsizes, ntrials))
for i in range(nruns):
for j in range(nsizes):
fname = 'data/flt_back/run{0}.fits'.format(i * nsizes + j) # run
if os.path.isfile(fname):
hdul = fits.open(fname)
# grab true values
f[i, j] = hdul[0].header['TRUEFLUX'] # true flux
psfwidth = hdul[0].header['PSFWIDTH'] # Gaussian PSF width
noise = hdul[0].header['NOISE'] # iid Gaussian noise
aeff = 4. * np.pi * psfwidth**2 # effective area
imgsize[i, j] = hdul[0].header['IMGSIZE'] # image area
ferr[i, j] = 1./np.sqrt(1./(aeff) - 1./(imgsize[i, j]**2)) # true error
# grab trials
data = hdul[1].data
flux[i, j] = data['Flux'] # fluxes
fluxerr[i, j] = data['Fluxerr'] # flux errors
x[i, j], y[i, j] = data['X'], data['Y'] # positions
b[i, j] = data['Back']
# clip suspicious trials
pos = np.c_[x[i, j], y[i, j]]
cinv = np.linalg.inv(np.cov(pos, rowvar=False)) # inv-cov
sqdist = np.array([np.dot(np.dot(p, cinv), p)
for p in pos]) # normalized distance
sel = (sqdist <= sigclip**2) & (flux[i, j] / fluxerr[i, j] > 0.2) # clip outliers
flux[i, j, ~sel], fluxerr[i, j, ~sel] = np.nan, np.nan
x[i, j, ~sel], y[i, j, ~sel] = np.nan, np.nan
b[i, j, ~sel] = np.nan
ftrials[i, j] = len(sel)
else:
print(fname + ' not found.')
# define relevant quantities
snr = f / ferr # true SNR
favg, fstd = np.nanmean(flux, axis=2), np.nanstd(flux, axis=2)
fbias_avg = (favg - f) / f # fractional bias
fbias_err = fstd / f / np.sqrt(ftrials) # uncertainty
flux_snr = flux / fluxerr # measured SNR
flux_debias = flux * (1 - flux_snr**-2 - flux_snr**-4) # de-biased flux (2nd order)
fdebias_avg = np.nanmean(flux_debias, axis=2) # average
fdebias_std = np.nanstd(flux_debias, axis=2) # scatter
# derive error via bootstrapping
nbootstrap = 250
np.random.seed(2764) # declare seed
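# Each bootstrap replicate resamples the clipped, de-biased fluxes with replacement and recomputes
# their scatter; the spread of those replicate scatters estimates the uncertainty on fdebias_std.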
fdebias_std_err = np.array([[np.nanstd([np.nanstd(np.random.choice(flux_debias[j, k],
size=ntrials))
for i in range(nbootstrap)])
for k in range(nsizes)]
for j in range(nruns)]) # error on scatter
###Output
/home/joshspeagle/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:3: RuntimeWarning: invalid value encountered in true_divide
This is separate from the ipykernel package so we can avoid doing imports until
/home/joshspeagle/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:5: RuntimeWarning: invalid value encountered in true_divide
"""
/home/joshspeagle/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:6: RuntimeWarning: invalid value encountered in true_divide
/home/joshspeagle/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:8: RuntimeWarning: invalid value encountered in true_divide
/home/joshspeagle/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:10: RuntimeWarning: Mean of empty slice
# Remove the CWD from sys.path while we load stuff.
/home/joshspeagle/anaconda3/lib/python3.6/site-packages/numpy/lib/nanfunctions.py:1545: RuntimeWarning: Degrees of freedom <= 0 for slice.
keepdims=keepdims)
###Markdown
Plot bias vs SNR.
###Code
snr_grid = np.linspace(np.nanmin(snr), np.nanmax(snr), 1000)
# plot flux bias + variance
plt.figure(figsize=(24, 10))
plt.suptitle('Star: Variable Background', y=1.02)
# flux
plt.subplot(1, 2, 1)
plt.errorbar(snr.flatten(), fbias_avg.flatten() * 100.,
yerr=fbias_err.flatten() * 100.,
marker='o', color='black',
linestyle='none', markersize=12,
elinewidth=2) # avg fractional bias
plt.plot(snr_grid, snr_grid**-2 * 100.,
linestyle='-', color='red',
label='1st-order', lw=3) # 1st-order correction
plt.plot(snr_grid, (snr_grid**-2 + snr_grid**-4) * 100.,
linestyle='-', color='dodgerblue',
label='2nd-order', lw=3) # 2nd-order correction
# label
plt.text(6, 1.7, 'First-order\ncorrection',
horizontalalignment='center', verticalalignment='center',
color='red')
plt.text(5.25, 6, 'Second-order\ncorrection',
horizontalalignment='center', verticalalignment='center',
color='dodgerblue')
# prettify
plt.text(8.8, 7.5, 'Overestimated',
horizontalalignment='center', verticalalignment='center',
color='black', alpha=0.8)
plt.xlabel(r'Effective SNR', labelpad=10)
plt.ylabel(r'Flux Bias [%]', labelpad=10)
plt.xlim(np.nanmin(snr) / 1.05, np.nanmax(snr) * 1.05)
plt.tight_layout()
# errors
plt.subplot(1, 2, 2)
plt.errorbar(snr.flatten(), (1. - fdebias_std / ferr).flatten() * 100.,
yerr=(fdebias_std_err / ferr).flatten() * 100., marker='o',
color='black', linestyle='none', markersize=12,
elinewidth=2) # avg fractional error bias
plt.plot(snr_grid, (1. - np.sqrt(1 + snr_grid**-2)) * 100.,
linestyle='-', color='red',
label='1st-order', lw=3) # 1st-order correction
# prettify
plt.text(4.8, -0.15, 'Underestimated',
horizontalalignment='center', verticalalignment='center',
color='black', alpha=0.8)
plt.xlabel(r'Effective SNR', labelpad=10)
plt.ylabel(r'Error Bias [%]', labelpad=10)
plt.xlim(np.nanmin(snr) / 1.05, np.nanmax(snr) * 1.05)
plt.ylim([-4, None])
plt.tight_layout()
# save figure
plt.savefig('plots/star_free_back.png', bbox_inches='tight')
###Output
_____no_output_____
|
real-estate-price/notebooks/02-model-training.ipynb
|
###Markdown
Export to PMML
###Code
from sklearn2pmml import sklearn2pmml
from sklearn2pmml.pipeline import PMMLPipeline
pipeline = PMMLPipeline([
("classifier", RandomForestRegressor(verbose=True, n_jobs=-1))
])
pipeline.fit(X_train, y_train)
pipeline.verify(X_test.sample(n = 10))
sklearn2pmml(pipeline, "/tmp/model.pmml")
###Output
_____no_output_____
|
dd_1/Part 4/Section 08 - Descriptors/02 - Getters and Setters.ipynb
|
###Markdown
Getters and Setters So far we have seen how the `__get__` method is called when we assign an instance of a descriptor to a class attribute.But we can access that attribute either from the class itself, or from an instance - as we saw in the last lecture, both accesses end up calling the `__get__` method; what changes are the arguments passed to the method. Let's explore this:
###Code
from datetime import datetime
class TimeUTC:
def __get__(self, instance, owner_class):
print(f'__get__ called, self={self}, instance={instance}, owner_class={owner_class}')
return datetime.utcnow().isoformat()
class Logger1:
current_time = TimeUTC()
class Logger2:
current_time = TimeUTC()
###Output
_____no_output_____
###Markdown
Now let's access `current_time` from the class itself:
###Code
Logger1.current_time
###Output
__get__ called, self=<__main__.TimeUTC object at 0x7f83d035be48>, instance=None, owner_class=<class '__main__.Logger1'>
###Markdown
As you can see, the `instance` was `None` - this was because we called the descriptor from the `Logger1` class, not an instance of it. The `owner_class` tells us this descriptor instance is defined in the `Logger1` class.The same holds if we use `Logger2`:
###Code
Logger2.current_time
###Output
__get__ called, self=<__main__.TimeUTC object at 0x7f83d035be80>, instance=None, owner_class=<class '__main__.Logger2'>
###Markdown
But if we call the descriptor via an instance instead:
###Code
l1 = Logger1()
print(hex(id(l1)))
l1.current_time
###Output
__get__ called, self=<__main__.TimeUTC object at 0x7f83d035be48>, instance=<__main__.Logger1 object at 0x7f83d03864a8>, owner_class=<class '__main__.Logger1'>
###Markdown
As you can see, `instance` is now the `l1` instance, and the owner class is still `Logger1`.The same holds for an instance of `Logger2`:
###Code
l2 = Logger2()
print(hex(id(l2)))
l2.current_time
###Output
0x7f83d0386b38
__get__ called, self=<__main__.TimeUTC object at 0x7f83d035be80>, instance=<__main__.Logger2 object at 0x7f83d0386b38>, owner_class=<class '__main__.Logger2'>
###Markdown
This means that we can differentiate, inside our `__get__` method, whether the descriptor was accessed via the class or via an instance.Typically when a descriptor is accessed from the class we return the descriptor instance, and when accessed from the instance we return the instance-specific value we want:
###Code
from datetime import datetime
class TimeUTC:
def __get__(self, instance, owner_class):
if instance is None:
# called from class
return self
else:
# called from instance
return datetime.utcnow().isoformat()
class Logger:
current_time = TimeUTC()
Logger.current_time
l = Logger()
l.current_time
###Output
_____no_output_____
###Markdown
This is consistent with the way properties work:
###Code
class Logger:
@property
def current_time(self):
return datetime.utcnow().isoformat()
Logger.current_time
###Output
_____no_output_____
###Markdown
This returned the property instance, whereas calling it from an instance:
###Code
l = Logger()
l.current_time
###Output
_____no_output_____
###Markdown
Now, there is one subtle point we have to understand when we create multiple instances of a class that uses a descriptor as a class attribute. Since the descriptor is assigned to a **class attribute**, all instances of the class will **share** the same descriptor instance!
###Code
class TimeUTC:
def __get__(self, instance, owner_class):
if instance is None:
# called from class
return self
else:
# called from instance
print(f'__get__ called in {self}')
return datetime.utcnow().isoformat()
class Logger:
current_time = TimeUTC()
l1 = Logger()
l2 = Logger()
###Output
_____no_output_____
###Markdown
But look at the `current_time` for each of those instances
###Code
l1.current_time, l2.current_time
###Output
__get__ called in <__main__.TimeUTC object at 0x7f83d039aeb8>
__get__ called in <__main__.TimeUTC object at 0x7f83d039aeb8>
###Markdown
As you can see the **same** instance of `TimeUTC` was used. This does not matter in this particular example, since we just return the current time, but watch what happens if our property relies on some kind of state in the descriptor:
###Code
class Countdown:
def __init__(self, start):
self.start = start + 1
def __get__(self, instance, owner):
if instance is None:
return self
else:
self.start -= 1
return self.start
class Rocket:
countdown = Countdown(10)
###Output
_____no_output_____
###Markdown
Now let's say we want to launch two rockets:
###Code
rocket1 = Rocket()
rocket2 = Rocket()
###Output
_____no_output_____
###Markdown
And let's start the countdown for each one:
###Code
rocket1.countdown
rocket2.countdown
rocket1.countdown
###Output
_____no_output_____
###Markdown
As you can see, the current countdown value is shared by both `rocket1` and `rocket2` instances of `Rocket` - this is because the `Countdown` instance is a class attribute of `Rocket`. So we have to be careful how we deal with instance level state. The `__set__` method works in a similar way to `__get__` but it is used when we assign a value to the class attribute.
###Code
class IntegerValue:
def __set__(self, instance, value):
print(f'__set__ called, instance={instance}, value={value}')
def __get__(self, instance, owner_class):
if instance is None:
print('__get__ called from class')
else:
print(f'__get__ called, instance={instance}, owner_class={owner_class}')
class Point2D:
x = IntegerValue()
y = IntegerValue()
Point2D.x
p = Point2D()
p.x
p.x = 100
###Output
__set__ called, instance=<__main__.Point2D object at 0x7f83d03a8f28>, value=100
###Markdown
So, where should we store the values `x` and `y`? Many "tutorials" I see on the web naively store the value in the descriptor itself:
###Code
class IntegerValue:
def __set__(self, instance, value):
self._value = int(value)
def __get__(self, instance, owner_class):
if instance is None:
return self
else:
return self._value
class Point2D:
x = IntegerValue()
y = IntegerValue()
###Output
_____no_output_____
###Markdown
At first blush, this seems to work just fine:
###Code
p1 = Point2D()
p1.x = 1.1
p1.y = 2.2
p1.x, p1.y
###Output
_____no_output_____
###Markdown
But, remember the point I was making about the instance of the descriptor (`IntegerValue` in this case) being shared by all instances of the class (`Point2D` in this case)?
###Code
p2 = Point2D()
p2.x, p2.y
###Output
_____no_output_____
###Markdown
And of course if we set the value:
###Code
p2.x = 100.9
p2.x, p1.x
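# A common fix (a sketch, not part of this lecture) is to store the data on the *instance*,
# keyed by the attribute name the descriptor was bound to (available via __set_name__):
class IntegerValueFixed:
    def __set_name__(self, owner_class, name):
        self.storage_name = name  # e.g. 'x' or 'y'
    def __set__(self, instance, value):
        instance.__dict__[self.storage_name] = int(value)  # per-instance storage
    def __get__(self, instance, owner_class):
        if instance is None:
            return self
        return instance.__dict__.get(self.storage_name)
class Point2DFixed:
    x = IntegerValueFixed()
    y = IntegerValueFixed()
p1_fixed, p2_fixed = Point2DFixed(), Point2DFixed()
p1_fixed.x, p2_fixed.x = 1.1, 100.9
p1_fixed.x, p2_fixed.x  # no longer shared: (1, 100)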
###Output
_____no_output_____
|
examples/Digits - Single Layer.ipynb
|
###Markdown
A single-layer neural network predicting hand-written digits (scikit-learn's 8x8 digits dataset)
###Code
from simpledl.DLTrainer import DLTrainer
from simpledl.ModelManager import ModelManager
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
import numpy as np
# Create our own trainer with its own load_data function
class MyDLTrainer(DLTrainer):
def load_data(self, test_size=0.1):
"""load digits dataset"""
src_X, src_Y = load_digits(return_X_y=True)
X = src_X / np.max(src_X) # normalize
Y = OneHotEncoder(sparse=False, categories='auto').fit_transform(src_Y.reshape(-1, 1))
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size)
src_X = src_X.T
src_Y = src_Y.T
X = X.T
Y = Y.T
X_train = X_train.T
X_test = X_test.T
Y_train = Y_train.T
Y_test = Y_test.T
return src_X, src_Y, X, Y, X_train, Y_train, X_test, Y_test
trainer = MyDLTrainer()
trainer.load_data()
src_X, src_Y, X, Y, X_train, Y_train, X_test, Y_test = trainer.load_data()
dim_input, dim_output = X.shape[0], Y.shape[0]
# Create our model
mgr = ModelManager()
mgr.create_model(dims=[dim_input, 25, dim_output],
activations=[DLTrainer.nonlin_relu, DLTrainer.nonlin_sigmoid],
default_alpha=0.003,
default_lambda=0.001)
# Train the model
updated_model, costs, accuracy = trainer.train(mgr, X_train, Y_train, 10000, 2500)
f, ax = plt.subplots()
ax.plot(costs)
ax.set_yscale('log')
ax.set_title("Cost v epoch")
mgr.update_model(updated_model)
print("ModelManager updated with trained model. Dev accuracy: {}".format(trainer.correct(mgr.model, X_test, Y_test)))
# Visualize a few examples
def visualize(x, title):
f, ax = plt.subplots(figsize=(2,2))
ax.imshow(x.reshape(8, 8), cmap=plt.cm.gray_r)
ax.set_title(title)
def show_generic_with_prediction(x, y, trainer):
y_hat = trainer.predict(mgr.model, x).ravel()[0]
msg = "Correctly inferred!" if y == y_hat else "Incorrectly inferred."
title = "Y: {}, Y_HAT: {} -- {}".format(y, y_hat, msg)
visualize(x, title)
def show_example_with_prediction(index, trainer):
x = X_test[:,index].reshape(-1, 1)
y = np.argmax(Y_test[:,index])
show_generic_with_prediction(x, y, trainer)
for i in range(5):
show_example_with_prediction(np.random.choice(100), trainer)
###Output
_____no_output_____
|
.ipynb_checkpoints/test_invoke_job_run-checkpoint.ipynb
|
###Markdown
Test JOB RUN from HTTP
###Code
# the last steps to invoke the Data Science job-run REST service
import requests
import oci
from oci.signer import Signer
%%time
endpoint = "https://datascience.eu-frankfurt-1.oci.oraclecloud.com/20190101/jobRuns"
# again using RP
rps = oci.auth.signers.get_resource_principals_signer()
# payload goes here
body = {}
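# The empty body above is what triggers the 'compartmentId is not available' response below; a
# minimal CreateJobRun payload (field names per the OCI REST API, OCIDs are placeholders) would be:
# body = {
#     "compartmentId": "ocid1.compartment.oc1..<placeholder>",
#     "projectId": "ocid1.datascienceproject.oc1..<placeholder>",
#     "jobId": "ocid1.datasciencejob.oc1..<placeholder>",
#     "displayName": "job-run-from-http",
# }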
print("These are the probs from the deployed model:")
print(requests.post(endpoint, json=body, auth=rps).json())
###Output
These are the probs from the deployed model:
{'code': 'InvalidParameter', 'message': 'compartmentId is not available'}
CPU times: user 631 ms, sys: 17.6 ms, total: 648 ms
Wall time: 1.93 s
|
notebooks/batch-regression-gradient.ipynb
|
###Markdown
Gradient of Cost on a Batch of Data In this notebook, we expand on the partial derivative calculus of the [*Single Point Regression Gradient* notebook](https://github.com/jonkrohn/ML-foundations/blob/master/notebooks/single-point-regression-gradient.ipynb) to: * Calculate the gradient of mean squared error on a batch of data* Visualize gradient descent in action
###Code
import torch
import matplotlib.pyplot as plt
xs = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7.])
ys = torch.tensor([1.86, 1.31, .62, .33, .09, -.67, -1.23, -1.37])
def regression(my_x, my_m, my_b):
return my_x*my_m + my_b
m = torch.tensor([0.9]).requires_grad_()
b = torch.tensor([0.1]).requires_grad_()
###Output
_____no_output_____
###Markdown
**Step 1**: Forward pass
###Code
yhats = regression(xs, m, b)
yhats
###Output
_____no_output_____
###Markdown
**Step 2**: Compare $\hat{y}$ with true $y$ to calculate cost $C$ As in the [*Regression in PyTorch* notebook](https://github.com/jonkrohn/ML-foundations/blob/master/notebooks/regression-in-pytorch.ipynb), let's use mean squared error, which averages quadratic cost across multiple data points: $$C = \frac{1}{n} \sum_{i=1}^n (\hat{y_i}-y_i)^2 $$
###Code
def mse(my_yhat, my_y):
sigma = torch.sum((my_yhat - my_y)**2)
return sigma/len(my_y)
C = mse(yhats, ys)
C
###Output
_____no_output_____
###Markdown
**Step 3**: Use autodiff to calculate gradient of $C$ w.r.t. parameters
###Code
C.backward()
m.grad
b.grad
###Output
_____no_output_____
###Markdown
**Return to *Calculus II* slides here to derive $\frac{\partial C}{\partial m}$ and $\frac{\partial C}{\partial b}$.** $$ \frac{\partial C}{\partial m} = \frac{2}{n} \sum (\hat{y}_i - y_i) \cdot x_i $$
###Code
2*1/len(ys)*torch.sum((yhats - ys)*xs)
###Output
_____no_output_____
###Markdown
$$ \frac{\partial C}{\partial b} = \frac{2}{n} \sum (\hat{y}_i - y_i) $$
###Code
2*1/len(ys)*torch.sum(yhats - ys)
###Output
_____no_output_____
###Markdown
We don't need to explicitly create a standalone $\nabla C$ object (Greek inverted delta is called *nabla* for "harp" but w.r.t. gradient is *del* as in "del C") for the remainder of the code in this notebook to run, but let's create it for fun now anyway and we'll make use of it in a later, related notebook:
###Code
gradient = torch.tensor([[b.grad.item(), m.grad.item()]]).T
gradient
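# Just to show what a later notebook does with this vector (a sketch, not part of this notebook):
# a single "manual" gradient-descent step on theta = [b, m]^T, mirroring optimizer.step() below.
theta = torch.tensor([[b.item(), m.item()]]).T
lr = 0.01
theta - lr*gradient  # the same update SGD applies to b and m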
###Output
_____no_output_____
###Markdown
Let's visualize the most pertinent metrics in a single plot:
###Code
def labeled_regression_plot(my_x, my_y, my_m, my_b, my_C, include_grad=True):
title = 'Cost = {}'.format('%.3g' % my_C.item())
if include_grad:
xlabel = 'm = {}, m grad = {}'.format('%.3g' % my_m.item(), '%.3g' % my_m.grad.item())
ylabel = 'b = {}, b grad = {}'.format('%.3g' % my_b.item(), '%.3g' % my_b.grad.item())
else:
xlabel = 'm = {}'.format('%.3g' % my_m.item())
ylabel = 'b = {}'.format('%.3g' % my_b.item())
fig, ax = plt.subplots()
plt.title(title)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
ax.scatter(my_x, my_y)
x_min, x_max = ax.get_xlim()
    y_min = (my_m*x_min + my_b).detach().item()  # detach so matplotlib can plot plain numbers
    y_max = (my_m*x_max + my_b).detach().item()
ax.set_xlim([x_min, x_max])
_ = ax.plot([x_min, x_max], [y_min, y_max], c='C01')
labeled_regression_plot(xs, ys, m, b, C)
###Output
_____no_output_____
###Markdown
**Step 4**: Gradient descent $\frac{\partial C}{\partial m} = 36.3$ indicates that an increase in $m$ corresponds to a large increase in $C$. Meanwhile, $\frac{\partial C}{\partial b} = 6.26$ indicates that an increase in $b$ also corresponds to an increase in $C$, though much less so than $m$.In the first round of training, the lowest hanging fruit with respect to reducing cost $C$ is therefore to decrease the slope of the regression line, $m$. There will also be a relatively small decrease in the $y$-intercept of the line, $b$.
###Code
optimizer = torch.optim.SGD([m, b], lr=0.01)
optimizer.step()
C = mse(regression(xs, m, b), ys)
labeled_regression_plot(xs, ys, m, b, C, include_grad=False) # Gradient of C hasn't been recalculated
###Output
_____no_output_____
###Markdown
Rinse and Repeat Observe further rounds of training:
###Code
epochs = 8
for epoch in range(epochs):
optimizer.zero_grad() # Reset gradients to zero; else they accumulate
yhats = regression(xs, m, b) # Step 1
C = mse(yhats, ys) # Step 2
C.backward() # Step 3
labeled_regression_plot(xs, ys, m, b, C)
optimizer.step() # Step 4
###Output
_____no_output_____
###Markdown
Gradient of Cost on a Batch of Data In this notebook, we expand on the partial derivative calculus of the [*Single Point Regression Gradient* notebook](https://github.com/jonkrohn/ML-foundations/blob/master/notebooks/single-point-regression-gradient.ipynb) to: * Calculate the gradient of mean squared error on a batch of data* Visualize gradient descent in action
###Code
import torch
import matplotlib.pyplot as plt
xs = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7.])
ys = torch.tensor([1.86, 1.31, .62, .33, .09, -.67, -1.23, -1.37])
def regression(my_x, my_m, my_b):
return my_m*my_x + my_b
m = torch.tensor([0.9]).requires_grad_()
b = torch.tensor([0.1]).requires_grad_()
###Output
_____no_output_____
###Markdown
**Step 1**: Forward pass
###Code
yhats = regression(xs, m, b)
yhats
###Output
_____no_output_____
###Markdown
**Step 2**: Compare $\hat{y}$ with true $y$ to calculate cost $C$ As in the [*Regression in PyTorch* notebook](https://github.com/jonkrohn/ML-foundations/blob/master/notebooks/regression-in-pytorch.ipynb), let's use mean squared error, which averages quadratic cost across multiple data points: $$C = \frac{1}{n} \sum_{i=1}^n (\hat{y_i}-y_i)^2 $$
###Code
def mse(my_yhat, my_y):
sigma = torch.sum((my_yhat - my_y)**2)
return sigma/len(my_y)
C = mse(yhats, ys)
C
###Output
_____no_output_____
###Markdown
**Step 3**: Use autodiff to calculate gradient of $C$ w.r.t. parameters
###Code
C.backward()
m.grad
b.grad
###Output
_____no_output_____
###Markdown
**Return to *Calculus II* slides here to derive $\frac{\partial C}{\partial m}$ and $\frac{\partial C}{\partial b}$.** $$ \frac{\partial C}{\partial m} = \frac{2}{n} \sum (\hat{y}_i - y_i) \cdot x_i $$
###Code
2*1/len(ys)*torch.sum((yhats - ys)*xs)
###Output
_____no_output_____
###Markdown
$$ \frac{\partial C}{\partial b} = \frac{2}{n} \sum (\hat{y}_i - y_i) $$
###Code
2*1/len(ys)*torch.sum(yhats - ys)
###Output
_____no_output_____
###Markdown
We don't need to explicitly create a standalone $\nabla C$ object (Greek inverted delta is called *nabla* for "harp" but w.r.t. gradient is *del* as in "del C") for the remainder of the code in this notebook to run, but let's create it for fun now anyway and we'll make use of it in a later, related notebook:
###Code
gradient = torch.tensor([[b.grad.item(), m.grad.item()]]).T
gradient
###Output
_____no_output_____
###Markdown
Let's visualize the most pertinent metrics in a single plot:
###Code
def labeled_regression_plot(my_x, my_y, my_m, my_b, my_C, include_grad=True):
title = 'Cost = {}'.format('%.3g' % my_C.item())
if include_grad:
xlabel = 'm = {}, m grad = {}'.format('%.3g' % my_m.item(), '%.3g' % my_m.grad.item())
ylabel = 'b = {}, b grad = {}'.format('%.3g' % my_b.item(), '%.3g' % my_b.grad.item())
else:
xlabel = 'm = {}'.format('%.3g' % my_m.item())
ylabel = 'b = {}'.format('%.3g' % my_b.item())
fig, ax = plt.subplots()
plt.title(title)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
ax.scatter(my_x, my_y, zorder=3)
x_min, x_max = ax.get_xlim()
y_min = regression(x_min, my_m, my_b).detach().item()
y_max = regression(x_max, my_m, my_b).detach().item()
ax.set_xlim([x_min, x_max])
_ = ax.plot([x_min, x_max], [y_min, y_max], c='C01')
labeled_regression_plot(xs, ys, m, b, C)
###Output
_____no_output_____
###Markdown
**Step 4**: Gradient descent $\frac{\partial C}{\partial m} = 36.3$ indicates that an increase in $m$ corresponds to a large increase in $C$. Meanwhile, $\frac{\partial C}{\partial b} = 6.26$ indicates that an increase in $b$ also corresponds to an increase in $C$, though much less so than $m$.In the first round of training, the lowest hanging fruit with respect to reducing cost $C$ is therefore to decrease the slope of the regression line, $m$. There will also be a relatively small decrease in the $y$-intercept of the line, $b$.
###Code
optimizer = torch.optim.SGD([m, b], lr=0.01)
optimizer.step()
C = mse(regression(xs, m, b), ys)
labeled_regression_plot(xs, ys, m, b, C, include_grad=False) # Gradient of C hasn't been recalculated
###Output
_____no_output_____
###Markdown
Rinse and Repeat Observe further rounds of training:
###Code
epochs = 8
for epoch in range(epochs):
optimizer.zero_grad() # Reset gradients to zero; else they accumulate
yhats = regression(xs, m, b) # Step 1
C = mse(yhats, ys) # Step 2
C.backward() # Step 3
labeled_regression_plot(xs, ys, m, b, C)
optimizer.step() # Step 4
###Output
_____no_output_____
###Markdown
Gradient of Cost on a Batch of Data In this notebook, we expand on the partial derivative calculus of the [*Single Point Regression Gradient* notebook](https://github.com/jonkrohn/ML-foundations/blob/master/notebooks/single-point-regression-gradient.ipynb) to: * Calculate the gradient of mean squared error on a batch of data* Visualize gradient descent in action
###Code
import torch
import matplotlib.pyplot as plt
xs = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7.])
ys = torch.tensor([1.86, 1.31, .62, .33, .09, -.67, -1.23, -1.37])
def regression(my_x, my_m, my_b):
return my_m*my_x + my_b
m = torch.tensor([0.9]).requires_grad_()
b = torch.tensor([0.1]).requires_grad_()
###Output
_____no_output_____
###Markdown
**Step 1**: Forward pass
###Code
yhats = regression(xs, m, b)
yhats
###Output
_____no_output_____
###Markdown
**Step 2**: Compare $\hat{y}$ with true $y$ to calculate cost $C$ As in the [*Regression in PyTorch* notebook](https://github.com/jonkrohn/ML-foundations/blob/master/notebooks/regression-in-pytorch.ipynb), let's use mean squared error, which averages quadratic cost across multiple data points: $$C = \frac{1}{n} \sum_{i=1}^n (\hat{y_i}-y_i)^2 $$
###Code
def mse(my_yhat, my_y):
sigma = torch.sum((my_yhat - my_y)**2)
return sigma/len(my_y)
C = mse(yhats, ys)
C
###Output
_____no_output_____
###Markdown
**Step 3**: Use autodiff to calculate gradient of $C$ w.r.t. parameters
###Code
C.backward()
m.grad
b.grad
###Output
_____no_output_____
###Markdown
**Return to *Calculus II* slides here to derive $\frac{\partial C}{\partial m}$ and $\frac{\partial C}{\partial b}$.** $$ \frac{\partial C}{\partial m} = \frac{2}{n} \sum (\hat{y}_i - y_i) \cdot x_i $$
###Code
2*1/len(ys)*torch.sum((yhats - ys)*xs)
###Output
_____no_output_____
###Markdown
$$ \frac{\partial C}{\partial b} = \frac{2}{n} \sum (\hat{y}_i - y_i) $$
###Code
2*1/len(ys)*torch.sum(yhats - ys)
###Output
_____no_output_____
###Markdown
We don't need to explicitly create a standalone $\nabla C$ object (Greek inverted delta is called *nabla* for "harp" but w.r.t. gradient is *del* as in "del C") for the remainder of the code in this notebook to run, but let's create it for fun now anyway and we'll make use of it in a later, related notebook:
###Code
gradient = torch.tensor([[b.grad.item(), m.grad.item()]]).T
gradient
###Output
_____no_output_____
###Markdown
Let's visualize the most pertinent metrics in a single plot:
###Code
def labeled_regression_plot(my_x, my_y, my_m, my_b, my_C, include_grad=True):
    title = 'Cost = {}'.format('%.3g' % my_C.item())
    if include_grad:
        xlabel = 'm = {}, m grad = {}'.format('%.3g' % my_m.item(), '%.3g' % my_m.grad.item())
        ylabel = 'b = {}, b grad = {}'.format('%.3g' % my_b.item(), '%.3g' % my_b.grad.item())
    else:
        xlabel = 'm = {}'.format('%.3g' % my_m.item())
        ylabel = 'b = {}'.format('%.3g' % my_b.item())
    fig, ax = plt.subplots()
    plt.title(title)
    plt.ylabel(ylabel)
    plt.xlabel(xlabel)
    ax.scatter(my_x, my_y, zorder=3)
    x_min, x_max = ax.get_xlim()
    y_min = regression(x_min, my_m, my_b)
    y_max = regression(x_max, my_m, my_b)
    ax.set_xlim([x_min, x_max])
    _ = ax.plot([x_min, x_max], [y_min, y_max], c='C01')
labeled_regression_plot(xs, ys, m, b, C)
###Output
_____no_output_____
###Markdown
**Step 4**: Gradient descent. $\frac{\partial C}{\partial m} = 36.3$ indicates that an increase in $m$ corresponds to a large increase in $C$. Meanwhile, $\frac{\partial C}{\partial b} = 6.26$ indicates that an increase in $b$ also corresponds to an increase in $C$, though much less so than $m$. In the first round of training, the lowest hanging fruit with respect to reducing cost $C$ is therefore to decrease the slope of the regression line, $m$. There will also be a relatively small decrease in the $y$-intercept of the line, $b$.
###Code
optimizer = torch.optim.SGD([m, b], lr=0.01)
optimizer.step()
C = mse(regression(xs, m, b), ys)
labeled_regression_plot(xs, ys, m, b, C, include_grad=False) # Gradient of C hasn't been recalculated
###Output
_____no_output_____
###Markdown
Rinse and Repeat Observe further rounds of training:
###Code
epochs = 8
for epoch in range(epochs):
    optimizer.zero_grad() # Reset gradients to zero; else they accumulate
    yhats = regression(xs, m, b) # Step 1
    C = mse(yhats, ys) # Step 2
    C.backward() # Step 3
    labeled_regression_plot(xs, ys, m, b, C)
    optimizer.step() # Step 4
###Output
_____no_output_____
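###Markdown
One way to see that training is behaving is to record the cost at each epoch and confirm it keeps falling. A sketch that continues training for a few more epochs while storing `C` (it reuses `optimizer`, `m`, `b`, `xs`, `ys` and the helper functions defined above):
###Code
costs = []
for epoch in range(5):
    optimizer.zero_grad()
    C = mse(regression(xs, m, b), ys)
    costs.append(C.item())  # cost before this epoch's parameter update
    C.backward()
    optimizer.step()
print(['%.3f' % c for c in costs])  # should be (roughly) decreasing
###Output
_____no_output_____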
###Markdown
Gradient of Cost on a Batch of Data In this notebook, we expand on the partial derivative calculus of the [*Single Point Regression Gradient* notebook](https://github.com/jonkrohn/ML-foundations/blob/master/notebooks/single-point-regression-gradient.ipynb) to:
* Calculate the gradient of mean squared error on a batch of data
* Visualize gradient descent in action
###Code
import torch
import matplotlib.pyplot as plt
xs = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7.])
ys = torch.tensor([1.86, 1.31, .62, .33, .09, -.67, -1.23, -1.37])
def regression(my_x, my_m, my_b):
    return my_x*my_m + my_b
m = torch.tensor([0.9]).requires_grad_()
b = torch.tensor([0.1]).requires_grad_()
###Output
_____no_output_____
###Markdown
**Step 1**: Forward pass
###Code
yhats = regression(xs, m, b)
yhats
###Output
_____no_output_____
###Markdown
**Step 2**: Compare $\hat{y}$ with true $y$ to calculate cost $C$ As in the [*Regression in PyTorch* notebook](https://github.com/jonkrohn/ML-foundations/blob/master/notebooks/regression-in-pytorch.ipynb), let's use mean squared error, which averages quadratic cost across multiple data points: $$C = \frac{1}{n} \sum_{i=1}^n (\hat{y_i}-y_i)^2 $$
###Code
def mse(my_yhat, my_y):
    sigma = torch.sum((my_yhat - my_y)**2)
    return sigma/len(my_y)
C = mse(yhats, ys)
C
###Output
_____no_output_____
###Markdown
**Step 3**: Use autodiff to calculate gradient of $C$ w.r.t. parameters
###Code
C.backward()
m.grad
b.grad
###Output
_____no_output_____
###Markdown
**Return to *Calculus II* slides here to derive $\frac{\partial C}{\partial m}$ and $\frac{\partial C}{\partial b}$.** $$ \frac{\partial C}{\partial m} = \frac{2}{n} \sum (\hat{y}_i - y_i) \cdot x_i $$
###Code
2*1/len(ys)*torch.sum((yhats - ys)*xs)
###Output
_____no_output_____
###Markdown
$$ \frac{\partial C}{\partial b} = \frac{2}{n} \sum (\hat{y}_i - y_i) $$
###Code
2*1/len(ys)*torch.sum(yhats - ys)
###Output
_____no_output_____
###Markdown
We don't need to explicitly create a standalone $\nabla C$ object for the remainder of the code in this notebook to run, but let's create it for fun anyway:
###Code
nabla_C = torch.tensor([[m.grad.item(), b.grad.item()]]).T  # wrap in an extra pair of brackets so .T yields a column vector (.T is a no-op on a 1-D tensor)
nabla_C
###Output
_____no_output_____
###Markdown
Let's visualize the most pertinent metrics in a single plot:
###Code
def labeled_regression_plot(my_x, my_y, my_m, my_b, my_C, include_grad=True):
    title = 'Cost = {}'.format('%.3g' % my_C.item())
    if include_grad:
        xlabel = 'm = {}, m grad = {}'.format('%.3g' % my_m.item(), '%.3g' % my_m.grad.item())
        ylabel = 'b = {}, b grad = {}'.format('%.3g' % my_b.item(), '%.3g' % my_b.grad.item())
    else:
        xlabel = 'm = {}'.format('%.3g' % my_m.item())
        ylabel = 'b = {}'.format('%.3g' % my_b.item())
    fig, ax = plt.subplots()
    plt.title(title)
    plt.ylabel(ylabel)
    plt.xlabel(xlabel)
    ax.scatter(my_x, my_y)
    x_min, x_max = ax.get_xlim()
    y_min, y_max = my_m*x_min + my_b, my_m*x_max + my_b
    ax.set_xlim([x_min, x_max])
    _ = ax.plot([x_min, x_max], [y_min, y_max])
labeled_regression_plot(xs, ys, m, b, C)
###Output
_____no_output_____
###Markdown
**Step 4**: Gradient descent. $\frac{\partial C}{\partial m} = 36.3$ indicates that an increase in $m$ corresponds to a large increase in $C$. Meanwhile, $\frac{\partial C}{\partial b} = 6.26$ indicates that an increase in $b$ also corresponds to an increase in $C$, though much less so than $m$. In the first round of training, the lowest hanging fruit with respect to reducing cost $C$ is therefore to decrease the slope of the regression line, $m$. There will also be a relatively small decrease in the $y$-intercept of the line, $b$.
###Code
optimizer = torch.optim.SGD([m, b], lr=0.01)
optimizer.step()
C = mse(regression(xs, m, b), ys)
labeled_regression_plot(xs, ys, m, b, C, include_grad=False) # Gradient of C hasn't been recalculated
###Output
_____no_output_____
###Markdown
Rinse and Repeat Observe further rounds of training:
###Code
epochs = 8
for epoch in range(epochs):
optimizer.zero_grad() # Reset gradients to zero; else they accumulate
yhats = regression(xs, m, b) # Step 1
C = mse(yhats, ys) # Step 2
C.backward() # Step 3
labeled_regression_plot(xs, ys, m, b, C)
optimizer.step() # Step 4
###Output
_____no_output_____
|
Pandas_For_Data_Science/Pandas_For_Data_Science_exercise.ipynb
|
###Markdown
1. You need to give prizes to the five students taking Physics with the top mean marks over all four modules. Which students get the prizes?
###Code
Mean_marks = data[['Quantum Mechanics', 'Relativity', 'Waves', 'Lab work']][data['Programme']=='Physics'].mean(1, skipna=False)
data['Mean marks'] = Mean_marks
data.sort_values('Mean marks', ascending=False).head(5)
###Output
_____no_output_____
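###Markdown
An equivalent, slightly more direct way to pull out the top five is `nlargest`; a small sketch, assuming the `Mean marks` column added above:
###Code
data.nlargest(5, 'Mean marks')  # same five students as sort_values(...).head(5)
###Output
_____no_output_____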
###Markdown
The above 5 students are the ones who won the prize. 2. The staff member running the tutor group with the highest mean mark gets a beer. Which group's tutor gets the beer?
###Code
data.groupby('Tutor group').mean().sort_values('Mean marks', ascending=False).head(5)
###Output
_____no_output_____
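###Markdown
If only the winning group number is needed, `idxmax` on the grouped means returns it directly; a small sketch:
###Code
data.groupby('Tutor group')['Mean marks'].mean().idxmax()  # tutor group with the highest mean mark
###Output
_____no_output_____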
###Markdown
Tutor group number 12 has the highest mean on the 4 modules, so tutor 12 gets a free beer. 3. You need to report the mean mark for each course to the faculty. List the four courses by order of mean mark. Plot these on a bar chart so they can understand it.
###Code
fig, ax = plt.subplots(1, 1, figsize=(6, 4.5))
data[['Quantum Mechanics', 'Relativity', 'Waves', 'Lab work']].mean().plot(kind='bar',
ax=ax, color='blue')
plt.xticks(rotation=0)
plt.show()
###Output
_____no_output_____
###Markdown
4. Scores above 70% are a 'first'. Scores between 60 and 69% are an 'upper second', between 50 and 59% a 'lower second', between 40 and 49% a 'third', and 39% and below is a fail. For Quantum Mechanics, plot a pie chart showing the number of students who fall in each of these categories
###Code
bins = [0, 39, 49, 59, 69, 100]
labels = ['Fail', 'Third', 'Lower second', 'Upper second', 'First']
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
ax.set_title('Quantum mechanics\nscores', fontsize=20)
explode = (0.1, 0.1, 0.1, 0.1, 0.1)
pd.cut(data['Quantum Mechanics'], bins, labels=labels).value_counts().plot(kind='pie', ax=ax, fontsize=15, shadow=True, explode=explode)
ax.set_ylabel(' ')
plt.show()
###Output
_____no_output_____
###Markdown
5. Students on the Physics programme pass the year if they score more than 40% on three out of four modules. Otherwise they fail. How many students failed? Loop through the failing students, printing out a personalised statement (imagine that you will code it so Python emails it to them) telling them they've failed
###Code
import smtplib  # not used in this cell; kept for the emailing idea (see the sketch below)

sender = '[email protected]'
# A student fails the year if they score below 40% on two or more of the four modules.
# (No Programme filter is applied here, so every student is checked; 17 statements are
# printed below, i.e. 17 students failed.)
for idx, row in data.iterrows():
    courses_failed = 0
    if (row['Quantum Mechanics']<40):
        courses_failed+=1
    if (row['Relativity']<40):
        courses_failed+=1
    if (row['Waves']<40):
        courses_failed+=1
    if (row['Lab work']<40):
        courses_failed+=1
    if (courses_failed>1):
        print('Dear {}, we are sorry to inform you that you have failed'.format(row['Forename 1']))
###Output
Dear Meagan, we are sorry to inform you that you have failed
Dear Buford, we are sorry to inform you that you have failed
Dear Londa, we are sorry to inform you that you have failed
Dear Zona, we are sorry to inform you that you have failed
Dear Zita, we are sorry to inform you that you have failed
Dear Rhett, we are sorry to inform you that you have failed
Dear Melva, we are sorry to inform you that you have failed
Dear Zella, we are sorry to inform you that you have failed
Dear Wai, we are sorry to inform you that you have failed
Dear Tammy, we are sorry to inform you that you have failed
Dear Benedict, we are sorry to inform you that you have failed
Dear Arianne, we are sorry to inform you that you have failed
Dear Jamal, we are sorry to inform you that you have failed
Dear Sheree, we are sorry to inform you that you have failed
Dear Sharon, we are sorry to inform you that you have failed
Dear Jerome, we are sorry to inform you that you have failed
Dear Yesenia, we are sorry to inform you that you have failed
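###Markdown
To actually send those personalised statements, `smtplib` (imported above but unused there) could be wired in roughly as below. This is a hedged sketch only: the SMTP host, the sender address, and the assumption that the data contains an `Email` column are all hypothetical and not part of the dataset.
###Code
import smtplib

def email_failing_students(df, host='smtp.example.org', sender='[email protected]'):
    """Illustrative only: 'Email' is a hypothetical column; host and sender are placeholders."""
    modules = ['Quantum Mechanics', 'Relativity', 'Waves', 'Lab work']
    with smtplib.SMTP(host) as server:
        for _, row in df.iterrows():
            failed = sum(row[module] < 40 for module in modules)
            if failed > 1:  # same failure rule as the loop above
                body = 'Dear {}, we are sorry to inform you that you have failed'.format(row['Forename 1'])
                server.sendmail(sender, row['Email'], 'Subject: Exam results\n\n' + body)
###Output
_____no_output_____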
###Markdown
6. Rumour has it the scores for Lab Work have been made up. Create a scatter matrix for the four courses. What does this tell you?
###Code
from pandas.plotting import scatter_matrix  # scatter_matrix is not imported anywhere above

fig, ax = plt.subplots(1, 1, figsize=(10, 10))
scatter_matrix(data[['Quantum Mechanics', 'Relativity', 'Waves', 'Lab work']], ax=ax,
               diagonal='kde', color='blue', density_kwds=dict(color='blue'))
plt.show()
data[['Quantum Mechanics', 'Relativity', 'Waves', 'Lab work']].corr()
###Output
_____no_output_____
###Markdown
It is clear from both the scatter matrix and the correlation table above that all the courses' scores are correlated to each other, except for the lab work scores. This could be a red flag! 7. Do a linear regression analysis to come up with a linear model for the Waves score based on the Relativity score.
###Code
data[['Waves', 'Relativity']].plot(kind='scatter', x='Relativity', y='Waves', color='blue')
data_clean = data.dropna() #sm.OLS does not work with NaNs
import statsmodels.api as sm
x = data_clean['Relativity']
y = data_clean['Waves']
X = sm.add_constant(x)
model = sm.OLS(y, X).fit() #Ordinary least squares
predictions = model.predict(X)
model.summary()
model.params
import numpy as np
import seaborn as sns
fig, ax = plt.subplots(1, 1, figsize=(6, 4.5))
ci = 95
sns.regplot(x='Relativity', y='Waves', data=data, ci=ci, color='blue')  # x/y ordered to match the model (Waves predicted from Relativity) and the axis labels below
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
ax.set_xlabel('Relativity exam scores', fontsize=15)
ax.set_ylabel('Waves exam scores', fontsize=15)
plt.show()
###Output
_____no_output_____
|
workshop-resources/data-science-and-machine-learning/Data_Science_1/loan-project/loan-notebook.ipynb
|
###Markdown
Loan Qualification Activity
Contoso Data helps customers qualify for home loans. They are a global data science company that works in all sectors of housing. Customers first apply for a home loan, after which Contoso Data validates the customer's eligibility for a loan. In this project, Contoso Data wants to automate the loan eligibility process (real-time) based on customer details provided while filling out an online application form. These details are listed below in the table with the variable names and descriptions. The variables are listed exactly how they are in the data.
> A data dictionary is a description of data in business terms, also including information about the data such as data types, details of structure, and security restrictions.
Data Dictionary - Variable Descriptions
|VARIABLE | DESCRIPTION|
|---|---|
|Loan_ID|Unique Loan ID|
|Gender|Male/Female|
|Married|Applicant married (Y/N)|
|Dependents|Number of dependents|
|Education| Applicant Education (Graduate/ Under Graduate)|
|Self_Employed|Self employed (Y/N)|
|ApplicantIncome|Applicant income|
|CoapplicantIncome|Coapplicant income|
|LoanAmount|Loan amount in thousands|
|Loan_Amount_Term|Term of loan in months|
|Credit_History|Credit History meets guidelines|
|Property_Area|Urban/ Semi Urban/ Rural|
|Loan_Status|Loan approved (Y/N)|
The Problem
We will assume the role of data scientist for Contoso Data and approach this problem exactly how we would do it in the real world. A data scientist with expertise in home mortgages would often be called on to create models that classify or determine specific outcomes based on some data they are given. These could be logistic regression models, linear regression, or a custom model that would take a team of experts to develop the inputs, weights, and outcomes. Just as if we actually worked in the Contoso Data Center, we'll create a common yet valuable tool that a loan officer with thousands of applications would use to quickly determine the best candidates for loans. The Loan Officer is our domain expert and would consult a data scientist, such as yourself, to come up with ways to make accurate pre-selections, and also save a great deal of time. Our task will be to create a machine learning model that will predict whether or not a loan will be approved.
Data Science Ethical Practices
Both the Certified Analytics Professional (CAP) and the United Nations Statistics Division have released official codes and declarations of ethics to address data science directives. The purpose of these guidelines is to clarify crucially important ethical requirements that set standards, help in deterring deceitful behavior, and keep individuals and organizations accountable for the ways they collect and use data-driven information. It's important that data scientists learn and find training to address ethical issues in data science and within their industries. We can start by taking some responsibility and setting some guidelines. Here are some common guidelines that we should use when working with data: 1) collect minimal data and aggregate what is there, in other words, only collect what's needed; 2) identify and scrub sensitive data; and 3) have a plan in case you make a mistake.
Removing the Gender and Married fields
Since we would not discriminate against people based on their gender, we will remove that field. Also, instead of basing our decision on whether or not someone is married, we'll remove it and use something like income, since it would be a more objective variable to base a loan decision on.
The Data Science Process
1. Understanding the domain
2. Making a plan
3. Exploring the data
4. Preparing the data
5. Training your model
6. Review your results
7. Deployment
During our workshop we've talked a lot about why understanding the domain you are working in is so critical to uncovering the most valuable insights in your data. In other words, you know the business **inside and out**. By knowing your business your experiments will be more efficient and tackle the known issues in your domain. Does this mean that we shouldn't *play* with other datasets? Of course not; in fact, using other data and exploring it can uncover insight and be a great tool for learning.
The Plan
This dataset provides you a taste of working on data sets from insurance companies: what challenges are faced there, what strategies are used, which variables influence the outcome, etc. The data has 614 rows and 13 columns. Our plan will be to approach this problem as a **logistic regression** study.
Importing Libraries and Data
To begin the loan qualification activity we should first load the dataset and the libraries that we will be using. We will import the following libraries to aid in our study and activity:
* `numpy`
* `matplotlib`
* `pandas`
For more information on using `numpy` and `pandas`, the pandas website has a tutorial that will cover their usage in [10 minutes.](https://pandas.pydata.org/pandas-docs/stable/getting_started/10min.html)
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
#Reading the dataset in a dataframe using pandas
df = pd.read_csv("Data/loan_prediction_training_data.csv")
###Output
_____no_output_____
###Markdown
Checkpoint > Remember that you can use a question mark after any method or function in the Jupyter notebook. To read more about how to use the `read_csv` function, use the code below:
###Code
pd.read_csv?
###Output
_____no_output_____
###Markdown
Exercise: Explore the Data **You can scroll to the right as well to see all the fields in the dataset.**
###Code
df.describe()
df.info()
###Output
_____no_output_____
###Markdown
Checkpoint: describe() function The `describe` function provides count, mean, standard deviation, min, quartiles, and max in its output. If you need a refresher on some of these values, review them in this [article.](https://www.analyticsvidhya.com/blog/2014/07/statistics/) What are some inferences you can make about the data looking at the output from the describe function? You should be looking for things like missing values, inconsistencies, and other problems with your data. You might have to reference a domain expert (if you're not one) and find out more information about the data. From the `describe` function we can see that LoanAmount, Loan_Amount_Term and Credit_History all have missing values. Also, if we recall exploring the output of the `head` function, we saw that Credit_History only has 1 or 0 as values. Looking at the mean value, we see that only 84% of the applicants have credit histories. Do you notice anything else? Non-numerical values First, let's deal with the `Gender` and `Married` columns. We will drop them.
###Code
df.drop(columns=['Gender', 'Married'], inplace=True)
###Output
_____no_output_____
###Markdown
Okay, now that's taken care of--Well Done!
###Code
# Let's save the data to a new file
df.to_csv('Data/loan_prediction_training_data_no_G_M.csv')
###Output
_____no_output_____
###Markdown
For dealing with non-numerical data (e.g. Property_Area, Education, Self_Employed) we can examine the frequency distribution and try to understand the data more. We can use the `.value_counts()` function.
###Code
df['Self_Employed'].value_counts()
df['Property_Area'].value_counts()
df['Education'].value_counts()
###Output
_____no_output_____
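###Markdown
`value_counts` can also return proportions rather than raw counts, which can be easier to compare across categories; a small sketch:
###Code
df['Property_Area'].value_counts(normalize=True)  # share of applicants in each property area
###Output
_____no_output_____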
###Markdown
Distribution Analysis After a cursory exploration of the data, we should now look at the distribution of the numeric data, specifically ApplicantIncome and LoanAmount. Let's start with a histogram of ApplicantIncome. We know there is a wide range of variation in the values here, so we'll use 50 bins to properly depict the distribution.
###Code
df['ApplicantIncome'].hist(bins=50)
###Output
_____no_output_____
###Markdown
Now we can use boxplots to better understand the distributions.
###Code
df.boxplot(column='ApplicantIncome')
###Output
_____no_output_____
###Markdown
The boxplot confirms that we have a lot of outliers in our data. We already know that there is a huge difference between how much money different individuals make, and much of it is due to their education level. Let's break college graduates and non-college graduates out into groups
###Code
df.boxplot(column='ApplicantIncome', by = 'Education')
###Output
_____no_output_____
###Markdown
Now let's do a similar review of the LoanAmount variable.
###Code
df['LoanAmount'].hist(bins=50)
df.boxplot(column='LoanAmount')
df.boxplot(column='LoanAmount', by = 'Education')
###Output
_____no_output_____
###Markdown
Extremely interesting and clear results. It's obvious that ApplicantIncome and LoanAmount are going to require some amount of data processing and munging to give us better results. One more plot we should look at before we go is whether or not being self-employed is a factor in a loan approval.
###Code
df.boxplot(column='LoanAmount', by = 'Self_Employed')
###Output
_____no_output_____
###Markdown
Exercise What are some other boxplots and histograms you would like to examine? Take some time now to do your own investigations on some variables. Categorical Data Analysis What's categorical data? In this case we are referring to non-numerical data such as words as values. What can we do with this sort of data to gain a better understanding? Let's look at the chances a person has getting a loan based solely on credit history. We will look at the Loan_Status data which is a boolean value, 0 or 1. We can assume that 0 is for a No, and 1 is for a Yes. We will use a pivot table in pandas to make this visual.
###Code
# First we will create a frequency table on the Credit_History column to see how it is distributed.
temp1 = df['Credit_History'].value_counts(ascending=True)
# Let's also create a pivot table that shows us the probability of loan approval when you factor in credit history.
# Loan_Status is coded 1 for approved and 0 for unapproved, this means the average of all
# the values is the probability of getting a loan.
temp2 = df.pivot_table(values='Loan_Status',index=['Credit_History'],aggfunc=lambda x: x.map({'Y':1,'N':0}).mean())
print ('Frequency Table for Credit History:')
print (temp1)
print ('\nProbability of getting loan based on Credit_History:')
print (temp2)
###Output
_____no_output_____
###Markdown
Challenge: Find the Average of the Loan_Status variable How can we find the average of the Loan_Status column? Hint: The data is categorical. One way to solve this is to find out how many 'Y' answers there are and then divide that by the total number of values in the column. Hint: Use the `value_counts` function and then just divide the values. We can also look at plots of this data. Let's create more plots and gain more understanding about the data.
###Code
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8,4))
ax1 = fig.add_subplot(121)
ax1.set_xlabel('Credit_History')
ax1.set_ylabel('Count of Applicants')
ax1.set_title("Applicants by Credit_History")
temp1.plot(kind='bar')
ax2 = fig.add_subplot(122)
ax2.set_xlabel('Credit_History')
ax2.set_ylabel('Probability of Approval')
ax2.set_title("Probability of Approval Based on Credit History")
temp2 = temp2.squeeze()
temp2.plot(kind = 'bar')
###Output
_____no_output_____
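###Markdown
One possible solution to the challenge posed above, shown as a sketch: since `Loan_Status` is categorical, count the `'Y'` values and divide by the total, or let `value_counts(normalize=True)` do the division for you.
###Code
counts = df['Loan_Status'].value_counts()
print(counts['Y'] / counts.sum())
print(df['Loan_Status'].value_counts(normalize=True)['Y'])  # same number in one step
###Output
_____no_output_____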
###Markdown
Now try changing the Credit_History variable to something else like Dependents, Self_Employed, or Property_Area (recall that Gender and Married were dropped earlier). Do any of the variables closely correlate? Stacked Charts We can also create a stacked chart that combines the plots above.
###Code
temp3 = pd.crosstab(df['Credit_History'], df['Loan_Status'])
temp3.plot(kind='bar', stacked=True, color=['red','blue'], grid=False)
pd.crosstab?
temp3 = pd.crosstab([df['Credit_History'], df['Education']], df['Loan_Status'],)
temp3.plot(kind='bar', stacked=True, color=['red','blue'], )
temp3
###Output
_____no_output_____
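###Markdown
The stacked charts above amount to a simple decision rule. To make that rule explicit, here is a sketch that predicts approval purely from Credit_History and scores the rule against the actual loan decisions (rows with a missing credit history simply fall through to a 'N' prediction here):
###Code
# Predict 'Y' when Credit_History equals 1, otherwise (including NaN) predict 'N'
predicted = df['Credit_History'].apply(lambda ch: 'Y' if ch == 1.0 else 'N')
accuracy = (predicted == df['Loan_Status']).mean()
print('Accuracy of the credit-history-only rule: {:.1%}'.format(accuracy))
###Output
_____no_output_____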
###Markdown
**Congratulations!** Did you realize what we just did? We just created two basic **classification algorithms!** We created the first one based just on Credit_History, and then we created one based on Credit_History and Education. This is the last step that we'll do in exploring our dataset. From the exploration we know that we need to deal with some messy data. Let's move on to some **Data Munging.** Data Munging and Preparing our Data In the previous part of our process we learned about some of the problems with our data, specifically that we have some missing values. If we are going to "fix" these problems we need to be extremely careful and thoughtful about what type of data we will use to fill in the empty values. We noticed that in the ApplicantIncome and LoanAmount columns there was also outlying data on the extreme ends of the ranges. Even though we understand why the values are the way they are, we should do something with the outlying values as well. Let's see what we can do with the `apply` function; first let's examine what it does.
###Code
# The apply function applies a function across the axis of a dataframe.
df.apply?
df.apply(lambda x: sum(x.isnull()),axis=0)
###Output
_____no_output_____
###Markdown
The `lambda` function allows us to scan through the dataframe and find all the fields that contain a null value. Note: even though we don't have a huge amount of missing data, we should still do a little more work and estimate whether we can fill those values with fill data that makes sense. We should also check for things that we notice. For example, Loan_Amount_Term has 14 null values. If a loan is made with a loan period of 0, is it null? Let's explore a bit more. Filling null values in LoanAmount Figuring out what to fill null values with can be tricky, and you should always refer to a domain expert if possible. But in this case, we can probably use the average loan amount to fill the null values. We will do that first.
###Code
df['LoanAmount'].fillna(df['LoanAmount'].mean(), inplace=True)
# Let's check and see, we shouldn't see any null values in the LoanAmount column
df.apply(lambda x: sum(x.isnull()),axis=0)
# Now we can figure out what the fill value for loan amount is using value_counts
df['LoanAmount'].value_counts()
###Output
_____no_output_____
###Markdown
Filling values in the Self_Employed Column From our exploration step, we know that there are quite a few null values in the Self_Employed column. Let's figure out a good way to fill the null values based on what we know about the data we do have.
###Code
print(df.boxplot(column='LoanAmount', by = 'Education'))
print(df.boxplot(column='LoanAmount', by = 'Self_Employed'))
###Output
_____no_output_____
###Markdown
We can use these boxplots to spot trends in our data, and we see some variation in the median loan amount across the groups, so that can be used to impute the data. Let's also make sure that Self_Employed and Education are not left with any null or missing values.
###Code
df['Self_Employed'].value_counts()
# Now we can calculate the percentage of 'No' responses
print('Percentage of "No" values in the data is:', 500/582*100,'%')
###Output
_____no_output_____
###Markdown
Since about 86% of the responses in this column are a No, it is safe for us to impute the missing values as "no" values. We can do that with this code:
###Code
df['Self_Employed'].fillna('No',inplace=True)
df.info()
###Output
_____no_output_____
###Markdown
The next step is to create a pivot table, which gives us the median LoanAmount for each group of unique values of the Self_Employed and Education features. Next, we define a function that returns the value of the matching cell and apply it to fill the missing values of loan amount:
###Code
table = df.pivot_table(values='LoanAmount', index='Self_Employed', columns='Education', aggfunc=np.median)
table
# Define function to return value of this pivot_table
def fage(x):
    return table.loc[x['Self_Employed'],x['Education']]
# Replace missing values
df['LoanAmount'].fillna(df.apply(fage, axis=1), inplace=True)
df.info()
###Output
_____no_output_____
###Markdown
Using a log transformation to nullify the effect of extreme values Let's look at LoanAmount first. We already know that people apply for loans in all ranges, including high-value loans for specific properties. Instead of treating them as outliers, we can use a log transformation to reduce the effect they have on representing the data.
###Code
# Let's pull the histgram again
df['LoanAmount_log'] = np.log(df['LoanAmount'])
df['LoanAmount_log'].hist(bins=20)
###Output
_____no_output_____
###Markdown
Now this distribution looks better. The effect of the extreme values has been considerably reduced. One more thing to note when considering ApplicantIncome: did you notice that there was also a CoapplicantIncome? It might be a good idea to combine these columns into a TotalIncome column and do a log transformation.
###Code
df['TotalIncome'] = df['ApplicantIncome'] + df['CoapplicantIncome']
df['TotalIncome_log'] = np.log(df['TotalIncome'])
df['TotalIncome_log'].hist(bins=20)
###Output
_____no_output_____
###Markdown
The distribution again is better than before. You can decide whether or not you will continue the munging exercise with Dependents or the other variables. Building a predictive model in PythonSo far we've spent a lot of time prepping our data getting it ready for our model. We'll be using a new library (for us) to code our model. Scikit-learn (sklearn) is the most commonly used data science library in Python for this purpose.Skicit-Learn requires that all inputs be numeric, but first let's quickly fill in all the null values within our data. For the sake of time, we've prepared all the code for you here.
###Code
# Quick Fill of all the null values in the data
df['Dependents'].fillna(df['Dependents'].mode()[0], inplace=True)
df['Loan_Amount_Term'].fillna(df['Loan_Amount_Term'].mode()[0], inplace=True)
df['Credit_History'].fillna(df['Credit_History'].mode()[0], inplace=True)
# Here were are using LabelEncoder to transform non-numerical labels (as long as they are hashable and comparable) to numerical labels.
from sklearn.preprocessing import LabelEncoder
var_mod = ['Dependents','Education','Self_Employed','Property_Area','Loan_Status']
le = LabelEncoder()
for i in var_mod:
    df[i] = le.fit_transform(df[i])
#fancy huh?
df.dtypes
# Let's check! All taken care of.
df.info()
# Now we can finish the model
# Import models from scikit learn module:
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
#Generic function for making a model and accessing performance:
def loan_model(model, data, predictors, outcome):
    #Fit the model:
    model.fit(data[predictors],data[outcome])
    #Make predictions on training set:
    predictions = model.predict(data[predictors])
    #Print accuracy
    accuracy = metrics.accuracy_score(predictions,data[outcome])
    print ("Accuracy : %s" % "{0:.3%}".format(accuracy))
    #Fit the model again so that it can be referred to outside the function:
    model.fit(data[predictors],data[outcome])
###Output
_____no_output_____
###Markdown
Building a logistic regression model
Remember that logistic regression is a model that returns a binary answer. In this case it will return whether or not a loan is approved based on the parameters provided. We want to create a model that generalizes well. If we take all the data and use that to train our model, we run into the risk of 'overfitting' the model. Let's start by making some simple hypotheses about when someone's chances of getting a loan will be higher:
1. We already know that having a credit history is huge.
2. Higher incomes, combining coapplicant and applicant incomes, will help.
3. We also saw that applicants with higher education get loans.
4. We also know properties in high-growth locations will make better loans.
###Code
# Let's first work with Credit_History - We start by assigning Loan_Status as the outcome variable
outcome_var = 'Loan_Status'
# Select the Model
model = LogisticRegression()
# Use credit history
predictor_var = ['Credit_History']
# call the model
loan_model(model, df, predictor_var, outcome_var)
###Output
_____no_output_____
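###Markdown
The accuracy above is measured on the same rows the model was fit on, which is exactly the overfitting trap mentioned earlier. A hedged sketch of a more honest estimate using scikit-learn's `cross_val_score` (5-fold cross-validation with its default accuracy scoring), reusing `predictor_var` and `outcome_var` from the cells above:
###Code
from sklearn.model_selection import cross_val_score

scores = cross_val_score(LogisticRegression(), df[predictor_var], df[outcome_var], cv=5)
print('Cross-validated accuracy: %.3f (+/- %.3f)' % (scores.mean(), scores.std()))
###Output
_____no_output_____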
###Markdown
BONUS: Decision Tree and Random Forest A decision tree is another predictive model. We can easily import the model from the sklearn library. In addition, we can also do the same thing for the Random Forest model, which is a classification model.
###Code
from sklearn.tree import DecisionTreeClassifier, export_graphviz
model = DecisionTreeClassifier()
predictor_var = ['Credit_History','Dependents','Self_Employed','Education']
loan_model(model, df,predictor_var,outcome_var)
###Output
_____no_output_____
###Markdown
Can you do any better? Play with the code and try to get a higher accuracy value. One possible solution:
###Code
# Let's try using the RandomForestClassifier model. Random Forest Classifier
# A random forest is a meta estimator that fits a number of decision tree classifiers
# on various sub-samples of the dataset and uses averaging to improve the predictive
# accuracy and control over-fitting.
model = RandomForestClassifier(n_estimators=10) # n_estimators == number of trees in the forest
predictor_var = ['Dependents', 'Education',
'Self_Employed', 'Credit_History', 'Property_Area',
'LoanAmount_log']
loan_model(model, df,predictor_var,outcome_var)
###Output
_____no_output_____
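###Markdown
One way to guide the "can you do any better?" question is to look at which predictors the fitted random forest actually relies on. A sketch using the `feature_importances_` attribute of the model trained above:
###Code
importances = pd.Series(model.feature_importances_, index=predictor_var).sort_values(ascending=False)
print(importances)  # higher values indicate predictors the forest leans on more heavily
###Output
_____no_output_____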
###Markdown
Loan Qualification Activity
Contoso Data helps customers qualify for home loans. They are a global data science company that works in all sectors of housing. Customers first apply for a home loan, after which Contoso Data validates the customer's eligibility for a loan. In this project, Contoso Data wants to automate the loan eligibility process (real-time) based on customer details provided while filling out an online application form. These details are listed below in the table with the variable names and descriptions. The variables are listed exactly how they are in the data.
> A data dictionary is a description of data in business terms, also including information about the data such as data types, details of structure, and security restrictions.
Data Dictionary - Variable Descriptions
|VARIABLE | DESCRIPTION|
|---|---|
|Loan_ID|Unique Loan ID|
|Gender|Male/Female|
|Married|Applicant married (Y/N)|
|Dependents|Number of dependents|
|Education| Applicant Education (Graduate/ Under Graduate)|
|Self_Employed|Self employed (Y/N)|
|ApplicantIncome|Applicant income|
|CoapplicantIncome|Coapplicant income|
|LoanAmount|Loan amount in thousands|
|Loan_Amount_Term|Term of loan in months|
|Credit_History|Credit History meets guidelines|
|Property_Area|Urban/ Semi Urban/ Rural|
|Loan_Status|Loan approved (Y/N)|
The Problem
We will assume the role of data scientist for Contoso Data and approach this problem exactly how we would do it in the real world. A data scientist with expertise in home mortgages would often be called on to create models that classify or determine specific outcomes based on some data they are given. These could be logistic regression models, linear regression, or a custom model that would take a team of experts to develop the inputs, weights, and outcomes. Just as if we actually worked in the Contoso Data Center, we'll create a common yet valuable tool that a loan officer with thousands of applications would use to quickly determine the best candidates for loans. The Loan Officer is our domain expert and would consult a data scientist, such as yourself, to come up with ways to make accurate pre-selections, and also save a great deal of time. Our task will be to create a machine learning model that will predict whether or not a loan will be approved.
Data Science Ethical Practices
Both the Certified Analytics Professional (CAP) and the United Nations Statistics Division have released official codes and declarations of ethics to address data science directives. The purpose of these guidelines is to clarify crucially important ethical requirements that set standards, help in deterring deceitful behavior, and keep individuals and organizations accountable for the ways they collect and use data-driven information. It's important that data scientists learn and find training to address ethical issues in data science and within their industries. We can start by taking some responsibility and setting some guidelines. Here are some common guidelines that we should use when working with data: 1) collect minimal data and aggregate what is there, in other words, only collect what's needed; 2) identify and scrub sensitive data; and 3) have a plan in case you make a mistake.
Removing the Gender and Married fields
Since we would not discriminate against people based on their gender, we will remove that field. Also, instead of basing our decision on whether or not someone is married, we'll remove it and use something like income, since it would be a more objective variable to base a loan decision on.
The Data Science Process
1. Understanding the domain
2. Making a plan
3. Exploring the data
4. Preparing the data
5. Training your model
6. Review your results
7. Deployment
During our workshop we've talked a lot about why understanding the domain you are working in is so critical to uncovering the most valuable insights in your data. In other words, you know the business **inside and out**. By knowing your business your experiments will be more efficient and tackle the known issues in your domain. Does this mean that we shouldn't *play* with other datasets? Of course not; in fact, using other data and exploring it can uncover insight and be a great tool for learning.
The Plan
This dataset provides you a taste of working on data sets from insurance companies: what challenges are faced there, what strategies are used, which variables influence the outcome, etc. The data has 614 rows and 13 columns. Our plan will be to approach this problem as a **logistic regression** study.
Importing Libraries and Data
To begin the loan qualification activity we should first load the dataset and the libraries that we will be using. We will import the following libraries to aid in our study and activity:
* `numpy`
* `matplotlib`
* `pandas`
For more information on using `numpy` and `pandas`, the pandas website has a tutorial that will cover their usage in [10 minutes.](https://pandas.pydata.org/pandas-docs/stable/getting_started/10min.html)
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
#Reading the dataset in a dataframe using pandas
df = pd.read_csv("Data/loan_prediction_training_data.csv")
###Output
_____no_output_____
###Markdown
Checkpoint > Remember that you can use a question mark after any method or function in the Jupyter notebook. To read more about how to use the `read_csv` function, use the code below:
###Code
pd.read_csv?
###Output
Signature: pd.read_csv(filepath_or_buffer, sep=',', delimiter=None, header='infer', names=None, index_col=None, usecols=None, ...)
Docstring: Read a comma-separated values (csv) file into DataFrame. Also supports optionally iterating or breaking of the file into chunks. Additional help can be found in the online docs for IO Tools <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>. (Full parameter documentation omitted.)
File: c:\users\sarah\appdata\local\programs\python\python38-32\lib\site-packages\pandas\io\parsers.py
Type: function
###Markdown
Exercise: Explore the Data **You can scroll to the right as well to see all the fields in the dataset.**
###Code
df.describe()
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 614 entries, 0 to 613
Data columns (total 13 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Loan_ID 614 non-null object
1 Gender 601 non-null object
2 Married 611 non-null object
3 Dependents 599 non-null object
4 Education 614 non-null object
5 Self_Employed 582 non-null object
6 ApplicantIncome 614 non-null int64
7 CoapplicantIncome 614 non-null float64
8 LoanAmount 592 non-null float64
9 Loan_Amount_Term 600 non-null float64
10 Credit_History 564 non-null float64
11 Property_Area 614 non-null object
12 Loan_Status 614 non-null object
dtypes: float64(4), int64(1), object(8)
memory usage: 43.2+ KB
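###Markdown
Since Credit_History is coded 1/0, its mean (ignoring missing values) is the share of applicants with a credit history; a quick sketch confirming the roughly 84% figure discussed next:
###Code
df['Credit_History'].mean()
###Output
_____no_output_____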
###Markdown
Checkpoint: describe() function The `describe` function provides count, mean, standard deviation, min, quartiles, and max in its output. If you need a refresher on some of these values, review them in this [article.](https://www.analyticsvidhya.com/blog/2014/07/statistics/) What are some inferences you can make about the data looking at the output from the describe function? You should be looking for things like missing values, inconsistencies, and other problems with your data. You might have to reference a domain expert (if you're not one) and find out more information about the data. From the `describe` function we can see that LoanAmount, Loan_Amount_Term and Credit_History all have missing values. Also, if we recall exploring the output of the `head` function, we saw that Credit_History only has 1 or 0 as values. Looking at the mean value, we see that only 84% of the applicants have credit histories. Do you notice anything else? Non-numerical values First, let's deal with the `Gender` and `Married` columns. We will drop them.
###Code
# drop() returns a new DataFrame, so assign the result to keep it;
# we leave the original df untouched because Gender and Married are used again later
df_no_gm = df.drop(columns=['Gender', 'Married'])
###Output
_____no_output_____
###Markdown
Okay, now that's taken care of--Well Done!
###Code
# Let's save the reduced data (without Gender and Married) to a new file
df_no_gm.to_csv('Data/loan_prediction_training_data_no_G_M.csv')
###Output
_____no_output_____
###Markdown
For dealing with non-numerical data (e.g. Property_Area, Education, Self_Employed) we can examine the frequency distribution and try to understand the data more. We can use the `.value_counts()` function.
###Code
df['Self_Employed'].value_counts()
df['Property_Area'].value_counts()
df['Education'].value_counts()
###Output
_____no_output_____
###Markdown
Distribution Analysis After a cursory exploration of the data, we should now look at the distribution of the numeric data, specifically ApplicantIncome and LoanAmount. Let's start by plotting a histogram of ApplicantIncome. We know there is a wide range of variation in the values here, so we'll use 50 bins to properly depict the distribution.
###Code
df['ApplicantIncome'].hist(bins=50)
###Output
_____no_output_____
###Markdown
Now we can use boxplots to better understand the distributions.
###Code
df.boxplot(column='ApplicantIncome')
###Output
_____no_output_____
###Markdown
The boxplot confirms that we have a lot of outliers in our data. We already know that there is a huge difference between how much money different individuals make, and much of it is due to their education level. Let's break college graduates and non-college graduates out into groups
###Code
df.boxplot(column='ApplicantIncome', by = 'Education')
###Output
_____no_output_____
###Markdown
Now let's do a similar review of the LoanAmount variable.
###Code
df['LoanAmount'].hist(bins=50)
df.boxplot(column='LoanAmount')
df.boxplot(column='LoanAmount', by = 'Education')
###Output
_____no_output_____
###Markdown
Extremely interesting and clear results. It's obvious that ApplicantIncome and LoanAmount are going to require some amount of data processing and munging to give us better results. One more plot we should look at before we go is whether or not being self-employed is a factor in a loan approval.
###Code
df.boxplot(column='LoanAmount', by = 'Self_Employed')
###Output
_____no_output_____
###Markdown
Exercise What are some other boxplots and histograms you would like to examine? Take some time now to do your own investigations on some variables. Categorical Data Analysis What's categorical data? In this case we are referring to non-numerical data, such as columns whose values are words. What can we do with this sort of data to gain a better understanding? Let's look at the chances a person has of getting a loan based solely on credit history. We will work with the Loan_Status data, which holds 'Y' and 'N' values that we can treat as 1 (approved) and 0 (not approved). We will use a pivot table in pandas to make this visual.
###Code
# First we will create a frequency table on the Credit_History column to see how it is distributed.
temp1 = df['Credit_History'].value_counts(ascending=True)
# Let's also create a pivot table that shows us the probability of loan approval when you factor in credit history.
# Loan_Status is coded 1 for approved and 0 for unapproved, this means the average of all
# the values is the probablity of getting a loan.
temp2 = df.pivot_table(values='Loan_Status',index=['Credit_History'],aggfunc=lambda x: x.map({'Y':1,'N':0}).mean())
print ('Frequency Table for Credit History:')
print (temp1)
print ('\nProbability of getting loan based on Credit_History:')
print (temp2)
###Output
Frequency Table for Credit History:
0.0 89
1.0 475
Name: Credit_History, dtype: int64
Probability of getting loan based on Credit_History:
Loan_Status
Credit_History
0.0 0.078652
1.0 0.795789
###Markdown
Exercise: Find the Average of the Loan_Status variable How can we find the average of the Loan_Status column? Hint: The data is categorical. One way to solve this is to find out how many 'Y' answers there are and then divide that by the total number of values in the column. Hint: Use the `value_counts` function and then just divide the values. One possible solution is sketched in the next cell.
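###Code
# One possible solution (a sketch): count the 'Y' answers and divide by the total
# number of responses to get the approval rate, i.e. the "average" of Loan_Status.
loan_status_counts = df['Loan_Status'].value_counts()
approval_rate = loan_status_counts['Y'] / loan_status_counts.sum()
print(approval_rate)
###Output
_____no_output_____
###Markdown
We can also look at plots of this data. Let's create more plots and gain more understanding about the data.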
###Code
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8,4))
ax1 = fig.add_subplot(121)
ax1.set_xlabel('Credit_History')
ax1.set_ylabel('Count of Applicants')
ax1.set_title("Applicants by Credit_History")
temp1.plot(kind='bar')
ax2 = fig.add_subplot(122)
ax2.set_xlabel('Credit_History')
ax2.set_ylabel('Probability of Approval')
ax2.set_title("Probability of Approval Based on Credit History")
temp2.plot(kind = 'bar')
###Output
_____no_output_____
###Markdown
Now try changing the Credit_History variable to something else like Married, Self_Employed, or Property_Area. Do any of the variables closely correlate? Stacked Charts We can also create a stacked chart that combines the plots above.
###Code
temp3 = pd.crosstab(df['Credit_History'], df['Loan_Status'])
temp3.plot(kind='bar', stacked=True, color=['red','blue'], grid=False)
pd.crosstab?
temp3 = pd.crosstab([df['Credit_History'], df['Gender']], df['Loan_Status'],)
temp3.plot(kind='bar', stacked=True, color=['red','blue'], )
temp3
###Output
_____no_output_____
###Markdown
**Congratulations!** Did you realize what we just did? We just created two basic **classification algorithms!** We created the first one based just on Credit_History, and then we created one based on Credit_History and Gender. This is the last step that we'll do in exploring our dataset. From the exploration we know that we need to deal with some messy data. Let's move on to some **Data Munging.** Data Munging and Preparing our Data In the previous part of our process we learned about some of the problems with our data, specifically that we have some missing values. If we are going to "fix" these problems we need to be extremely careful and thoughtful about what type of data we will use to fill in the empty values. We noticed that in the ApplicantIncome and LoanAmount columns there was also outlying data on the extreme ends of the ranges. Even though we understand why the values are the way they are, we should do something with the outlying values as well. Let's see what we can do with the `apply` function; first, let's examine what it does.
###Code
# The apply function applies a function across the axis of a dataframe.
df.apply?
df.apply(lambda x: sum(x.isnull()),axis=0)
###Output
_____no_output_____
###Markdown
The `lambda` function allows us to scan through the dataframe and find all the fields that contain a null value. Note: Even though we don't have a huge number of missing values, we should still do a little more work and estimate whether we can fill those values with data that makes sense. We should also check for things that we notice. For example, Loan_Amount_Term has 14 null values. If a loan is made with a loan period of 0, is it null? Let's explore a bit more. Filling null values in LoanAmount Figuring out what to fill null values with can be tricky, and you should always refer to a domain expert if possible. But in this case, we can probably use the average loan amount to fill the null values. We will do that first.
###Code
df['LoanAmount'].fillna(df['LoanAmount'].mean(), inplace=True)
# Let's check and see, we shouldn't see any null values in the LoanAmount column
df.apply(lambda x: sum(x.isnull()),axis=0)
# Now we can figure out what the fill value for loan amount is using value_counts
df['LoanAmount'].value_counts()
###Output
_____no_output_____
###Markdown
Filling values in the Self_Employed Column From our exploration step, we know that the Self_Employed column has some missing values (582 non-null entries out of 614), so we need a sensible way to fill them. Let's first look at LoanAmount broken out by Education and by Self_Employed:
###Code
print(df.boxplot(column='LoanAmount', by = 'Education'))
print(df.boxplot(column='LoanAmount', by = 'Self_Employed'))
###Output
AxesSubplot(0.1,0.15;0.8x0.75)
AxesSubplot(0.1,0.15;0.8x0.75)
###Markdown
We can use these boxplots to spot trends in our data, and we see some variation in the median loan amount between the groups, so these groups can be used to impute missing loan amounts. Before that, let's check whether Self_Employed and Education have any null or missing values that need to be handled.
###Code
df['Self_Employed'].value_counts()
# Now we can calculate the percentage of 'No' responses
print('Percentage of "No" values in the data is:', 500/582*100,'%')
###Output
Percentage of "No" values in the data is: 85.91065292096219 %
###Markdown
Since about 86% of the responses in this column are a No, it is safe for us to impute the missing values as "no" values. We can do that with this code:
###Code
df['Self_Employed'].fillna('No',inplace=True)
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 614 entries, 0 to 613
Data columns (total 13 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Loan_ID 614 non-null object
1 Gender 601 non-null object
2 Married 611 non-null object
3 Dependents 599 non-null object
4 Education 614 non-null object
5 Self_Employed 614 non-null object
6 ApplicantIncome 614 non-null int64
7 CoapplicantIncome 614 non-null float64
8 LoanAmount 614 non-null float64
9 Loan_Amount_Term 600 non-null float64
10 Credit_History 564 non-null float64
11 Property_Area 614 non-null object
12 Loan_Status 614 non-null object
dtypes: float64(4), int64(1), object(8)
memory usage: 43.2+ KB
###Markdown
The next step is to create a pivot table that gives us the median LoanAmount for every combination of the unique values of the Self_Employed and Education features. Next, we define a function which returns the value of the matching cell and apply it to fill the missing loan amounts:
###Code
table = df.pivot_table(values='LoanAmount', index='Self_Employed', columns='Education', aggfunc=np.median)
table
# Define function to return value of this pivot_table
def fage(x):
return table.loc[x['Self_Employed'],x['Education']]
# Replace missing values (note: if LoanAmount was already filled with the mean in an
# earlier cell, there are no NaNs left at this point and this call has no effect)
df['LoanAmount'].fillna(df.apply(fage, axis=1), inplace=True)
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 614 entries, 0 to 613
Data columns (total 13 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Loan_ID 614 non-null object
1 Gender 601 non-null object
2 Married 611 non-null object
3 Dependents 599 non-null object
4 Education 614 non-null object
5 Self_Employed 614 non-null object
6 ApplicantIncome 614 non-null int64
7 CoapplicantIncome 614 non-null float64
8 LoanAmount 614 non-null float64
9 Loan_Amount_Term 600 non-null float64
10 Credit_History 564 non-null float64
11 Property_Area 614 non-null object
12 Loan_Status 614 non-null object
dtypes: float64(4), int64(1), object(8)
memory usage: 43.2+ KB
###Markdown
Using a log transformation to nullify the effect of extreme valuesLet's look at LoanAmount first. We already know that people apply for loans in all ranges, including high value loans for specific properties. Instead of treating them as outliers, we can use a log transformation to reduce the effect they have on representing the data.
###Code
# Apply a log transformation and plot the histogram again
df['LoanAmount_log'] = np.log(df['LoanAmount'])
df['LoanAmount_log'].hist(bins=20)
###Output
_____no_output_____
###Markdown
Now this distribution looks much better: the log transformation compresses the long right tail, so the handful of very large loan amounts no longer dominates the scale and the distribution is far closer to symmetric. Because the skew is reduced, summary statistics and models will be less influenced by a few extreme values. We can put a number on this with a quick skewness check, sketched in the next cell.
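###Code
# A quick check (sketch): skewness measures how asymmetric a distribution is
# (0 means symmetric); the log-transformed column should be much less skewed.
print('Skew of LoanAmount:    ', df['LoanAmount'].skew())
print('Skew of LoanAmount_log:', df['LoanAmount_log'].skew())
###Output
_____no_output_____
###Markdown
One more thing to note when considering ApplicantIncome. Did you notice that there was also a CoapplicantIncome? It might be a good idea to combine these columns into a TotalIncome column and do a log transformation.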
###Code
df['TotalIncome'] = df['ApplicantIncome'] + df['CoapplicantIncome']
df['TotalIncome_log'] = np.log(df['TotalIncome'])
df['TotalIncome_log'].hist(bins=20)
###Output
_____no_output_____
###Markdown
The TotalIncome distribution is again much closer to symmetric after the log transformation, so a few extreme incomes no longer dominate the scale. You can decide whether or not you will continue the munging exercise with Gender, Married, Dependents, or the other variables. Building a predictive model in Python So far we've spent a lot of time prepping our data and getting it ready for our model. We'll be using a new library (for us) to code our model. Scikit-Learn (sklearn) is the most commonly used data science library in Python for this purpose. Scikit-Learn requires that all inputs be numeric, but first let's quickly fill in all the null values within our data. For the sake of time, we've prepared all the code for you here.
###Code
# Quick Fill
df['Gender'].fillna(df['Gender'].mode()[0], inplace=True)
df['Married'].fillna(df['Married'].mode()[0], inplace=True)
df['Dependents'].fillna(df['Dependents'].mode()[0], inplace=True)
df['Loan_Amount_Term'].fillna(df['Loan_Amount_Term'].mode()[0], inplace=True)
df['Credit_History'].fillna(df['Credit_History'].mode()[0], inplace=True)
###Output
_____no_output_____
###Markdown
Next, we use scikit-learn's `LabelEncoder` to turn the remaining categorical columns into numeric codes:
###Code
# Here we are using LabelEncoder to transform non-numerical labels (as long as they are hashable and comparable) to numerical labels.
from sklearn.preprocessing import LabelEncoder
var_mod = ['Gender','Married','Dependents','Education','Self_Employed','Property_Area','Loan_Status']
le = LabelEncoder()
# Replace every category in each of these columns with an integer code
for i in var_mod:
    df[i] = le.fit_transform(df[i])
#fancy huh?
df.dtypes
# Let's check! All taken care of.
df.info()
# Now we can finish the model
# Import models from scikit learn module:
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
#Generic function for making a model and assessing performance:
def loan_model(model, data, predictors, outcome):
#Fit the model:
model.fit(data[predictors],data[outcome])
#Make predictions on training set:
predictions = model.predict(data[predictors])
#Print accuracy
accuracy = metrics.accuracy_score(predictions,data[outcome])
print ("Accuracy : %s" % "{0:.3%}".format(accuracy))
    #Fit the model again so that it can be referred to outside the function:
model.fit(data[predictors],data[outcome])
###Output
_____no_output_____
###Markdown
Building a logistic regression model Remember that logistic regression is a model that returns a binary answer. In this case it will return whether or not a loan is approved based on the parameters provided. We want to create a model that generalizes well. If we take all the data and use that to train our model, we run the risk of 'overfitting' the model. Let's first start by making some simple hypotheses about what makes someone's chances of getting a loan higher.1. We know already having a credit history is huge.2. Higher incomes, combining coapplicant and applicant incomes, will help.3. We also saw that applicants with higher education get loans.4. We also know properties in high growth locations will make better loans.
###Code
# Let's first work with Credit_History - We start by assigning Loan_Status as the outcome variable
outcome_var = 'Loan_Status'
# Select the Model
model = LogisticRegression()
# Use credit history
predictor_var = ['Credit_History']
# call the model
loan_model(model, df, predictor_var, outcome_var)
###Output
Accuracy : 80.945%
###Markdown
BONUS: Decision Tree and Random Forest A decision tree is another predictive model. We can easily import the model from the sklearn library. In addition, we can do the same thing for the Random Forest model, a classifier built as an ensemble of decision trees.
###Code
from sklearn.tree import DecisionTreeClassifier, export_graphviz
model = DecisionTreeClassifier()
predictor_var = ['Credit_History','Gender','Married','Education']
loan_model(model, df,predictor_var,outcome_var)
###Output
Accuracy : 80.945%
###Markdown
Can you do any better? Play with the code and try to get a higher accuracy value. One possible solution
###Code
# Let's try using the RandomForestClassifier model. Random Forest Classifier
# A random forest is a meta estimator that fits a number of decision tree classifiers
# on various sub-samples of the dataset and uses averaging to improve the predictive
# accuracy and control over-fitting.
model = RandomForestClassifier(n_estimators=10) # n_estimators == number of trees in the forest
predictor_var = ['Gender', 'Married', 'Dependents', 'Education',
'Self_Employed', 'Credit_History', 'Property_Area',
'LoanAmount_log']
loan_model(model, df,predictor_var,outcome_var)
###Output
Accuracy : 96.743%
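###Markdown
A word of caution: the accuracy above is measured on the same data the model was trained on, and a Random Forest can memorize the training set, so this number is usually optimistic. As a quick sanity check (a sketch, assuming scikit-learn's `cross_val_score` is available), we can estimate how well the model generalizes with cross-validation:
###Code
# 5-fold cross-validation gives a less biased estimate of out-of-sample accuracy
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(model, df[predictor_var], df[outcome_var], cv=5, scoring='accuracy')
print("Cross-validated accuracy: %.3f" % cv_scores.mean())
###Output
_____no_output_____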
|
Day18/EDA/titanic_eda/Titanic_Dataset.ipynb
|
###Markdown
Filling the missing values
###Code
M1=train.fillna(100)
M1.isna().sum()
train.shape, test.shape
print(train.describe())
print('_____________________________________________________________________________________________________________________')
print(test.describe())
print(train.info())
print('_____________________________________________________________________________________________________________________')
print(test.info())
#dropping the columns those serve little to no purpose
train = train.drop(['PassengerId', 'Name', 'Ticket'], axis = 1)
test = test.drop(['Name', 'Ticket'], axis = 1)
#Let's check Embarked column now
train.Embarked.value_counts()
#Checking null values in the Embarked column in both dataframes
print(train[train['Embarked'].isnull()])
print(test[test['Embarked'].isnull()])
#filling the two values with S as it is the most frequent
train['Embarked'] = train['Embarked'].fillna('S')
#Let's check how Embarked affects the survival rate of the passengers
Emb_Sur = train[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean()
#plotting the Emb_Sur
sns.barplot(data = Emb_Sur, x= 'Embarked', y = 'Survived', order = ['S', 'C', 'Q'] )
#Let's plot an analysis of Embarked
sns.factorplot('Embarked', 'Survived', data= train,size = 4, aspect = 3)
fig, (axis1, axis2, axis3) = plt.subplots(1,3,figsize=(15,5))
sns.countplot(data = train, x='Embarked', ax = axis1)
sns.countplot(data = train, x= 'Survived', hue = 'Embarked',order=[1,0], ax = axis2)
sns.barplot(data = Emb_Sur, x= 'Embarked', y = 'Survived', order = ['S', 'C', 'Q'] )
###Output
_____no_output_____
###Markdown
Embarked has 3 values. We could create dummy variables for the three values, but only the survival rates for C and Q look good, and it seems Embarked won't help in making any better predictions, so we are going to drop it.
###Code
#Goodbye Embarked
train = train.drop(['Embarked'], axis =1)
test = test.drop(['Embarked'], axis = 1)
#Let's analyse Fare
#First we will check missing values
print(train[train['Fare'].isnull()])
print('_______________________________________________________________________________________________________________')
print(test[test['Fare'].isnull()])
#Only test has an missing value
#let's plot fare to find how we are going to fill that missing value
#can't use test dataset as it has a nan value
sns.distplot(train['Fare'])
#The distribution is not close enough to normal to justify the mean, hence we will use the median to fill up the missing value
test['Fare'] = test['Fare'].fillna(test['Fare'].median())
#let's check test fare's distribution now
sns.distplot(test['Fare'])
#Let's do some fare analysis
train['Fare'] = train['Fare'].astype('int')
test['Fare'] = test['Fare'].astype('int')
#Checking Fare for Survived and Not Survived
Survived_Fare = train['Fare'] [train['Survived'] == 1]
Not_Survived_Fare = train['Fare'] [train['Survived'] == 0]
#plotting the Fare
train['Fare'].plot(kind='hist', figsize=(15, 3), bins=100, xlim=(0,50))
#getting average and standard deviation for fare of survived and not survived
average_fare = pd.DataFrame([Not_Survived_Fare.mean(), Survived_Fare.mean()])
std_fare = pd.DataFrame([Not_Survived_Fare.std(), Survived_Fare.std()])
#plot
average_fare.index.names = std_fare.index.names = ['Survived']
average_fare.plot(yerr=std_fare, kind='bar', legend=False)
#Age Analysis
fig, (axis1, axis2) = plt.subplots(1,2,figsize=(15,4))
axis1.set_title('Original Age')
axis2.set_title('New Age')
#Average, standard deviation and NaN values in train dataset
average_age_train = train['Age'].mean()
std_age_train = train['Age'].std()
nan_counts_train = train['Age'].isnull().sum()
#Average, standard deviation and NaN values in test dataset
average_age_test = test['Age'].mean()
std_age_test = test['Age'].std()
nan_counts_test = test['Age'].isnull().sum()
#filling NaN values using random numbers between (mean - std) & (mean + std)
rand_1 = np.random.randint(average_age_train - std_age_train, average_age_train + std_age_train, size = nan_counts_train)
rand_2 = np.random.randint(average_age_test - std_age_test, average_age_test + std_age_test, size = nan_counts_test)
#Drop all null values and convert it to int to plot
train['Age'].dropna().astype(int).hist(bins=70, ax = axis1)
#filling NaN values in Age columns with random values
train.loc[np.isnan(train['Age']), 'Age'] = rand_1
test.loc[np.isnan(test['Age']), 'Age'] = rand_2
#convert float to int
train['Age'] = train['Age'].astype(int)
test['Age'] = test['Age'].astype(int)
train['Age'].hist(bins = 70, ax=axis2)
#survived/not survived peaks by their age
facet = sns.FacetGrid(train, hue = 'Survived', aspect=4)
facet.map(sns.kdeplot, 'Age', shade = True)
facet.set(xlim=(0, train['Age'].max()))
facet.add_legend()
#average survived passenger by age
fig, axis1 = plt.subplots(1,1, figsize=(18,4))
average_age = train[["Age", "Survived"]].groupby(["Age"], as_index = False).mean()
sns.barplot(x='Age', y='Survived', data = average_age)
#Cabin Analysis
#Too many NaN values, hence not very useful any prediction
train.drop('Cabin', axis=1, inplace = True)
test.drop('Cabin', axis=1, inplace = True)
#Family Analysis
#Using Parch and sibsp to create one feature to represent if the passenger has any family member aboard or not
#or did it affect the chances of survival
train['Family'] = train['Parch'] + train['SibSp']
train.loc[train['Family'] > 0, 'Family'] = 1
train.loc[train['Family'] == 0, 'Family'] = 0
# use test's own SibSp column here (the original code mistakenly used train's)
test['Family'] = test['Parch'] + test['SibSp']
test.loc[test['Family'] > 0, 'Family'] = 1
test.loc[test['Family'] == 0, 'Family'] = 0
#Drop Parch and SibSp
train = train.drop(["SibSp", "Parch"], axis=1)
test = test.drop(["SibSp", "Parch"], axis=1)
#Plot
fig, (axis1, axis2) = plt.subplots(1,2,sharex=True,figsize=(10, 5))
sns.countplot(x='Family', data=train, order=[1,0], ax=axis1)
#average survival rate for passengers with and without family members aboard
fam_perc = train[["Family", "Survived"]].groupby(["Family"], as_index=False).mean()
sns.barplot(x='Family', y='Survived', data = fam_perc, order=[1,0], ax=axis2)
axis1.set_xticklabels(["With Family", "Alone"], rotation=0)
train.head()
#Sex
#Classifying passengers as male, female and child
def getperson(passenger):
age,sex = passenger
return 'child' if age < 16 else sex
train['Person'] = train[['Age', 'Sex']].apply(getperson, axis=1)
test['Person'] = test[['Age', 'Sex']].apply(getperson, axis=1)
#Creating dummy variables for the Person column and dropping Male as it has the lowest average of survived passengers
person_dummies_train = pd.get_dummies(train["Person"])
person_dummies_train.columns = ["Child", "Female", "Male"]
person_dummies_test = pd.get_dummies(test["Person"])
person_dummies_test.columns = ["Child", "Female", "Male"]
train = train.join(person_dummies_train)
test = test.join(person_dummies_test)
train.head()
#average of survival for each person
person_perc = train[["Person", "Survived"]].groupby(["Person"], as_index=False).mean()
person_perc
fig, (axis1, axis2) = plt.subplots(1,2,figsize=(10,5))
sns.countplot(x="Person", data=train, ax=axis1)
sns.barplot(x="Person", y="Survived", data= person_perc, ax=axis2, order=["male", "female", "child"])
test.drop(["Sex","Person","male"], axis=1, inplace=True)
train.drop(["Sex","Person", "male"], axis=1, inplace=True)
test.head()
#pclass analysis
#Let's plot it first
sns.factorplot('Pclass', 'Survived', order = [1,2,3], data=train, size=5)
#Creating dummy variables for class and dropping 3rd class as it has the lowest survival average
pclass_dummies_train = pd.get_dummies(train['Pclass'])
pclass_dummies_train.columns = ["Class_1", "Class_2", "Class_3"]
pclass_dummies_train.drop(['Class_3'], axis=1, inplace=True)
pclass_dummies_test = pd.get_dummies(test['Pclass'])
pclass_dummies_test.columns = ["Class_1", "Class_2", "Class_3"]
pclass_dummies_test.drop(['Class_3'], axis=1, inplace=True)
#dropping Pclass
train.drop(['Pclass'], axis=1, inplace=True)
test.drop(['Pclass'], axis=1, inplace=True)
#appending dummy variables to main dataset
train = train.join(pclass_dummies_train)
test = test.join(pclass_dummies_test)
train.head()
test.head()
###Output
_____no_output_____
###Markdown
------------------------X---------------------------------X------------------------------X-------------------------------------- Let's start predicting Logistic Regression
###Code
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
X_train = train.drop(["Survived"], axis=1)
y_train = train["Survived"]
X_test = test.drop("PassengerId", axis=1).copy()
X_train.head()
y_train.head()
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
logreg.score(X_train, y_train)
###Output
_____no_output_____
###Markdown
Support Vector Machines
###Code
svc = SVC()
svc.fit(X_train, y_train)
y_pred = svc.predict(X_test)
svc.score(X_train, y_train)
###Output
_____no_output_____
###Markdown
Random Forests
###Code
random_forests = RandomForestClassifier(n_estimators = 100)
random_forests.fit(X_train, y_train)
y_pred = random_forests.predict(X_test)
random_forests.score(X_train, y_train)
###Output
_____no_output_____
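###Markdown
K-Nearest Neighbors `KNeighborsClassifier` (and `GaussianNB`) were imported earlier but never used. As a sketch, they follow exactly the same fit / predict / score pattern as the models above; the choice of `n_neighbors=5` here is just an illustrative default.
###Code
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
knn.score(X_train, y_train)
###Output
_____no_output_____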
|
Fraud/Feature_Selection.ipynb
|
###Markdown
Drop first week of data
###Code
data[data['date']=='2016-01-08']
vdata=vars_keep[vars_keep['record']>=19277]
vdata.head()
Y=vdata['fraud_label']
vdata=vdata.drop(columns=['record','fraud_label'])
vdata.shape
###Output
_____no_output_____
###Markdown
Feature Selection
###Code
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
%%time
model= LogisticRegression()
rfecv = RFECV(estimator=model, step=1, cv=3, verbose=2, n_jobs=-1, scoring='roc_auc')
rfecv.fit(vdata,Y)
print('Optimal number of features:', rfecv.n_features_)
var_selected=pd.DataFrame(sorted(zip(map(lambda x: round(x), rfecv.ranking_), vdata.columns)),
columns=['ranking','variable'])
pd.options.display.max_rows = 150
print(var_selected)
rfecv.ranking_
rfecv.grid_scores_
plt.figure()
plt.xlabel("Numbers of features selected")
plt.ylabel("Cross Validation Score (nb of correct classification)")
plt.plot(range(1,len(rfecv.grid_scores_) + 1),rfecv.grid_scores_)
###Output
_____no_output_____
###Markdown
Choose to keep the top 20 variables from the KS/FDR ranking
###Code
mydata=data[vars_keep['record']>=19277]
mydata.head()
mydata=mydata[['fraud_label','record','Days_fulladdress','fulladdress_30_count','fulladdress_14_count',
'fulladdress_7_count','fulladdress_3_count','fulladdress_1_count','Days_ssn',
'ssn_30_count','Days_firstname_ssn','lastnamessn_30_count','firstnamessn_30_count',
'Days_lastname_ssn','Days_fulladdress_homephone','fulladdresshomephone_30_count',
'Days_namedob','namedob_30_count','Days_ssn_namedob','ssnnamedob_30_count',
'ssn_14_count','fulladdresshomephone_14_count']]
mydata.head()
mydata.shape
###Output
_____no_output_____
###Markdown
Z scale 20 variables we selected
###Code
cols = list(mydata.columns)
cols.remove('fraud_label')
cols.remove('record')
cols
zsvar=mydata.copy()
from scipy.stats import zscore
## z scale 20 variables we selected
for col in cols:
zsvar[col]=zscore(zsvar[col],axis=None)
zsvar.head()
zsvar.describe()
zsvar.to_csv('vars_final_zscale.csv',index=False)
###Output
_____no_output_____
###Markdown
Split into OOT (out-of-time) and train/test (trte) data, cutoff 2016-11-01
###Code
oot_df=zsvar[zsvar['record']>833508]
trte_df=zsvar[zsvar['record']<=833508]
###Output
_____no_output_____
|
keras_tuner/hyperparamter_search.ipynb
|
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Hyperparameter Search Overview Hyperparameter tuning, or hyperparameter search, is often described as a black box or even an art: it is the process of choosing some of the parameters of a deep learning model in order to obtain the best possible performance for that architecture. There are quite a few tools out there that do a decent job of tuning parameters, but none are as straightforward, robust and state-of-the-art as Keras-Tuner. This notebook will show how the parameters can be tuned manually and using Keras-Tuner. But first, here's a peek at a few of the tools: Hyperparameter tuning/search tools- `Hyperopt`: a popular Python library for optimizing over all sorts of complex search spaces (including real values such as the learning rate, or discrete values such as the number of layers).- `Hyperas, kopt or Talos`: optimizing hyperparameters for Keras models (the first two are based on Hyperopt).- `Scikit-Optimize (skopt)`: a general-purpose optimization library. The BayesSearchCV class performs Bayesian optimization using an interface similar to GridSearchCV.- `Spearmint`: a Bayesian optimization library.- `Sklearn-Deap`: a hyperparameter optimization library based on evolutionary algorithms, also with a GridSearchCV-like interface. [Link](https://github.com/rsteca/sklearn-deap)- `keras-tuner`: Bayesian as well as RandomSearch based tuning library that is known as "Hypertuning for humans" Setup
###Code
! pip install -q tensorflow-gpu==2.0.0-rc0
import tensorflow as tf
assert tf.__version__.startswith('2')
print(f'{tf.__version__}')
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.model_selection import RandomizedSearchCV
from tensorflow.keras.regularizers import l2
from tensorflow.keras.layers import Dense, Dropout, Conv2D, Flatten, Activation
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
###Output
_____no_output_____
###Markdown
Loading the datatset
###Code
mnist = tf.keras.datasets.mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = tf.cast(np.reshape(X_train, (X_train.shape[0], X_train.shape[1], X_train.shape[2], 1)), tf.float64)
X_test = tf.cast(np.reshape(X_test, (X_test.shape[0], X_test.shape[1], X_test.shape[2], 1)), tf.float64)
y_train = tf.keras.utils.to_categorical(y_train)
y_test = tf.keras.utils.to_categorical(y_test)
###Output
_____no_output_____
###Markdown
Manual Hyperparameter Tuning
###Code
model = tf.keras.models.Sequential()
model.add(Conv2D(32, (3,3), activation='relu', kernel_initializer='he_uniform', input_shape=(28,28,1)))
model.add(Conv2D(64, (3,3), activation='relu', kernel_initializer='he_uniform'))
model.add(Flatten())
model.add(Dense(20))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(0.001), metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, epochs=5, batch_size=128)
###Output
_____no_output_____
###Markdown
Although this works, tuning hyperparameters effectively by hand involves an element of luck and expertise. Keras-Tuner, discussed below, performs the tuning far more systematically. Keras-Tuner - Hyperparameter Tuning--- Features of Keras-Tuner- **Intuitive API**: As easy as 1, 2, 3- **State of the art hypertuner algorithms**- **Tunable architectures ready to go**- **Seamless experiments recording**: Automatic recording to analyse and reproduce your results. **NOTE**: Do not download the PyPI version of keras-tuner. Follow the steps in the cell below for downloading.
###Code
!git clone https://github.com/keras-team/keras-tuner.git
!cd keras-tuner
!pip install -q '/content/keras-tuner/'
import kerastuner
from kerastuner.tuners import RandomSearch
# Step 1: Wrap model in a function
def model_fn(hp):
# Step 2: Define the hyper-parameters
LR = hp.Choice('learning_rate', [0.001, 0.0005, 0.0001])
DROPOUT_RATE = hp.Float('dropout_rate', 0.0, 0.5, 5)
NUM_DIMS = hp.Int('num_dims', 8, 32, 8)
NUM_LAYERS = hp.Int('num_layers', 1, 3)
L2_NUM_FILTERS = hp.Int('l2_num_filters', 8, 64, 8)
L1_NUM_FILTERS = hp.Int('l1_num_filters', 8, 64, 8)
# Step 3: Replace static values with hyper-parameters
model = tf.keras.models.Sequential()
model.add(Conv2D(L1_NUM_FILTERS, (3,3), activation='relu', kernel_initializer='he_uniform', input_shape=(28,28,1)))
model.add(Conv2D(L2_NUM_FILTERS, (3,3), activation='relu', kernel_initializer='he_uniform'))
model.add(Flatten())
for _ in range(NUM_LAYERS):
model.add(Dense(NUM_DIMS))
model.add(Dropout(DROPOUT_RATE))
model.add(Dense(10, activation='softmax'))
  # compile with the sampled learning-rate hyperparameter (LR was unused before)
  model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(LR), metrics=['accuracy'])
return model
tuner = RandomSearch(
model_fn,
objective='val_accuracy',
max_trials=5,
executions_per_trial=3,
directory='temp_dir')
tuner.search_space_summary()
tuner.search(X_train, y_train, epochs=5, validation_data=(X_test, y_test))
models = tuner.get_best_models(num_models=3)
tuner.results_summary()
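# (Sketch) Inspect the winning configuration: get_best_hyperparameters() returns
# HyperParameters objects ranked by the search objective, and .values is a dict
# of the chosen settings. Shown here as an illustrative follow-up step.
best_hp = tuner.get_best_hyperparameters(num_trials=1)[0]
print(best_hp.values)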
###Output
_____no_output_____
|
Notebooks/ExtractingFilesFromFolder.ipynb
|
###Markdown
Extract all Files From all Subfolders
###Code
import os
import shutil
folder='/Users/mrinaliniluthra/2021-national-archives-data-annotation-project-basic-stats/data/Roos/'
subfolders = [f.path for f in os.scandir(folder) if f.is_dir()]
for sub in subfolders:
for f in os.listdir(sub):
src = os.path.join(sub, f)
dst = os.path.join(folder, f)
shutil.move(src, dst)
shutil.rmtree(sub)
###Output
_____no_output_____
###Markdown
Total Annotated Pages
###Code
# Walk the folder tree and delete any leftover .conf configuration files
for root, dirs, files in os.walk(folder):
    for filename in files:
        if filename.endswith('.conf'):
            os.remove(os.path.join(root, filename))
###Output
_____no_output_____
|
basics/.ipynb_checkpoints/basics-checkpoint.ipynb
|
###Markdown
You can also concatenate strings with the + operator
###Code
first_part = "this is the first part ... "
second_part = "and this is the second part of the text"
concat = first_part + second_part
print(concat)
###Output
this is the first part ... and this is the second part of the text
###Markdown
If you want a specific number of repetitions of a text, you can use the * operator followed by a number.
###Code
"example " * 3
###Output
_____no_output_____
###Markdown
List Another data type is the list (or array). You can initialise it like:
###Code
list_var = []
print (list_var)
###Output
[]
###Markdown
but you can also fill it directly with entries.
###Code
list_var = [1,2,3,4]
print (list_var)
###Output
[1, 2, 3, 4]
###Markdown
indexing and slicing Lists can be indexed or sliced. It is important to note that list indices start at 0. Indexing and slicing work for built-in sequence types like lists or strings.
###Code
list_var[0]
list_var[-1]
print (list_var[0:2])
print (list_var[:2])
list_var[-2:-1]
list_var[0:-1:2]
###Output
_____no_output_____
###Markdown
You can use indexing to change entries, but this works only for mutable object types like lists, not for immutable ones like strings
###Code
list_var[0] = [1,2,3]
print (list_var)
list_var[::-1]
###Output
_____no_output_____
###Markdown
append/extend You can also append or extend your list
###Code
list_var.append("wuff")
print (list_var)
list_extention = ["a","b","c"]
list_var.extend(list_extention)
print (list_var)
###Output
[[1, 2, 3], 2, 3, 4, 'wuff', 'a', 'b', 'c']
###Markdown
Occurrence testing
###Code
'wuff' in list_var
'wiff' in list_var
###Output
_____no_output_____
###Markdown
Dictionaries Dictionaries are another data structure. A dictionary contains key/value pairs. Instead of a position, you use the key to access the respective value.
###Code
dictionary = {'one_key': 1, 'second_key': 2}
print (dictionary)
dict(one_key=1, second_key=2)
dictionary['one_key']
dictionary['one_key'] = 21
print (dictionary)
dictionary['third_key'] = 3
print (dictionary)
new_dict = {'one_key':12, 'third_key':14, 'fourth_key': 4}
dictionary.update(new_dict)
print (dictionary)
###Output
{'one_key': 12, 'fourth_key': 4, 'third_key': 14, 'second_key': 2}
###Markdown
Occurrence testing
###Code
'one_key' in dictionary
###Output
_____no_output_____
###Markdown
Control structure if/else/elif Bool values: True, False - [] evaluates to False - [a, b] evaluates to True - 0 is False - all other numbers are True None None is used to represent the absence of a value Indentation
###Code
test_case = [1,2,3,4,5]
if test_case:
print ('test_case != []')
print ('always true')
if len(test_case) == 0:
print ("length is 0")
elif len(test_case) >0 and len(test_case) <= 4:
print ('length test_case is between 1 and 4')
else:
print ('length test_case is larger then 4')
###Output
length test_case is larger then 4
###Markdown
For loop and while loop
###Code
iter_list = [1,2,3,4]
for i in iter_list:
print ("i=%s" % i)
step = 0
while step != len(iter_list):
print ("step:%s i:%s" % (step, iter_list[step]) )
step += 1
step = 0
while step != len(iter_list):
print (step, iter_list[step])
step += 1
###Output
0 1
1 2
2 3
3 4
###Markdown
range and enumerate
###Code
for i in range(3):
print (i)
for i in range(len(iter_list)):
print ("i:%s value:%s" % (i, iter_list[i]))
for i, value in enumerate(iter_list):
print (i, value)
###Output
0 1
1 2
2 3
3 4
###Markdown
Dictionaries and For loops
###Code
for key in dictionary:
print (key, dictionary[key])
for key, value in dictionary.items():
print (key, value)
for i, key in enumerate(dictionary):
print (i, key)
###Output
0 one_key
1 fourth_key
2 third_key
3 second_key
###Markdown
break, continue, pass
###Code
for i in iter_list:
print (i)
if i == 3:
print ('i equal 3 - break')
break
for i in iter_list:
print (i)
if i == 3:
print ("i equal 3 - continue")
continue
print ("behind continue")
for i in iter_list:
print (i)
if i == 3:
pass
print ("behind pass")
###Output
1
2
3
behind pass
4
###Markdown
Functions
###Code
def fib(n): # write Fibonacci series up to n
"""Print a Fibonacci series up to n.""" # docstring
    a, b = 0, 1 # multiple assignment
while a < n:
        print (a), # note: in Python 3 the trailing comma does not suppress the newline
        a, b = b, a+b # another multiple assignment
fib(500)
###Output
0
1
1
2
3
5
8
13
21
34
55
89
144
233
377
###Markdown
The keyword def introduces a function definition, followed by the function name and a list of parameters. Documentation strings You should put a triple-quoted string into the first line after the function definition, containing a description of the function. This is called a docstring and can be used to automatically produce documentation, as the next cell shows.
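###Code
# The docstring is stored on the function object; help() and the __doc__
# attribute read it back (a small demonstration).
print(fib.__doc__)
help(fib)
###Output
_____no_output_____
###Markdown
return statement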
###Code
def fib(n): # write Fibonacci series up to n
"""Print a Fibonacci series up to n.""" # docstring
result = []
    a, b = 0, 1 # multiple assignment
while a < n:
result.append(a)
        a, b = b, a+b # another multiple assignment
return result
fib_result = fib(500)
print (fib_result)
###Output
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377]
###Markdown
The `return` statement returns a value from a function. `return` without an expression argument returns `None`, as does falling off the end of a function. Default Arguments and Keyword Arguments
###Code
def extended_fib(number, default=True, first_number=0, second_number=1):
"""Print a Fibonacci series up to n.""" # docstring
if default:
a, b = 0, 1
else:
        a, b = first_number, second_number # multiple assignment
while a < number:
print (a, end=' ') # space instead of newline
        a, b = b, a+b # another multiple assignment
extended_fib(500)
extended_fib(500, first_number=55, second_number=89)
extended_fib(500, default=False, first_number=55, second_number=89)
###Output
55 89 144 233 377
###Markdown
Import of Modules You can import other Python packages/modules quite easily with the import statement.
###Code
import math
###Output
_____no_output_____
###Markdown
Afterwards you can use specific functions of the modules.
###Code
math.cos(1)
math.exp(1)
###Output
_____no_output_____
###Markdown
Coding style***"The best programs are written so that computing machines can perform them quickly and so that human beings can understand them clearly." - Donald E. Knuth, Selected Papers on Computer Science *** PEP8 Style guide for Python code A style guide is about consistency. Consistency with this style guide is important. Consistency within a project is more important. Consistency within one module or function is most important. A few rules- never use tabs, always 4 spaces- try to limit lines to 79 characters- use whitespace to make your code more readable
###Code
spam(ham[1], {eggs: 2}) # YES!
spam( ham[ 1 ], { eggs: 2 } ) # NO!!
x, y = y, x # YES!
x , y = y , x # NO!!
counter = counter + 1 # YES!
counter=counter+1 # NO!!
result = add(x+1, 3) # YES!
result = add(x + 1, 3) # YES!
def complex(real, imag=0.0): # YES!
return magic(r=real, i=imag)
def complex(real, imag = 0.0): # NO!!
return magic(r = real, i = imag)
###Output
_____no_output_____
###Markdown
- Follow these naming conventions: - lower_case_under for variables, functions and methods - WordCap for classes - ALL_CAPS for constants A small illustration follows.
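###Code
# Naming-convention examples (illustrative only; the names below are made up)
MAX_RETRIES = 3               # constant: ALL_CAPS
def compute_total(items):     # function: lower_case_under
    return sum(items)
class ShoppingCart:           # class: WordCap
    pass
###Output
_____no_output_____
###Markdown
And of course, there is more: https://www.python.org/dev/peps/pep-0008/ The Zen of Python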
###Code
import this
###Output
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
###Markdown
Python Introduction Python is a clear and powerful object-oriented programming language, comparable to Perl, Ruby and Java.- [python](https://www.python.org/) - [Monty Python](https://en.wikipedia.org/wiki/Monty_Python)  Python uses an elegant syntax with indentation instead of brackets for structuring code. This makes Python easy to read and ideal for prototype development and scripting. Python's interactive mode makes it easy to test short snippets of code, but there are also a bunch of editors like Sublime Text available, as well as integrated development environments (IDEs) like PyCharm. Software We will use: - Python (https://www.python.org/) - IPython (http://ipython.org/) - IPython Notebook (http://ipython.org/notebook.html) You could use: - an editor of your choice like Sublime Text - an IDE (PyCharm) - Python debuggers (pdb) - Code checkers (pylint) We will discuss:- Variables- Objects- Strings- Lists- Dictionaries- Functions- Import of modules Interactive mode Start the Python command line interpreter by typing `python`, or the Python interactive shell by typing `ipython`, into your terminal. You can use it as a calculator:
###Code
2 + 3
###Output
_____no_output_____
###Markdown
or print this:
###Code
print ("Hello World")
###Output
Hello World
###Markdown
Variables You can assign numbers to variables like:
###Code
height = 1
width = 2
print (height)
print (width)
###Output
1
2
###Markdown
and you can reuse these variables
###Code
add_height_width = height + width
print (add_height_width)
###Output
3
###Markdown
you can change variables
###Code
width = 12
add_height_width = height + width
print (add_height_width)
###Output
13
###Markdown
Naming Rules- Variables can only contain letters, numbers, and underscores. - Variable names cannot start with a number but should start with a letter or an underscore.- Spaces are not allowed in variable names -> use underscores instead of spaces.- Avoid using Python keywords as variable names, like int, float, list, input- Variable names should be descriptive, without being too long. For example `n_dog_legs` is better than just `dog` or `number_of_legs_of_a_dog`.- Never use single letters like a, b, c as variable names Datatypes int/floats
###Code
height
height/2
###Output
_____no_output_____
###Markdown
type conversion
###Code
float(height)
2.
###Output
_____no_output_____
###Markdown
String Strings are sequences of characters and are enclosed in either single or double quotes.
###Code
text = "this is a string"
print (text)
###Output
this is a string
###Markdown
This way we are able to create strings which contain quotes.
###Code
text = 'this is a string "containing a quote"'
print (text)
###Output
_____no_output_____
###Markdown
We can also use multiple line strings in triple quotes ''' or """
###Code
text = """this is a string
over
more
than one line
"""
print (text)
###Output
this is a string
over
more
than one line
###Markdown
Blockchain functionality The block-linking process involves a few tasks, such as bundling all the information into a block structure, calculating the hash of the block, and appending it to the blockchain. Let's break down each of these functionalities into blockchain methods.
###Code
# -*- coding: utf-8 -*-
import json
from Crypto.Hash import SHA256
from datetime import datetime
class Block(object):
"""A class representing the block for the blockchain"""
def __init__(self, index, previous_hash, timestamp, data, hash):
self.index = index
self.previous_hash = previous_hash
self.timestamp = timestamp
self.data = data
self.hash = hash
###Output
_____no_output_____
###Markdown
The above code snippet defines a Python class called Block, which has all the basic attributes needed to represent a block. Usually a block contains both a header and a body, where the header holds metadata about the block; the above example, however, doesn't distinguish between header and body. A typical blockchain application such as Bitcoin will carry a huge amount of data, such as transactions, but here we will treat the data as a simple string.
###Code
class Blockchain(object):
"""A class representing list of blocks"""
def __init__(self):
self._chain = [self.get_genesis_block()]
self.timestamp = datetime.now().strftime("%s")
def get_genesis_block(self):
"""creates first block of the chain"""
return Block(0, "0", 1465154705, "my genesis block!!",
"816534932c2b7154836da6afc367695e6337db8a921823784c14378abed4f7d7")
def calculate_hash(self, index, previous_hash, timestamp, data):
"""calculates SHA256 hash value"""
hash_object = SHA256.new(data=(str(index) + previous_hash + str(timestamp) + data).encode())
return hash_object.hexdigest()
def get_latest_block(self):
"""gets the last block from the blockchain"""
try:
return self._chain[-1]
except IndexError as e:
return None
def create_block(self, block_data):
"""creates a new block with the given block data"""
previous_block = self.get_latest_block()
next_index = previous_block.index + 1
next_timestamp = self.timestamp
next_hash = self.calculate_hash(next_index, previous_block.hash, next_timestamp, block_data)
return Block(next_index, previous_block.hash, next_timestamp, block_data, next_hash)
@property
def chain(self):
"""created a dict containing list of block objects to view"""
return self.dict(self._chain)
def dict(self, chain):
"""converts list of block objects to dictionary"""
return json.loads(json.dumps(chain, default=lambda o: o.__dict__))
def reset(self):
"""resets the blockchain blocks except genesis block"""
self._chain = [self._chain[0]]
def add_block(self, data):
"""appends a new block to the blockchain"""
self._chain.append(self.create_block(data))
###Output
_____no_output_____
###Markdown
The above class is a collection of methods that create a valid blockchain by using a hash function. The constructor of Blockchain initializes the chain by appending the genesis block, which is the first block of the blockchain and has no reference to a previous block. get_genesis_block(self) A genesis block is a hardcoded block which is appended to the beginning of the blockchain. It is created with static contents only. The genesis block above has a hardcoded hash value which is created using SHA256 as follows: SHA256.new(data=(str(0) + "0" + str(1465154705) + "my genesis block!!").encode()).hexdigest() calculate_hash(self, index, previous_hash, timestamp, data): calculate_hash is a crucial method of the blockchain, as it creates the hash value which binds all the blocks together. The SHA256 hash value is created using the PyCryptodome package as shown in chapter 2. This method concatenates the block index, the hash value of the previous block, the timestamp and the data to create a string to be hashed. The SHA256 hash function then generates a digest which becomes the hash value of that block. get_latest_block(self) This function returns the last block appended to the chain, which is needed while creating each new block to look up the hash value of the previous block. create_block(self, block_data): This function builds a block by constructing all the attributes required to create a Block object. It also calculates the hash value for the current block; a new Block object containing the complete block structure is finally created. chain(self), dict(self, chain), reset(self), add_block(self, data) All of the above functions are used to add, reset and read the blocks of the blockchain. The method add_block and the attribute chain are the only class members that need to be exposed to the user. Creating a blockchain Now that we have defined all the required functionalities of a simple blockchain linker, let's emulate one by creating a few blocks and adding them to the blockchain. The code snippet below creates a Blockchain object and adds three blocks on top of the existing genesis block. If this operation is performed again after resetting the blockchain, new_chain.chain produces the same list of blocks with the same block hashes as below. This is because all the attributes contributing to the hash value are identical during both executions, and a hash function always produces the same hash value when fed the same input. The timestamp is intentionally kept constant for all the blocks to showcase this property of the hash function.
###Code
new_chain = Blockchain()
new_chain.add_block(data="modified first block data")
new_chain.add_block(data="second block data")
new_chain.add_block(data="third block data")
print(json.dumps(new_chain.chain))
###Output
[{"index": 0, "previous_hash": "0", "data": "my genesis block!!", "hash": "816534932c2b7154836da6afc367695e6337db8a921823784c14378abed4f7d7", "timestamp": 1465154705}, {"index": 1, "previous_hash": "816534932c2b7154836da6afc367695e6337db8a921823784c14378abed4f7d7", "data": "modified first block data", "hash": "e4af23719273b397c766d1950de88c13d4e18c29e9f2e6305294064b53461eef", "timestamp": "1542418211"}, {"index": 2, "previous_hash": "e4af23719273b397c766d1950de88c13d4e18c29e9f2e6305294064b53461eef", "data": "second block data", "hash": "a452f4397a51d56f1e1b709bdc3390c6d0d0e2c2d5545eec4de9fd1491e43749", "timestamp": "1542418211"}, {"index": 3, "previous_hash": "a452f4397a51d56f1e1b709bdc3390c6d0d0e2c2d5545eec4de9fd1491e43749", "data": "third block data", "hash": "1482a7da74367526ed48a6521c596052f5fff886d8180b03009141b5c9a59477", "timestamp": "1542418211"}]
###Markdown
An example implementation of proof-of-work. Example of brute forcing with a nonce The code snippet below is a simple example of generating hashes to solve the proof-of-work puzzle. The nonce is incremented and appended to the input data, the hash value is computed using the SHA256 algorithm, and this is repeated for all nonce values. The program generates the hashes below, one for each nonce-appended input.
###Code
from __future__ import print_function
from Crypto.Hash import SHA256
text = "I am Satoshi Nakamoto"
# iterate nonce from 0 to 19
for nonce in range(20):
# add the nonce to the end of the text
input_data = text + str(nonce)
# calculate the SHA-256 hash of the input (text+nonce)
hash_data = SHA256.new(input_data.encode()).hexdigest()
# show the input and hash result
print((input_data + '=>' + hash_data)[:64] + "...")
###Output
I am Satoshi Nakamoto0=>a80a81401765c8eddee25df36728d732acb6d135...
I am Satoshi Nakamoto1=>f7bc9a6304a4647bb41241a677b5345fe3cd30db...
I am Satoshi Nakamoto2=>ea758a8134b115298a1583ffb80ae62939a2d086...
I am Satoshi Nakamoto3=>bfa9779618ff072c903d773de30c99bd6e2fd70b...
I am Satoshi Nakamoto4=>bce8564de9a83c18c31944a66bde992ff1a77513...
I am Satoshi Nakamoto5=>eb362c3cf3479be0a97a20163589038e4dbead49...
I am Satoshi Nakamoto6=>4a2fd48e3be420d0d28e202360cfbaba410bedde...
I am Satoshi Nakamoto7=>790b5a1349a5f2b909bf74d0d166b17a333c7fd8...
I am Satoshi Nakamoto8=>702c45e5b15aa54b625d68dd947f1597b1fa571d...
I am Satoshi Nakamoto9=>7007cf7dd40f5e933cd89fff5b791ff0614d9c60...
I am Satoshi Nakamoto10=>c2f38c81992f4614206a21537bd634af7178964...
I am Satoshi Nakamoto11=>7045da6ed8a914690f087690e1e8d662cf9e56f...
I am Satoshi Nakamoto12=>60f01db30c1a0d4cbce2b4b22e88b9b93f58f10...
I am Satoshi Nakamoto13=>0ebc56d59a34f5082aaef3d66b37a661696c2b6...
I am Satoshi Nakamoto14=>27ead1ca85da66981fd9da01a8c6816f54cfa0d...
I am Satoshi Nakamoto15=>394809fb809c5f83ce97ab554a2812cd901d3b1...
I am Satoshi Nakamoto16=>8fa4992219df33f50834465d30474298a7d5ec7...
I am Satoshi Nakamoto17=>dca9b8b4f8d8e1521fa4eaa46f4f0cdf9ae0e69...
I am Satoshi Nakamoto18=>9989a401b2a3a318b01e9ca9a22b0f39d82e48b...
I am Satoshi Nakamoto19=>cda56022ecb5b67b2bc93a2d764e75fc6ec6e6e...
###Markdown
Example of finding a nonce to solve proof-of-work This example illustrates finding a nonce by brute force, using SHA256 to search for a hash value that satisfies the target hash. The target hash value is determined by setting the difficulty bits in the proof-of-work algorithm. We will use the same blockchain linker created in the earlier section and modify that example to include the proof-of-work algorithm while creating a new block. Let's modify a few functions of the blockchain example to include the consensus algorithm.
###Code
import json
from Crypto.Hash import SHA256
from datetime import datetime
max_nonce = 2 ** 32 # 4 billion
class Block(object):
"""A class representing the block for the blockchain"""
def __init__(self, index, previous_hash, timestamp, data,
difficulty_bits, nonce, hash):
self.index = index
self.previous_hash = previous_hash
self.timestamp = timestamp
self.data = data
self.difficulty_bits = difficulty_bits
self.nonce = nonce
self.hash = hash
class Blockchain(object):
"""A class representing list of blocks"""
def __init__(self):
self._chain = [self.get_genesis_block()]
self.timestamp = datetime.now().strftime("%s")
self.difficulty_bits = 0
@property
def chain(self):
"""created a dict containing list of block objects to view"""
return self.dict(self._chain)
def dict(self, chain):
"""converts list of block objects to dictionary"""
return json.loads(json.dumps(chain, default=lambda o: o.__dict__))
def reset(self):
"""resets the blockchain blocks except genesis block"""
self._chain = [self._chain[0]]
def get_genesis_block(self):
"""creates first block of the chain"""
# SHA256.new(data=(str(0) + "0"+ str(1465154705) +"my genesis block!!"+"0").encode()).hexdigest()
return Block(0, "0", 1465154705, "my genesis block!!", 0, 0,
"f6b3fd6d417048423692c275deeaa010d4174bd680635d3e3cb0050aa46401cb")
def add_block(self, data):
"""appends a new block to the blockchain"""
self._chain.append(self.create_block(data))
def create_block(self, block_data):
"""creates a new block with the given block data"""
previous_block = self.get_latest_block()
next_index = previous_block.index + 1
next_timestamp = self.timestamp
next_hash, next_nonce = self.calculate_hash(next_index, previous_block.hash, next_timestamp, block_data)
return Block(next_index, previous_block.hash, next_timestamp, block_data, self.difficulty_bits, next_nonce, next_hash)
def get_latest_block(self):
"""gets the last block from the blockchain"""
try:
return self._chain[-1]
except IndexError as e:
return None
def calculate_hash(self, index, previous_hash, timestamp, data):
"""calculates SHA256 hash value by solving hash puzzle"""
header = str(index) + previous_hash + str(timestamp) + data + str(self.difficulty_bits)
hash_value, nonce = self.proof_of_work(header)
return hash_value, nonce
def proof_of_work(self, header):
        target = 2 ** (256 - self.difficulty_bits)
        for nonce in range(max_nonce):
hash_result = SHA256.new(data=(str(header) + str(nonce)).encode()).hexdigest()
if int(hash_result, 16) < target:
print("Success with nonce %d" % nonce)
print("Hash is %s" % hash_result)
return (hash_result, nonce)
print("Failed after %d (max_nonce) tries" % nonce)
return nonce
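# Added sketch (not from the original notebook): a standalone check of the target rule
# used in proof_of_work above. A header+nonce hash is accepted when its 256-bit integer
# value falls below 2 ** (256 - difficulty_bits), i.e. roughly difficulty_bits leading zero bits.
def meets_target(hash_hex, difficulty_bits):
    """returns True when the hex digest satisfies the proof-of-work target"""
    return int(hash_hex, 16) < 2 ** (256 - difficulty_bits)
# e.g. meets_target(SHA256.new(data=b"header0").hexdigest(), 8)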
if __name__ == '__main__':
new_chain = Blockchain()
for difficulty_bits in range(32):
difficulty = 2 ** difficulty_bits
new_chain.difficulty_bits = difficulty_bits
print("Difficulty: %ld (%d bits)" % (difficulty, difficulty_bits))
print("Starting search...")
start_time = datetime.now()
new_block_data = 'test block with transactions'
new_chain.add_block(data=new_block_data)
end_time = datetime.now()
elapsed_time = (end_time - start_time).total_seconds()
print("Elapsed Time: %.4f seconds" % elapsed_time)
if elapsed_time > 0:
hash_power = float(int(new_chain.chain[-1].get("nonce")) / elapsed_time)
print("Hashing Power: %ld hashes per second" % hash_power)
###Output
Difficulty: 1 (0 bits)
Starting search...
Success with nonce 0
Hash is d5a17fc5c25bd24f5ac9d023a667fcc3f7460e38060707d4c015c8bccc483f55
Elapsed Time: 0.0008 seconds
Hashing Power: 0 hashes per second
Difficulty: 2 (1 bits)
Starting search...
Success with nonce 2
Hash is 76b18b8e1b2fd63211ba45b2f00012aa1e3e95d8e0bff7a5c704b2ae1f621e7e
Elapsed Time: 0.0013 seconds
Hashing Power: 1502 hashes per second
Difficulty: 4 (2 bits)
Starting search...
Success with nonce 2
Hash is 10665e7604c4370e11597b3ffde55d3256297f2cf4cdddc3d591aa9ed98968fe
Elapsed Time: 0.0027 seconds
Hashing Power: 728 hashes per second
Difficulty: 8 (3 bits)
Starting search...
Success with nonce 16
Hash is 0ef6ae98fc2f80044d25f2a6ec2538ef88b463780481fc0e7742aa53072a4855
Elapsed Time: 0.0031 seconds
Hashing Power: 5143 hashes per second
Difficulty: 16 (4 bits)
Starting search...
Success with nonce 27
Hash is 076d59bc4c553a735f93d3ea0104b2d80c0447712e606338002ae39dc5fd41f8
Elapsed Time: 0.0041 seconds
Hashing Power: 6645 hashes per second
Difficulty: 32 (5 bits)
Starting search...
Success with nonce 46
Hash is 00f29be1690dd76e320576d990749a49ba92bb5d3cff01e862e2522fb013c6ef
Elapsed Time: 0.0038 seconds
Hashing Power: 12089 hashes per second
Difficulty: 64 (6 bits)
Starting search...
Success with nonce 70
Hash is 0381d61ebdba7b7df04d4f3f6582e9bd49d8ce0218f79d8fe76cf7e6574a0f40
Elapsed Time: 0.0051 seconds
Hashing Power: 13613 hashes per second
Difficulty: 128 (7 bits)
Starting search...
Success with nonce 340
Hash is 0032a9dc072fe41b087c76f3e8d746b1485f9856f7eb5f871fd7193777a2ce69
Elapsed Time: 0.0157 seconds
Hashing Power: 21678 hashes per second
Difficulty: 256 (8 bits)
Starting search...
Success with nonce 66
Hash is 00e578d447fa83352817bce274475875411f80bf38ca76b38873525b710e3107
Elapsed Time: 0.0037 seconds
Hashing Power: 17964 hashes per second
Difficulty: 512 (9 bits)
Starting search...
Success with nonce 18
Hash is 0077ee36ace319c2bda7c18e7b559f00e4a2e21dd9c48c1a0b767989c76fc0b0
Elapsed Time: 0.0014 seconds
Hashing Power: 13264 hashes per second
Difficulty: 1024 (10 bits)
Starting search...
Success with nonce 323
Hash is 0015537cd281ba92eec941bcbb49ced08a078ffb53ad624d6ae0f3bfb6e31d44
Elapsed Time: 0.0153 seconds
Hashing Power: 21158 hashes per second
Difficulty: 2048 (11 bits)
Starting search...
Success with nonce 387
Hash is 00183311cc64fdf1cbbdce15e75e7f49a402fc609cd09202f7718f897a2dd9f2
Elapsed Time: 0.0178 seconds
Hashing Power: 21767 hashes per second
Difficulty: 4096 (12 bits)
Starting search...
Success with nonce 1469
Hash is 000d124229cd427487a9999c3be89c0ccfef4a046239412ab038b7a2f0b6aa12
Elapsed Time: 0.0643 seconds
Hashing Power: 22841 hashes per second
Difficulty: 8192 (13 bits)
Starting search...
Success with nonce 22486
Hash is 00052958fe4f168d43f678302eb8d7115dc046d72812c81e14ba5077d3ce7f41
Elapsed Time: 1.1915 seconds
Hashing Power: 18872 hashes per second
Difficulty: 16384 (14 bits)
Starting search...
Success with nonce 6925
Hash is 0001303328947225bc81b29ebd0d2a100297bb41cde71b290219909f388dc7d8
Elapsed Time: 0.4226 seconds
Hashing Power: 16387 hashes per second
Difficulty: 32768 (15 bits)
Starting search...
Success with nonce 67818
Hash is 00007e2faaee4b8f92f30d4ab3a91e9d3211e082064e453c41b42a8f049a0f14
Elapsed Time: 3.1822 seconds
Hashing Power: 21311 hashes per second
Difficulty: 65536 (16 bits)
Starting search...
Success with nonce 89491
Hash is 00003523a86e315fe733f57752248553b728a1f251b2321868bec481d5ea90d9
Elapsed Time: 4.1132 seconds
Hashing Power: 21756 hashes per second
Difficulty: 131072 (17 bits)
Starting search...
Success with nonce 37357
Hash is 000046ef6c160b58415eb69e0d9efdc7c4855258db39ec9c1924e999d92a6c6f
Elapsed Time: 1.7002 seconds
Hashing Power: 21972 hashes per second
Difficulty: 262144 (18 bits)
Starting search...
Success with nonce 167274
Hash is 00000881447594aa27bf14cb0f954ca9461e9937a33af89a71e1f27e78e2146d
Elapsed Time: 7.9563 seconds
Hashing Power: 21024 hashes per second
Difficulty: 524288 (19 bits)
Starting search...
|
Implement_SLAM/3. Landmark Detection and Tracking.ipynb
|
###Markdown
Project 3: Implement SLAM --- Project Overview In this project, you'll implement SLAM for a robot that moves and senses in a 2-dimensional grid world! SLAM gives us a way to both localize a robot and build up a map of its environment as a robot moves and senses in real-time. This is an active area of research in the fields of robotics and autonomous systems. Since this localization and map-building relies on the visual sensing of landmarks, this is a computer vision problem. Using what you've learned about robot motion, representations of uncertainty in motion and sensing, and localization techniques, you will be tasked with defining a function, `slam`, which takes in six parameters as input and returns the vector `mu`. > `mu` contains the (x,y) coordinate locations of the robot as it moves, and the positions of landmarks that it senses in the world. You can implement helper functions as you see fit, but your function must return `mu`. The vector, `mu`, should have (x, y) coordinates interlaced, for example, if there were 2 poses and 2 landmarks, `mu` will look like the following, where `P` is the robot position and `L` the landmark position:```mu = matrix([[Px0], [Py0], [Px1], [Py1], [Lx0], [Ly0], [Lx1], [Ly1]])```You can see that `mu` holds the poses first `(x0, y0), (x1, y1), ...,` then the landmark locations at the end of the matrix; we consider a `nx1` matrix to be a vector. Generating an environment In a real SLAM problem, you may be given a map that contains information about landmark locations, and in this example, we will make our own data using the `make_data` function, which generates a world grid with landmarks in it and then generates data by placing a robot in that world and moving and sensing over some number of time steps. The `make_data` function relies on a correct implementation of robot move/sense functions, which, at this point, should be complete and in the `robot_class.py` file. The data is collected as an instantiated robot moves and senses in a world. Your SLAM function will take in this data as input. So, let's first create this data and explore how it represents the movement and sensor measurements that our robot takes.---
###Code
import numpy as np
from helpers import make_data
# your implementation of slam should work with the following inputs
# feel free to change these input values and see how it responds!
# world parameters
num_landmarks = 5 # number of landmarks
N = 20 # time steps
world_size = 100.0 # size of world (square)
# robot parameters
measurement_range = 50.0 # range at which we can sense landmarks
motion_noise = 2.0 # noise in robot motion
measurement_noise = 2.0 # noise in the measurements
distance = 20.0 # distance by which robot (intends to) move each iteration
# make_data instantiates a robot, AND generates random landmarks for a given world size and number of landmarks
data = make_data(N, num_landmarks, world_size, measurement_range, motion_noise, measurement_noise, distance)
###Output
Landmarks: [[35, 7], [30, 55], [1, 4], [92, 92], [22, 76]]
Robot: [x=60.56704 y=66.66586]
###Markdown
A note on `make_data`The function above, `make_data`, takes in so many world and robot motion/sensor parameters because it is responsible for: 1. Instantiating a robot (using the robot class) 2. Creating a grid world with landmarks in it**This function also prints out the true location of landmarks and the *final* robot location, which you should refer back to when you test your implementation of SLAM.**The `data` this returns is an array that holds information about **robot sensor measurements** and **robot motion** `(dx, dy)` that is collected over a number of time steps, `N`. You will have to use *only* these readings about motion and measurements to track a robot over time and determine the location of the landmarks using SLAM. We only print out the true landmark locations for comparison, later. In `data` the measurement and motion data can be accessed from the first and second index in the columns of the data array. See the following code for an example, where `i` is the time step:```measurement = data[i][0]motion = data[i][1]```
###Code
# print out some stats about the data
time_step = 0
print('Example measurements: \n', data[time_step][0])
print('\n')
print('Example motion: \n', data[time_step][1])
###Output
Example measurements:
[(0, -13.510383494027904, -41.510383494027906), (1, -20.49218576514608, 4.507814234853921), (2, -47.981843669977884, -44.981843669977884), (3, 40.64266364058893, 40.64266364058893), (4, -28.945078153271574, 25.054921846728426)]
Example motion:
[11.105440160590037, 16.63337605056634]
###Markdown
TODO: Write a function that initializes omega and xi Complete the function `initialize_constraints` so that it returns `omega` and `xi` constraints for the starting position of the robot. Any values that we do not yet know should be initialized with the value `0`. You may assume that our robot starts out in exactly the middle of the world with 100% confidence (no motion or measurement noise at this point). The inputs `N` time steps, `num_landmarks`, and `world_size` should give you all the information you need to construct initial constraints of the correct size and starting values.*Depending on your approach you may choose to return one omega and one xi that hold all (x,y) positions *or* two of each (one for x values and one for y); choose whichever makes most sense to you!*
###Code
def initialize_constraints(N, num_landmarks, world_size):
''' This function takes in a number of time steps N, number of landmarks, and a world_size,
and returns initialized constraint matrices, omega and xi.'''
## Recommended: Define and store the size (rows/cols) of the constraint matrix in a variable
size = (N + num_landmarks) * 2
## TODO: Define the constraint matrix, Omega, with two initial "strength" values
## for the initial x, y location of our robot
world_center = world_size / 2
omega = np.zeros((size, size))
omega[0, 0] = 1.
omega[1, 1] = 1.
## TODO: Define the constraint *vector*, xi
## you can assume that the robot starts out in the middle of the world with 100% confidence
xi = np.zeros((size, 1))
xi[0] = world_center
xi[1] = world_center
return omega, xi
###Output
_____no_output_____
###Markdown
Test as you goIt's good practice to test out your code, as you go. Since `slam` relies on creating and updating constraint matrices, `omega` and `xi` to account for robot sensor measurements and motion, let's check that they initialize as expected for any given parameters.Below, you'll find some test code that allows you to visualize the results of your function `initialize_constraints`. We are using the [seaborn](https://seaborn.pydata.org/) library for visualization.**Please change the test values of N, landmarks, and world_size and see the results**. Be careful not to use these values as input into your final slam function.This code assumes that you have created one of each constraint: `omega` and `xi`, but you can change and add to this code, accordingly. The constraints should vary in size with the number of time steps and landmarks as these values affect the number of poses a robot will take `(Px0,Py0,...Pxn,Pyn)` and landmark locations `(Lx0,Ly0,...Lxn,Lyn)` whose relationships should be tracked in the constraint matrices. Recall that `omega` holds the weights of each variable and `xi` holds the value of the sum of these variables, as seen in Notebook 2. You'll need the `world_size` to determine the starting pose of the robot in the world and fill in the initial values for `xi`.
###Code
# import data viz resources
import matplotlib.pyplot as plt
from pandas import DataFrame
import seaborn as sns
%matplotlib inline
# define a small N and world_size (small for ease of visualization)
N_test = 5
num_landmarks_test = 2
small_world = 10
# initialize the constraints
initial_omega, initial_xi = initialize_constraints(N_test, num_landmarks_test, small_world)
# define figure size
plt.rcParams["figure.figsize"] = (10,7)
# display omega
sns.heatmap(DataFrame(initial_omega), cmap='Blues', annot=True, linewidths=.5)
# define figure size
plt.rcParams["figure.figsize"] = (1,7)
# display xi
sns.heatmap(DataFrame(initial_xi), cmap='Oranges', annot=True, linewidths=.5)
###Output
_____no_output_____
###Markdown
--- SLAM inputs In addition to `data`, your slam function will also take in:* N - The number of time steps that a robot will be moving and sensing* num_landmarks - The number of landmarks in the world* world_size - The size (w/h) of your world* motion_noise - The noise associated with motion; the update confidence for motion should be `1.0/motion_noise`* measurement_noise - The noise associated with measurement/sensing; the update weight for measurement should be `1.0/measurement_noise` A note on noiseRecall that `omega` holds the relative "strengths" or weights for each position variable, and you can update these weights by accessing the correct index in omega `omega[row][col]` and *adding/subtracting* `1.0/noise` where `noise` is measurement or motion noise. `Xi` holds actual position values, and so to update `xi` you'll do a similar addition process only using the actual value of a motion or measurement. So for a vector index `xi[row][0]` you will end up adding/subtracting one measurement or motion divided by their respective `noise`. TODO: Implement Graph SLAMFollow the TODO's below to help you complete this slam implementation (these TODO's are in the recommended order), then test out your implementation! Updating with motion and measurementsWith a 2D omega and xi structure as shown above (in earlier cells), you'll have to be mindful about how you update the values in these constraint matrices to account for motion and measurement constraints in the x and y directions. Recall that the solution to these matrices (which holds all values for robot poses `P` and landmark locations `L`) is the vector, `mu`, which can be computed at the end of the construction of omega and xi as the inverse of omega times xi: $\mu = \Omega^{-1}\xi$**You may also choose to return the values of `omega` and `xi` if you want to visualize their final state!**
###Code
## TODO: Complete the code to implement SLAM
def update_matrices(omega, xi, idx1, idx2, value, noise):
weight = 1 / noise
xi[idx1] += value * weight
xi[idx2] -= value * weight
omega[idx1, idx1] -= weight
omega[idx1, idx2] += weight
omega[idx2, idx1] += weight
omega[idx2, idx2] -= weight
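# Added sanity-check sketch (not part of the original solution): a prior fixing x0 = 50
# plus a single 1-D motion constraint x1 - x0 = 10 with noise 1.0 should recover the
# poses [50, 60] when solving omega^-1 * xi, which exercises update_matrices end to end.
_demo_omega = np.zeros((2, 2))
_demo_xi = np.zeros((2, 1))
_demo_omega[0, 0] = 1.   # prior strength on x0
_demo_xi[0] = 50.        # prior value of x0
update_matrices(_demo_omega, _demo_xi, 0, 1, 10., 1.0)
_demo_mu = np.linalg.inv(np.matrix(_demo_omega)) * _demo_xi  # -> [[50.], [60.]]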
## slam takes in 6 arguments and returns mu,
## mu is the entire path traversed by a robot (all x,y poses) *and* all landmarks locations
def slam(data, N, num_landmarks, world_size, motion_noise, measurement_noise):
    ## TODO: Use your initialization to create constraint matrices, omega and xi
omega, xi = initialize_constraints(N, num_landmarks, world_size)
## TODO: Iterate through each time step in the data
## get all the motion and measurement data as you iterate
for idx, ts in enumerate(data):
measurements, motion = ts
Px = idx * 2
Py = Px + 1
## TODO: update the constraint matrix/vector to account for all *measurements*
## this should be a series of additions that take into account the measurement noise
for (mi, mx, my) in measurements:
Lx = (N + mi) * 2
Ly = Lx + 1
update_matrices(omega, xi, Px, Lx, mx, measurement_noise)
update_matrices(omega, xi, Py, Ly, my, measurement_noise)
## TODO: update the constraint matrix/vector to account for all *motion* and motion noise
Px_1 = (idx + 1) * 2
Py_1 = Px_1 + 1
mx, my = motion
update_matrices(omega, xi, Px, Px_1, mx, motion_noise)
update_matrices(omega, xi, Py, Py_1, my, motion_noise)
## TODO: After iterating through all the data
## Compute the best estimate of poses and landmark positions
## using the formula, omega_inverse * Xi
mu = np.linalg.inv(np.matrix(omega)) * xi
return mu # return `mu`
###Output
_____no_output_____
###Markdown
Helper functionsTo check that your implementation of SLAM works for various inputs, we have provided two helper functions that will help display the estimated pose and landmark locations that your function has produced. First, given a result `mu` and number of time steps, `N`, we define a function that extracts the poses and landmark locations and returns those as their own, separate lists. Then, we define a function that nicely prints out these lists; both of these we will call, in the next step.
###Code
# a helper function that creates a list of poses and of landmarks for ease of printing
# this only works for the suggested constraint architecture of interlaced x,y poses
def get_poses_landmarks(mu, N):
# create a list of poses
poses = []
for i in range(N):
poses.append((mu[2*i].item(), mu[2*i+1].item()))
# create a list of landmarks
landmarks = []
for i in range(num_landmarks):
landmarks.append((mu[2*(N+i)].item(), mu[2*(N+i)+1].item()))
# return completed lists
return poses, landmarks
def print_all(poses, landmarks):
print('\n')
print('Estimated Poses:')
for i in range(len(poses)):
print('['+', '.join('%.3f'%p for p in poses[i])+']')
print('\n')
print('Estimated Landmarks:')
for i in range(len(landmarks)):
print('['+', '.join('%.3f'%l for l in landmarks[i])+']')
###Output
_____no_output_____
###Markdown
Run SLAMOnce you've completed your implementation of `slam`, see what `mu` it returns for different world sizes and different landmarks! What to ExpectThe `data` that is generated is random, but you did specify the number, `N`, of time steps that the robot was expected to move and the `num_landmarks` in the world (which your implementation of `slam` should see and estimate a position for). Your robot should also start with an estimated pose in the very center of your square world, whose size is defined by `world_size`.With these values in mind, you should expect to see a result that displays two lists:1. **Estimated poses**, a list of (x, y) pairs that is exactly `N` in length since this is how many motions your robot has taken. The very first pose should be the center of your world, i.e. `[50.000, 50.000]` for a world that is 100.0 in square size.2. **Estimated landmarks**, a list of landmark positions (x, y) that is exactly `num_landmarks` in length. Landmark LocationsIf you refer back to the printout of *exact* landmark locations when this data was created, you should see values that are very similar to those coordinates, but not quite (since `slam` must account for noise in motion and measurement).
###Code
# call your implementation of slam, passing in the necessary parameters
mu = slam(data, N, num_landmarks, world_size, motion_noise, measurement_noise)
# print out the resulting landmarks and poses
if(mu is not None):
# get the lists of poses and landmarks
# and print them out
poses, landmarks = get_poses_landmarks(mu, N)
print_all(poses, landmarks)
###Output
Estimated Poses:
[50.000, 50.000]
[62.604, 66.178]
[74.574, 82.461]
[85.787, 99.967]
[77.860, 82.259]
[69.707, 64.668]
[64.096, 46.635]
[60.258, 27.336]
[54.562, 6.404]
[47.731, 23.676]
[41.450, 41.829]
[34.330, 62.167]
[28.744, 82.948]
[11.513, 74.849]
[28.353, 79.628]
[46.171, 84.669]
[65.088, 91.133]
[84.102, 96.402]
[72.525, 81.746]
[60.102, 66.072]
Estimated Landmarks:
[34.707, 7.125]
[29.883, 55.300]
[0.548, 3.965]
[91.500, 91.823]
[21.577, 75.955]
###Markdown
Visualize the constructed worldFinally, using the `display_world` code from the `helpers.py` file (which was also used in the first notebook), we can actually visualize what you have coded with `slam`: the final position of the robot and the positon of landmarks, created from only motion and measurement data!**Note that these should be very similar to the printed *true* landmark locations and final pose from our call to `make_data` early in this notebook.**
###Code
# import the helper function
from helpers import display_world
# Display the final world!
# define figure size
plt.rcParams["figure.figsize"] = (20,20)
# check if poses has been created
if 'poses' in locals():
# print out the last pose
print('Last pose: ', poses[-1])
# display the last position of the robot *and* the landmark positions
display_world(int(world_size), poses[-1], landmarks)
# Here is the data and estimated outputs for test case 1
test_data1 = [[[[1, 19.457599255548065, 23.8387362100849], [2, -13.195807561967236, 11.708840328458608], [3, -30.0954905279171, 15.387879242505843]], [-12.2607279422326, -15.801093326936487]], [[[2, -0.4659930049620491, 28.088559771215664], [4, -17.866382374890936, -16.384904503932]], [-12.2607279422326, -15.801093326936487]], [[[4, -6.202512900833806, -1.823403210274639]], [-12.2607279422326, -15.801093326936487]], [[[4, 7.412136480918645, 15.388585962142429]], [14.008259661173426, 14.274756084260822]], [[[4, -7.526138813444998, -0.4563942429717849]], [14.008259661173426, 14.274756084260822]], [[[2, -6.299793150150058, 29.047830407717623], [4, -21.93551130411791, -13.21956810989039]], [14.008259661173426, 14.274756084260822]], [[[1, 15.796300959032276, 30.65769689694247], [2, -18.64370821983482, 17.380022987031367]], [14.008259661173426, 14.274756084260822]], [[[1, 0.40311325410337906, 14.169429532679855], [2, -35.069349468466235, 2.4945558982439957]], [14.008259661173426, 14.274756084260822]], [[[1, -16.71340983241936, -2.777000269543834]], [-11.006096015782283, 16.699276945166858]], [[[1, -3.611096830835776, -17.954019226763958]], [-19.693482634035977, 3.488085684573048]], [[[1, 18.398273354362416, -22.705102332550947]], [-19.693482634035977, 3.488085684573048]], [[[2, 2.789312482883833, -39.73720193121324]], [12.849049222879723, -15.326510824972983]], [[[1, 21.26897046581808, -10.121029799040915], [2, -11.917698965880655, -23.17711662602097], [3, -31.81167947898398, -16.7985673023331]], [12.849049222879723, -15.326510824972983]], [[[1, 10.48157743234859, 5.692957082575485], [2, -22.31488473554935, -5.389184118551409], [3, -40.81803984305378, -2.4703329790238118]], [12.849049222879723, -15.326510824972983]], [[[0, 10.591050242096598, -39.2051798967113], [1, -3.5675572049297553, 22.849456408289125], [2, -38.39251065320351, 7.288990306029511]], [12.849049222879723, -15.326510824972983]], [[[0, -3.6225556479370766, -25.58006865235512]], [-7.8874682868419965, -18.379005523261092]], [[[0, 1.9784503557879374, -6.5025974151499]], [-7.8874682868419965, -18.379005523261092]], [[[0, 10.050665232782423, 11.026385307998742]], [-17.82919359778298, 9.062000642947142]], [[[0, 26.526838150174818, -0.22563393232425621], [4, -33.70303936886652, 2.880339841013677]], [-17.82919359778298, 9.062000642947142]]]
## Test Case 1
##
# Estimated Pose(s):
# [50.000, 50.000]
# [37.858, 33.921]
# [25.905, 18.268]
# [13.524, 2.224]
# [27.912, 16.886]
# [42.250, 30.994]
# [55.992, 44.886]
# [70.749, 59.867]
# [85.371, 75.230]
# [73.831, 92.354]
# [53.406, 96.465]
# [34.370, 100.134]
# [48.346, 83.952]
# [60.494, 68.338]
# [73.648, 53.082]
# [86.733, 38.197]
# [79.983, 20.324]
# [72.515, 2.837]
# [54.993, 13.221]
# [37.164, 22.283]
# Estimated Landmarks:
# [82.679, 13.435]
# [70.417, 74.203]
# [36.688, 61.431]
# [18.705, 66.136]
# [20.437, 16.983]
### Uncomment the following three lines for test case 1 and compare the output to the values above ###
mu_1 = slam(test_data1, 20, 5, 100.0, 2.0, 2.0)
poses, landmarks = get_poses_landmarks(mu_1, 20)
print_all(poses, landmarks)
# Here is the data and estimated outputs for test case 2
test_data2 = [[[[0, 26.543274387283322, -6.262538160312672], [3, 9.937396825799755, -9.128540360867689]], [18.92765331253674, -6.460955043986683]], [[[0, 7.706544739722961, -3.758467215445748], [1, 17.03954411948937, 31.705489938553438], [3, -11.61731288777497, -6.64964096716416]], [18.92765331253674, -6.460955043986683]], [[[0, -12.35130507136378, 2.585119104239249], [1, -2.563534536165313, 38.22159657838369], [3, -26.961236804740935, -0.4802312626141525]], [-11.167066095509824, 16.592065417497455]], [[[0, 1.4138633151721272, -13.912454837810632], [1, 8.087721200818589, 20.51845934354381], [3, -17.091723454402302, -16.521500551709707], [4, -7.414211721400232, 38.09191602674439]], [-11.167066095509824, 16.592065417497455]], [[[0, 12.886743222179561, -28.703968411636318], [1, 21.660953298391387, 3.4912891084614914], [3, -6.401401414569506, -32.321583037341625], [4, 5.034079343639034, 23.102207946092893]], [-11.167066095509824, 16.592065417497455]], [[[1, 31.126317672358578, -10.036784369535214], [2, -38.70878528420893, 7.4987265861424595], [4, 17.977218575473767, 6.150889254289742]], [-6.595520680493778, -18.88118393939265]], [[[1, 41.82460922922086, 7.847527392202475], [3, 15.711709540417502, -30.34633659912818]], [-6.595520680493778, -18.88118393939265]], [[[0, 40.18454208294434, -6.710999804403755], [3, 23.019508919299156, -10.12110867290604]], [-6.595520680493778, -18.88118393939265]], [[[3, 27.18579315312821, 8.067219022708391]], [-6.595520680493778, -18.88118393939265]], [[], [11.492663265706092, 16.36822198838621]], [[[3, 24.57154567653098, 13.461499960708197]], [11.492663265706092, 16.36822198838621]], [[[0, 31.61945290413707, 0.4272295085799329], [3, 16.97392299158991, -5.274596836133088]], [11.492663265706092, 16.36822198838621]], [[[0, 22.407381798735177, -18.03500068379259], [1, 29.642444125196995, 17.3794951934614], [3, 4.7969752441371645, -21.07505361639969], [4, 14.726069092569372, 32.75999422300078]], [11.492663265706092, 16.36822198838621]], [[[0, 10.705527984670137, -34.589764174299596], [1, 18.58772336795603, -0.20109708164787765], [3, -4.839806195049413, -39.92208742305105], [4, 4.18824810165454, 14.146847823548889]], [11.492663265706092, 16.36822198838621]], [[[1, 5.878492140223764, -19.955352450942357], [4, -7.059505455306587, -0.9740849280550585]], [19.628527845173146, 3.83678180657467]], [[[1, -11.150789592446378, -22.736641053247872], [4, -28.832815721158255, -3.9462962046291388]], [-19.841703647091965, 2.5113335861604362]], [[[1, 8.64427397916182, -20.286336970889053], [4, -5.036917727942285, -6.311739993868336]], [-5.946642674882207, -19.09548221169787]], [[[0, 7.151866679283043, -39.56103232616369], [1, 16.01535401373368, -3.780995345194027], [4, -3.04801331832137, 13.697362774960865]], [-5.946642674882207, -19.09548221169787]], [[[0, 12.872879480504395, -19.707592098123207], [1, 22.236710716903136, 16.331770792606406], [3, -4.841206109583004, -21.24604435851242], [4, 4.27111163223552, 32.25309748614184]], [-5.946642674882207, -19.09548221169787]]]
## Test Case 2
##
# Estimated Pose(s):
# [50.000, 50.000]
# [69.035, 45.061]
# [87.655, 38.971]
# [76.084, 55.541]
# [64.283, 71.684]
# [52.396, 87.887]
# [44.674, 68.948]
# [37.532, 49.680]
# [31.392, 30.893]
# [24.796, 12.012]
# [33.641, 26.440]
# [43.858, 43.560]
# [54.735, 60.659]
# [65.884, 77.791]
# [77.413, 94.554]
# [96.740, 98.020]
# [76.149, 99.586]
# [70.211, 80.580]
# [64.130, 61.270]
# [58.183, 42.175]
# Estimated Landmarks:
# [76.777, 42.415]
# [85.109, 76.850]
# [13.687, 95.386]
# [59.488, 39.149]
# [69.283, 93.654]
### Uncomment the following three lines for test case 2 and compare to the values above ###
mu_2 = slam(test_data2, 20, 5, 100.0, 2.0, 2.0)
poses, landmarks = get_poses_landmarks(mu_2, 20)
print_all(poses, landmarks)
###Output
Estimated Poses:
[50.000, 50.000]
[69.181, 45.665]
[87.743, 39.703]
[76.270, 56.311]
[64.317, 72.176]
[52.257, 88.154]
[44.059, 69.401]
[37.002, 49.918]
[30.924, 30.955]
[23.508, 11.419]
[34.180, 27.133]
[44.155, 43.846]
[54.806, 60.920]
[65.698, 78.546]
[77.468, 95.626]
[96.802, 98.821]
[75.957, 99.971]
[70.200, 81.181]
[64.054, 61.723]
[58.107, 42.628]
Estimated Landmarks:
[76.779, 42.887]
[85.065, 77.438]
[13.548, 95.652]
[59.449, 39.595]
[69.263, 94.240]
|
notebook/local_vdisp_check.ipynb
|
###Markdown
I pick a random red galaxy from the GAMA-Legacy overlap and simulate different velocity dispersions in order to see if there's a chance we can fit for velocity dispersion for BGS galaxies
###Code
import os
import h5py
import numpy as np
import astropy.units as u
# -- feasibgs --
from feasibgs import util as UT
from feasibgs import skymodel as Sky
from feasibgs import catalogs as Cat
from feasibgs import forwardmodel as FM
# -- plotting --
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
cata = Cat.GamaLegacy()
gleg = cata.Read('g15', dr_gama=3, dr_legacy=7, silent=True)
# extract meta-data of galaxies
redshift = gleg['gama-spec']['z']
absmag_ugriz = cata.AbsMag(gleg, kcorr=0.1, H0=70, Om0=0.3, galext=False) # ABSMAG k-correct to z=0.1
r_mag_apflux = UT.flux2mag(gleg['legacy-photo']['apflux_r'][:,1]) # aperture flux
r_mag_gama = gleg['gama-photo']['r_petro'] # r-band magnitude from GAMA (SDSS) photometry
ha_gama = gleg['gama-spec']['ha_flux'] # halpha line flux
# match GAMA galaxies to templates
bgs3 = FM.BGStree()
match = bgs3._GamaLegacy(gleg)
hasmatch = (match != -999)
criterion = hasmatch
# lets pick a bright galaxy at z~0.2
np.random.seed(0)
igal = np.atleast_1d(np.random.choice(np.arange(len(redshift))[(r_mag_gama < 18.) & (redshift > 0.19) & (redshift < 0.2) & (absmag_ugriz[1,:] - absmag_ugriz[2,:] > 1.0)]))
print('redshift = %.2f' % redshift[igal])
fig = plt.figure()
sub = fig.add_subplot(111)
sub.scatter(absmag_ugriz[2,:], absmag_ugriz[1,:] - absmag_ugriz[2,:], c='k', s=1)
sub.scatter(absmag_ugriz[2,igal], absmag_ugriz[1,igal] - absmag_ugriz[2,igal], c='C1', s=5)
sub.set_xlim(-15, -25)
sub.set_ylim(-0.5, 1.5)
# generate noiseless spectra for these galaxies
s_bgs = FM.BGSsourceSpectra(wavemin=1500.0, wavemax=15000)
vdisps = [50, 150, 400]
waves, fluxes = [], []
for vdisp in vdisps:
emline_flux = s_bgs.EmissionLineFlux(gleg, index=igal, dr_gama=3, silent=True) # emission lines from GAMA
flux, wave, _, magnorm_flag = s_bgs.Spectra(
r_mag_apflux[igal],
redshift[igal],
vdisp,
seed=1,
templateid=match[igal],
emflux=emline_flux,
mag_em=r_mag_gama[igal],
silent=True)
waves.append(wave)
fluxes.append(flux)
###Output
INFO:io.py:1010:read_basis_templates: Reading /Users/ChangHoon/data/desi/spectro/templates/basis_templates/v2.5/bgs_templates_v2.1.fits
###Markdown
Absorption Lines: 3934.777 -1.0 0.0 K; 3969.588 -1.0 0.0 H; 4305.61 -1.0 0.0 G; 5176.7 -1.0 0.0 Mg; 5895.6 -1.0 0.0 Na; 8500.36 -1.0 0.0 CaII; 8544.44 -1.0 0.0 CaII; 8664.52 -1.0 0.0 CaII. SEDs with different velocity dispersions
###Code
fig = plt.figure(figsize=(15,5))
sub = fig.add_subplot(111)
for flux, vdisp in zip(fluxes, vdisps):
sub.plot(waves[0], flux[0], label=r'$\sigma_0 = %.2f$' % vdisp)
sub.plot([3934.777*(1.+redshift[igal]), 3934.777*(1.+redshift[igal])], [0., 10.], c='k', ls='--')
sub.plot([3969.588*(1.+redshift[igal]), 3969.588*(1.+redshift[igal])], [0., 10.], c='k', ls='--')
sub.set_xlim(3800*(1.+redshift[igal]),4100*(1.+redshift[igal]))
sub.set_ylim(0., 3)
sub.legend(loc='upper left', fontsize=15)
dir_dat = os.path.join(UT.dat_dir(), 'srp')
fexps = h5py.File(os.path.join(dir_dat, 'exposures_surveysim_fork_150sv0p5.sample.seed0.hdf5'), 'r')
texp = fexps['texp_total'][...]
airmass = fexps['airmass'][...]
moon_ill = fexps['moon_ill'][...]
moon_alt = fexps['moon_alt'][...]
moon_sep = fexps['moon_sep'][...]
sun_alt = fexps['sun_alt'][...]
sun_sep = fexps['sun_sep'][...]
seeing = fexps['seeing'][...]
transp = fexps['transp'][...]
n_sample = len(airmass)
# read in sky brightness
wave_sky = fexps['wave'][...]
u_sb = 1e-17 * u.erg / u.angstrom / u.arcsec**2 / u.cm**2 / u.second
sky_sbright = fexps['sky'][...]
iexp = 0
print('t_exp = %.f' % texp[iexp])
# iexp-th sky spectra
Isky = [wave_sky, sky_sbright[iexp]]
# simulate the exposures
fdesi = FM.fakeDESIspec()
bgses = []
for wave, flux in zip(waves, fluxes):
bgs = fdesi.simExposure(wave, flux, exptime=texp[iexp], airmass=airmass[iexp], Isky=Isky)
bgses.append(bgs)
###Output
t_exp = 540
###Markdown
BGS spectra with realistic exposure
###Code
fig = plt.figure(figsize=(15,5))
sub = fig.add_subplot(111)
for i, bgs, vdisp in zip(range(len(vdisps)), bgses, vdisps):
for band in ['b', 'r', 'z']:
lbl = None
if band == 'b': lbl = (r'$\sigma_0=%.2f$' % vdisp)
sub.plot(bgs.wave[band], bgs.flux[band][0], c='C%i' % i, label=lbl)
sub.legend(loc='upper right', fontsize=20)
sub.plot([3934.777*(1.+redshift[igal]), 3934.777*(1.+redshift[igal])], [0., 10.], c='k', ls='--')
sub.plot([3969.588*(1.+redshift[igal]), 3969.588*(1.+redshift[igal])], [0., 10.], c='k', ls='--')
sub.set_xlim(3800*(1.+redshift[igal]),4100*(1.+redshift[igal]))
sub.set_ylim(0., 5)
sub.legend(loc='upper left', fontsize=15)
bkgd = fig.add_subplot(111, frameon=False)
bkgd.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
bkgd.set_xlabel('rest-frame wavelength [Angstrom]', labelpad=10, fontsize=25)
bkgd.set_ylabel('flux [$10^{-17} erg/s/cm^2/A$]', labelpad=10, fontsize=25)
bgses = []
for wave, flux in zip(waves, fluxes):
bgs = fdesi.simExposure(wave, flux, exptime=2.*texp[iexp], airmass=airmass[iexp], Isky=Isky)
bgses.append(bgs)
###Output
/anaconda2/envs/gqp/lib/python3.7/site-packages/speclite/filters.py:1466: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
values_no_units = values_no_units[values_slice]
/anaconda2/envs/gqp/lib/python3.7/site-packages/speclite/filters.py:1466: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
values_no_units = values_no_units[values_slice]
/anaconda2/envs/gqp/lib/python3.7/site-packages/speclite/filters.py:1466: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
values_no_units = values_no_units[values_slice]
###Markdown
BGS spectra with optimistic exposure
###Code
fig = plt.figure(figsize=(15,5))
sub = fig.add_subplot(111)
for i, bgs, vdisp in zip(range(len(vdisps)), bgses, vdisps):
for band in ['b', 'r', 'z']:
lbl = None
if band == 'b': lbl = (r'$\sigma_0=%.2f$' % vdisp)
sub.plot(bgs.wave[band], bgs.flux[band][0], c='C%i' % i, label=lbl)
sub.legend(loc='upper right', fontsize=20)
sub.plot([3934.777*(1.+redshift[igal]), 3934.777*(1.+redshift[igal])], [0., 10.], c='k', ls='--')
sub.plot([3969.588*(1.+redshift[igal]), 3969.588*(1.+redshift[igal])], [0., 10.], c='k', ls='--')
sub.set_xlim(3800*(1.+redshift[igal]),4100*(1.+redshift[igal]))
sub.set_ylim(0., 5)
sub.legend(loc='upper left', fontsize=15)
bkgd = fig.add_subplot(111, frameon=False)
bkgd.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
bkgd.set_xlabel('rest-frame wavelength [Angstrom]', labelpad=10, fontsize=25)
bkgd.set_ylabel('flux [$10^{-17} erg/s/cm^2/A$]', labelpad=10, fontsize=25)
Isky = [wave_sky, np.zeros(len(wave_sky))]
bgses = []
for wave, flux in zip(waves, fluxes):
bgs = fdesi.simExposure(wave, flux, exptime=100.*texp[iexp], airmass=airmass[iexp], Isky=Isky)
bgses.append(bgs)
###Output
/anaconda2/envs/gqp/lib/python3.7/site-packages/speclite/filters.py:1466: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
values_no_units = values_no_units[values_slice]
/anaconda2/envs/gqp/lib/python3.7/site-packages/specsim-0.14.dev804-py3.7.egg/specsim/transform.py:595: UserWarning: Refraction model is inaccurate for altitudes below 5.0 deg.
.format(low_altitude_threshold))
/anaconda2/envs/gqp/lib/python3.7/site-packages/speclite/filters.py:1466: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
values_no_units = values_no_units[values_slice]
/anaconda2/envs/gqp/lib/python3.7/site-packages/specsim-0.14.dev804-py3.7.egg/specsim/transform.py:595: UserWarning: Refraction model is inaccurate for altitudes below 5.0 deg.
.format(low_altitude_threshold))
/anaconda2/envs/gqp/lib/python3.7/site-packages/speclite/filters.py:1466: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
values_no_units = values_no_units[values_slice]
/anaconda2/envs/gqp/lib/python3.7/site-packages/specsim-0.14.dev804-py3.7.egg/specsim/transform.py:595: UserWarning: Refraction model is inaccurate for altitudes below 5.0 deg.
.format(low_altitude_threshold))
###Markdown
spectra without sky noise to check that we're not limited by the spectral resolution of DESI
###Code
fig = plt.figure(figsize=(15,5))
sub = fig.add_subplot(111)
for i, bgs, vdisp in zip(range(len(vdisps)), bgses, vdisps):
for band in ['b', 'r', 'z']:
lbl = None
if band == 'b': lbl = (r'$\sigma_0=%.2f$' % vdisp)
sub.plot(bgs.wave[band], bgs.flux[band][0], c='C%i' % i, label=lbl)
sub.legend(loc='upper right', fontsize=20)
sub.plot([3934.777*(1.+redshift[igal]), 3934.777*(1.+redshift[igal])], [0., 10.], c='k', ls='--')
sub.plot([3969.588*(1.+redshift[igal]), 3969.588*(1.+redshift[igal])], [0., 10.], c='k', ls='--')
sub.set_xlim(3800*(1.+redshift[igal]),4100*(1.+redshift[igal]))
sub.set_ylim(0., 5)
sub.legend(loc='upper left', fontsize=15)
bkgd = fig.add_subplot(111, frameon=False)
bkgd.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
bkgd.set_xlabel('wavelength [Angstrom]', labelpad=10, fontsize=25)
bkgd.set_ylabel('flux [$10^{-17} erg/s/cm^2/A$]', labelpad=10, fontsize=25)
###Output
_____no_output_____
|
Elevation1 - Mashhad, Iran/dem-super-resolution.ipynb
|
###Markdown
MIT LicenseCopyright (c) 2019 Alexey Pechnikov, https://orcid.org/0000-0001-9626-8615 (ORCID)Build Super-resolution DEM 0.5m from DEM 1m enhanced by one orthophoto image 0.5mSource dataset: Elevation1 - Mashhad, IranElevation1 DSM + Pléiades Ortho 0.5m pan-sharpened (Orthoimage included)https://www.intelligence-airbusds.com/en/9317-sample-imagery-detail?product=18896&keyword=&type=366
###Code
from osgeo import gdal
import os
import numpy as np
import xarray as xr
import pandas as pd
from scipy.ndimage.filters import gaussian_filter
from scipy.spatial import cKDTree
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Load DEM
###Code
# DEM image
dem = xr.open_rasterio("Mashhad-DEM.sample.tif")[0]
# invert y axis
dem.values = dem.values[::-1]
dem.y.values = dem.y.values[::-1]
del dem.attrs['units']
###Output
_____no_output_____
###Markdown
Load orthophoto image
###Code
# orthophoto image 0.5m for the same area as DEM data above
img = xr.open_rasterio("7289-40126_Mashhad.sample.tif")[0]
# invert y axis
img.values = img.values[::-1]
img.y.values = img.y.values[::-1]
###Output
_____no_output_____
###Markdown
Regrid DEM 1m on the same grid as orthophoto image 0.5mThis is nearest neighbor interpolation without any data quality enhancement
###Code
# define source values
df_dem = dem.to_dataframe(name='dem').dropna().reset_index()
# target grid
df_grid = img.to_dataframe(name='_').reset_index()[['y','x']]
# nearest neighbor interpolation
tree = cKDTree(list(zip(df_dem.x, df_dem.y)))
distance, indices = tree.query(list(zip(df_grid.x, df_grid.y)), k = 1)
values = df_dem.dem.values[indices]
dem2x = xr.DataArray(values.reshape(img.shape),
coords=[img.y,img.x])
###Output
_____no_output_____
###Markdown
Enhance DEM by orthophoto image As explained in the article, we need to transfer spatial components 0-5m. With pixel size 0.5m the required filter radius is equal to 10 pixels because 10*0.5m = 5m
###Code
# low-pass filter
def raster_filter(src, gamma):
dst = src.copy()
dst.values = gaussian_filter(dst.values.astype(np.float32),gamma,mode='nearest')
return dst
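# Added self-consistency sketch (not in the original notebook): the low-/high-pass split
# used below is exactly complementary -- smoothed component plus residual reconstructs the
# input -- so adding the orthophoto's high-pass onto the DEM's low-pass only swaps detail
# finer than the chosen radius.
_demo = np.random.RandomState(0).rand(32, 32)
_demo_low = gaussian_filter(_demo, 10, mode='nearest')
assert np.allclose(_demo_low + (_demo - _demo_low), _demo)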
# define spectrum components to transfer
radius = 5/img.res[0]
img_lowpass = raster_filter(img, radius)
img_hipass = img - img_lowpass
dem2x_lowpass = raster_filter(dem2x, radius)
dem2x_hipass = dem2x - dem2x_lowpass
# calculate approximate scale factor for short wavelengths
scale = (img_hipass.max()-img_hipass.min())/(dem2x_hipass.max()-dem2x_hipass.min())
scale
# super-resolution dataset
dem2x_hires = dem2x_lowpass + img_hipass/scale
fig, ax = plt.subplots(2,3,figsize=(16,9))
((ax1,ax2,ax3),(ax4,ax5,ax6)) = ax
dem.plot(vmin=1270,vmax=1400,cmap='RdBu_r',ax=ax1)
ax1.set_title('DEM 1m', fontsize=16)
img.plot(vmin=0,vmax=160,cmap='RdBu_r',ax=ax2)
ax2.set_title('Orthophoto 0.5m [Red band]', fontsize=16)
dem.sel(x=slice(730000,730100),y=slice(4011600,4011700)).plot(vmin=1270,vmax=1400,cmap='RdBu_r',ax=ax4)
ax4.set_title('')
img.sel(x=slice(730000,730100),y=slice(4011600,4011700)).plot(vmin=0,vmax=160,cmap='RdBu_r',ax=ax5)
ax5.set_title('')
title = """Build Super-resolution DEM 0.5m from
Elevation 1m DSM (actually, ~10m) + Pléiades Ortho 0.5m for Mashhad, Iran:
"""
dem2x_hires.plot(vmin=1270,vmax=1400,cmap='RdBu_r',ax=ax3)
ax3.set_title('Super-resolution DEM 0.5m', fontsize=16)
dem2x_hires.sel(x=slice(730000,730100),y=slice(4011600,4011700)).plot(vmin=1270,vmax=1400,cmap='RdBu_r',ax=ax6)
ax6.set_title('')
for _ax in ax:
for __ax in _ax:
__ax.ticklabel_format(useOffset=False)
__ax.set_xlabel('')
__ax.set_ylabel('')
plt.suptitle(title, fontsize=18)
fig.tight_layout(rect=[0.03, 0.03, .97, 0.9])
plt.savefig('Super-resolution DEM.jpg', dpi=150)
plt.show()
# compare original and target DEM
float((dem2x_hires-dem2x).mean()),float((dem2x_hires-dem2x).std())
###Output
_____no_output_____
###Markdown
Save output
###Code
# north semisphere, usually increasing x,y order
def ds2gtif_north(data, filename):
from osgeo import osr, gdal, ogr
coordz = list(data.coords)[0]
coordl = list(data.coords)[1]
shape = data.shape
pixelz = round(data[coordz].values[1]-data[coordz].values[0],5)
pixell = round(data[coordl].values[1]-data[coordl].values[0],5)
types = ['uint8','uint16','int16','uint32','int32','float32','float64']
gtypes = [gdal.GDT_Byte, gdal.GDT_UInt16, gdal.GDT_Int16, gdal.GDT_UInt32, gdal.GDT_Int32,
gdal.GDT_Float32, gdal.GDT_Float64]
dtype = data.values.dtype
tidx = types.index(dtype)
gtype = gtypes[tidx]
if tidx in [0,1,2,3,4]:
nodata = np.iinfo(dtype).min
else:
nodata = 170141000918780003225695629360656023552.000
driver = gdal.GetDriverByName("GTiff")
dst = driver.Create(filename, shape[1], shape[0], 1, gtype, options = [ 'COMPRESS=LZW' ])
# top left x, w-e pixel resolution, rotation, top left y, rotation, n-s pixel resolution
if data[coordz].values[0] < data[coordz].values[-1]:
zlim = max(data[coordz].values)+pixelz/2
else:
zlim = min(data[coordz].values)+pixelz/2
dst.SetGeoTransform( [ min(data[coordl].values)-pixell/2, pixell, 0, zlim, 0, -pixelz ] )
if data.epsg is not None:
srs = osr.SpatialReference()
#srs.SetWellKnownGeogCS("WGS84")
srs.ImportFromEPSG(int(data.epsg))
dst.SetProjection( srs.ExportToWkt() )
arr = np.flipud(data.values.copy())
arr[np.isnan(arr)] = nodata
dst.GetRasterBand(1).SetNoDataValue(nodata)
dst.GetRasterBand(1).WriteArray(arr)
dem2x_hires.attrs['epsg']=dem.crs.split(':')[1]
ds2gtif_north(dem2x_hires,'dem2x_hires.tif')
###Output
_____no_output_____
###Markdown
MIT LicenseCopyright (c) 2019 Alexey Pechnikov, https://orcid.org/0000-0001-9626-8615 (ORCID)Build Super-resolution DEM 0.5m from DEM 1m enhanced by one orthophoto image 0.5mSource dataset: Elevation1 - Mashhad, IranElevation1 DSM + Pléiades Ortho 0.5m pan-sharpened (Orthoimage included)https://www.intelligence-airbusds.com/en/9317-sample-imagery-detail?product=18896&keyword=&type=366
###Code
from osgeo import gdal
import os
import numpy as np
import xarray as xr
import pandas as pd
from scipy.ndimage.filters import gaussian_filter
from scipy.spatial import cKDTree
import matplotlib.pyplot as plt
%matplotlib inline
# select work area
def crop_area(raster):
return raster.sel(x=slice(730000,730500),y=slice(4012000, 4011500))
def crop_sample(raster):
return raster.sel(x=slice(730000,730100),y=slice(4011700,4011600))
###Output
_____no_output_____
###Markdown
Load DEM
###Code
# DEM image
dem = crop_area(xr.open_rasterio("data/Mashhad-DEM.tif")[0])
del dem.attrs['units']
dem
###Output
_____no_output_____
###Markdown
Load orthophoto image
###Code
# orthophoto image 0.5m for the same area as DEM data above
img = crop_area(xr.open_rasterio("data/7289-40126_Mashhad.tif")[0])
###Output
_____no_output_____
###Markdown
Regrid DEM 1m on the same grid as orthophoto image 0.5mThis is nearest neighbor interpolation without any data quality enhancement
###Code
# define source values
df_dem = dem.to_dataframe(name='dem').dropna().reset_index()
# target grid
df_grid = img.to_dataframe(name='_').reset_index()[['y','x']]
# nearest neighbor interpolation
tree = cKDTree(list(zip(df_dem.x, df_dem.y)))
distance, indices = tree.query(list(zip(df_grid.x, df_grid.y)), k = 1)
values = df_dem.dem.values[indices]
dem2x = xr.DataArray(values.reshape(img.shape),
coords=[img.y,img.x])
###Output
_____no_output_____
###Markdown
Enhance DEM by orthophoto image As explained in the article, we need to transfer spatial components 0-5m. With pixel size 0.5m the required filter radius is equal to 10 pixels because 10*0.5m = 5m
###Code
# low-pass filter
def raster_filter(src, gamma):
dst = src.copy()
dst.values = gaussian_filter(dst.values.astype(np.float32),gamma,mode='nearest')
return dst
# define spectrum components to transfer
radius = 5/img.res[0]
img_lowpass = raster_filter(img, radius)
img_hipass = img - img_lowpass
dem2x_lowpass = raster_filter(dem2x, radius)
dem2x_hipass = dem2x - dem2x_lowpass
# calculate approximate scale factor for short wavelengths
scale = (img_hipass.max()-img_hipass.min())/(dem2x_hipass.max()-dem2x_hipass.min())
scale
# super-resolution dataset
dem2x_hires = dem2x_lowpass + img_hipass/scale
fig, ax = plt.subplots(2,3,figsize=(16,9))
((ax1,ax2,ax3),(ax4,ax5,ax6)) = ax
dem.plot(vmin=1270,vmax=1400,cmap='RdBu_r',ax=ax1)
ax1.set_title('DEM 1m', fontsize=18)
img.plot(vmin=0,vmax=160,cmap='RdBu_r',ax=ax2)
ax2.set_title('Ortho 0.5m [Red band]', fontsize=18)
crop_sample(dem).plot(vmin=1270,vmax=1400,cmap='RdBu_r',ax=ax4)
ax4.set_title('')
crop_sample(img).plot(vmin=0,vmax=160,cmap='RdBu_r',ax=ax5)
ax5.set_title('')
title = """Build Super-resolution DEM 0.5m from
Elevation 1m DSM (actually, ~10m) + Pléiades Ortho 0.5m for Mashhad, Iran:
"""
dem2x_hires.plot(vmin=1270,vmax=1400,cmap='RdBu_r',ax=ax3)
ax3.set_title('Super-resolution DEM 0.5m', fontsize=18)
crop_sample(dem2x_hires).plot(vmin=1270,vmax=1400,cmap='RdBu_r',ax=ax6)
ax6.set_title('')
for _ax in ax:
for __ax in _ax:
__ax.ticklabel_format(useOffset=False, style='plain')
__ax.set_xlabel('')
__ax.set_ylabel('')
plt.suptitle(title, fontsize=20)
fig.tight_layout(rect=[0.03, 0.03, 0.97, 1])
#plt.savefig('Super-resolution DEM.jpg', dpi=150)
plt.show()
# compare original and target DEM
float((dem2x_hires-dem2x).mean()),float((dem2x_hires-dem2x).std())
###Output
_____no_output_____
|
notebooks/PIPELINE/03.10-clinical_variables_final.ipynb
|
###Markdown
clinical variable retrieval codebase. Each query is referencing a sql query linked in my github for ALL patients in the database, then generating a dataframe, then paring that dataframe down to only the patients/icustay_id in our cohort. * 5-16-19 heavily streamlined, can now change global variables at top of page which will correspond to all variables. added all code into functions and made a composite function to run each variable. * each variable is also deleted to reduce rolling memory usage last run: * (1) 6/9/19: sensitivity analysis 1day timewindow* (2) 11/9/19: rerun 72day timewindow because 72hr has low patients on
###Code
#7-15-18
#the final version of this notebook seeks to accomplish a few tasks:
#organize and standardize all sql code so that i am running a .sql file rather than typing sql code into jupyter
#change all filepaths to match the github linked directory to ensure better version control
#extract all of the structured clinical variables we need for our project
##1/28/19: updated final version to be more generalizable and adjustable for the datewindow.
##could be cleaned up as some variables are time-windowed in line and some are time windowed at the end, but all is accounted for and cleaned up and optimized.
#import requests
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import psycopg2
import collections
import asyncio
import getpass
import re
from datetime import datetime as dt
import os,sys,re
#import urllib3
#import prettytable
from collections import Counter
import seaborn as sns
import random
from sklearn.externals.joblib import Memory
memory = Memory(cachedir='/tmp', verbose=0)
#@memory.cache above any def fxn.
%matplotlib inline
plt.style.use('ggplot')
from notebook.services.config import ConfigManager
cm = ConfigManager()
cm.update('livereveal', {
'width': 1024,
'height': 768,
'scroll': True,
})
%load_ext autotime
from pathlib import Path
os.chdir('/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling') #use to change working directory
wd= os.getcwd() #'/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling'
#patients of interest from rotation_cohort_generation
most_updated_patient_df= "04042019"
final_pt_df2 = pd.read_csv('/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_final_pt_df2.csv'%(most_updated_patient_df), index_col=0)
patients= list(final_pt_df2['subject_id'].unique())
hadm_id= list(final_pt_df2['hadm_id'].unique())
icustay_id= list(final_pt_df2['icustay_id'].unique())
icustay_id= [int(x) for x in icustay_id]
final_pt_df2['final_bin'].value_counts()
###Output
_____no_output_____
###Markdown
Access MIMIC database and convert it to dataframe in Pandas
###Code
#code used to ping the postgres mimic server.
# note, all server information is stored in a config.py file that is present in the .gitignore
import config
conn = psycopg2.connect(dbname=config.dbname, user=config.user, host=config.host, port=config.port,password=config.password)
cur=conn.cursor()
query_schema = 'SET search_path to ' + "mimiciii" + ';'
#input the sql_exe_show object and get dataframe for only patients in patient list out.
def sql_exe_show(sql_sentence):
cur.execute(sql_sentence)
rows = cur.fetchall()
col = []
for i in range(len(cur.description)):
col.append(cur.description[i][0])
table = pd.DataFrame(rows,columns=col)
return table
def sql_to_df_icu(sql_exe_show_obj):
sql_exe_show_df= pd.DataFrame(data=sql_exe_show_obj)
sql_exe_show_df=sql_exe_show_df[sql_exe_show_df['icustay_id'].isin(icustay_id)]
return sql_exe_show_df
def sql_to_df_patients(sql_exe_show_obj):
sql_exe_show_df= pd.DataFrame(data=sql_exe_show_obj)
sql_exe_show_df=sql_exe_show_df[sql_exe_show_df['subject_id'].isin(patients)]
return sql_exe_show_df
def sql_to_df_hadm(sql_exe_show_obj):
sql_exe_show_df= pd.DataFrame(data=sql_exe_show_obj)
sql_exe_show_df=sql_exe_show_df[sql_exe_show_df['hadm_id'].isin(hadm_id)]
return sql_exe_show_df
def clinvar_fxn(var_name, path, subject_id_override=False):
f= open(path, 'r')
var = f.read()
cur.execute('rollback')
cur.execute(var)
if subject_id_override==True:
df= sql_to_df_patients(sql_exe_show('select * from %s;' %(var_name)))
else:
try:
df= sql_to_df_icu(sql_exe_show('select * from %s;' %(var_name)))
except KeyError or NameError:
try:
df= sql_to_df_hadm(sql_exe_show('select * from %s;' %(var_name)))
except KeyError or NameError:
df= sql_to_df_patients(sql_exe_show('select * from %s;' %(var_name)))
print(df.shape)
return(df)
###Output
time: 66.2 ms
###Markdown
extracting clinical data for our patients IMPORTANT, USE THIS TO TUNE TIMEWINDOW OF EXTRACTION AND FOLDER TO SAVE IN* clinical data window= (t0+x)- t0+y * lower_window: x, set this to offset the t_0 for lower bound of the clinical time window * upper_window: y, set this to set the upper bound of the clinical time window.* folder: folder name for data to be stored in* date: date attached in file name of all files associated with this data* time_col: the time column used to restrict data to the clinical data window.* patient_df: the cohort dataframe used, default: final_pt_df2
###Code
#72 hour data
lower_window=0
upper_window=3
folder="72_hr_window"
date='11062019'
time_col="charttime"
time_var= 't_0'
patient_df= final_pt_df2
# #48 hr sensitivity
# lower_window=0
# upper_window=2
# time_var="t_0"
# folder="48_hr_window"
# date='16052019'
# time_col="charttime"
# time_var= 't_0'
# patient_df= final_pt_df2
# ##24 hr sensitivity
# # #importing in all clinical_variable files
# lower_window=0
# upper_window=1
# time_col="charttime"
# time_var="t_0"
# folder="24_hr_window"
# timewindowdays="24"
# date= '09062019'
# patient_df= final_pt_df2
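# Added illustration (not part of the original pipeline): with lower_window=0 and
# upper_window=3 the filters below keep an event when 0 days <= (charttime - t_0) <= 3 days,
# e.g. a measurement charted 36 hours after t_0 is inside the 72-hour window (dates are hypothetical).
_example_delta = pd.to_datetime('2101-01-02 12:00:00') - pd.to_datetime('2101-01-01 00:00:00')
_example_in_window = (_example_delta >= pd.Timedelta(days=0)) and (_example_delta <= pd.Timedelta(days=3))  # True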
###### PIPELINE BELOW #######
###Output
time: 423 µs
###Markdown
do you want to save the files generated?* if yes, save_boolean=True* else save_boolean=False
###Code
save_boolean=True
###Output
time: 483 µs
###Markdown
rearranging pipeline so it can * extract vitals first, then will limit patients to those with appropriate vitals going forward. * save and delete after extracting to reduce unnecessary memory load for all variables:* extract variable for only patients in minimum vitals list (will be new final_pt_df)* important functions
###Code
def time_window_filter(df, final_pt_df2,timecol,upper_window, lower_window, time_var='t_0'):
"""
will take in any df and filter to only values between lower_window and upper_window.
will add delta and t_0 to df as well.
"""
#global upper_window, lower_window
try:
df= pd.merge(df, final_pt_df2[['icustay_id',time_var]], left_on= 'icustay_id', right_on = 'icustay_id') #n=240317
df['delta']= pd.to_datetime(df[timecol]) - pd.to_datetime(df[time_var])
df_after_t0= df.loc[df.loc[:,'delta']>= pd.Timedelta(days=lower_window),:]
df_after_t0= df_after_t0.loc[df_after_t0.loc[:,'delta']<= pd.Timedelta(days=upper_window),:]
except KeyError or NameError:
df= pd.merge(df, final_pt_df2[['hadm_id',time_var]], left_on= 'hadm_id', right_on = 'hadm_id') #n=240317
df['delta']= pd.to_datetime(df[timecol]) - pd.to_datetime(df[time_var])
df_after_t0= df.loc[df.loc[:,'delta']>= pd.Timedelta(days=lower_window),:]
df_after_t0= df_after_t0.loc[df_after_t0.loc[:,'delta']<= pd.Timedelta(days=upper_window),:]
return(df_after_t0)
###Output
time: 19.4 ms
###Markdown
Vital Sign
###Code
##-- This query pivots the vital signs for the first 24 hours of a patient's stay
##-- Vital signs include heart rate, blood pressure, respiration rate, and temperature
vitals_all_nosummary_df= clinvar_fxn(
'vitals_all_nosummary',
'/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/removed_aggregations/vitals_all_nosummary.sql'
)
vitals_all_nosummary_df.head()
###Output
_____no_output_____
###Markdown
filtering to patients with bare minimum vital numbers 06.1-QC_and_missingness found that 3% or so of patients don't have baseline vitals counts. this is filtering the patients to only those who have this baseline value. as of 10/12/18, this code has not been implemented in here and is performed after importing. (updated below to be self contained on 1/28/19)
###Code
#using original criteria to find pts who have at least 1 spo2 reading within 3 days of t_0
#The idea is that this should be the bare minimum amount of data for a patient, and without it, it's likely the physicians did not suspect an infection in these patients.
##NOTE: this should not change when the clinical timewindow of analysis interest changes.
vitals_filter = time_window_filter(vitals_all_nosummary_df, final_pt_df2, "charttime",time_var='t_0', lower_window=0,upper_window=3 )
vitals_filter= vitals_filter.loc[
vitals_filter['vitalid'].notnull(),:]#.count() #6930 NULL values
icustay_id_vitals = (vitals_filter.loc[
vitals_filter.loc[:,'vitalid']=='SpO2','icustay_id'
].unique())
len(icustay_id) #8731
len(icustay_id_vitals) #8629
subject_id_vitals=list(final_pt_df2.loc[final_pt_df2.loc[:,'icustay_id'].isin(icustay_id_vitals),'subject_id'])
hadm_id_vitals= list(final_pt_df2.loc[final_pt_df2.loc[:,'icustay_id'].isin(icustay_id_vitals),'hadm_id'])
icustay_id_vitals= list(final_pt_df2.loc[final_pt_df2.loc[:,'icustay_id'].isin(icustay_id_vitals),'icustay_id'])
del(vitals_filter)
##saving the patient database and reassigning patient set to the patient set with minimum vitals
final_pt_df2_v=final_pt_df2.loc[final_pt_df2.loc[:,'icustay_id'].isin(icustay_id_vitals),:]
if save_boolean==True:
    pd.DataFrame(final_pt_df2_v).to_csv("/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_final_pt_df2_v.csv" %most_updated_patient_df) #final cohort database n=11493 subject_id's (7/6/18)
final_pt_df2=final_pt_df2_v.copy()
###Output
time: 19.2 s
###Markdown
filtering, subset, and composite functions to be used for the rest of the notebook
###Code
def df_subset(df):
"""
    was originally how I was ensuring all df were on the minimum vitals cohort, but now this may not be needed since i reassigned final_pt_df2 as the minimum vitals cohort.
"""
try:
df = df.loc[df.loc[:,'icustay_id'].isin(icustay_id_vitals),:]
    except (KeyError, NameError):
try:
df = df.loc[df.loc[:,'hadm_id'].isin(hadm_id_vitals),:]
        except (KeyError, NameError):
df = df.loc[df.loc[:,'subject_id'].isin(subject_id_vitals),:]
return(df)
def filter_subset_save(df, savename=None, return_df=False, save=False, time_filter_override=False):
"""
composite function, performs 1: time_window_filter() and 2:df_subset() to the input dataframe. this function links them together for simplifying code needed after each sql and formatting query.
return_df specifies if any output is spit out.
    save specifies if the file will be saved with the savename.
fxn was created on 5/16/19 and validated against the normal pipeline.
"""
    global date, folder, final_pt_df2, lower_window, upper_window, time_var, time_col
#
if time_filter_override==False:
time_filtered= time_window_filter(df, final_pt_df2, timecol=time_col ,time_var=time_var, lower_window=lower_window, upper_window=upper_window)
else:
time_filtered=df
time_and_subseted= df_subset(time_filtered)
if save==True:
os.chdir('/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling')
if folder != None:
address=os.getcwd()+'/data/raw/csv/%s/'%(folder)
else:
address = os.getcwd()+'/data/raw/csv/'
if not os.path.exists(address):
print(address)
os.makedirs(address)
pd.DataFrame(time_and_subseted).to_csv(address+'/%s_%s.csv' %(date, savename))
else: pass
if return_df==False:
del(df, time_filtered, time_and_subseted)
else:
return(time_and_subseted)
filter_subset_save(vitals_all_nosummary_df, savename="vitals_all_nosummary", save=save_boolean, return_df=False)
del(vitals_all_nosummary_df)
###Output
time: 3min 10s
###Markdown
testing elixhauser comorbidities
###Code
elixhauser_nosummary_df= clinvar_fxn(
'elixhauser_quan',
'/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/removed_aggregations/elixhauser_quan.sql',
subject_id_override=True
)
###Output
(21864, 33)
time: 1min 15s
###Markdown
now next task: redo same as above, BUT EXCLUDE CURRENT ROW FROM CUMMAX()
###Code
def elix(shift=False):
elix_var=list(elixhauser_nosummary_df)[3:]
elixhauser_nosummary_df2=elixhauser_nosummary_df.copy()
elixhauser_nosummary_df2[elix_var]=(elixhauser_nosummary_df
.sort_values('stay_num', ascending=True) #sorts values so stay_num is ascending
.groupby('subject_id', as_index=False)[elix_var] #groups by subject id and filters only elixhauser variable columns
.agg('cummax') #takes a cummulitive max for every row
)
if shift==True:
        #now shifting the values up by 1 so the cumulative max doesn't consider the current values: (note: couldn't get this to work in the fxn above)
elixhauser_nosummary_df2[elix_var]=(elixhauser_nosummary_df
.sort_values('stay_num', ascending=True) #sorts values so stay_num is ascending
.groupby('subject_id', as_index=False)[elix_var] #groups by subject id and filters only elixhauser variable columns
.shift(fill_value=np.nan)[:-1]) #shifts the cummulitive max up by 1 so the first row is na.
#restricting to hadm in use
elixhauser_nosummary_df3= elixhauser_nosummary_df2[elixhauser_nosummary_df2['hadm_id'].isin(hadm_id)]
#adding icustay_id
elixhauser_nosummary_df3=pd.merge(elixhauser_nosummary_df3, final_pt_df2[['hadm_id','icustay_id']], how="left", left_on='hadm_id',right_on='hadm_id')
return(elixhauser_nosummary_df3)
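# A minimal sketch on toy data (illustration only, not cohort data) of the intended
# "cumulative max excluding the current row" pattern: take the within-subject cummax,
# then shift it down by one so that row i only reflects stays 0..i-1.
import pandas as pd
_toy = pd.DataFrame({'subject_id': [1, 1, 1, 2, 2],
                     'stay_num':   [1, 2, 3, 1, 2],
                     'chf':        [0, 1, 0, 1, 0]}).sort_values(['subject_id', 'stay_num'])
_toy['chf_cummax'] = _toy.groupby('subject_id')['chf'].cummax()               # 0,1,1 | 1,1
_toy['chf_prior_stays'] = _toy.groupby('subject_id')['chf_cummax'].shift(1)   # NaN,0,1 | NaN,1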
elixhauser_df=elix(shift=False)
cancer_elix=elixhauser_df[['subject_id','hadm_id','icustay_id']].copy()
cancer_elix['value']=elixhauser_df.loc[:,['lymphoma',"solid_tumor","metastatic_cancer"]].max(axis=1)
#adding columns
cancer_elix['label']= 'cancer_elix'
cancer_elix['delta']= 0
cancer_elix['delta']= pd.to_timedelta(cancer_elix['delta'], unit='d')
cancer_elix['uom']= 'pos/neg category'
filter_subset_save(cancer_elix, savename="cancer_elix", save=save_boolean, return_df=False,time_filter_override=True) #filtering to ppl with sufficient vitals
del(elixhauser_df,cancer_elix)
elixhauser_df=elix(shift=True)
elix_var=list(elixhauser_df)[3:-1]
sum_elix=elixhauser_df[['subject_id','hadm_id','icustay_id']].copy()
sum_elix['value']=elixhauser_df.loc[:,elix_var].sum(axis=1)
#adding columns
sum_elix['label']= 'sum_elix'
sum_elix['delta']= 0
sum_elix['delta']= pd.to_timedelta(sum_elix['delta'], unit='d')
sum_elix['uom']= 'elixhauser_comorb_sum'
filter_subset_save(sum_elix, savename="sum_elix", save=save_boolean, return_df=False,time_filter_override=True) #filtering to ppl with sufficient vitals
del(elixhauser_df,sum_elix)
###Output
time: 1.1 s
###Markdown
elix qc
###Code
#elixhauser_nosummary_df2.loc[elixhauser_nosummary_df2['subject_id']==9973.0,['subject_id','stay_num','congestive_heart_failure']]
#elixhauser_nosummary_df2.sort_values(['subject_id','stay_num'], ascending=True).head(5)
# #restricting to hadm in use
# elixhauser_nosummary_df3= elixhauser_nosummary_df2[elixhauser_nosummary_df2['hadm_id'].isin(hadm_id)]
# elix_table=round(elixhauser_nosummary_df3['stay_num'].value_counts()/len(elixhauser_nosummary_df3) *100, 2)
# elix_table[elix_table>1]
###Output
time: 1.76 ms
###Markdown
demographics
###Code
#gender & race
pt_info_sql = query_schema + """
SELECT SUBJECT_ID, INSURANCE, LANGUAGE, RELIGION, MARITAL_STATUS, ETHNICITY
from mimiciii.admissions
ORDER BY subject_id DESC
"""
pt_info_df=pd.read_sql_query(pt_info_sql,conn) #361711 patients with sterile culture -> 374643 with addn of bal and broncho... 7/16/18
def demographics():
"""
wrapping demographics code into a fxn. basically combines ethinicity, age, gender and race into one df.
note: age is read from a csv, and i need to look back at where the csv comes from (i believe it's from cohort selection in pipeline).
"""
global final_pt_df2
pt_info_sql = query_schema + """
SELECT SUBJECT_ID, INSURANCE, LANGUAGE, RELIGION, MARITAL_STATUS, ETHNICITY
from mimiciii.admissions
ORDER BY subject_id DESC
"""
pt_info_df=pd.read_sql_query(pt_info_sql,conn) #361711 patients with sterile culture -> 374643 with addn of bal and broncho... 7/16/18
ethnicity_df=(pt_info_df.loc[
pt_info_df.loc[:,"subject_id"].isin(
final_pt_df2['subject_id'].tolist()),:]).drop_duplicates(['subject_id','ethnicity'])
ethnicity_df= ethnicity_df[['subject_id','ethnicity']].sort_values('ethnicity', ascending=False).groupby('subject_id', as_index=False).first()
#gender
pt_info_sql = query_schema + """
SELECT SUBJECT_ID, GENDER
from mimiciii.patients
ORDER BY subject_id DESC
"""
#admissions
#
pt_info_df=pd.read_sql_query(pt_info_sql,conn) #361711 patients with sterile culture -> 374643 with addn of bal and broncho... 7/16/18
#combining gender, race
pt_info_df=(pt_info_df.loc[
pt_info_df.loc[:,"subject_id"].isin(
final_pt_df2['subject_id'].tolist()),:]).drop_duplicates(['subject_id','gender'])
pt_info_df= pd.merge(ethnicity_df, pt_info_df)
#age- read from csv.
age_df= pd.read_csv(Path(wd+'/data/processed/22112018_pt_age.csv'))
age_df=(age_df.loc[
age_df.loc[:,"hadm_id"].isin(
hadm_id),:])
#combining age, gender and race.
age_df=pd.merge(age_df, final_pt_df2[['hadm_id','icustay_id','t_0']])
age_df=pd.merge(age_df[['subject_id','icustay_id','first_admit_age','t_0']],pt_info_df)
    age_df.loc[age_df['first_admit_age']>89, 'first_admit_age']=90 #use .loc to avoid chained assignment; cap de-identified ages >89 at 90
age_df=pd.melt(age_df, id_vars=['icustay_id','subject_id','t_0'])
age_df=age_df.rename(index=str, columns={'variable':'label'})
age_df['delta']=pd.to_timedelta('0 days')
age_df['uom']="N/A"
age_df.loc[age_df.loc[:,'label']=='first_admit_age','uom']='years'
age_df= age_df.loc[age_df.loc[:,"icustay_id"].isin(icustay_id),:]
###using regular expressions to reduce the # of ethinicities
age_df.loc[(age_df.loc[:,"label"]=='ethnicity') & (age_df.loc[:,"value"].str.contains(r'.*?BLACK')),'value']="black"
age_df.loc[(age_df.loc[:,"label"]=='ethnicity') & (age_df.loc[:,"value"].str.contains(r'.*?HISPANIC|PORTUGUESE')),'value']="hispanic"
age_df.loc[(age_df.loc[:,"label"]=='ethnicity') & (age_df.loc[:,"value"].str.contains(r'.*?WHITE')),'value']="white/nonhispanic"
age_df.loc[(age_df.loc[:,"label"]=='ethnicity') & (age_df.loc[:,"value"].str.contains(r'.*?ASIAN')),'value']='asian'
age_df.loc[(age_df.loc[:,"label"]=='ethnicity') & (age_df.loc[:,"value"].str.contains(r'(UNKNOWN|MULTI|UNABLE|DECLINE|OTHER)')),'value']='unknown/other'
    age_df.loc[(age_df.loc[:,"label"]=='ethnicity') & (age_df.loc[:,"value"].str.contains(r'[A-Z]+')),'value']="unknown/other" #lumping all remaining (still upper-case) low n values into other
#age_df.loc[age_df.loc[:,"label"]=='ethnicity','value'].value_counts()
return(age_df)
age_df= demographics()
filter_subset_save(age_df, savename="pt_info", save=save_boolean, return_df=False, time_filter_override=True)
del(age_df)
###Output
time: 810 ms
###Markdown
Echodata in Noteevents- sense removed
###Code
# echodata_df= clinvar_fxn(
# 'echodata',
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/all/echodata.sql'
# )
###Output
_____no_output_____
###Markdown
Weight
###Code
# -- This query extracts weights for adult ICU patients on their first ICU day.
# -- It does *not* use any information after the first ICU day, as weight is
# -- sometimes used to monitor fluid balance.
weightfirstday_df= clinvar_fxn(
'weightfirstday',
'/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/all/weightfirstday.sql'
)
weightfirstday_df['uom']='kg'
filter_subset_save(weightfirstday_df, savename="weightfirstday", save=save_boolean, return_df=False, time_filter_override=True)
del(weightfirstday_df)
###Output
time: 105 ms
###Markdown
Height
###Code
# -- This query extracts heights for adult ICU patients.
# -- It uses all information from the patient's first ICU day.
# -- This is done for consistency with other queries - it's not necessarily needed.
# -- Height is unlikely to change throughout a patient's stay.
heightfirstday_df= clinvar_fxn(
'heightfirstday',
'/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/all/heightfirstday.sql'
)
heightfirstday_df['uom']='cm'
filter_subset_save(heightfirstday_df, savename="heightfirstday", save=save_boolean, return_df=False, time_filter_override=True)
del(heightfirstday_df)
###Output
time: 71.4 ms
###Markdown
Labs
###Code
# -- This query pivots lab values for all patients, then filtered to those in my cohort.
# -- Have already confirmed that the unit of measurement is always the same: null or the correct unit
labs_all_nosummary_df= clinvar_fxn(
'labs_all_nosummary',
'/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/removed_aggregations/labs_all_nosummary.sql'
)
#importing unit of measurements:
def uom_sql_import(file_path):
    #accepts either a str or a Path; read the SQL file and run it against the database connection
    with open(Path(str(file_path)), 'r') as f:
        SQL = f.read()
    SQL_df= pd.read_sql_query(SQL, conn)
    return(SQL_df)
lab_uom= uom_sql_import(Path(wd+'/src/clinical_var_sql/unit_of_mesurement/labs_uom.sql'))
labs_all_nosummary_df = pd.merge(labs_all_nosummary_df, lab_uom, left_on='label', right_on='label')
labs_all_nosummary_df[labs_all_nosummary_df['label']=='LYMPHO%']#.value_counts() #4-15-19: what is this, is this exploring absolute lymphocyte %?
#labs_all_nosummary_df['label'].value_counts()
labs_all_nosummary_df[labs_all_nosummary_df['label']=='WBC']['valuenum'].describe()
#labs_all_nosummary_df[labs_all_nosummary_df['label']=='PLATELET']['valuenum'].describe()
filter_subset_save(labs_all_nosummary_df, savename="labs_all_nosummary", save=save_boolean, return_df=False, time_filter_override=False)
del(labs_all_nosummary_df)
del(lab_uom)
###Output
time: 25.5 s
###Markdown
Glasgow Coma Scale
###Code
##deprecated 08/28/18
# gcsall_df= clinvar_fxn(
# 'gcsall',
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/all/gcsall.sql'
# )
#modified on 8/28/18 to have the days annotation.
##--8/28/18: added in epoch as days, in order to help determine btwn t_0 and 72 hour for pts.
gcsall_days_df= clinvar_fxn(
'gcsall_days',
'/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/all/gcsall_days.sql'
)
#adding in icu_admit time and filtering time_var to the time window.
def gcs_72(gcsall_days_df,final_pt_df2, time_var='t_0', lower_window=0, upper_window=3):
"""
    written a while back; aims to add in icu_admit time and filter time_var to the time window. will use this with time_filter_override=True in my filter_subset_save()
"""
##merging gcsall_days_df with final_pt df in order to append on icustay_id, the time var, and ICU_admit
gcsall_days_df_merge= pd.merge(
gcsall_days_df,
final_pt_df2[['icustay_id','ICU_admit',time_var]],
left_on='icustay_id',
right_on='icustay_id')
gcsall_days_df_merge['day'] = gcsall_days_df_merge['day']-1 #putting the epoch days so that 0 = the first day
#approximating the charttime of the time associated with each gcs score
gcsall_days_df_merge['approx_charttime']=pd.to_timedelta((gcsall_days_df_merge['day'])*24, unit='h') + pd.to_datetime(gcsall_days_df_merge['ICU_admit'])
# day # + ICU_admission day.
gcsall_days_df_merge['admit_plus_day']= (
pd.to_datetime(gcsall_days_df_merge['ICU_admit'])
+ pd.to_timedelta(gcsall_days_df_merge['day'], unit='D')
)
#difference between the admission+epoch day - time_var.
gcsall_days_df_merge['delta']= (
pd.to_datetime(gcsall_days_df_merge['admit_plus_day']) - pd.to_datetime(gcsall_days_df_merge[time_var])
)
#filtering day windows
gcsall_days_df_merge_72= (
gcsall_days_df_merge.loc[gcsall_days_df_merge.loc[:,'delta']>= pd.Timedelta(days=lower_window),:])
gcsall_days_df_merge_72= (
gcsall_days_df_merge_72.loc[gcsall_days_df_merge_72.loc[:,'delta']<= pd.Timedelta(days=upper_window),:])
return(gcsall_days_df_merge_72)
gcs72_df = gcs_72(gcsall_days_df,final_pt_df2, time_var=time_var, lower_window=lower_window,upper_window=upper_window )
gcs72_df['uom']='GCS_score' #adding in uom
filter_subset_save(gcs72_df, savename="gcs", save=save_boolean, return_df=False, time_filter_override=True)
del(gcs72_df)
###Output
time: 199 ms
###Markdown
Renal replacement therapy the sql code for this was not equipped to join all of the charttimes together, so i decided to do it in python below. the rrt_all_df code above was only a 1 or 0 if the patient had RRT during their entire icu stay. - step 1: run all the sql codes - step 2: filter to only the t_0 to t_0+72 hour rows - step 3: filter to the 1223 patients who have a positive value - step 4: get the earliest incidence of rrt for each of the 1223 patients.
###Code
def rrt_runmerge():
"""
wrapping a lot of scripting into a function. grabs the 5 different rrt datas, filters them to timewindow, and merges them into 1 dataframe.
"""
    global date, folder, patient_df, lower_window, upper_window, time_var, time_col
###5 sql queries to grab raw data
#mv_ce
f = open('/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/removed_aggregations/rtt_mv_ce.sql', 'r')
rrtSQL_mv_ce = f.read()
rrtSQL_mv_ce_sql = query_schema + rrtSQL_mv_ce.format(tuple(patients))
rrtSQL_mv_ce_df=pd.read_sql_query(rrtSQL_mv_ce_sql,conn)
#cv
f = open('/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/removed_aggregations/rtt_cv.sql', 'r')
rrtSQL_cv = f.read()
rrtSQL_cv_sql = query_schema + rrtSQL_cv.format(tuple(patients))
rrtSQL_cv_df=pd.read_sql_query(rrtSQL_cv_sql,conn)
#mv_ie
f = open('/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/removed_aggregations/rtt_mv_ie.sql', 'r')
rrtSQL_mv_ie = f.read()
rrtSQL_mv_ie_sql = query_schema + rrtSQL_mv_ie.format(tuple(patients))
rrtSQL_mv_ie_df=pd.read_sql_query(rrtSQL_mv_ie_sql,conn)
rrtSQL_mv_ie_df['charttime']= rrtSQL_mv_ie_df['starttime']
rrtSQL_mv_ie_df=rrtSQL_mv_ie_df.drop('starttime', axis=1)
#mv_de
f = open('/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/removed_aggregations/rtt_mv_de.sql', 'r')
rrtSQL_mv_de = f.read()
rrtSQL_mv_de_sql = query_schema + rrtSQL_mv_de.format(tuple(patients))
rrtSQL_mv_de_df=pd.read_sql_query(rrtSQL_mv_de_sql,conn)
#mv_pe
f = open('/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/removed_aggregations/rtt_mv_pe.sql', 'r')
rrtSQL_mv_pe = f.read()
rrtSQL_mv_pe_sql = query_schema + rrtSQL_mv_pe.format(tuple(patients))
rrtSQL_mv_pe_df=pd.read_sql_query(rrtSQL_mv_pe_sql,conn)
rrtSQL_mv_pe_df['charttime']= rrtSQL_mv_pe_df['starttime']
rrtSQL_mv_pe_df=rrtSQL_mv_pe_df.drop('starttime', axis=1)
### timewindow filtering
def hour_72_window_rrt(df, final_pt_df2,timecol='charttime',time_var='t_0', lower_window=0, upper_window=3 ):
        ##modified to make more generalizable to easily accommodate the PA cohort but default to my original cohort.
##filters rrt to within timewindow between timecol- time_var
df= pd.merge(final_pt_df2[['icustay_id',time_var]], df, left_on= 'icustay_id', right_on = 'icustay_id', how='left') #n=240317
df['delta']= pd.to_datetime(df[timecol]) - pd.to_datetime(df[time_var])
df_after_t0= df.loc[df.loc[:,'delta']>= pd.Timedelta(days=lower_window),:]
df_after_t0= df_after_t0.loc[df_after_t0.loc[:,'delta']<= pd.Timedelta(days=upper_window),:]
#df_after_t0= df_after_t0.loc[df_after_t0.loc[:,'rrt']==1,:].groupby('icustay_id')['charttime'].min()
return(pd.DataFrame(df_after_t0))#.reset_index())
rrtSQL_mv_ce_pt =hour_72_window_rrt(rrtSQL_mv_ce_df, patient_df, timecol=time_col,time_var=time_var, lower_window=lower_window,upper_window=upper_window)
rrtSQL_cv_pt =hour_72_window_rrt(rrtSQL_cv_df, patient_df, timecol=time_col,time_var=time_var, lower_window=lower_window,upper_window=upper_window)
rrtSQL_mv_ie_pt =hour_72_window_rrt(rrtSQL_mv_ie_df, patient_df, timecol=time_col,time_var=time_var, lower_window=lower_window,upper_window=upper_window)
rrtSQL_mv_de_pt =hour_72_window_rrt(rrtSQL_mv_de_df, patient_df, timecol=time_col,time_var=time_var, lower_window=lower_window,upper_window=upper_window)
rrtSQL_mv_pe_pt =hour_72_window_rrt(rrtSQL_mv_pe_df, patient_df, timecol=time_col,time_var=time_var, lower_window=lower_window,upper_window=upper_window)
### merging all 5 filtered rrt_df together
def rrt_merging(rrtSQL_mv_ce_pt, rrtSQL_cv_pt, rrtSQL_mv_ie_pt, rrtSQL_mv_de_pt, rrtSQL_mv_pe_pt, timecol='charttime',time_var='t_0'):
###returns an aggregate y/n of if patient had positive rrt within timewindow.
rrt_merged_pt= pd.concat([rrtSQL_mv_ce_pt, rrtSQL_cv_pt, rrtSQL_mv_ie_pt, rrtSQL_mv_de_pt, rrtSQL_mv_pe_pt])
#making a 1 if has positive rrt within timewindow:
rrt_merged_pt= pd.DataFrame(rrt_merged_pt.loc[rrt_merged_pt.loc[:,'rrt']==1,:].groupby('icustay_id')[timecol].min().reset_index())
rrt_merged_pt['rrt']=1
rrt_merged_allpt_df= pd.merge(final_pt_df2[['icustay_id',time_var]], rrt_merged_pt, left_on= 'icustay_id', right_on = 'icustay_id', how='left') #n=240317
rrt_merged_allpt_df=rrt_merged_allpt_df.rename(index=str, columns={timecol:"first_charttime"})
rrt_merged_allpt_df['uom']='category' #adding a uom category
rrt_merged_allpt_df.loc[rrt_merged_allpt_df.loc[:,'rrt'].isnull(),'rrt']='0'
return(rrt_merged_allpt_df)
rrt_merged_allpt_df= rrt_merging(rrtSQL_mv_ce_pt, rrtSQL_cv_pt, rrtSQL_mv_ie_pt, rrtSQL_mv_de_pt, rrtSQL_mv_pe_pt, timecol=time_col,time_var=time_var)
return(rrt_merged_allpt_df)
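# Toy illustration (hypothetical values, not cohort data) of the "earliest positive RRT per icustay"
# step performed inside rrt_merging: keep rrt==1 rows, then take the minimum charttime per icustay_id.
import pandas as pd
_toy_rrt = pd.DataFrame({'icustay_id': [7, 7, 8],
                         'charttime': pd.to_datetime(['2101-01-02 06:00', '2101-01-01 18:00', '2101-01-03 12:00']),
                         'rrt': [1, 1, 0]})
_first_rrt = (_toy_rrt.loc[_toy_rrt['rrt'] == 1]
                      .groupby('icustay_id')['charttime'].min().reset_index())
# icustay 7 -> 2101-01-01 18:00 (its earliest positive row); icustay 8 has no positive row and drops out.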
rrt_merged_allpt_df= rrt_runmerge()
filter_subset_save(rrt_merged_allpt_df, savename="rrt_merged", save=save_boolean, return_df=False, time_filter_override=True)
del(rrt_merged_allpt_df)
###Output
time: 20.4 s
###Markdown
Urine Output
###Code
##deprecated
# urine_output_all_df= clinvar_fxn(
# 'urine_output_all',
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/all/urine_output_all.sql'
# )
###Output
time: 987 µs
###Markdown
UTI related variables
###Code
uti_all_df= clinvar_fxn(
'uti_all',
'/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/all/uti_all.sql'
)
filter_subset_save(uti_all_df, savename="uti_all", save=save_boolean, return_df=False, time_filter_override=False)
del(uti_all_df)
###Output
time: 7.08 s
###Markdown
Blood Gas Test
###Code
def PaO2(bg_all_nosummary_df):
"""
overview: replaces the PO2 label with PaO2 on all instances (defined as sharing icustay_id and charttime being equal)
where the specimen label == 'ART'
input: bloodgas dataframe with values annotated.
output: bloodgas dataframe with values annotated where PO2 label is replaced with PaO2 according to above criteria
"""
#making a unique varaible to search for and mark all rows where time and icustay_id has an art flag
bg_all_nosummary_df['unique_var']= bg_all_nosummary_df['icustay_id'].map(str) + bg_all_nosummary_df['charttime'].map(str)
#making subset dataframe for label == SPECIMEN
bg_all_nosummary_specimen= bg_all_nosummary_df.loc[bg_all_nosummary_df.loc[:,'label']=='SPECIMEN',:]
#all ART related rows: unique_var for all rows where label== SPECIMEN
bg_all_nosummary_ART = bg_all_nosummary_specimen[bg_all_nosummary_specimen['value']=='ART']
bg_all_nosummary_ART_list= list(bg_all_nosummary_ART['unique_var'].unique())
#two criteria needed to change the PO2 to PaO2 label.
criteria1=(bg_all_nosummary_df['label'] == 'PO2')
criteria2=(bg_all_nosummary_df['unique_var'].isin(bg_all_nosummary_ART_list))
#making changes
bg_all_nosummary_df.loc[(criteria2 & criteria1),'label']= 'PaO2'
return(bg_all_nosummary_df)
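# Toy sketch (hypothetical rows) of the PaO2 relabelling rule above: a PO2 row is renamed to PaO2
# only when the same icustay_id + charttime also carries a SPECIMEN row whose value is 'ART'.
import pandas as pd
_toy_bg = pd.DataFrame({'icustay_id': [1, 1, 2, 2],
                        'charttime': ['2101-01-01 06:00', '2101-01-01 06:00',
                                      '2101-01-01 07:00', '2101-01-01 07:00'],
                        'label': ['SPECIMEN', 'PO2', 'SPECIMEN', 'PO2'],
                        'value': ['ART', '95', 'VEN', '40']})
assert list(PaO2(_toy_bg)['label']) == ['SPECIMEN', 'PaO2', 'SPECIMEN', 'PO2']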
bg_all_nosummary_df= clinvar_fxn(
'bg_all_nosummary',
'/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/removed_aggregations/bg_all_nosummary.sql'
)
bg_all_nosummary_df = PaO2(bg_all_nosummary_df)
filter_subset_save(bg_all_nosummary_df, savename="bg_all_nosummary", save=save_boolean, return_df=False, time_filter_override=False)
del(bg_all_nosummary_df)
#bg_all_nosummary_df.head()
###Output
time: 510 µs
###Markdown
Vasoactive therapies
###Code
# 10/12/18 added amountuom as amount_uom, rateuom as rate_uom to many lines of the sql code.
weightdurations_df= clinvar_fxn(
'weightdurations',
'/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/vasoactive_meds/weightdurations.sql' ##added to vasoactive_meds due to dependency of SQL code
)
#
epi_dose_df= clinvar_fxn(
'epinephrine_dose',
'/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/vasoactive_meds/epinephrine_dose.sql'
)
#
norepi_dose_df= clinvar_fxn(
'norepinephrine_dose',
'/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/vasoactive_meds/norepinephrine_dose.sql'
)
#
dopamine_dose_df= clinvar_fxn(
'dopamine_dose',
'/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/vasoactive_meds/dopamine_dose.sql'
)
#
dobutamine_dose_df= clinvar_fxn(
'dobutamine_dose',
'/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/vasoactive_meds/dobutamine_dose.sql'
)
#
vasopressin_dose_df= clinvar_fxn(
'vasopressin_dose',
'/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/vasoactive_meds/vasopressin_dose.sql'
)
#removing units/hour, as these are not appropriate
vasopressin_dose_df= vasopressin_dose_df.loc[~vasopressin_dose_df.loc[:,'rate_uom'].isin(['Uhr','units/hour']),:]
#
phenylephrine_dose_df= clinvar_fxn(
'phenylephrine_dose',
'/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/vasoactive_meds/phenylephrine_dose.sql'
)
# weightdurations_df= clinvar_fxn(
# 'weightdurations',
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/weightdurations.sql'
# )
#adding an identification label column and merging them into 1 df.
epi_dose_df['label']='epinephrine'
norepi_dose_df['label']='norepinephrine'
dopamine_dose_df['label']='dopamine'
dobutamine_dose_df['label']='dobutamine'
vasopressin_dose_df['label']='vasopressin'
phenylephrine_dose_df['label']='phenylephrine'
vaso_dose_df = pd.concat([epi_dose_df, norepi_dose_df, dopamine_dose_df, dobutamine_dose_df, vasopressin_dose_df,phenylephrine_dose_df ])
#rename starttime to charttime
vaso_dose_df.rename(index=str, columns={'starttime':"charttime"}, inplace=True)
filter_subset_save(vaso_dose_df, savename="vaso_dose", save=save_boolean, return_df=False, time_filter_override=False)
del(vaso_dose_df)
del(epi_dose_df, norepi_dose_df, dopamine_dose_df, dobutamine_dose_df, vasopressin_dose_df,phenylephrine_dose_df)
###Output
time: 1.47 s
###Markdown
ventilator settings and categorization
###Code
#ventsettings_df = pd.read_csv('/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/csv/15082018_ventsettings_df.csv', index_col=0)
ventsettings_df= clinvar_fxn(
'ventsettings',
'/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/src/clinical_var_sql/all/ventsettings.sql'
)
#going from wide format to long:
#pd.melt(ventsettings_df, id_vars=['icustay_id','charttime'])
# filter_subset_save(ventsettings_df, savename="ventsettings", save=save_boolean, return_df=False, time_filter_override=False)
# del(ventsettings_df)
def vent_data(vent_df,time_var='t_0', lower_window=0, upper_window=3 ):
df= pd.merge(vent_df,
final_pt_df2[['icustay_id',time_var]],
left_on='icustay_id',
right_on= 'icustay_id',
how='left')
df['delta']= pd.to_datetime(df['charttime']) - pd.to_datetime(df[time_var])
df_timewindow= df.loc[df.loc[:,'delta']>= pd.Timedelta(days=lower_window),:]
df_timewindow= df_timewindow.loc[df_timewindow.loc[:,'delta']<= pd.Timedelta(days=upper_window),:]
df_timewindow['day']= df_timewindow['delta'].apply(lambda x: pd.to_timedelta(x,unit='d').days) #day #
return(df_timewindow)
#df_timewindow =vent_data(ventsettings_df,time_var='first_pos_else_neg_ssc', lower_window=-1, upper_window=1 )
def vent_day_categorizer(vent_df,time_var='t_0', lower_window=0, upper_window=3 ):
df_timewindow =vent_data(vent_df,time_var=time_var, lower_window=lower_window, upper_window=upper_window)
df_timewindow_perday=df_timewindow.groupby(['icustay_id','day'], as_index=False)[['mechvent','oxygentherapy']].agg({'mechvent':'max', 'oxygentherapy':'max'})
conditions= [
(df_timewindow_perday['mechvent']==1),
((df_timewindow_perday['oxygentherapy']==1) & (df_timewindow_perday['mechvent']==0)),
        ((df_timewindow_perday['oxygentherapy']==0) & (df_timewindow_perday['mechvent']==0))]
choices=['Mech', 'Oxygen', 'None']
#
df_timewindow_perday['value']= np.select(conditions, choices, default='error:no_value_filled')
df_timewindow_perday['value']
df_timewindow_perday=df_timewindow_perday.reset_index()
df_timewindow_perday['uom']= 'mech/O2/none category'
df_timewindow_perday= df_timewindow_perday.drop(['mechvent','oxygentherapy','index'], axis=1)
df_timewindow_perday=pd.merge(df_timewindow_perday, final_pt_df2[['icustay_id',time_var]] )
return(df_timewindow_perday)
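# Small sketch (toy rows, not cohort data) of how np.select assigns one daily category above,
# with mechanical ventilation taking precedence over oxygen therapy and 'None' otherwise.
import numpy as np
import pandas as pd
_toy_days = pd.DataFrame({'mechvent': [1, 0, 0], 'oxygentherapy': [1, 1, 0]})
_toy_days['value'] = np.select(
    [_toy_days['mechvent'] == 1,
     (_toy_days['oxygentherapy'] == 1) & (_toy_days['mechvent'] == 0),
     (_toy_days['oxygentherapy'] == 0) & (_toy_days['mechvent'] == 0)],
    ['Mech', 'Oxygen', 'None'],
    default='error:no_value_filled')
assert list(_toy_days['value']) == ['Mech', 'Oxygen', 'None']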
#ventcategory_df = vent_categorization(final_pt_df2, ventsettings_df, time_var='first_pos_else_neg_ssc' )
ventcategory_df= vent_day_categorizer(ventsettings_df,time_var=time_var, lower_window=lower_window, upper_window=upper_window)
#ventcount_df = vent_count(final_pt_df2,ventsettings_df, time_var='first_pos_else_neg_ssc')
filter_subset_save(ventcategory_df, savename="ventcategory", save=save_boolean, return_df=False, time_filter_override=True)
#del(ventcategory_df)
ventcategory_df.head()
###Output
_____no_output_____
###Markdown
daily SOFA score running yiheng's sql codes to capture the daily sofa_score. ideally i could have just used my data above, but she had this written already so i'll use this. link to her github: https://github.com/yihengpan/fluid_management/tree/master/sofa requirements for sofa_pan: sofa <- scorecalc <- scorecomp <- vaso_cv <- wt <- echo2 <- vaso_mv <- pafi2 <- bloodgas_pan_arterial <- bloodgas_pan <- ventdurations <- vitals_pan <- labs_pan <- uo_pan <- gcs_pan
###Code
sofa_path= '/Users/geickelb1/Documents/GitHub/fluid_management/sofa'
var='wt'
wt_df= clinvar_fxn(
var,
Path(sofa_path+'/%s.sql' %(var))
)
var='echo2'
echo2_df= clinvar_fxn(
var,
Path(sofa_path+'/%s.sql' %(var))
)
var='bloodgas_pan'
bloodgas_pan_df= clinvar_fxn(
var,
Path(sofa_path+'/%s.sql' %(var))
)
var='bloodgas_pan_arterial'
bloodgas_pan_art_df= clinvar_fxn(
var,
Path(sofa_path+'/%s.sql' %(var))
)
#vaso_mv, vaso_cv,pafi2, vitals_pan, labs_pan, uo_pan, gcs_pan
sofa_path= '/Users/geickelb1/Documents/GitHub/fluid_management/sofa'
var='vaso_mv'
vaso_mv_df= clinvar_fxn(
var,
Path(sofa_path+'/%s.sql' %(var))
)
var='vaso_cv'
vaso_cv_df= clinvar_fxn(
var,
Path(sofa_path+'/%s.sql' %(var))
)
var='pafi1'
pafi1_df= clinvar_fxn(
var,
Path(sofa_path+'/%s.sql' %(var))
)
var='pafi2'
pafi2_df= clinvar_fxn(
var,
Path(sofa_path+'/%s.sql' %(var))
)
var='labs_pan'
labs_pan_df= clinvar_fxn(
var,
Path(sofa_path+'/%s.sql' %(var))
)
var='uo_pan'
uo_pan_df= clinvar_fxn(
var,
Path(sofa_path+'/%s.sql' %(var))
)
var='gcs_pan'
gcs_pan_df= clinvar_fxn(
var,
Path(sofa_path+'/%s.sql' %(var))
)
###
var='scorecomp'
scorecomp_df= clinvar_fxn(
var,
Path(sofa_path+'/%s.sql' %(var))
)
var='scorecalc'
scorecalc_df= clinvar_fxn(
var,
Path(sofa_path+'/%s.sql' %(var))
)
var='sofa_pan'
sofa_pan_df= clinvar_fxn(
var,
Path(sofa_path+'/%s.sql' %(var))
)
#deleting these to clear up memory
del(vaso_cv_df, vaso_mv_df, labs_pan_df, gcs_pan_df, scorecalc_df, scorecomp_df, uo_pan_df, pafi1_df, pafi2_df, bloodgas_pan_art_df, echo2_df, wt_df)
sofa_pan_df['hadm_id'].nunique() #8707
final_pt_df2['hadm_id'].nunique() #8731
#adding in t_0 & icuadmit date
def sofa_day_window_filter(sofa_pan_df, time_var= 't_0', lower_window= 0, upper_window=3): #'t_0'):
import datetime
'''
#Yihangpan wrote a sql script and materialized view "sofa_pan" which gives the sofa score for each day in icu for each patient.
#since the sofa_pan has days after admission but not chartdates, I need to use day # to find the associated t_0 to t_0+72 for each patient.
# the challenge was that I had to relate day# in sofa_pan to my t_0 date. the day # was based on the days after icu admission, where day1 = the first day (0 to 24 hours post admission). this was changed so day 0= 0 to 24 hours.
#To do this, I added day# (where day 0 is the first day) to icu admission date.
#I then filtered on only the rows where this icuadmin + day# was between t_0 and t_0 + 72 hours.
#since t_0 has only day resolution, and for that I ignored hours and only took the date (rounded down all hours/minutes/seconds). this is similar to how i made the t_0 date.
#the problem this creates is that it widens the potential time window, so it theoretically can contain up to 95.99 hours, since hours on day 1 were collapsed to 0.
input:
sofa_pan_df: daily sofa scores captured from sofa_pan_sql.
optional:
time_var: the time variable we want to base the window off of
window_bottom= 0, time_var- window_bottom (days + time_var) = first daily sofa score to capture
window_top= 0, time_var- window_top (days + time_var) = last daily sofa score to capture
output: sofa_pan_sql annotated with days and filtered to time window set by window_bottom and window_top.
'''
#reducing to minimum vital patients
sofa_pan_df=sofa_pan_df.loc[sofa_pan_df.loc[:,"icustay_id"].isin(icustay_id_vitals),:]
##merging sofa_pan with final_pt df in order to append on icustay_id, the time var, and ICU_admit
sofa_df_merged= pd.merge(sofa_pan_df,
final_pt_df2[['icustay_id',time_var,'ICU_admit']],
left_on= 'icustay_id',
right_on = 'icustay_id',
how='left') #n=240317
#sofa_df_merged['admit_t0_rounded'] = pd.to_datetime(sofa_df_merged['ICU_admit']).dt.round('1440min')
sofa_df_merged['day'] = sofa_df_merged['day']-1 #putting the epoch days so that 0 = the first day
#approximating the charttime of the time associated with each sofa score. adding on days to icuadmit.
sofa_df_merged['approx_charttime']=pd.to_timedelta((sofa_df_merged['day'])*24, unit='h') + pd.to_datetime(sofa_df_merged['ICU_admit'])
#rounding down the charttime to the day, so hours and minutes are ignored (just like t_0)
sofa_df_merged['floor_charttime'] = sofa_df_merged['approx_charttime'].apply(lambda dt: datetime.datetime(dt.year, dt.month, dt.day, 24*(dt.hour//24)))
sofa_df_merged['floor_time_var'] = pd.to_datetime(sofa_df_merged[time_var]).apply(lambda dt: datetime.datetime(dt.year, dt.month, dt.day, 24*(dt.hour//24)))
sofa_df_72= sofa_df_merged.loc[
(sofa_df_merged['floor_charttime'].between(
(pd.to_datetime(sofa_df_merged['floor_time_var'])+ pd.to_timedelta(lower_window, unit='d')),
(pd.to_datetime(sofa_df_merged['floor_time_var'])+ pd.to_timedelta(upper_window, unit='d')+ pd.to_timedelta(1, unit='h')) #added 1hr timebuffer incase between is set as less than greater than
)),:]
return(sofa_df_72.drop(['floor_time_var','floor_charttime'], axis=1))
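# Tiny illustration (hypothetical dates) of the day-number-to-date logic described in the docstring:
# 0-based day k maps to ICU_admit + k*24h, which is then floored to the date (midnight) so it can be
# compared against the day-resolution t_0 window; the datetime(...) construction above is equivalent
# to Timestamp.normalize().
import pandas as pd
_admit = pd.to_datetime('2101-01-01 17:45')
_day2 = _admit + pd.to_timedelta(2 * 24, unit='h')   # 2101-01-03 17:45
assert _day2.normalize() == pd.Timestamp('2101-01-03 00:00')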
sofa_df_72= sofa_day_window_filter(sofa_pan_df, time_var= time_var, lower_window= lower_window, upper_window=upper_window)
filter_subset_save(sofa_df_72, savename="sofa", save=save_boolean, return_df=False, time_filter_override=True)
del(sofa_df_72)
###Output
time: 1.3 s
###Markdown
saving as csv everything below is deprecated as of 5/16/19: haven't needed to save the data pre-time-window filtering for a year. this isn't being used anywhere and is just wasting space.
###Code
# #####NEED TO CLEAN UP PATHWAY CODING
# pd.DataFrame(vaso_dose_df).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_vaso_dose_df.csv' %(date))
# pd.DataFrame(ventsettings_df).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_ventsettings_df.csv' %(date))
# # pd.DataFrame(ventcount_df).to_csv(
# # '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_ventcount_df.csv' %(date)) #not useful
# # pd.DataFrame(echodata_df).to_csv(
# # '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_echodata_df.csv' %(date))
# pd.DataFrame(weightfirstday_df).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_weightfirstday_df.csv' %(date))
# pd.DataFrame(heightfirstday_df).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_heightfirstday_df.csv' %(date))
# pd.DataFrame(labs_all_nosummary_df).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_labs_all_nosummary_df.csv' %(date))
# pd.DataFrame(vitals_all_nosummary_df).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_vitals_all_nosummary_df.csv' %(date))
# # pd.DataFrame(gcsall_df).to_csv(
# # '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_gcsall_df.csv' %(date))
# pd.DataFrame(urine_output_all_df).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_urine_output_all_df.csv' %(date))
# pd.DataFrame(uti_all_df).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_uti_all_df.csv' %(date))
# pd.DataFrame(bg_all_nosummary_df).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_bg_all_nosummary_df.csv' %(date))
# #timewindowed
# pd.DataFrame(ventcategory_df).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_ventcategory_df.csv' %(date))
# pd.DataFrame(rrt_merged_allpt_df).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_rrt_all_df.csv' %(date)) #timewindowed
# pd.DataFrame(gcs72_df).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_gcs72_df.csv' %(date)) #gcs within time window here
# pd.DataFrame(sofa_df_72).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_sofa_df_72.csv' %(date))
###Output
time: 12.5 ms
###Markdown
time window filtering 8/28/18 (updated 1/28/19) saving a new version of each clinical variable dataframe that is filtered to only the 72 hour window after t_0 for each icustay_id - need to organize this better reading in data if needed
###Code
# #final_pt_df2 = pd.read_csv('/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/16082018_final_pt_df2.csv', index_col=0)
# #large import of all data
# date= '27082018'
# vaso_dose_df =pd.read_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_vaso_dose_df.csv' %(date), index_col=0)
# ventsettings_df=pd.read_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_ventsettings_df.csv' %(date), index_col=0)
# ventcategory_df=pd.read_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_ventcategory_df.csv' %(date), index_col=0)
# ventcount_df=pd.read_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_ventcount_df.csv' %(date), index_col=0)
# echodata_df=pd.read_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_echodata_df.csv' %(date), index_col=0)
# weightfirstday_df=pd.read_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_weightfirstday_df.csv' %(date), index_col=0)
# heightfirstday_df=pd.read_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_heightfirstday_df.csv' %(date), index_col=0)
# labs_all_nosummary_df=pd.read_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_labs_all_nosummary_df.csv' %(date), index_col=0)
# vitals_all_nosummary_df=pd.read_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_vitals_all_nosummary_df.csv' %(date), index_col=0)
# gcsall_df=pd.read_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_gcsall_df.csv' %(date), index_col=0)
# rrt_all_df=pd.read_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_rrt_all_df.csv' %(date), index_col=0)
# urine_output_all_df=pd.read_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_urine_output_all_df.csv' %(date), index_col=0)
# uti_all_df=pd.read_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_uti_all_df.csv' %(date), index_col=0)
# bg_all_nosummary_df=pd.read_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_bg_all_nosummary_df.csv' %(date), index_col=0)
# def time_window_filter(df, final_pt_df2,timecol,time_var='t_0', lower_window=0, upper_window=3):
# try:
# df= pd.merge(df, final_pt_df2[['icustay_id',time_var]], left_on= 'icustay_id', right_on = 'icustay_id') #n=240317
# df['delta']= pd.to_datetime(df[timecol]) - pd.to_datetime(df[time_var])
# df_after_t0= df.loc[df.loc[:,'delta']>= pd.Timedelta(days=lower_window),:]
# df_after_t0= df_after_t0.loc[df_after_t0.loc[:,'delta']<= pd.Timedelta(days=upper_window),:]
# except KeyError or NameError:
# df= pd.merge(df, final_pt_df2[['hadm_id',time_var]], left_on= 'hadm_id', right_on = 'hadm_id') #n=240317
# df['delta']= pd.to_datetime(df[timecol]) - pd.to_datetime(df[time_var])
# df_after_t0= df.loc[df.loc[:,'delta']>= pd.Timedelta(days=lower_window),:]
# df_after_t0= df_after_t0.loc[df_after_t0.loc[:,'delta']<= pd.Timedelta(days=upper_window),:]
# return(df_after_t0)
# ###list of data w/ 72 hour adjustments
# #vaso_dose_df - vaso_dose_72 -
# #ventsettings_df - ventsettings_72 -
# #ventcategory_df - ventcategory_df -
# #echodata_df - echodata_72 -
# #labs_all_nosummary_df- labs_all_nosummary_72 -
# #vitals_all_nosummary_df - vitals_all_nosummary_72 -
# #gcsall_df - Gcs72_df **modified the gcs in python above. within 72 hour window. -
# #rrt_all_df - rrt_merged_allpt_df *modified the rrt in python above. within 72 hour window. -
# #uti_all_df - uti_all_72 -
# #bg_all_nosummary_df - bg_all_nosummary_72
# #sofa_df_72 #already within 72hour window
# #urine_output_all_df - . xxx this also doesn't have times, but this won't be used much so i didn't bother editing.
# vaso_dose_72= time_window_filter(vaso_dose_df, final_pt_df2, 'starttime',time_var=time_var, lower_window=lower_window, upper_window=upper_window)
# ventsettings_72= time_window_filter(ventsettings_df, final_pt_df2, "charttime",time_var=time_var, lower_window=lower_window, upper_window=upper_window)
# # echodata_72= time_window_filter(echodata_df, final_pt_df2, 'charttime',time_var=time_var, lower_window=lower_window, upper_window=upper_window)
# labs_all_nosummary_72= time_window_filter(labs_all_nosummary_df, final_pt_df2, "charttime",time_var=time_var, lower_window=lower_window, upper_window=upper_window)
# vitals_all_nosummary_72 = time_window_filter(vitals_all_nosummary_df, final_pt_df2, "charttime",time_var=time_var, lower_window=lower_window, upper_window=upper_window)
# uti_all_72 = time_window_filter(uti_all_df, final_pt_df2, 'charttime',time_var=time_var, lower_window=lower_window, upper_window=upper_window)
# bg_all_nosummary_72 = time_window_filter(bg_all_nosummary_df, final_pt_df2, 'charttime',time_var=time_var, lower_window=lower_window, upper_window=upper_window)
###Output
time: 7.12 ms
###Markdown
filtering to patients with bare minimum vital numbers 06.1-QC_and_missingness found that 3% or so of patients don't have baseline vitals counts. this is filtering the patients to only those who have this baseline value. as of 10/12/18, this code has not been implemented in here and is performed after importing. (updated below to be self contained on 1/28/19)
###Code
# date= '25012019'
# vitals_all_nosummary_df =pd.read_csv(
# Path('/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_vitals_all_nosummary_df.csv' %(date)), index_col=0)
# #using origional criteria to find pts who have atleast 1 spo2 reading within 3 days of t_0
# #The idea is that this should be the bare minimum amount of data for a patient, and without it, it's likely the physicians did not suspect an infection in these patients.
# vitals_filter = time_window_filter(vitals_all_nosummary_df, final_pt_df2, "charttime",time_var='t_0', lower_window=-1, upper_window=3)
# vitals_filter= vitals_filter.loc[
# vitals_filter['vitalid'].notnull(),:]#.count() #6930 NULL values
# icustay_id_vitals = (vitals_filter.loc[
# vitals_filter.loc[:,'vitalid']=='SpO2','icustay_id'
# ].unique())
# len(icustay_id) #8731
# len(icustay_id_vitals) #8629
# subject_id_vitals=list(final_pt_df2.loc[final_pt_df2.loc[:,'icustay_id'].isin(icustay_id_vitals),'subject_id'])
# hadm_id_vitals= list(final_pt_df2.loc[final_pt_df2.loc[:,'icustay_id'].isin(icustay_id_vitals),'hadm_id'])
# icustay_id_vitals= list(final_pt_df2.loc[final_pt_df2.loc[:,'icustay_id'].isin(icustay_id_vitals),'icustay_id'])
# del(vitals_filter)
# vitals_all_nosummary_72= df_subset(vitals_all_nosummary_72)
# pd.DataFrame(vitals_all_nosummary_72).to_csv("/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_vitals_all_nosummary_72.csv" %date) #final cohort database n=11493 subject_idโs (7/6/18)
# #saving this slightly reduced cohort for those who have sufficient vitals
# # date= '25012019'
# final_pt_df2_v=final_pt_df2.loc[final_pt_df2.loc[:,'icustay_id'].isin(icustay_id_vitals),:]
# pd.DataFrame(final_pt_df2_v).to_csv("/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s_final_pt_df2_v.csv" %date) #final cohort database n=11493 subject_idโs (7/6/18)
# def df_subset(df):
# try:
# df = df.loc[df.loc[:,'icustay_id'].isin(icustay_id_vitals),:]
# except KeyError or NameError:
# try:
# df = df.loc[df.loc[:,'hadm_id'].isin(hadm_id_vitals),:]
# except KeyError or NameError:
# df = df.loc[df.loc[:,'subject_id'].isin(subject_id_vitals),:]
# return(df)
# # subsetting each dataframe to only patients in final_patients_df2_v:
# dataframe_list= [
# age_df, ventcategory_df, vaso_dose_72, #echodata_72,
# labs_all_nosummary_72, weightfirstday_df,
# heightfirstday_df, vitals_all_nosummary_72,
# uti_all_72, bg_all_nosummary_72,
# rrt_merged_allpt_df, gcs72_df, sofa_df_72
# ]
# (
# age_df, ventcategory_df, vaso_dose_72, #echodata_72,
# labs_all_nosummary_72, weightfirstday_df,
# heightfirstday_df, vitals_all_nosummary_72,
# uti_all_72, bg_all_nosummary_72,
# rrt_merged_allpt_df, gcs72_df, sofa_df_72
# ) = (df_subset(df) for df in dataframe_list)
# bg_all_nosummary_72
# #date= '27082018'
# # date= '25012019'
# folder="48_hr_window"#"72_hr_window"
# pd.DataFrame(age_df).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s/%s_pt_info_df.csv' %(folder,date))
# pd.DataFrame(vaso_dose_72).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s/%s_vaso_dose_72.csv' %(folder,date))
# pd.DataFrame(vaso_dose_72).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s/%s_vaso_dose_72.csv' %(folder,date))
# pd.DataFrame(ventsettings_72).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s/%s_ventsettings_72.csv' %(folder,date))
# #vent category and count are already 72hour
# pd.DataFrame(ventsettings_df).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s/%s_ventsettings_df.csv' %(folder,date))
# pd.DataFrame(ventcategory_df).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s/%s_ventcategory_df.csv' %(folder,date))
# # pd.DataFrame(echodata_72).to_csv(
# # '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/72_hr_window/%s_echodata_72.csv' %(date))
# pd.DataFrame(labs_all_nosummary_72).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s/%s_labs_all_nosummary_72.csv' %(folder,date))
# #vent category and count are already limited to first day
# pd.DataFrame(weightfirstday_df).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s/%s_weightfirstday_df.csv' %(folder,date))
# pd.DataFrame(heightfirstday_df).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s/%s_heightfirstday_df.csv' %(folder,date))
# pd.DataFrame(vitals_all_nosummary_72).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s/%s_vitals_all_nosummary_72.csv' %(folder,date))
# pd.DataFrame(uti_all_72).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s/%s_uti_all_72.csv' %(folder,date))
# pd.DataFrame(bg_all_nosummary_72).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s/%s_bg_all_nosummary_72.csv' %(folder,date))
# pd.DataFrame(rrt_merged_allpt_df).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s/%s_rrt_merged_allpt_df.csv' %(folder,date))
# pd.DataFrame(gcs72_df).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s/%s_gcs72_df.csv' %(folder,date))
# pd.DataFrame(sofa_df_72).to_csv(
# '/Users/geickelb1/Documents/GitHub/mimiciii-antibiotics-modeling/data/raw/csv/%s/%s_sofa_df_72.csv' %(folder,date))
###Output
time: 12.9 ms
|
examples/inference/ex01_inference_SIR.ipynb
|
###Markdown
Inference of parameters (SIR model)In this notebook, we consider the SIR model with symptomatically and asymptomatically infected. We are trying to infer the parameters of the model * $\alpha$ (fraction of asymptomatic infectives), * $\beta$ (probability of infection on contact), * $\gamma_{I_a}$ (rate of recovery for asymptomatic infected individuals), and* $\gamma_{I_s}$ (rate of recovery for symptomatic infected individuals) when given the full data (of classes S, Ia, Is) from a generated trajectory.
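For orientation, a sketch of the rate equations typically written for this class of age-structured model (a summary for reference, not quoted from the pyross documentation): $$\dot{S}_i = -\lambda_i(t)\,S_i,\qquad \dot{I}^a_i = \alpha\,\lambda_i(t)\,S_i - \gamma_{I_a} I^a_i,\qquad \dot{I}^s_i = (1-\alpha)\,\lambda_i(t)\,S_i - \gamma_{I_s} I^s_i,$$ with force of infection $\lambda_i(t) = \beta \sum_j C_{ij}\,(I^a_j + f_{sa} I^s_j)/N_j$, where $C_{ij}$ is the contact matrix, $N_j$ the size of age group $j$, and $f_{sa}$ the self-isolation factor for symptomatic individuals.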
###Code
%%capture
## compile PyRoss for this notebook
import os
owd = os.getcwd()
os.chdir('../../')
%run setup.py install
os.chdir(owd)
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
import pyross
import time
from IPython.display import Image
Image('SIIR.jpg')
###Output
_____no_output_____
###Markdown
1) Generate a trajectoryWe generate a test trajectory on a population with two ages groups.
###Code
print('M', M)
print('Ni', Ni)
print('N', N)
print('C', C)
print('Ia0', Ia0)
print('Is0', Is0)
print('S0', S0)
print('R0', R0)
print('Tf',Tf)
print('Nf', Nf)
M = 2 # the population has two age groups
N = 5e4 # and this is the total population
# parameters for generating synthetic trajectory
beta = 0.02 # infection rate
gIa = 1./7 # recovery rate of asymptomatic infectives
gIs = 1./7 # recovery rate of symptomatic infectives
alpha = 0.2 # fraction of asymptomatic infectives
fsa = 1 # the self-isolation parameter
# set the age structure
fi = np.array([0.25, 0.75]) # fraction of population in each age group
Ni = N*fi
# set the contact structure
C = np.array([[18., 9.],
[3., 12.]])
# C_ij = number of people from group i that an individual from group j meets per day
# set up initial condition
Ia0 = np.array([10, 10]) # each age group has asymptomatic infectives
Is0 = np.array([10, 10]) # and also symptomatic infectives
R0 = np.array([0, 0]) # there are no recovered individuals initially
S0 = Ni - (Ia0 + Is0 + R0)
Tf = 100
Nf = Tf+1
def contactMatrix(t):
return C
parameters = {'alpha':alpha, 'beta':beta, 'gIa':gIa, 'gIs':gIs,'fsa':fsa}
true_parameters = {'alpha':alpha, 'beta':beta, 'gIa':gIa, 'gIs':gIs,'fsa':fsa}
# use pyross stochastic to generate traj and save
sto_model = pyross.stochastic.SIR(parameters, M, Ni)
data = sto_model.simulate(S0, Ia0, Is0, contactMatrix, Tf, Nf)
data_array = data['X']
np.save('SIR_sto_traj.npy', data_array)
fig = plt.figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.rcParams.update({'font.size': 22})
t = data['t']
plt.fill_between(t, 0, np.sum(data_array[:, :M], axis=1), alpha=0.3)
plt.plot(t, np.sum(data_array[:, :M], axis=1), '-', label='S', lw=2)
plt.fill_between(t, 0, np.sum(data_array[:, M:2*M], axis=1), alpha=0.3)
plt.plot(t, np.sum(data_array[:, M:2*M], axis=1), '-', label='Ia', lw=2)
plt.fill_between(t, 0, np.sum(data_array[:, 2*M:3*M], axis=1), alpha=0.3)
plt.plot(t, np.sum(data_array[:, 2*M:3*M], axis=1), '-', label='Is', lw=2)
plt.legend(fontsize=26)
plt.grid()
plt.xlabel(r'time')
plt.autoscale(enable=True, axis='x', tight=True)
###Output
_____no_output_____
###Markdown
2) InferenceWe take the first $50$ data points of the trajectory and use them to infer the parameters of the model.
###Code
# load the data and rescale to intensive variables
Tf_inference = 50 # truncate to only getting the first few datapoints
Nf_inference = Tf_inference+1
x = np.load('SIR_sto_traj.npy').astype('float')
x = (x/N)[:Nf_inference]
steps = 101 # number of internal integration steps taken; must be an odd number
x.shape
# Compare the deterministic trajectory and the stochastic trajectory with the same
# initial conditions and parameters
x0=x[0]
det_model = pyross.deterministic.SIR(parameters, int(M), fi)
# xm = estimator.integrate(x[0], 0, Tf_inference, Nf_inference, det_model, contactMatrix)
fig = plt.figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.rcParams.update({'font.size': 22})
# plt.plot(np.sum(xm[:, M:], axis=1), label='deterministic I')
plt.plot(np.sum(x[:Nf_inference, M:], axis=1), label='stochastic I')
plt.legend()
plt.show()
# initialise the estimator
estimator = pyross.inference.SIR(parameters, M, fi, int(N), steps)
# compute -log_p for the original (correct) parameters
start_time = time.time()
parameters = {'alpha':alpha, 'beta':beta, 'gIa':gIa, 'gIs':gIs,'fsa':fsa}
logp = estimator.obtain_minus_log_p(parameters, x, Tf_inference, Nf_inference, contactMatrix)
end_time = time.time()
print(logp)
print(end_time - start_time)
# Define the prior (Gamma prior around guess of parameter with defined std. deviation)
alpha_g = 0.3
beta_g = 0.04
gIa_g = 0.1
gIs_g = 0.1
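# A minimal sketch (an assumption for illustration, not code taken from pyross) of how a Gamma prior
# centred on a guess with a chosen standard deviation can be parameterised:
# for Gamma(shape=k, scale=theta), mean = k*theta and var = k*theta**2, hence
#   k = (mean/std)**2 and theta = std**2/mean.
def _gamma_shape_scale(mean, std):
    return (mean / std) ** 2, std ** 2 / mean
# e.g. a prior centred on alpha_g = 0.3 with std 0.2 corresponds to shape 2.25, scale ~0.133.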
# compute -log_p for the initial guess
parameters = {'alpha':alpha_g, 'beta':beta_g, 'gIa':gIa_g, 'gIs':gIs_g, 'fsa':fsa}
logp = estimator.obtain_minus_log_p(parameters, x, Tf_inference, Nf_inference, contactMatrix)
print(logp)
x
# the names of the parameters to be inferred
eps = 1e-4
keys = ['alpha', 'beta', 'gIa', 'gIs']
# initial guess
guess = np.array([alpha_g, beta_g, gIa_g, gIs_g])
# error bars on the initial guess
alpha_std = 0.2
beta_std = 0.1
gIa_std = 0.1
gIs_std = 0.1
stds = np.array([alpha_std, beta_std , gIa_std, gIs_std])
# bounds on the parameters
bounds = np.array([(eps, 0.8), (eps, 0.2), (eps, 0.6), (eps, 0.6)])
# Stopping criterion for minimisation (relative change in function value)
ftol = 1e-6
start_time = time.time()
params = estimator.infer_parameters(keys, guess, stds, bounds, x, Tf_inference, Nf_inference,
contactMatrix,
global_max_iter=20,
local_max_iter=200,
global_ftol_factor=1e3,
ftol=ftol,
verbose=True)
end_time = time.time()
print(params) # best guess
print(end_time - start_time)
# compute log_p for best estimate
start_time = time.time()
new_parameters = estimator.fill_params_dict(keys, params)
logp = estimator.obtain_minus_log_p(new_parameters, x, Tf_inference, Nf_inference, contactMatrix)
end_time = time.time()
print(logp)
print(end_time - start_time)
print("True parameters:")
print(true_parameters)
print("\nInferred parameters:")
print(new_parameters)
x = np.load('SIR_sto_traj.npy').astype('float')/N
Nf = x.shape[0]
Tf = Nf-1
det_model = pyross.deterministic.SIR(new_parameters, int(M), fi)
x_det = estimator.integrate(x[0], 0, Tf, Nf, det_model, contactMatrix)
fig = plt.figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.rcParams.update({'font.size': 22})
plt.plot(np.sum(x_det[:, :M], axis=1), label='Inferred S')
plt.plot(np.sum(x[:, :M], axis=1), label='True S')
plt.plot(np.sum(x_det[:, M:2*M], axis=1), label='Inferred Ia')
plt.plot(np.sum(x[:, M:2*M], axis=1), label='True Ia')
plt.plot(np.sum(x_det[:, 2*M:3*M], axis=1), label='Inferred Is')
plt.plot(np.sum(x[:, 2*M:3*M], axis=1), label='True Is')
plt.xlim([0, Tf])
plt.axvspan(0, Tf_inference,
label='Used for inference',
alpha=0.3, color='dodgerblue')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Inference of parameters (SIR model)In this notebook, we consider the SIR model with symptomatically and asymptomatically infected. We are trying to infer the parameters of the model * $\alpha$ (fraction of asymptomatic infectives), * $\beta$ (probability of infection on contact), * $\gamma_{I_a}$ (rate of recovery for asymptomatic infected individuals), and* $\gamma_{I_s}$ (rate of recovery for symptomatic infected individuals) when given the full data (of classes S, Ia, Is) from a generated trajectory.
###Code
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
import pyross
import time
from IPython.display import Image
Image('SIIR.jpg')
###Output
_____no_output_____
###Markdown
1) Generate a trajectoryWe generate a test trajectory on a population with two age groups.
###Code
M = 2 # the population has two age groups
N = 1e6 # and this is the total population
# parameters for generating synthetic trajectory
beta = 0.02 # infection rate
gIa = 1./7 # recovery rate of asymptomatic infectives
gIs = 1./7 # recovery rate of symptomatic infectives
alpha = 0.2 # fraction of asymptomatic infectives
fsa = 1 # the self-isolation parameter
# set the age structure
fi = np.array([0.25, 0.75]) # fraction of population in each age group
Ni = N*fi
# set the contact structure
C = np.array([[18., 9.],
[3., 12.]])
# C_ij = number of people from group i that an individual from group j meets per day
# set up initial condition
Ia0 = np.array([10, 10]) # each age group has asymptomatic infectives
Is0 = np.array([2, 2]) # and also symptomatic infectives
R0 = np.array([0, 0]) # there are no recovered individuals initially
S0 = Ni - (Ia0 + Is0 + R0)
Tf = 100
Nf = Tf+1
def contactMatrix(t):
return C
parameters = {'alpha':alpha, 'beta':beta, 'gIa':gIa, 'gIs':gIs,'fsa':fsa}
true_parameters = {'alpha':alpha, 'beta':beta, 'gIa':gIa, 'gIs':gIs,'fsa':fsa}
# use pyross stochastic to generate traj and save
sto_model = pyross.stochastic.SIR(parameters, M, Ni)
data = sto_model.simulate(S0, Ia0, Is0, contactMatrix, Tf, Nf, method='tau-leaping')
data_array = data['X']
np.save('SIR_sto_traj.npy', data_array)
fig = plt.figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.rcParams.update({'font.size': 22})
t = data['t']
plt.fill_between(t, 0, np.sum(data_array[:, :M], axis=1), alpha=0.3)
plt.plot(t, np.sum(data_array[:, :M], axis=1), '-', label='S', lw=2)
plt.fill_between(t, 0, np.sum(data_array[:, M:2*M], axis=1), alpha=0.3)
plt.plot(t, np.sum(data_array[:, M:2*M], axis=1), '-', label='Ia', lw=2)
plt.fill_between(t, 0, np.sum(data_array[:, 2*M:3*M], axis=1), alpha=0.3)
plt.plot(t, np.sum(data_array[:, 2*M:3*M], axis=1), '-', label='Is', lw=2)
plt.legend(fontsize=26)
plt.grid()
plt.xlabel(r'time')
plt.autoscale(enable=True, axis='x', tight=True)
###Output
_____no_output_____
###Markdown
2) InferenceWe take the first $20$ data points of the trajectories and use them to infer the parameters of the model.
###Code
# load the data and rescale to intensive variables
Tf_inference = 20 # truncate to only the first few data points
Nf_inference = Tf_inference+1
x = np.load('SIR_sto_traj.npy').astype('float')
x = (x)[:Nf_inference]
estimator = pyross.inference.SIR(parameters, M, Ni)
# Compare the deterministic trajectory and the stochastic trajectory with the same
# initial conditions and parameters
x0=x[0]
estimator.set_det_model(parameters)
estimator.set_contact_matrix(contactMatrix)
xm = estimator.integrate(x[0], 0, Tf_inference, Nf_inference)
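# Illustrative: a quick scalar summary of how far the stochastic data deviates from the
# deterministic mean-field trajectory over the inference window (assumes x and xm share
# the same shape and compartment ordering).
rel_dev = np.linalg.norm(x[:Nf_inference] - xm) / np.linalg.norm(xm)
print('relative deviation of data from deterministic trajectory:', rel_dev)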
fig = plt.figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.rcParams.update({'font.size': 22})
plt.plot(np.sum(xm[:, M:], axis=1), label='deterministic I')
plt.plot(np.sum(x[:Nf_inference, M:], axis=1), label='stochastic I')
plt.legend()
plt.show()
# compute -log_p for the original (correct) parameters
start_time = time.time()
parameters = {'alpha':alpha, 'beta':beta, 'gIa':gIa, 'gIs':gIs,'fsa':fsa}
# use faster ODE methods to speed up inference
estimator.set_lyapunov_method('euler')
logp = estimator.obtain_minus_log_p(parameters, x, Tf_inference, contactMatrix, tangent=False)
end_time = time.time()
print(logp)
print(end_time - start_time)
# compare to tangent space
start_time = time.time()
parameters = {'alpha':alpha, 'beta':beta, 'gIa':gIa, 'gIs':gIs,'fsa':fsa}
logp = estimator.obtain_minus_log_p(parameters, x, Tf_inference, contactMatrix, tangent=True)
end_time = time.time()
print(logp)
print(end_time - start_time)
# Define the prior (log normal prior around guess of parameter with defined std. deviation)
alpha_g = 0.25
beta_g = 0.04
gIa_g = 0.1
gIs_g = 0.1
# compute -log_p for the initial guess
parameters = {'alpha':alpha_g, 'beta':beta_g, 'gIa':gIa_g, 'gIs':gIs_g, 'fsa':fsa}
logp = estimator.obtain_minus_log_p(parameters, x, Tf_inference, contactMatrix)
print(logp)
# Set up priors
eps = 1e-4
priors = {
'alpha':{
'mean': alpha_g,
'std': 0.2,
'bounds': [eps, 0.8],
'prior_fun': 'truncnorm'
},
'beta':{
'mean': beta_g,
'std': 0.1,
'bounds': [eps, 0.2],
'prior_fun': 'lognorm'
},
'gIa':{
'mean': gIa_g,
'std': 0.2,
'bounds': [eps, 0.6]
},
'gIs':{
'mean': gIs_g,
'std': 0.2,
'bounds': [eps, 0.6]
}
}
# Stopping criterion for minimisation (relative change in function value)
ftol = 1e-6
start_time = time.time()
res = estimator.infer_parameters(x, Tf_inference, contactMatrix, priors, tangent=False,
global_max_iter=20, local_max_iter=400,
cma_population=32, global_atol=10,
ftol=ftol, verbose=True)
end_time = time.time()
print(res['map_dict']) # best guess
print(end_time - start_time)
# compute log_p for best estimate
start_time = time.time()
logp = estimator.obtain_minus_log_p(res['map_dict'], x, Tf_inference, contactMatrix)
end_time = time.time()
print(logp)
print(end_time - start_time)
print("True parameters:")
print(true_parameters)
print("\nInferred parameters:")
print(res['map_dict'])
print(res['flat_map'])
x = np.load('SIR_sto_traj.npy').astype('float')
Nf = x.shape[0]
Tf = Nf-1
# set the deterministic method to be solve_ivp for accurate integration over long time scale
estimator.set_det_model(res['map_dict'])
estimator.set_params(res['map_dict'])
x_det = estimator.integrate(x[Nf_inference], Nf_inference, Tf, Nf-Nf_inference)
t_inf = np.linspace(Nf_inference, Tf, Nf-Nf_inference)
fig = plt.figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.rcParams.update({'font.size': 22})
# plt.plot(np.sum(x_det[:, :M], axis=1), label='Inferred S')
# plt.plot(np.sum(x[:, :M], axis=1), label='True S')
plt.plot(t_inf, np.sum(x_det[:, M:2*M], axis=1), label='Inferred Ia')
plt.plot(np.sum(x[:, M:2*M], axis=1), label='True Ia')
plt.plot(t_inf, np.sum(x_det[:, 2*M:3*M], axis=1), label='Inferred Is')
plt.plot(np.sum(x[:, 2*M:3*M], axis=1), label='True Is')
plt.xlim([0, Tf])
plt.axvspan(0, Tf_inference,
label='Used for inference',
alpha=0.3, color='dodgerblue')
plt.legend()
plt.show()
eps = 1e-3
x = np.load('SIR_sto_traj.npy').astype('float')[:Nf_inference]
hess = estimator.hessian(x, Tf_inference, res, contactMatrix=contactMatrix, eps=eps, tangent=False,
fd_method="central")
cov = np.linalg.inv(hess)
print(cov)
v, w = np.linalg.eig(cov)
print(v)
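# Illustrative: rough 1-sigma error bars from the diagonal of the covariance matrix,
# assuming the rows/columns follow the order of the priors dict (alpha, beta, gIa, gIs)
# and that a Gaussian approximation around the MAP estimate is adequate.
param_stds = np.sqrt(np.diag(cov))
print(dict(zip(['alpha', 'beta', 'gIa', 'gIs'], param_stds)))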
###Output
[[ 2.38152583e-04 -7.75990575e-08 4.31932018e-04 -1.14960050e-04]
[-7.75990575e-08 5.31514895e-08 -1.50420118e-07 3.86970234e-08]
[ 4.31932018e-04 -1.50420118e-07 8.97543196e-04 -2.31197807e-04]
[-1.14960050e-04 3.86970234e-08 -2.31197807e-04 7.44430959e-05]]
[1.17197378e-03 2.44894533e-05 1.36756717e-05 5.31253490e-08]
###Markdown
From here onwards, still work in progress (need to update the Forecast module)
###Code
parameters = res['map_dict'].copy()
parameters['fsa'] = fsa
parameters['cov'] = cov
# Initialise pyross forecast module
model_forecast = pyross.forecast.SIR(parameters, M, Ni)
# Initial condition for forecast is last configuration from inference-trajectory
S0_forecast = x[Tf_inference,:M]
Ia0_forecast = x[Tf_inference,M:2*M]
Is0_forecast = x[Tf_inference,2*M:]
print(Ia0_forecast, Is0_forecast)
# Number of simulations over which we average, use 500
Ns = 500
Tf_forecast = Tf - Tf_inference
Nf_forecast = Tf_forecast+1
result_forecast = model_forecast.simulate(S0_forecast, Ia0_forecast, Is0_forecast,
contactMatrix, Tf_forecast, Nf_forecast,
verbose=True, method='deterministic',
Ns=Ns)
trajectories_forecast = result_forecast['X']
t_forecast = result_forecast['t'] + Tf_inference
fontsize=25
#
ylabel=r'Fraction of infectives'
#
# Plot total number of symptomatic infectives
cur_trajectories_forecast = trajectories_forecast[:,4] + trajectories_forecast[:,5]
cur_mean_forecast = np.mean( cur_trajectories_forecast, axis=0)
percentile = 10
percentiles_lower = np.percentile(cur_trajectories_forecast,percentile,axis=0)
percentiles_upper = np.percentile(cur_trajectories_forecast,100-percentile,axis=0)
percentiles_median = np.percentile(cur_trajectories_forecast,50,axis=0)
cur_trajectory_underlying = data_array[:,4] + data_array[:,5]
#
# Plot trajectories
#
fig, ax = plt.subplots(1,1,figsize=(10,8))
ax.axvspan(0, Tf_inference,
label='Range used for inference',
alpha=0.3, color='dodgerblue')
ax.set_title(r'Forecast with inferred parameters',
y=1.05,
fontsize=fontsize)
# for i,e in enumerate(cur_trajectories_forecast):
# ax.plot(t_forecast,e,
# alpha=0.15,
# )
ax.fill_between(t_forecast, percentiles_lower, percentiles_upper, color='darkorange', alpha=0.2)
ax.plot(cur_trajectory_underlying,
lw=3,
color='limegreen',
label='Trajectory used for inference')
ax.plot(t_forecast,percentiles_median,
alpha=1,ls='--',
color='orange',label='Median',
lw=3)
plt.legend()
plt.xlim([0, Tf])
plt.show()
###Output
_____no_output_____
###Markdown
Inference of parameters (SIR model)In this notebook, we consider the SIR model with symptomatically and asymptomatically infected. We are trying to infer the parameters of the model * $\alpha$ (fraction of asymptomatic infectives), * $\beta$ (probability of infection on contact), * $\gamma_{I_a}$ (rate of recovery for asymptomatic infected individuals), and* $\gamma_{I_s}$ (rate of recovery for symptomatic infected individuals) when given the full data (of classes S, Ia, Is) from a generated trajectory.
###Code
%%capture
## compile PyRoss for this notebook
import os
owd = os.getcwd()
os.chdir('../../')
%run setup.py install
os.chdir(owd)
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
import pyross
import time
###Output
_____no_output_____
###Markdown
1) Generate a trajectoryWe generate a test trajectory on a population with two age groups.
###Code
M = 2 # the population has two age groups
N = 5e4 # and this is the total population
# correct params
beta = 0.02 # infection rate
gIa = 1./7 # recovery rate of asymptomatic infectives
gIs = 1./7 # recovery rate of symptomatic infectives
alpha = 0.2 # fraction of asymptomatic infectives
fsa = 0.8 # the self-isolation parameter
# set the age structure
fi = np.array([0.25, 0.75]) # fraction of population in each age group
Ni = N*fi
# set the contact structure
C = np.array([[18., 9.], [3., 12.]])
# set up initial condition
Ia0 = np.array([10, 10]) # each age group has asymptomatic infectives
Is0 = np.array([10, 10]) # and also symptomatic infectives
R0 = np.array([0, 0]) # there are no recovered individuals initially
S0 = Ni - (Ia0 + Is0 + R0)
Tf = 100
Nf = Tf+1
def contactMatrix(t):
return C
parameters = {'alpha':alpha, 'beta':beta, 'gIa':gIa, 'gIs':gIs,'fsa':fsa}
true_parameters = {'alpha':alpha, 'beta':beta, 'gIa':gIa, 'gIs':gIs,'fsa':fsa}
# use pyross stochastic to generate traj and save
sto_model = pyross.stochastic.SIR(parameters, M, Ni)
data = sto_model.simulate(S0, Ia0, Is0, contactMatrix, Tf, Nf)
data_array = data['X']
np.save('SIR_sto_traj.npy', data_array)
plt.plot(data_array[:, 0], label='S')
plt.plot(data_array[:, M], label='Ia')
plt.plot(data_array[:, 2*M], label='Is')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
2) InferenceWe take the first $50$ data points of the trajectories and use them to infer the parameters of the model.
###Code
# load the data and rescale to intensive variables
Tf = 50 # truncate to only the first few data points
Nf = Tf+1
x = np.load('SIR_sto_traj.npy').astype('float')
x = (x/N)[:Nf]
steps = 101 # number of internal integration steps taken; must be an odd number
# initialise the estimator
estimator = pyross.inference.SIR(parameters, M, fi, int(N), steps)
# compute -log_p for the original (correct) parameters
start_time = time.time()
parameters = {'alpha':alpha, 'beta':beta, 'gIa':gIa, 'gIs':gIs,'fsa':fsa}
logp = estimator.obtain_minus_log_p(parameters, x, Tf, Nf, contactMatrix)
end_time = time.time()
print(logp)
print(end_time - start_time)
# Define the prior (Gamma prior around guess of parameter with defined std. deviation)
beta_g = 0.1
beta_std = 0.1
gIa_g = 0.14
gIa_std = 0.05
gIs_g = 0.2
gIs_std = 0.1
alpha_g = 0.3
alpha_std = 0.2
fsa_g = 0.8
fsa_std = 0.05
# compute -log_p for the initial guess
parameters = {'alpha':alpha_g, 'beta':beta_g, 'gIa':gIa_g, 'gIs':gIs_g, 'fsa':fsa_g}
logp = estimator.obtain_minus_log_p(parameters, x, Tf, Nf, contactMatrix)
print(logp)
# inference
eps = 1e-4 # step size for finite difference computation of Hessian
ftol = 1e-6 # Stopping criterion for minimisation (relative change in function value)
keys = ['alpha', 'beta', 'gIa', 'gIs', 'fsa']
guess = np.array([alpha_g, beta_g, gIa_g, gIs_g, fsa_g]) # Initial value (and expected value of priors)
bounds = np.array([(eps, 0.8), (eps, 0.2), (eps, 0.6), (eps, 0.6), (0.7, 0.9)]) # give some bounds
stds = np.array([alpha_std, beta_std , gIa_std, gIs_std, fsa_std])
start_time = time.time()
params = estimator.infer_parameters(keys, guess, stds, bounds, x, Tf, Nf, contactMatrix, global_max_iter=50,
local_max_iter=200, eps=eps, global_ftol_factor=1e2, ftol=ftol, verbose=True)
end_time = time.time()
print(params) # best guess
print(end_time - start_time)
# compute log_p for best estimate
start_time = time.time()
new_parameters = estimator.fill_params_dict(keys, params)
logp = estimator.obtain_minus_log_p(new_parameters, x, Tf, Nf, contactMatrix)
end_time = time.time()
print(logp)
print(end_time - start_time)
print("True parameters:")
print(true_parameters)
print("\nInferred parameters:")
print(new_parameters)
x = np.load('SIR_sto_traj.npy').astype('float')/N
Nf = x.shape[0]
Tf = Nf-1
det_model = pyross.deterministic.SIR(new_parameters, int(M), fi)
x_det = estimator.integrate(x[0], 0, Tf, Nf, det_model, contactMatrix)
plt.plot(np.sum(x_det[:, :M], axis=1), label='Inferred S')
plt.plot(np.sum(x[:, :M], axis=1), label='True S')
plt.plot(np.sum(x_det[:, M:2*M], axis=1), label='Inferred Ia')
plt.plot(np.sum(x[:, M:2*M], axis=1), label='True Ia')
plt.plot(np.sum(x_det[:, 2*M:3*M], axis=1), label='Inferred Is')
plt.plot(np.sum(x[:, 2*M:3*M], axis=1), label='True Is')
plt.axvspan(0, 50,
label='Used for inference',
alpha=0.3, color='dodgerblue')
plt.legend()
plt.show()
hess = estimator.compute_hessian(keys, params, guess, stds, x, Tf, Nf, contactMatrix)
np.linalg.inv(hess) # the covariance that can be passed into the forecast module for forecasting
###Output
_____no_output_____
|
analysis/case_study_enron.ipynb
|
###Markdown
Case Study - ENRONIn this notebook we highlight a particular dataset, namely the ENRON email dataset. Our aim is to showcase the new observables created for annotated hypergraphs and to highlight the effect of the null model.
###Code
import pandas as pd
import numpy as np
import networkx as nx
# import matplotlib.pyplot as plt
# %matplotlib inline
import ternary
import seaborn as sns
from ahyper import AnnotatedHypergraph
import matplotlib.pyplot as plt
A = AnnotatedHypergraph.from_incidence('enron',
relabel_roles=True,
add_metadata=True,
root='../data/')
A.assign_role_interaction_matrix(np.array([[0,1,0.25],[0,0,0],[0,0,0]]))
G = A.to_weighted_projection(use_networkx=True)
###Output
_____no_output_____
###Markdown
Features Graph
###Code
# TO DO: Create a snapshot of the network where nodes are pie charts of their roles.
import graph_tool
###Output
_____no_output_____
###Markdown
Node Role Density and Neighbourhood Role Density
###Code
# TODO: Ternary plot? Local density v null models, local density v node role participation
# TODO: Calculate distance between node role and local neighbourhood?
# !pip install python-ternary --user --quiet
# !pip install seaborn --user --quiet
from ahyper.observables import local_role_density, node_role_participation
local_role_den = pd.DataFrame(local_role_density(A)).T
node_role_par = pd.DataFrame(node_role_participation(A)).T
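# A simple follow-up to the TODO above (illustrative, not part of the ahyper API):
# per-node total variation distance between a node's own role distribution and the
# role distribution of its neighbourhood; pandas aligns the two frames on index/columns.
role_distance = 0.5 * (local_role_den - node_role_par).abs().sum(axis=1)
role_distance.sort_values(ascending=False).head()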
###Output
_____no_output_____
###Markdown
FIGURE 1A
###Code
fig, tax = ternary.figure(scale=1)
fig.set_size_inches(10, 9)
tax.scatter(local_role_den.values, marker='o', color='C0', label="Local Role Density")
tax.scatter(node_role_par.values, marker='o', color='C1', label="Node Role Density")
tax.boundary(linewidth=2.0)
tax.gridlines(multiple=0.2, color="blue")
tax.ticks(ticks=[0,0.2,0.4,0.6,0.8,1], axis='lbr', linewidth=1,
tick_formats='%.1f',
fontsize=24,
offset=0.02)
tax.left_axis_label("TO", offset=0.15, fontsize=24)
tax.right_axis_label("FROM", offset=0.15, fontsize=24)
tax.bottom_axis_label("CC", offset=0.15, fontsize=24)
fontsize = 16
offset = 0.15
tax.legend(fontsize=fontsize)
tax.boundary(linewidth=1)
tax.gridlines(multiple=0.1, color="gray")
tax.get_axes().axis('off')
tax.ax.axis('off');
tax._redraw_labels()
fig.savefig('../fig/roles_local_v_node.pdf', bbox_inches='tight')
lines = list(zip(local_role_den.index, local_role_den.values, node_role_par.values))
def size(line):
index,x,y = line
return ((x-y)**2).sum()
lines.sort(key=lambda x: size(x), reverse=True)
names = pd.read_csv('../data/enron/enron_jobs.csv', index_col='node_id')
# [names.loc[x] for x,_,_ in lines[:5]]
ids = names.query('job=="CEO"').index
focus = [l for l in lines if l[0] in ids]
def format_name(string):
return ' '.join(string.split('.')).title()
name_annotations = [(f[0],f[2],format_name(names['name'][f[0]])) for f in focus]
###Output
_____no_output_____
###Markdown
FIGURE 1B
###Code
fig, tax = ternary.figure(scale=1)
fig.set_size_inches(10, 9)
fontsize = 16
offset = 0.15
first=True
for line in focus:
tax.plot(line[1:], color='k', alpha=0.25)
lr = tax.scatter([line[1]], marker='o', color='C0', label="Local Role Density")
nr = tax.scatter([line[2]], marker='o', color='C1', label="Node Role Density")
if first:
tax.legend(fontsize=fontsize)
first=False
for ix,point,name in name_annotations:
tax.annotate(name, position=point + np.array([-0.04, 0.04, 0.00]), fontsize=fontsize)
tax.boundary(linewidth=2.0)
tax.gridlines(multiple=0.2, color="blue")
tax.ticks(ticks=[0,0.2,0.4,0.6,0.8,1], axis='lbr', linewidth=1,
tick_formats='%.1f',
fontsize=24,
offset=0.02)
tax.left_axis_label("TO", offset=0.15, fontsize=24)
tax.right_axis_label("FROM", offset=0.15, fontsize=24)
tax.bottom_axis_label("CC", offset=0.15, fontsize=24)
tax.boundary(linewidth=1)
tax.gridlines(multiple=0.1, color="gray")
tax.get_axes().axis('off')
tax.ax.axis('off');
tax._redraw_labels()
fig.savefig('../fig/roles_assortativity.pdf', bbox_inches='tight')
A._degeneracy_avoiding_MCMC(n_steps=len(A.IL)*100, role_labels=True)
local_role_den_rp = pd.DataFrame(local_role_density(A)).T
A._degeneracy_avoiding_MCMC(n_steps=len(A.IL)*100, role_labels=False)
local_role_den_rd = pd.DataFrame(local_role_density(A)).T
###Output
_____no_output_____
###Markdown
Figure 1C
###Code
fig, tax = ternary.figure(scale=1)
fig.set_size_inches(10, 9)
tax.scatter(local_role_den.values, marker='o', color='black', label="Local Role Density")
tax.scatter(local_role_den_rp.values, marker='o', color='C0', label="Role-preserving")
tax.scatter(local_role_den_rd.values, marker='o', color='C1', label="Non-role-preserving")
tax.boundary(linewidth=2.0)
tax.gridlines(multiple=0.2, color="blue")
tax.ticks(ticks=[0,0.2,0.4,0.6,0.8,1], axis='lbr', linewidth=1,
tick_formats='%.1f',
fontsize=24,
offset=0.02)
tax.left_axis_label("TO", offset=0.15, fontsize=24)
tax.right_axis_label("FROM", offset=0.15, fontsize=24)
tax.bottom_axis_label("CC", offset=0.15, fontsize=24)
fontsize = 16
offset = 0.15
tax.legend(fontsize=fontsize)
tax.boundary(linewidth=1)
tax.gridlines(multiple=0.1, color="gray")
tax.get_axes().axis('off')
tax.ax.axis('off');
tax._redraw_labels()
fig.savefig('../fig/roles_null_model.pdf', bbox_inches='tight')
###Output
_____no_output_____
###Markdown
Role Assortativity
###Code
def read_results(path):
"""Read results from a directory."""
original = pd.read_csv(f'{path}/original.csv', index_col=0, header=None)[1]
role_preserving_ensemble = pd.read_csv(f'{path}/role_preserving_ensemble.csv', index_col=False, header=0)
role_destroying_ensemble = pd.read_csv(f'{path}/role_destroying_ensemble.csv', index_col=False, header=0)
return original, role_preserving_ensemble, role_destroying_ensemble
original, preserving_ensemble, destroying_ensemble = read_results('../results/enron_assort/')
results = read_results('../results/enron_assort')
assort_features = [f for f in original.index if 'assortativity' in f]
og = original[assort_features]
og = pd.DataFrame(og).T
pe = preserving_ensemble[assort_features]
de = destroying_ensemble[assort_features]
combined = pd.concat([og,pe,de],
keys=['Original','Role-preserving','Non-role-preserving'],
names=['ensemble','sample'])
combined = combined.stack()
combined.index.names = ['ensemble','sample','feature']
combined = combined.reset_index()
combined.feature = combined.feature.apply(lambda x: '-'.join(x.split('_')[1:]))
combined[1] = ''
combined = combined.query('feature in ["from-to","from-cc","to-to","to-cc","cc-cc"]')
with plt.style.context("seaborn-whitegrid", {'font.size':14}):
fontsize=14
sns.catplot(data=combined,
kind='box',
hue='ensemble',
x='feature',
y=0,
linewidth=1.5,
fliersize=2,
notch=False,
height=4,
aspect=2,
legend_out=False,
)
fig = plt.gcf()
ax = plt.gca()
# AXIS LABELS
# ax.set_xlabel('Role Pair');
ax.set_xlabel(None);
ax.set_ylabel('Assortativity', fontsize=fontsize);
# XTICKLABELS
z = [x.get_text() for x in list(ax.get_xticklabels())]
z = [x.replace('-','/').upper() for x in z]
ax.set_xticklabels(z, fontsize=fontsize);
ax.set_yticklabels(ax.get_yticklabels(),fontsize=fontsize)
# LEGEND
ax.legend(fontsize=fontsize);
fig.savefig('../fig/enron_assortativity.pdf', bbox_inches='tight')
# Alternate formulation with individual axes.
sns.catplot(data=combined,
kind='box',
hue='ensemble',
x=1,
y=0,
col='feature',
linewidth=2,
fliersize=2,
notch=False,
height=4,
aspect=0.4,
legend_out=False,
)
###Output
_____no_output_____
|
notebooks/run4_gen_text-tied-poetry-TEMPLATE_v0.ipynb
|
###Markdown
https://arxiv.org/pdf/1810.04805.pdf
###Code
import os
os.sys.path.append('..')
%matplotlib inline
%reload_ext autoreload
%autoreload 2
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import logging
import json
import math
import os
import random
import six
from tqdm import tqdm_notebook as tqdm
from IPython.display import HTML, display
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
import tokenization
from modeling import BertConfig, BertForMaskedLanguageModelling
from optimization import BERTAdam
from masked_language_model import notqdm, convert_tokens_to_features, LMProcessor, predict_masked_words, predict_next_words, improve_words_recursive
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
###Output
_____no_output_____
###Markdown
Args
###Code
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--vocab_file",
default=None,
type=str,
required=True,
help="The vocabulary file that the BERT model was trained on.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints will be written.")
## Other parameters
parser.add_argument("--init_checkpoint",
default=None,
type=str,
help="Initial checkpoint (usually from a pre-trained BERT model).")
parser.add_argument("--do_lower_case",
default=False,
action='store_true',
help="Whether to lower case the input text. True for uncased models, False for cased models.")
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
default=False,
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
default=False,
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumualte before performing a backward/update pass.")
experiment_name = 'poetry_uncased_5_tied_mlm'
argv = """
--task_name lm \
--data_dir {DATA_DIR} \
--vocab_file {BERT_BASE_DIR}/vocab.txt \
--bert_config_file {BERT_BASE_DIR}/bert_config.json \
--init_checkpoint {BERT_BASE_DIR}/pytorch_model.bin \
--do_train \
--do_eval \
--gradient_accumulation_steps 2 \
--train_batch_size 16 \
--learning_rate 3e-5 \
--num_train_epochs 3.0 \
--max_seq_length 128 \
--output_dir ../outputs/{name}/
""".format(
BERT_BASE_DIR='../data/weights/cased_L-12_H-768_A-12',
DATA_DIR='../data/input/poetry_gutenberg',
name=experiment_name
).replace('\n', '').split(' ')
print(argv)
args = parser.parse_args(argv)
###Output
['--task_name', 'lm', '--data_dir', '../data/input/poetry_gutenberg', '--vocab_file', '../data/weights/cased_L-12_H-768_A-12/vocab.txt', '--bert_config_file', '../data/weights/cased_L-12_H-768_A-12/bert_config.json', '--init_checkpoint', '../data/weights/cased_L-12_H-768_A-12/pytorch_model.bin', '--do_train', '--do_eval', '--gradient_accumulation_steps', '2', '--train_batch_size', '16', '--learning_rate', '3e-5', '--num_train_epochs', '3.0', '--max_seq_length', '128', '--output_dir', '../outputs/poetry_uncased_5_tied_mlm/']
###Markdown
Init
###Code
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device %s n_gpu %d distributed training %r", device, n_gpu, bool(args.local_rank != -1))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
bert_config = BertConfig.from_json_file(args.bert_config_file)
if args.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length {} because the BERT model was only trained up to sequence length {}".format(
args.max_seq_length, bert_config.max_position_embeddings))
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
print("Output directory ({}) already exists and is not empty.".format(args.output_dir))
os.makedirs(args.output_dir, exist_ok=True)
save_path = os.path.join(args.output_dir, 'state_dict.pkl')
save_path
###Output
Output directory (../outputs/poetry_uncased_5_tied_mlm/) already exists and is not empty.
###Markdown
Load data
###Code
tokenizer = tokenization.FullTokenizer(
vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)
decoder = {v:k for k,v in tokenizer.wordpiece_tokenizer.vocab.items()}
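# Illustrative sanity check of the tokenizer/decoder round trip, assuming the standard
# BERT FullTokenizer interface (tokenize / convert_tokens_to_ids) from the local
# tokenization module.
example_tokens = tokenizer.tokenize("Roses are red, violets are blue")
example_ids = tokenizer.convert_tokens_to_ids(example_tokens)
print(example_tokens)
print([decoder[i] for i in example_ids])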
processors = {
"lm": LMProcessor,
}
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name](tokenizer=tokenizer)
label_list = processor.get_labels()
###Output
_____no_output_____
###Markdown
Load model
###Code
model = BertForMaskedLanguageModelling(bert_config)
if args.init_checkpoint is not None:
model.bert.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu'))
if os.path.isfile(save_path):
model.load_state_dict(torch.load(save_path, map_location='cpu'))
model.to(device)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
model
###Output
_____no_output_____
###Markdown
Generate here
###Code
text="""
"Roses are red, my love
Violets are blue
Sugar is sweet, my love
But not as sweet as you"
We dated through high school
And when the big day came
I wrote into your book
Next to my name
"Roses are red, my love
Violets are blue
Sugar is sweet, my love
But not as sweet as you" (as sweet as you)
Then I went far away
And you found someone new
I read your letter, dear
And I wrote back to you
"Roses are red, my love
Violets are blue
Sugar is sweet, my love
Good luck, may god bless you" (may god bless you)
Is that your little girl?
She looks a lot like you
Someday some boy will write
In her book, too
"Roses are red, my love
Violets are blue
Sugar is sweet, my love
But not as sweet as you" (roses are red)
"""
# (blue changes are removed words, red are added words, where the darkness is the model's confidence)
poem=improve_words_recursive(
text,
processor,
tokenizer,
model,
device=device,
max_seq_length=150, # how long the block of text is; the model works best on 150 words. Best not to change this
debug=1, # how often to print the blue and red debug text
# You can try tweaking these to get more or less stability
ITERATIVE_MASK_FRAC=0.06, # fraction of words to replace each iteration
iterations=60, # At each iteration, replaces some (ITERATIVE_MASK_FRAC) words. Too low and the original words remain. 10-100 seem ok.
T=1, # Temperature - Higher gives more random, but less stable output. Default=1, ranges 0.1-inf.
)
poem
###Output
_____no_output_____
|
docs/notebooks/api/build_integration.ipynb
|
###Markdown
Building Nodes and Weights for Integration Example
###Code
import pyblp
pyblp.__version__
###Output
_____no_output_____
###Markdown
In this example, we'll build nodes and weights for integration over agent choice probabilities according to a :class:`Integration` configuration. We'll construct a sparse grid of nodes and weights according to a level-5 Gauss-Hermite quadrature rule.
###Code
integration = pyblp.Integration('grid', 5)
integration
###Output
_____no_output_____
###Markdown
Usually, this configuration should be passed directly to :class:`Problem`, which will create a sparse grid of dimension $K_2$, the number of demand-side nonlinear product characteristics. Alternatively, we can build the sparse grid ourselves and pass the constructed agent data to :class:`Problem`, possibly after modifying the nodes and weights. If we want to allow agents to have heterogeneous tastes over 2 product characteristics, we'll need a grid of dimension 2.
###Code
agent_data = pyblp.build_integration(integration, 2)
agent_data.nodes.shape
agent_data.weights.shape
###Output
_____no_output_____
|
Pandas_Text_Analysis/2_Clean_Text/2_String_Methods.ipynb
|
###Markdown
Python String Formatting Methods
###Code
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
String Methods Count
###Code
s = pd.Series(['a', 'b', 'c', 'a', None, 'b', 'aa', 'c'])
s
# Count of string
s.str.count('a')
# dropping null values
s.dropna().str.count("a") # Note float changes to int
###Output
_____no_output_____
###Markdown
This changes with pandas 1.0, where the 'object' dtype can be replaced by the dedicated 'string' dtype; there, both outputs use the nullable Int64 dtype.
###Code
s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'],
dtype="object")
s
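# Illustrative demo of the point above: with the dedicated "string" dtype
# (pandas >= 1.0), str.count keeps missing values as <NA> and returns the
# nullable Int64 dtype instead of switching between float64 and int64.
s_string = pd.Series(['a', 'b', None, 'aa'], dtype="string")
s_string.str.count("a")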
#Lower case
s.str.lower()
#Upper
s.str.upper()
#Length
s.str.len()
###Output
_____no_output_____
###Markdown
Working with Dataframes
###Code
df = pd.DataFrame(np.random.randn(3, 2),
columns=[' Column A ', ' Column B '], index=range(3))
df
# Dataframe Column
df.columns.str.strip()
df.columns.str.lower()
#Renaming columns
df.columns = df.columns.str.strip().str.lower().str.replace(' ', '_')
df
###Output
_____no_output_____
###Markdown
Splitting and replacing strings
###Code
s2 = pd.Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
s2
#Splitting on '_'
s2.str.split('_')
###Output
_____no_output_____
###Markdown
Elements in the split lists can be accessed using get or [] notation: Get method
###Code
# selecting elements at index 1
s2.str.split('_').str.get(1)
# selecting elements at index 2
s2.str.split('_').str[2]
###Output
_____no_output_____
###Markdown
Expand It is easy to expand this to return a DataFrame using expand.
###Code
s2.str.split('_', expand = True)
###Output
_____no_output_____
###Markdown
It is also possible to limit the number of splits:
###Code
s2.str.split('_', expand = True, n = 1)
###Output
_____no_output_____
###Markdown
Rsplit rsplit is similar to split except it works in the reverse direction, i.e., from the end of the string to the beginning of the string:
###Code
s2.str.rsplit('_', expand=True, n=1)
###Output
_____no_output_____
###Markdown
Replace replace by default replaces regular expressions:
###Code
s3 = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca','', np.nan, 'CABA', 'dog', 'cat'])
s3
#replace all 'a' or 'dog' by 'XX-XX '
s3.str.replace('a|dog', 'XX-XX ', case=False)
###Output
_____no_output_____
###Markdown
If you do want literal replacement of a string (equivalent to str.replace()), you can set the optional regex parameter to False
###Code
s3.str.replace('a|dog', 'XX-XX ', case=False, regex = False)
###Output
_____no_output_____
###Markdown
Concatenation Concatenating a single Series into a string
###Code
s = pd.Series(['a', 'b', 'c', 'd'])
s
###Output
_____no_output_____
###Markdown
Cat method
###Code
s.str.cat(sep='')
s.str.cat(sep=',')
###Output
_____no_output_____
###Markdown
By default, missing values are ignored. Using na_rep, they can be given a representation:
###Code
t = pd.Series(['a', 'b', np.nan, 'd'])
t
t.str.cat(sep=',')
t.str.cat(sep = '',na_rep= '-' )
###Output
_____no_output_____
###Markdown
Concatenating a Series and something list-like into a Series
###Code
s
s.str.cat(['A', 'B', 'C', 'D'])
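# Illustrative: when the other operand has missing values, na_rep is used to fill
# them during element-wise concatenation (s and t were defined above).
s.str.cat(t, sep='-', na_rep='?')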
###Output
_____no_output_____
###Markdown
Indexing with .str You can use [] notation to directly index by position locations. If you index past the end of the string, the result will be a NaN.
###Code
s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan,
'CABA', 'dog', 'cat'])
s
s.str[0]
s.str[1]
###Output
_____no_output_____
###Markdown
Extracting substrings
###Code
#DataFrame is returned expand = True
pd.Series(['a1', 'b2', 'c3']).str.extract(r'[ab](\d)', expand=True)
# series is returned expand = False
pd.Series(['a1', 'b2', 'c3']).str.extract(r'[ab](\d)', expand=False)
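# Illustrative: named capture groups become the column names of the extracted DataFrame
pd.Series(['a1', 'b2', 'c3']).str.extract(r'(?P<letter>[ab])(?P<digit>\d)', expand=True)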
###Output
_____no_output_____
###Markdown
Testing for Strings that match or contain a pattern
###Code
pattern = r'[0-9][a-z]'
pd.Series(['1', '2', '3a', '3b', '03c']).str.contains(pattern)
pd.Series(['1', '2', '3a', '3b', '03c']).str.match(pattern)
###Output
_____no_output_____
###Markdown
The distinction between match and contains is strictness: match relies on strict re.match, while contains relies on re.search. Methods like match, contains, startswith, and endswith take an extra na argument so missing values can be considered True or False:
###Code
s4 = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'])
s4
s4.str.contains('A', na=False)
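# Illustrative: startswith/endswith accept the same na argument for missing values
s4.str.startswith('A', na=False)
s4.str.endswith('a', na=False)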
###Output
_____no_output_____
###Markdown
Creating indicator variables You can extract dummy variables from string columns. For example if they are separated by a '|':
###Code
s = pd.Series(['a', 'a|b', np.nan, 'a|c'])
s
s.str.get_dummies(sep= '|')
###Output
_____no_output_____
|
data/22-Logistic-Regression-in-sklearn.ipynb
|
###Markdown
Using Logistic Regression in scikit-learn
###Code
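# NOTE: this notebook assumes X, y and a train/test split were created in an earlier
# (omitted) cell. Below is a minimal, assumed setup so the following cells run:
# two features with a noisy quadratic decision boundary (hypothetical data).
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split

np.random.seed(666)
X = np.random.normal(0, 1, size=(200, 2))
y = np.array(X[:, 0] ** 2 + X[:, 1] < 1.5, dtype='int')
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=666)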
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(X_train, y_train)
log_reg.score(X_train, y_train)
log_reg.score(X_test, y_test)
def plot_descision_boundary(model, axis):
x0, x1 = np.meshgrid(
np.linspace(axis[0], axis[1], int((axis[1] - axis[0]) * 100)).reshape(-1, 1),
np.linspace(axis[2], axis[3], int((axis[3] - axis[2]) * 100)).reshape(-1, 1),
)
X_new = np.c_[x0.ravel(), x1.ravel()]
y_predict = model.predict(X_new)
zz = y_predict.reshape(x0.shape)
from matplotlib.colors import ListedColormap
custom_cmap = ListedColormap(['#EF9A9A', '#FFF59D', '#90CAF9'])
plt.contourf(x0, x1, zz, 5, cmap=custom_cmap)
plot_descision_boundary(log_reg, axis=[-4, 4, -4, 4])
plt.scatter(X[y==0, 0], X[y==0, 1])
plt.scatter(X[y==1, 0], X[y==1, 1])
plt.show()
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
def PolynomialLogisticRegression(degree):
return Pipeline([
('poly', PolynomialFeatures(degree=degree)),
('std_scaler', StandardScaler()),
('log_reg', LogisticRegression())
])
poly_log_reg = PolynomialLogisticRegression(degree=2)
poly_log_reg.fit(X, y)
poly_log_reg.score(X_train, y_train)
poly_log_reg.score(X_test, y_test)
plot_descision_boundary(poly_log_reg, axis=[-4, 4, -4, 4])
plt.scatter(X[y==0, 0], X[y==0, 1])
plt.scatter(X[y==1, 0], X[y==1, 1])
plt.show()
###Output
_____no_output_____
|
One-Shot-Learning/One Shot Learning.ipynb
|
###Markdown
The code is better explained in this blog post by Soren Bouma: https://sorenbouma.github.io/blog/oneshot/
###Code
from keras.layers import Input, Conv2D, Lambda, merge, Dense, Flatten,MaxPooling2D
from keras.models import Model, Sequential
from keras.regularizers import l2
from keras import backend as K
from keras.optimizers import SGD,Adam
from keras.losses import binary_crossentropy
import numpy.random as rng
import numpy as np
import os
import pickle
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.utils import shuffle
%matplotlib inline
def W_init(shape,name=None):
"""Initialize weights as in paper"""
values = rng.normal(loc=0,scale=1e-2,size=shape)
return K.variable(values,name=name)
#//TODO: figure out how to initialize layer biases in keras.
def b_init(shape,name=None):
"""Initialize bias as in paper"""
values=rng.normal(loc=0.5,scale=1e-2,size=shape)
return K.variable(values,name=name)
input_shape = (105, 105, 1)
left_input = Input(input_shape)
right_input = Input(input_shape)
#build convnet to use in each siamese 'leg'
convnet = Sequential()
convnet.add(Conv2D(64,(10,10),activation='relu',input_shape=input_shape,
kernel_initializer=W_init,kernel_regularizer=l2(2e-4)))
convnet.add(MaxPooling2D())
convnet.add(Conv2D(128,(7,7),activation='relu',
kernel_regularizer=l2(2e-4),kernel_initializer=W_init,bias_initializer=b_init))
convnet.add(MaxPooling2D())
convnet.add(Conv2D(128,(4,4),activation='relu',kernel_initializer=W_init,kernel_regularizer=l2(2e-4),bias_initializer=b_init))
convnet.add(MaxPooling2D())
convnet.add(Conv2D(256,(4,4),activation='relu',kernel_initializer=W_init,kernel_regularizer=l2(2e-4),bias_initializer=b_init))
convnet.add(Flatten())
convnet.add(Dense(4096,activation="sigmoid",kernel_regularizer=l2(1e-3),kernel_initializer=W_init,bias_initializer=b_init))
#call the convnet Sequential model on each of the input tensors so params will be shared
encoded_l = convnet(left_input)
encoded_r = convnet(right_input)
#layer to merge two encoded inputs with the l1 distance between them
L1_layer = Lambda(lambda tensors:K.abs(tensors[0] - tensors[1]))
#call this layer on list of two input tensors.
L1_distance = L1_layer([encoded_l, encoded_r])
prediction = Dense(1,activation='sigmoid',bias_initializer=b_init)(L1_distance)
siamese_net = Model(inputs=[left_input,right_input],outputs=prediction)
optimizer = Adam(0.00006)
#//TODO: get layerwise learning rates and momentum annealing scheme described in paperworking
siamese_net.compile(loss="binary_crossentropy",optimizer=optimizer)
siamese_net.count_params()
###Output
Using TensorFlow backend.
###Markdown
Data The data is pickled as an N_classes x n_examples x width x height array, and there is an accompanying dictionary to specify which indexes belong to which languages.
###Code
PATH = "" #CHANGE THIS - path where the pickled data is stored
with open(os.path.join(PATH,"train.pickle"), "rb") as f:
(X,c) = pickle.load(f)
with open(os.path.join(PATH, "val.pickle"), "rb") as f:
(Xval,cval) = pickle.load(f)
print("training alphabets")
print(c.keys())
print("validation alphabets:")
print(cval.keys())
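# Illustrative sanity check of the pickled layout described above: X is assumed to have
# shape (n_classes, n_examples, 105, 105) and c to map each alphabet to its range of
# class indices (this is how X and c are used by the loader below).
print(X.shape)
print(list(c.items())[:3])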
class Siamese_Loader:
"""For loading batches and testing tasks to a siamese net"""
def __init__(self, path, data_subsets = ["train", "val"]):
self.data = {}
self.categories = {}
self.info = {}
for name in data_subsets:
file_path = os.path.join(path, name + ".pickle")
print("loading data from {}".format(file_path))
with open(file_path,"rb") as f:
(X,c) = pickle.load(f)
self.data[name] = X
self.categories[name] = c
def get_batch(self,batch_size,s="train"):
"""Create batch of n pairs, half same class, half different class"""
X=self.data[s]
n_classes, n_examples, w, h = X.shape
#randomly sample several classes to use in the batch
categories = rng.choice(n_classes,size=(batch_size,),replace=False)
#initialize 2 empty arrays for the input image batch
pairs=[np.zeros((batch_size, h, w,1)) for i in range(2)]
#initialize vector for the targets, and make one half of it '1's, so 2nd half of batch has same class
targets=np.zeros((batch_size,))
targets[batch_size//2:] = 1
for i in range(batch_size):
category = categories[i]
idx_1 = rng.randint(0, n_examples)
pairs[0][i,:,:,:] = X[category, idx_1].reshape(w, h, 1)
idx_2 = rng.randint(0, n_examples)
#pick images of same class for 1st half, different for 2nd
if i >= batch_size // 2:
category_2 = category
else:
#add a random number to the category modulo n classes to ensure 2nd image has
# ..different category
category_2 = (category + rng.randint(1,n_classes)) % n_classes
pairs[1][i,:,:,:] = X[category_2,idx_2].reshape(w, h,1)
return pairs, targets
def generate(self, batch_size, s="train"):
"""a generator for batches, so model.fit_generator can be used. """
while True:
pairs, targets = self.get_batch(batch_size,s)
yield (pairs, targets)
def make_oneshot_task(self,N,s="val",language=None):
"""Create pairs of test image, support set for testing N way one-shot learning. """
X=self.data[s]
n_classes, n_examples, w, h = X.shape
indices = rng.randint(0,n_examples,size=(N,))
if language is not None:
low, high = self.categories[s][language]
if N > high - low:
raise ValueError("This language ({}) has less than {} letters".format(language, N))
categories = rng.choice(range(low,high),size=(N,),replace=False)
else:#if no language specified just pick a bunch of random letters
categories = rng.choice(range(n_classes),size=(N,),replace=False)
true_category = categories[0]
ex1, ex2 = rng.choice(n_examples,replace=False,size=(2,))
test_image = np.asarray([X[true_category,ex1,:,:]]*N).reshape(N, w, h,1)
support_set = X[categories,indices,:,:]
support_set[0,:,:] = X[true_category,ex2]
support_set = support_set.reshape(N, w, h,1)
targets = np.zeros((N,))
targets[0] = 1
targets, test_image, support_set = shuffle(targets, test_image, support_set)
pairs = [test_image,support_set]
return pairs, targets
def test_oneshot(self,model,N,k,s="val",verbose=0):
"""Test average N way oneshot learning accuracy of a siamese neural net over k one-shot tasks"""
n_correct = 0
if verbose:
print("Evaluating model on {} random {} way one-shot learning tasks ...".format(k,N))
for i in range(k):
inputs, targets = self.make_oneshot_task(N,s)
probs = model.predict(inputs)
if np.argmax(probs) == np.argmax(targets):
n_correct+=1
percent_correct = (100.0*n_correct / k)
if verbose:
print("Got an average of {}% {} way one-shot learning accuracy".format(percent_correct,N))
return percent_correct
def train(self, model, epochs, verbosity):
model.fit_generator(self.generate(batch_size),
)
#Instantiate the class
loader = Siamese_Loader(PATH)
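# Illustrative: draw a single training batch and inspect its structure
# (pairs is a list of two image arrays; targets marks same (1) vs different (0) class)
demo_pairs, demo_targets = loader.get_batch(8)
print(demo_pairs[0].shape, demo_pairs[1].shape)
print(demo_targets)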
def concat_images(X):
"""Concatenates a bunch of images into a big matrix for plotting purposes."""
nc,h,w,_ = X.shape
X = X.reshape(nc,h,w)
n = np.ceil(np.sqrt(nc)).astype("int8")
img = np.zeros((n*w,n*h))
x = 0
y = 0
for example in range(nc):
img[x*w:(x+1)*w,y*h:(y+1)*h] = X[example]
y += 1
if y >= n:
y = 0
x += 1
return img
def plot_oneshot_task(pairs):
"""Takes a one-shot task given to a siamese net and """
fig,(ax1,ax2) = plt.subplots(2)
ax1.matshow(pairs[0][0].reshape(105,105),cmap='gray')
img = concat_images(pairs[1])
ax1.get_yaxis().set_visible(False)
ax1.get_xaxis().set_visible(False)
ax2.matshow(img,cmap='gray')
plt.xticks([])
plt.yticks([])
plt.show()
#example of a one-shot learning task
pairs, targets = loader.make_oneshot_task(20,"train","Japanese_(katakana)")
plot_oneshot_task(pairs)
#Training loop
print("!")
evaluate_every = 1 # interval for evaluating on one-shot tasks
loss_every=50 # interval for printing loss (iterations)
batch_size = 32
n_iter = 2
N_way = 20 # how many classes for testing one-shot tasks
n_val = 250 # how many one-shot tasks to validate on?
best = -1
weights_path = os.path.join(PATH, "weights")
print("training")
for i in range(1, n_iter):
(inputs,targets)=loader.get_batch(batch_size)
loss=siamese_net.train_on_batch(inputs,targets)
print(loss)
if i % evaluate_every == 0:
print("evaluating")
val_acc = loader.test_oneshot(siamese_net,N_way,n_val,verbose=True)
if val_acc >= best:
print("saving")
siamese_net.save(weights_path)
best=val_acc
if i % loss_every == 0:
print("iteration {}, training loss: {:.2f},".format(i,loss))
def nearest_neighbour_correct(pairs,targets):
"""returns 1 if nearest neighbour gets the correct answer for a one-shot task
given by (pairs, targets)"""
L2_distances = np.zeros_like(targets)
for i in range(len(targets)):
L2_distances[i] = np.sqrt(np.sum((pairs[0][i] - pairs[1][i])**2))  # Euclidean (L2) distance between the two images
if np.argmin(L2_distances) == np.argmax(targets):
return 1
return 0
def test_nn_accuracy(N_ways,n_trials,loader):
"""Returns accuracy of one shot """
print("Evaluating nearest neighbour on {} unique {} way one-shot learning tasks ...".format(n_trials,N_ways))
n_right = 0
for i in range(n_trials):
pairs,targets = loader.make_oneshot_task(N_ways,"val")
correct = nearest_neighbour_correct(pairs,targets)
n_right += correct
return 100.0 * n_right / n_trials
ways = np.arange(1, 9, 2)
resume = False
val_accs, train_accs,nn_accs = [], [], []
trials = 450
for N in ways:
val_accs.append(loader.test_oneshot(siamese_net, N,trials, "val", verbose=True))
train_accs.append(loader.test_oneshot(siamese_net, N,trials, "train", verbose=True))
nn_accs.append(test_nn_accuracy(N,trials, loader))
#plot the accuracy vs num categories for each
plt.plot(ways, val_accs, "m")
plt.plot(ways, train_accs, "y")
plt.plot(ways, nn_accs, "c")
plt.plot(ways,100.0/ways,"r")
plt.show()
fig,ax = plt.subplots(1)
ax.plot(ways,val_accs,"m",label="Siamese(val set)")
ax.plot(ways,train_accs,"y",label="Siamese(train set)")
plt.plot(ways,nn_accs,label="Nearest neighbour")
ax.plot(ways,100.0/ways,"g",label="Random guessing")
plt.xlabel("Number of possible classes in one-shot tasks")
plt.ylabel("% Accuracy")
plt.title("Omiglot One-Shot Learning Performance of a Siamese Network")
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
inputs,targets = loader.make_oneshot_task(20,"val")
plt.show()
print(inputs[0].shape)
plot_oneshot_task(inputs)
p=siamese_net.predict(inputs)
print(p)
a=test_nn_accuracy(3,500,loader)
print(a)
###Output
Evaluating nearest neighbour on 500 unique 3 way one-shot learning tasks ...
60.4
|
ml/testing-debugging/testing-debugging-classification.ipynb
|
###Markdown
Copyright 2018 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Case Study: Debugging in Classification This Colab quickly demonstrates a few concepts related to debugging classification models. You will explore potential problems in implementing these tasks:* Calculating loss for classification problems.* Optimizing your model* Applying regularization.* Following best practices in development and debugging.Please **make a copy** of this Colab before running it. Click on *File*, and then click on *Save a copy in Drive*. Load MNIST Data MNIST is a dataset of images of the numbers 0 to 9. The problem is to classify the images as numbers. Setup libraries and load the MNIST dataset. Display the first few rows to verify that the data loaded. You'll explore the data format after the data loads.
###Code
# Reset environment for a new run
% reset -f
# Load Libraries
from os.path import join # for joining file pathnames
import pandas as pd
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
import unittest
import sys
# Set Pandas display options
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
# Load data
mnistDf_backup = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/mnist_train_small.csv",
sep=",",
header=None)
# Shuffle data
mnistDf_backup = mnistDf_backup.sample(frac=1).reset_index(drop=True)
# Use the first 5000 examples for faster prototyping
mnistDf = mnistDf_backup[0:5000]
mnistDf.head()
###Output
_____no_output_____
###Markdown
Understanding the Data Format Each row represents one labeled example. Column 0 represents the label that a human rater has assigned for one handwritten digit. For example, if Column 0 contains '6', then a human rater interpreted the handwritten character as the digit '6'. The ten digits 0-9 are each represented, with a unique class label for each possible digit. Thus, this is a multi-class classification problem with 10 classes.  Columns 1 through 784 contain the feature values, one per pixel for the 28×28=784 pixel values. The pixel values are on a gray scale in which 0 represents white, 255 represents black, and values between 0 and 255 represent shades of gray. Most of the pixel values are 0; you may want to take a minute to confirm that they aren't all 0. Modify the form below and run the code to view data for a given example.
###Code
showExample = 1000 # @param
digitData = np.reshape(mnistDf.iloc[showExample, 1:], [28, 28])  # column 0 is the label; columns 1-784 are the pixels
print(digitData)
###Output
_____no_output_____
###Markdown
Do you have Imbalanced Classes? As we read in the course, imbalanced classes make classification harder. Let's look at the distribution of classes. Do you have imbalanced classes?
###Code
%hide_result # hides result of cell computation
# Calculate the number of classes
numClasses = mnistDf.iloc[:,0].unique().shape[0]
# Plot histogram of class distribution
plt.hist(mnistDf.iloc[:,0], bins=range(numClasses+1))
plt.xticks(range(numClasses+1))
###Output
_____no_output_____
###Markdown
The preceding graph shows that the 10 classes are roughly equally represented. Shuffle and Split Dataset As part of [Data Debugging](https://developers.google.com/machine-learning/testing-debugging/common/data-errors) best practices, ensure your splits are statistically equivalent by shuffling your data to remove any pre-existing order.
###Code
# Shuffle data
mnistDf = mnistDf.sample(frac=1).reset_index(drop=True)
# Split dataset into data and labels
mnistData = mnistDf.iloc[:,1:-1].copy(deep=True)
mnistLabels = mnistDf.iloc[:,0].copy(deep=True)
###Output
_____no_output_____
###Markdown
Process Data Scale the data values to `[0,1]` since the values are bounded to `[0,255]` and do not contain outliers. Then check that the scaled data values are as expected by generating summary statistics using the `DataFrame.describe()` function.Run the following cell to scale data and generate statistics. This cell takes a few minutes to run.
###Code
def minMaxScaler(arr):
min = np.min(arr)
max = np.max(arr)
arr = (arr-min)/max
return arr
for featureIdx in range(mnistData.shape[1]):
mnistData.iloc[:,featureIdx] = minMaxScaler(mnistData.iloc[:,featureIdx])
mnistData.describe()
###Output
_____no_output_____
###Markdown
Oh no! Some of your features are all `NaN`. What do you think the cause is? Hint: While NaNs have many causes, in this case, the NaN values are caused by the properties of your data. Use the next code cell to explore your data. Then check the next cell for the solution. Try to find the solution yourself. Debugging `NaN`s and exploring your data are important skills.
###Code
# First reload your data
mnistData = mnistDf.iloc[:,1:-1].copy(deep=True)
# Explore your data
###Output
_____no_output_____
###Markdown
Solution Start exploring your data by generating a high-level summary using `Dataframe.describe()`.
###Code
mnistData.describe()
###Output
_____no_output_____
###Markdown
Because some of the feature columns are all zeros, the scaling function divided by 0 (because `np.max` returns 0). The division by 0 resulted in NaN values. This result shows you how easily NaNs can arise in engineered data. The `describe` function will not detect every occurrence of NaN (or None). Instead, use the command `DataFrame.isnull().any()`.*Note*: Given the maximum value of the feature data is 255, you could simply divide the input by 255 instead of using min-max scaling, and avoid introducing NaNs. However, this example purposely uses min-max scaling to show how NaNs can appear in engineered data.Now let's try scaling the data again.
###Code
# Redefine the scaling function to check for zeros
def minMaxScaler(arr):
max = np.max(arr)
if(max!=0): # avoid /0
min = np.min(arr)
arr = (arr-min)/max
return arr
# Reload data
mnistData = mnistDf.iloc[:,1:-1].copy(deep=True)
# Scale data
for featureIdx in range(mnistData.shape[1]):
mnistData.iloc[:,featureIdx] = minMaxScaler(mnistData.iloc[:,featureIdx])
###Output
_____no_output_____
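###Markdown
As a quick sanity check (a minimal sketch using the `DataFrame.isnull().any()` call mentioned in the solution above; the variable name is only illustrative), confirm that the rescaled features no longer contain `NaN` values:
###Code
# Count feature columns that still contain NaN after the fixed scaling
nanColumns = mnistData.isnull().any()
print("Columns containing NaN: " + str(nanColumns.sum()))
assert not nanColumns.any(), "Engineered data still contains NaN values"
###Output
_____no_output_____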
###Markdown
You should follow best practice and prevent this bug from recurring by writing a unit test to check for not having `NaN` values in your engineered data. Remove All-Zero Features? You might think that getting NaNs and discovering that some features were all-zero is good luck because those features can be discarded. However, your training data and validation data might have different all-zero features. Since you should not use validation data to make modeling decisions, you cannot remove only those features that are all-zero in both. Furthermore, data in the future might have different characteristics. There are pros and cons in either case. This Colab keeps the features since reducing the feature set is not a concern. Establish Baseline Following development best practices, you should establish a baseline. The simplest baseline is predicting the most common class. You saw that the most common class is 1. Let's check the accuracy when always predicting 1.
###Code
np.sum(mnistLabels==1)*1.0/mnistLabels.shape[0]*100
###Output
_____no_output_____
###Markdown
Your baseline accuracy is about 11%. Should be easy to beat, right? Train a Linear Model Let's start nice and easy with a linear model. All we need is an accuracy > 11%. First, let's define a function to plot our loss and accuracy curves. The function will also print the final loss and accuracy. Instead of using `verbose=1`, you can call the function.
###Code
def showClassificationResults(trainHistory):
"""Function to:
* Print final loss & accuracy.
* Plot loss & accuracy curves.
Args:
trainHistory: object returned by model.fit
"""
# Print final loss and accuracy
print("Final training loss: " + str(trainHistory.history['loss'][-1]))
print("Final validation loss: " + str(trainHistory.history['val_loss'][-1]))
print("Final training accuracy: " + str(trainHistory.history['acc'][-1]))
print("Final validation accuracy: " + str(trainHistory.history['val_acc'][-1]))
# Plot loss and accuracy curves
f = plt.figure(figsize=(10,4))
axLoss = f.add_subplot(121)
axAcc = f.add_subplot(122)
axLoss.plot(trainHistory.history['loss'])
axLoss.plot(trainHistory.history['val_loss'])
axLoss.legend(['Training loss', 'Validation loss'], loc='best')
axLoss.set_xlabel('Training epochs')
axLoss.set_ylabel('Loss')
axAcc.plot(trainHistory.history['acc'])
axAcc.plot(trainHistory.history['val_acc'])
axAcc.legend(['Training accuracy', 'Validation accuracy'], loc='best')
axAcc.set_xlabel('Training epochs')
axAcc.set_ylabel('Accuracy')
###Output
_____no_output_____
###Markdown
Now train a linear model with an output layer and a hidden layer.
###Code
model = None
# Define
model = keras.Sequential()
model.add(keras.layers.Dense(mnistData.shape[1],
activation='linear',
input_dim=mnistData.shape[1]))
model.add(keras.layers.Dense(1, activation='linear'))
# Compile
model.compile(optimizer="adam", loss='mse', metrics=['accuracy'])
# Train
trainHistory = model.fit(mnistData, mnistLabels, epochs=10, batch_size=100,
validation_split=0.2, verbose=0)
# Plot
showClassificationResults(trainHistory)
###Output
_____no_output_____
###Markdown
Wow, that accuracy is terrible! What could the cause be?Hint: You followed the same procedure as for the previous regression problem. Do you need an adaptation for a classification problem? Experiment with the code above or skip to the solution below. Solution In regression, the last layer uses a linear activation function. In classification, the last layer cannot use a linear transform. Instead, one option is a softmax transform. Furthermore, in regression, the loss is calculated using MSE while in classification, loss is calculated using crossentropy. Before running your model, if you wrote a test to validate the output values, your test would detect the anomalous output. You'll look at such a test later. Move onto the next section to fix the loss calculation. Fixing Loss Calculation Since your labels are integers instead of one-hot encodings, use `sparse_categorical_crossentropy` instead of `categorical_crossentropy` so that you avoid converting the integers to one-hot encoding. Retrain the model with the new loss calculation by running the following cell. Look through the code to note the changes. What do you think of the result?
###Code
model = None
# Define
model = keras.Sequential()
model.add(keras.layers.Dense(mnistData.shape[1], activation='linear',
input_dim = mnistData.shape[1]))
model.add(keras.layers.Dense(10, activation='softmax'))
# Compile
model.compile(optimizer="adam",
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Train
trainHistory = model.fit(mnistData, mnistLabels, epochs=10, batch_size=100,
validation_split=0.1, verbose=0)
# Plot
showClassificationResults(trainHistory)
###Output
_____no_output_____
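###Markdown
To make the loss-function distinction concrete, here is an illustrative sketch (not part of the original exercise): `sparse_categorical_crossentropy` consumes the integer labels directly, whereas `categorical_crossentropy` would expect the equivalent one-hot rows produced by `keras.utils.to_categorical`.
###Code
# Illustration only: the first few integer labels and their one-hot equivalents
exampleLabels = mnistLabels[:3].values
print("Integer labels: " + str(exampleLabels))
print("One-hot labels:")
print(keras.utils.to_categorical(exampleLabels, num_classes=10))
###Output
_____no_output_____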
###Markdown
Your loss curves are much better. Your accuracy has improved too. You're on the right track. Train a Nonlinear Model Switch to a nonlinear model by modifying the code below to use relu activation functions instead of linear activation functions. Run the code. What do you observe?
###Code
model = None
# Define
model = keras.Sequential()
model.add(keras.layers.Dense(mnistData.shape[1], activation='', # use 'relu'
input_dim=mnistData.shape[1]))
model.add(keras.layers.Dense(10, activation='softmax'))
# Compile
model.compile(optimizer="adam", loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Train
trainHistory = model.fit(mnistData, mnistLabels, epochs=20, batch_size=100,
validation_split=0.1, verbose=0)
# Plot
showClassificationResults(trainHistory)
###Output
_____no_output_____
###Markdown
The quality of the nonlinear model is significantly better than that of the linear model. Progress! Move on to the next section. Adding a Second Layer Increasing the model's capacity significantly improved your results. Perhaps you can continue this strategy by adding a second relu layer. Run the following code cell to train the model with another relu layer.
###Code
model = None
# Define
model = keras.Sequential()
model.add(keras.layers.Dense(mnistData.shape[1], activation='relu',
input_dim = mnistData.shape[1]))
model.add(keras.layers.Dense(mnistData.shape[1], activation='relu'))
model.add(keras.layers.Dense(10,activation='softmax'))
# Compile
model.compile(optimizer="adam", loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Train
trainHistory = model.fit(mnistData, mnistLabels, epochs=10, batch_size=100,
validation_split=0.1, verbose=0)
# Plot
showClassificationResults(trainHistory)
###Output
_____no_output_____
###Markdown
Guess what. Your previous model had training and validation accuracies of 100% and 95%. You can't do much better than that! So your new accuracy is about the same. How high can you push your accuracy? With this configuration, the highest training and validation accuracies appear to be 100% and 96% respectively. Since the neural net returns similar accuracy with 1 or 2 layers, let's use the simpler model with 1 layer. Does your model begin to overfit the training data if you train for long enough? (Your model starts overfitting training data at the point when your validation loss starts increasing; a small early-stopping sketch appears after the histograms below.) Check for Training/Validation Data Skew Our validation accuracy is a little worse than our training accuracy. While this result is always expected, you should check for typical errors. The commonest cause is having different distributions of data and labels in training and validation. Confirm that the distribution of classes in training and validation data is similar.
###Code
%hide_result # hides result of cell computation
f = plt.figure(figsize=(10,3))
ax = f.add_subplot(1,2,1)
plt.hist(mnistLabels[0:len(mnistLabels)*8//10], bins=range(numClasses+1))
plt.xticks(range(numClasses+1))
ax2 = f.add_subplot(1,2,2,)
plt.hist(mnistLabels[len(mnistLabels)*8//10:-1], bins=range(numClasses+1))
plt.xticks(range(numClasses+1))
###Output
_____no_output_____
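###Markdown
Returning to the earlier question about overfitting: one way to probe it empirically (a sketch; `modelES`, the patience value, and the epoch budget are illustrative choices, not part of the original Colab) is to train for longer with an early-stopping callback that halts once validation loss stops improving.
###Code
# Sketch: train the 1-layer nonlinear model with early stopping on validation loss
earlyStop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)
modelES = keras.Sequential()
modelES.add(keras.layers.Dense(mnistData.shape[1], activation='relu',
                               input_dim=mnistData.shape[1]))
modelES.add(keras.layers.Dense(10, activation='softmax'))
modelES.compile(optimizer="adam", loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
historyES = modelES.fit(mnistData, mnistLabels, epochs=50, batch_size=100,
                        validation_split=0.1, verbose=0, callbacks=[earlyStop])
print("Training stopped after " + str(len(historyES.history['loss'])) + " epochs")
###Output
_____no_output_____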
###Markdown
Apply Dropout Regularization Dropout regularization is a common regularization method that removes a random selection of a fixed number of units in a network layer for a single gradient step. Typically, dropout will improve generalization at a dropout rate of between 10% and 50% of neurons. Try to reduce the divergence between training and validation loss by using dropout regularization with values between 0.1 and 0.5. Dropout does not improve the results in this case. However, at a dropout of 0.5, the difference in loss decreases, though both training and validation loss decrease in absolute terms.
###Code
from keras import regularizers
model = None
# Define lambda
dropoutLambda = 0.5 #@param
# Define model
model = keras.Sequential()
model.add(keras.layers.Dense(mnistData.shape[1],
input_dim=mnistData.shape[1],
activation='relu'))
model.add(keras.layers.Dropout(dropoutLambda,
noise_shape=(1, mnistData.shape[1])))
model.add(keras.layers.Dense(10, activation='softmax'))
# Compile
model.compile(optimizer = "adam",
loss = 'sparse_categorical_crossentropy',
metrics = ['accuracy'])
# Train
trainHistory = model.fit(mnistData,
mnistLabels,
epochs=30,
batch_size=500,
validation_split=0.1,
verbose=0)
# Plot
showClassificationResults(trainHistory)
###Output
_____no_output_____
###Markdown
Sample results using dropout regularization after 30 epochs:
Lambda | Training Loss | Validation Loss
------- | ------- | -------
0.1 | 0.99 | 0.95
0.2 | 0.99 | 0.95
0.3 | 0.99 | 0.95
0.5 | 0.97 | 0.94
Check Accuracy for Data Slices For classification problems, you should always check the metrics by class to ensure your model predicts well across all classes. Check accuracy on the 10 classes by running the next cell, which uses the function `sklearn.metrics.classification_report` from the scikit-learn library. In the output, the rows with indices 0 to 9 correspond to the classes for the labels 0 to 9. The columns "Precision", "Recall", and "[F1-Score](https://en.wikipedia.org/wiki/F1_score)" correspond to the respective classification metrics for each class. "Support" is the number of examples for the class in question. For example, for the label "4", when predicting on 464 examples labelled "4", the model has a precision of 0.98, a recall of 0.97, and an F1 score of 0.98. The classification metrics are very uniform across all classes, which is perfect. In your classification problem, in case any metric is lower for a class, then you should investigate why the model has lower-quality predictions for that class.
###Code
from sklearn.metrics import classification_report
mnistPred = model.predict_classes(x = mnistData)
print(classification_report(mnistLabels, mnistPred))
###Output
_____no_output_____
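###Markdown
If any class did show weaker metrics, one way to investigate (a sketch using scikit-learn's `confusion_matrix`; not part of the original Colab) is to look at which classes it gets confused with:
###Code
from sklearn.metrics import confusion_matrix

# Rows are actual classes 0-9, columns are predicted classes 0-9
print(confusion_matrix(mnistLabels, mnistPred))
###Output
_____no_output_____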
###Markdown
Testing for Anomalous Values In the section [Train a Linear Model](https://colab.corp.google.com/google_src/cloud/karangill/mlcc/google3/engedu/ml/capitalg/colab/testing_debugging_classification.ipynbscrollTo=B6AOgLcC5nwp), you debugged an incorrect calculation of loss. Before running your model, if you wrote a test to validate the output values, your test would detect the anomalous output. For example, you could test whether the distribution of predicted labels on the training dataset is similar to the actual distribution of training labels. A simple statistical implementation of this concept is to compare the standard deviation and mean of the predicted and actual labels. First, check the standard deviation and mean of the actual labels.
###Code
print("Mean of actual labels: " + str(np.mean(mnistLabels)))
print("Standard deviation of actual labels: " + str(np.std(mnistLabels)))
###Output
_____no_output_____
###Markdown
Write tests to check whether the mean and standard deviation of the predicted labels fall within the expected range. The expected range defined in the tests below is somewhat arbitrary. In practice, you will tune the range thresholds to accommodate natural variation in predictions.
###Code
class mlTest(unittest.TestCase):
'''Class to test statistics of predicted output on training data against
    statistics of labels to validate that model predictions are in the
expected range.
'''
def testStd(self):
y = model.predict(mnistData)
yStd = np.std(y)
yStdActual = np.std(mnistLabels)
deltaStd = 0.05
errorMsg = 'Std. dev. of predicted values ' + str(yStd) + \
' and actual values ' + str(yStdActual) + \
' differs by >' + str(deltaStd) + '.'
self.assertAlmostEqual(yStd, yStdActual, delta=deltaStd, msg=errorMsg)
def testMean(self):
y = model.predict(mnistData)
yMean = np.mean(y)
yMeanActual = np.mean(mnistLabels)
deltaMean = 0.05
errorMsg = 'Mean of predicted values ' + str(yMean) + \
' and actual values ' + str(yMeanActual) + \
' differs by >' + str(deltaMean) + '.'
self.assertAlmostEqual(yMean, yMeanActual, delta=deltaMean, msg=errorMsg)
###Output
_____no_output_____
###Markdown
Run the following cell to train a model with the wrong loss calculation and execute the tests. The tests should fail.
###Code
#@title Train model and run tests
model = None
# Define
model = keras.Sequential()
model.add(keras.layers.Dense(mnistData.shape[1],
activation='linear',
input_dim=mnistData.shape[1]))
model.add(keras.layers.Dense(1, activation='linear'))
# Compile
model.compile(optimizer="adam", loss='mse', metrics=['accuracy'])
# Train
trainHistory = model.fit(mnistData, mnistLabels, epochs=10, batch_size=100,
validation_split=0.1, verbose=0)
suite = unittest.TestLoader().loadTestsFromTestCase(mlTest)
unittest.TextTestRunner(verbosity=1, stream=sys.stderr).run(suite)
###Output
_____no_output_____
###Markdown
Since the tests fail, check the data distribution of predicted labels for anomalies.
###Code
yPred = model.predict(mnistData)
plt.hist(yPred, bins=range(11))
###Output
_____no_output_____
|
jupyter_french/topic09_time_series/topic9_part2_facebook_prophet-fr_def.ipynb
|
###Markdown
[mlcourse.ai](https://mlcourse.ai) - Open Machine Learning Course. Author: [Egor Polusmak](https://www.linkedin.com/in/egor-polusmak/). Translated and edited by [Yuanyuan Pao](https://www.linkedin.com/in/yuanyuanpao/) and [Ousmane Cissé](https://fr.linkedin.com/in/ousmane-cisse). This material is subject to the terms and conditions of the [Creative Commons CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/) license. Free use is permitted for non-commercial purposes. Topic 9. Time series analysis in Python Part 2. Predicting the future with Facebook Prophet Time series forecasting finds wide application in data analysis. These are just a few of the conceivable forecasts of future trends that might be useful: - The number of servers that an online service will need next year. - The demand for a grocery item in a supermarket on a given day. - Tomorrow's closing price of a tradable financial asset. As another example, we can make a prediction of a team's performance and then use it as a baseline: first to set goals for the team, and then to measure the actual team performance relative to the baseline. There are several different methods for predicting future trends, for example [ARIMA](https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average), [ARCH](https://en.wikipedia.org/wiki/Autoregressive_conditional_heteroskedasticity), [regressive models](https://en.wikipedia.org/wiki/Autoregressive_model), and [neural networks](https://medium.com/machine-learning-world/neural-networks-for-algorithmic-trading-1-2-correct-time-series-forecasting-backtesting-9776bfd9e589). In this article, we will look at [Prophet](https://facebook.github.io/prophet/), a time series forecasting library released by Facebook and open-sourced on February 23, 2017. We will also try it out on the problem of predicting the daily number of posts published on Medium. Article outline 1. Introduction 2. The Prophet forecasting model 3. Practice with Prophet * 3.1 Installation in Python * 3.2 Dataset * 3.3 Exploratory visual analysis * 3.4 Making a forecast * 3.5 Evaluating forecast quality * 3.6 Visualization 4. Box-Cox transformation 5. Summary 6. References 1. Introduction According to the [article](https://research.fb.com/prophet-forecasting-at-scale/) on Facebook Research, Prophet was initially developed with the goal of producing high-quality business forecasts. This library tries to address the following difficulties, common to many business time series: - Seasonal effects caused by human behavior: weekly, monthly, and yearly cycles, dips and peaks on public holidays. - Trend changes due to new products and market events. - Outliers. The authors claim that, even with the default settings, in many cases their library produces forecasts as accurate as those delivered by experienced analysts. Moreover, Prophet has a number of intuitive and easily interpretable customizations that allow gradually improving the quality of the forecasting model.
What is especially important, these settings are quite understandable even for non-experts in time series analysis, which is a field of data science requiring certain skill and experience. By the way, the original article is called "Forecasting at scale", but it is not about scale in the "usual" sense, which deals with computational and infrastructure problems of a large number of running programs. According to the authors, Prophet should scale well in the following 3 areas: - Accessibility to a wide audience of analysts, possibly without deep expertise in time series. - Applicability to a wide range of distinct forecasting problems. - Automated estimation of the performance of a large number of forecasts, including flagging potential problems for subsequent inspection by the analyst. 2. The Prophet forecasting model Now let's take a closer look at how Prophet works. In essence, this library uses the [additive regression model](https://en.wikipedia.org/wiki/Additive_model) $y(t)$ comprising the following components: $$y(t) = g(t) + s(t) + h(t) + \epsilon_{t},$$ where: * The trend $g(t)$ models non-periodic changes. * The seasonality $s(t)$ represents periodic changes. * The holidays component $h(t)$ provides information about holidays and events. Below we consider a few important properties of these model components. Trend The Prophet library implements two possible trend models for $g(t)$. The first one is called *Nonlinear, Saturating Growth*. It is represented in the form of the [logistic growth model](https://en.wikipedia.org/wiki/Logistic_function): $$g(t) = \frac{C}{1+e^{-k(t - m)}},$$ where: * $C$ is the carrying capacity (that is, the curve's maximum value). * $k$ is the growth rate (which represents "the steepness" of the curve). * $m$ is an offset parameter. This logistic equation allows modeling non-linear growth with saturation, that is, when the growth rate of a value decreases as it grows. One of the typical examples would be representing the growth of an application's or website's audience (a small numerical sketch of this curve is given right after the imports in Section 3.1). In fact, $C$ and $k$ are not necessarily constants and may vary over time. Prophet supports both automatic and manual tuning of their variability. The library can itself choose optimal points of trend change by fitting the supplied historical data. In addition, Prophet allows analysts to manually set changepoints of the growth rate and capacity values at different points in time. For example, analysts may have information about the dates of previous releases that significantly influenced some key product indicators. The second trend model is a simple *Piecewise Linear Model* with a constant growth rate. It is best suited for problems without saturating growth. Seasonality The seasonal component $s(t)$ provides a flexible model of periodic changes due to weekly and yearly seasonality. Weekly seasonal data is modeled with dummy variables.
Six new variables are added: "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", which take values 0 or 1 depending on the day of the week. The "Sunday" feature is not added because it would be a linear combination of the other days of the week, and this fact would have an adverse effect on the model. The yearly seasonality model in Prophet relies on Fourier series. Since version 0.2, you can also use sub-daily time series and make sub-daily forecasts, as well as use the new daily seasonality feature. Holidays and events The component $h(t)$ represents predictable abnormal days of the year, including those with irregular schedules, for example, Black Fridays. To use this feature, the analyst needs to provide a custom list of events. Error The error term $\epsilon(t)$ represents information that was not reflected in the model. Usually it is modeled as normally distributed noise. Prophet benchmarking For a detailed description of the model and the algorithms behind Prophet, refer to the paper ["Forecasting at scale"](https://peerj.com/preprints/3190/) by Sean J. Taylor and Benjamin Letham. The authors also compared their library with several other time series forecasting methods. They used the [Mean Absolute Percentage Error (MAPE)](https://en.wikipedia.org/wiki/Mean_absolute_percentage_error) as a measure of prediction accuracy. In that analysis, Prophet showed considerably lower forecast error than the other models. Let's take a closer look at how forecast quality was measured in the article. To do this, we will need the formula for the mean absolute percentage error. Let $y_{i}$ be the *actual (historical) value* and $\hat{y}_{i}$ be the *forecast value* given by our model. Then $e_{i} = y_{i} - \hat{y}_{i}$ is the *forecast error* and $p_{i} =\frac{\displaystyle e_{i}}{\displaystyle y_{i}}$ is the *relative forecast error*. We define $$MAPE = mean\big(\left |p_{i} \right |\big)$$ MAPE is widely used as a measure of prediction accuracy because it expresses the error as a percentage and can therefore be used in model evaluations on different datasets. In addition, when evaluating a forecasting algorithm, it may prove useful to calculate [MAE (Mean Absolute Error)](https://en.wikipedia.org/wiki/Mean_absolute_error) in order to have a picture of the errors in absolute numbers. Using the previously defined components, its equation is $$MAE = mean\big(\left |e_{i}\right |\big)$$ A few words about the algorithms Prophet was compared with. Most of them are quite simple and are often used as a baseline for other models: * `naive` is a simplistic forecasting approach in which we predict all future values relying only on the observation at the last available point in time. * `snaive` (seasonal naive) is a model that makes constant predictions while taking seasonality information into account.
For example, in the case of weekly seasonal data, for each future Monday we would predict the value from the last Monday, and for all future Tuesdays we would use the value from the last Tuesday, and so on. * `mean` uses the average value of the data as the forecast. * `arima` stands for *Autoregressive Integrated Moving Average*; see [Wikipedia](https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average) for details. * `ets` stands for *Exponential Smoothing*; see [Wikipedia](https://en.wikipedia.org/wiki/Exponential_smoothing) for more information. 3. Practice with Facebook Prophet 3.1 Installation in Python First of all, you need to install the library. Prophet is available for Python and R. The choice will depend on your personal preferences and the project requirements. Further on in this article, we will use Python. In Python, you can install Prophet using PyPI:```$ pip install fbprophet```In R, you will find the corresponding CRAN package. Refer to the [documentation](https://facebookincubator.github.io/prophet/docs/installation.html) for details. Let's import the modules we will need and initialize our environment:
###Code
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.api as sm
from scipy import stats
%matplotlib inline
###Output
_____no_output_____
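###Markdown
Before loading the data, here is a minimal numerical sketch of the logistic trend $g(t)$ from Section 2, using the `numpy` imported above (the parameter values are arbitrary and only for illustration):
###Code
def logistic_trend(t, C, k, m):
    """Logistic growth g(t) = C / (1 + exp(-k * (t - m)))."""
    return C / (1 + np.exp(-k * (t - m)))

# Growth saturates as t grows: the values approach the carrying capacity C
t = np.linspace(0, 100, 5)
print(logistic_trend(t, C=1000, k=0.1, m=50))
###Output
_____no_output_____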
###Markdown
3.2 Dataset We will predict the daily number of posts published on [Medium](https://medium.com/). First, we load our dataset.
###Code
df = pd.read_csv("../../data/medium_posts.csv.zip", sep="\t")
###Output
_____no_output_____
###Markdown
Next, we drop all columns except `published` and `url`. The former corresponds to the time dimension, while the latter uniquely identifies a post by its URL. After that, we get rid of possible duplicates and missing values in the data:
###Code
df = df[["published", "url"]].dropna().drop_duplicates()
###Output
_____no_output_____
###Markdown
Next, we need to convert `published` to the datetime format because, by default, `pandas` treats this field as a string.
###Code
df["published"] = pd.to_datetime(df["published"])
###Output
_____no_output_____
###Markdown
Let's sort the dataframe by date and take a look at what we have:
###Code
df.sort_values(by=["published"]).head(n=3)
###Output
_____no_output_____
###Markdown
Medium's public release date was August 15, 2012. But, as you can see from the data above, there are at least a few rows with much earlier publication dates. They somehow appeared in our dataset, but they are hardly legitimate. We will simply trim our time series to keep only the rows that fall within the period from August 15, 2012 to June 25, 2017:
###Code
df = df[
(df["published"] > "2012-08-15") & (df["published"] < "2017-06-26")
].sort_values(by=["published"])
df.head(n=3)
df.tail(n=3)
###Output
_____no_output_____
###Markdown
Since we are going to predict the number of posts, we will aggregate and count the unique posts at each given point in time. We will name the corresponding new column `posts`:
###Code
aggr_df = df.groupby("published")[["url"]].count()
aggr_df.columns = ["posts"]
###Output
_____no_output_____
###Markdown
In this practice we are interested in the number of posts **per day**. But at the moment all our data is split into irregular time intervals that are shorter than one day. This is called a *sub-daily time series*. To see it, let's display the first 3 rows:
###Code
aggr_df.head(n=3)
###Output
_____no_output_____
###Markdown
To fix this, we need to aggregate the post counts into "bins" of a given date size. In time series analysis, this process is called *resampling*. And if we *reduce* the sampling rate of the data, it is often called *downsampling*. Fortunately, `pandas` has built-in functionality for this task. We will resample our date index into one-day bins:
###Code
daily_df = aggr_df.resample("D").apply(sum)
daily_df.head(n=3)
###Output
_____no_output_____
###Markdown
3.3 Exploratory visual analysis As always, it can be useful and instructive to look at a graphical representation of your data. We will create a time series plot for the whole time range. Displaying data over such a long period can give clues about seasonality and visible anomalous deviations. First, we import and initialize the `Plotly` library, which makes it possible to create beautiful interactive plots:
###Code
from plotly import graph_objs as go
from plotly.offline import init_notebook_mode, iplot
# Initialize plotly
init_notebook_mode(connected=True)
###Output
_____no_output_____
###Markdown
We also define a helper function that will plot our dataframes throughout the article:
###Code
def plotly_df(df, title=""):
"""Visualize all the dataframe columns as line plots."""
common_kw = dict(x=df.index, mode="lines")
data = [go.Scatter(y=df[c], name=c, **common_kw) for c in df.columns]
layout = dict(title=title)
fig = dict(data=data, layout=layout)
iplot(fig, show_link=False)
###Output
_____no_output_____
###Markdown
Let's try to plot our dataset *as is*:
###Code
plotly_df(daily_df, title="Posts on Medium (daily)")
###Output
_____no_output_____
###Markdown
High-frequency data can be quite difficult to analyze. Even with the zooming capability provided by `Plotly`, it is hard to infer anything meaningful from this chart, except for the upward and accelerating trend. To reduce the noise, we will resample the post counts down to weeks. Besides *binning*, other possible noise-reduction techniques include [Moving-Average Smoothing](https://en.wikipedia.org/wiki/Moving_average) and [Exponential Smoothing](https://en.wikipedia.org/wiki/Exponential_smoothing), among others (a rolling-mean sketch is shown a bit further below). We save our downsampled dataframe in a separate variable because in this practice we will work only with the daily series:
###Code
weekly_df = daily_df.resample("W").apply(sum)
###Output
_____no_output_____
###Markdown
Finally, we plot the result:
###Code
plotly_df(weekly_df, title="Posts on Medium (weekly)")
###Output
_____no_output_____
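###Markdown
As an aside, the moving-average smoothing mentioned above can be sketched with a rolling mean over the daily series (the 7-day centered window is an arbitrary choice for illustration; the rest of the article sticks with the weekly binning):
###Code
# Sketch: 7-day centered rolling mean as an alternative noise-reduction technique
smoothed_daily_df = daily_df.rolling(window=7, center=True).mean().dropna()
plotly_df(smoothed_daily_df, title="Posts on Medium (7-day rolling mean)")
###Output
_____no_output_____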
###Markdown
This weekly, downsampled chart turns out to be a bit better for an analyst's perception. One of the most useful functions provided by `Plotly` is the ability to quickly dive into different periods of the timeline in order to better understand the data and find visual clues about possible trends, periodic and irregular effects. For example, zooming in on a couple of consecutive years shows us time points corresponding to the Christmas holidays, which greatly influence human behavior. Now we will omit the first few years of observations, up to 2015. First, they will not contribute much to the quality of forecasts for 2017. Second, these first years, having a very low number of posts per day, are likely to increase the noise in our forecasts, as the model would be forced to fit this anomalous historical data along with the more relevant and indicative data from recent years.
###Code
daily_df = daily_df.loc[daily_df.index >= "2015-01-01"]
daily_df.head(n=3)
###Output
_____no_output_____
###Markdown
To summarize, from the visual analysis we can see that our dataset is non-stationary with a prominent growing trend. It also shows weekly and yearly seasonality and a number of abnormal days in each year. 3.4 Making a forecast Prophet's API is very similar to the one you can find in `sklearn`. We first create a model, then call the `fit` method and, finally, make a forecast. The input to the `fit` method is a `DataFrame` with two columns: * `ds` (datestamp) must be of type `date` or `datetime`. * `y` is the numeric value we want to predict. To get started, we will import the library and silence unimportant diagnostic messages:
###Code
import logging
from fbprophet import Prophet
logging.getLogger().setLevel(logging.ERROR)
###Output
_____no_output_____
###Markdown
Let's convert our dataframe into the format required by Prophet:
###Code
df = daily_df.reset_index()
df.columns = ["ds", "y"]
df.tail(n=3)
###Output
_____no_output_____
###Markdown
The library authors generally advise making predictions based on at least several months, ideally more than a year, of historical data. Fortunately, in our case we have more than a couple of years of data to fit the model. To measure the quality of our forecast, we need to split our dataset into a *historical part*, which is the first and largest slice of our data, and a *prediction part*, which sits at the end of the timeline. We will remove the last month from the dataset in order to use it later as the prediction target:
###Code
prediction_size = 30
train_df = df[:-prediction_size]
train_df.tail(n=3)
###Output
_____no_output_____
###Markdown
Now we need to create a new `Prophet` object. Here we could pass the model parameters into the constructor, but in this article we will use the default values. Then we train our model by invoking its `fit` method on our training dataset:
###Code
m = Prophet()
m.fit(train_df);
###Output
_____no_output_____
###Markdown
Using the `Prophet.make_future_dataframe` method, we create a dataframe that will contain all the dates from the history and also extend into the future for the 30 days that we left out before.
###Code
future = m.make_future_dataframe(periods=prediction_size)
future.tail(n=3)
###Output
_____no_output_____
###Markdown
We predict values with `Prophet` by passing in the dates for which we want to create a forecast. If we also supply the historical dates (as in our case), then in addition to the prediction we will get an in-sample fit for the history. Let's call the model's `predict` method with our `future` dataframe as input:
###Code
forecast = m.predict(future)
forecast.tail(n=3)
###Output
_____no_output_____
###Markdown
In the resulting dataframe you can see many columns characterizing the prediction, including the trend and seasonality components as well as their confidence intervals. The forecast itself is stored in the `yhat` column. The Prophet library has its own built-in visualization tools that let us quickly evaluate the result. First, there is a method called `Prophet.plot` that plots all the points from the forecast:
###Code
m.plot(forecast);
###Output
_____no_output_____
###Markdown
This chart does not look very informative. The only definite conclusion we can draw here is that the model treated many of the data points as outliers. The second function, `Prophet.plot_components`, might be much more useful in our case. It lets us observe different components of the model separately: trend, yearly and weekly seasonality. In addition, if you supply information about holidays and events to your model, it will also be shown in this plot. Let's try it out:
###Code
m.plot_components(forecast);
###Output
_____no_output_____
###Markdown
As you can see from the trend chart, Prophet did a good job of fitting the accelerated growth of new posts at the end of 2016. The weekly seasonality chart leads to the conclusion that there are usually fewer new posts on Saturdays and Sundays than on the other days of the week. In the yearly seasonality chart there is a prominent dip on Christmas Day. 3.5 Evaluating forecast quality Let's evaluate the quality of the algorithm by calculating the error metrics for the last 30 days that we predicted. For this, we will need the observations $y_i$ and the corresponding predicted values $\hat{y}_i$. Let's look at the `forecast` object that the library created for us:
###Code
print(", ".join(forecast.columns))
###Output
_____no_output_____
###Markdown
We can see that this dataframe contains all the information we need, except for the historical values. We need to join the `forecast` object with the actual values `y` from the original dataset `df`. For this we will define a helper function that we will reuse later:
###Code
def make_comparison_dataframe(historical, forecast):
"""Join the history with the forecast.
The resulting dataset will contain columns 'yhat', 'yhat_lower', 'yhat_upper' and 'y'.
"""
return forecast.set_index("ds")[["yhat", "yhat_lower", "yhat_upper"]].join(
historical.set_index("ds")
)
###Output
_____no_output_____
###Markdown
Let's apply this function to our latest forecast:
###Code
cmp_df = make_comparison_dataframe(df, forecast)
cmp_df.tail(n=3)
###Output
_____no_output_____
###Markdown
We will also define a helper function that we will use to evaluate the quality of our forecasts with the MAPE and MAE error metrics:
###Code
def calculate_forecast_errors(df, prediction_size):
"""Calculate MAPE and MAE of the forecast.
Args:
df: joined dataset with 'y' and 'yhat' columns.
prediction_size: number of days at the end to predict.
"""
# Make a copy
df = df.copy()
# Now we calculate the values of e_i and p_i according to the formulas given in the article above.
df["e"] = df["y"] - df["yhat"]
df["p"] = 100 * df["e"] / df["y"]
# Recall that we held out the values of the last `prediction_size` days
# in order to predict them and measure the quality of the model.
# Now cut out the part of the data which we made our prediction for.
predicted_part = df[-prediction_size:]
# Define the function that averages absolute error values over the predicted part.
error_mean = lambda error_name: np.mean(np.abs(predicted_part[error_name]))
# Now we can calculate MAPE and MAE and return the resulting dictionary of errors.
return {"MAPE": error_mean("p"), "MAE": error_mean("e")}
###Output
_____no_output_____
###Markdown
Let's use our function:
###Code
for err_name, err_value in calculate_forecast_errors(cmp_df, prediction_size).items():
print(err_name, err_value)
###Output
_____no_output_____
###Markdown
As a result, the relative error of our forecast (MAPE) is about 22.72%, and on average our model is wrong by 70.45 posts (MAE). 3.6 Visualization Let's create our own visualization of the model built by Prophet. It will include the actual values, the forecast and the confidence intervals. First, we will plot the data over a shorter period of time to make the data points easier to distinguish. Second, we will show the model's performance only for the period that we forecast, that is, the last 30 days. Third, we will use `Plotly` to make our chart interactive, which is great for exploration. These measures should give us a more legible plot. We will define our own helper function `show_forecast` and call it (to learn more about how it works, please refer to the comments in the code and the [documentation](https://plot.ly/python/)):
###Code
def show_forecast(cmp_df, num_predictions, num_values, title):
"""Visualize the forecast."""
def create_go(name, column, num, **kwargs):
points = cmp_df.tail(num)
args = dict(name=name, x=points.index, y=points[column], mode="lines")
args.update(kwargs)
return go.Scatter(**args)
lower_bound = create_go(
"Lower Bound",
"yhat_lower",
num_predictions,
line=dict(width=0),
marker=dict(color="444"),
)
upper_bound = create_go(
"Upper Bound",
"yhat_upper",
num_predictions,
line=dict(width=0),
marker=dict(color="444"),
fillcolor="rgba(68, 68, 68, 0.3)",
fill="tonexty",
)
forecast = create_go(
"Forecast", "yhat", num_predictions, line=dict(color="rgb(31, 119, 180)")
)
actual = create_go("Actual", "y", num_values, marker=dict(color="red"))
# In this case the order of the series is important because of the filling
data = [lower_bound, upper_bound, forecast, actual]
layout = go.Layout(yaxis=dict(title="Posts"), title=title, showlegend=False)
fig = go.Figure(data=data, layout=layout)
iplot(fig, show_link=False)
show_forecast(cmp_df, prediction_size, 100, "New posts on Medium")
###Output
_____no_output_____
###Markdown
At first glance, the prediction of the mean values by our model looks reasonable. The high MAPE value that we obtained above can be explained by the fact that the model failed to capture the increasing peak-to-peak amplitude of the weekly seasonality. Also, we can conclude from the chart above that many of the actual values lie outside the confidence interval. Prophet may not be suitable for time series with unstable variance, at least when the default settings are used. We will try to fix this by applying a transformation to our data. 4. Box-Cox transformation So far we have used Prophet with the default settings and the original data. We will leave the model parameters alone, but despite this we still have room for improvement. In this section, we will apply the [Box–Cox transformation](http://onlinestatbook.com/2/transformations/box-cox.html) to our original series. Let's see where it takes us. A few words about this transformation. It is a monotonic data transformation that can be used to stabilize variance. We will use the one-parameter Box–Cox transformation, which is defined by the following expression: $$\begin{equation} boxcox^{(\lambda)}(y_{i}) = \begin{cases} \frac{\displaystyle y_{i}^{\lambda} - 1}{\displaystyle \lambda} &, \text{if $\lambda \neq 0$}.\\ ln(y_{i}) &, \text{if $\lambda = 0$}. \end{cases}\end{equation}$$ We will need to implement the inverse of this function in order to be able to restore the original data scale. It is easy to see that the inverse is defined as: $$\begin{equation} invboxcox^{(\lambda)}(y_{i}) = \begin{cases} e^{\left (\frac{\displaystyle ln(\lambda y_{i} + 1)}{\displaystyle \lambda} \right )} &, \text{if $\lambda \neq 0$}.\\ e^{y_{i}} &, \text{if $\lambda = 0$}. \end{cases}\end{equation}$$ The corresponding function in Python is implemented as follows:
###Code
def inverse_boxcox(y, lambda_):
return np.exp(y) if lambda_ == 0 else np.exp(np.log(lambda_ * y + 1) / lambda_)
###Output
_____no_output_____
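###Markdown
As a quick sanity check (a sketch; the sample values are arbitrary), we can verify that `inverse_boxcox` really undoes `stats.boxcox`:
###Code
# Round trip: transform a small positive sample and invert it again
sample = np.array([1.0, 5.0, 20.0, 100.0])
transformed, lambda_check = stats.boxcox(sample)
print(inverse_boxcox(transformed, lambda_check))  # should recover the original sample
###Output
_____no_output_____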
###Markdown
First, we prepare our dataset by setting its index:
###Code
train_df2 = train_df.copy().set_index("ds")
###Output
_____no_output_____
###Markdown
Next, we apply `Scipy`'s `stats.boxcox` function, which applies the Box–Cox transformation. In our case it returns two values. The first is the transformed series and the second is the found value of $\lambda$ that is optimal in terms of maximum log-likelihood:
###Code
train_df2["y"], lambda_prophet = stats.boxcox(train_df2["y"])
train_df2.reset_index(inplace=True)
###Output
_____no_output_____
###Markdown
We create a new `Prophet` model and repeat the fit-and-forecast cycle that we already went through above:
###Code
m2 = Prophet()
m2.fit(train_df2)
future2 = m2.make_future_dataframe(periods=prediction_size)
forecast2 = m2.predict(future2)
###Output
_____no_output_____
###Markdown
At this point, we have to invert the Box–Cox transformation with our inverse function and the known value of $\lambda$:
###Code
for column in ["yhat", "yhat_lower", "yhat_upper"]:
forecast2[column] = inverse_boxcox(forecast2[column], lambda_prophet)
###Output
_____no_output_____
###Markdown
Here we will reuse our tools to build the comparison dataframe and calculate the errors:
###Code
cmp_df2 = make_comparison_dataframe(df, forecast2)
for err_name, err_value in calculate_forecast_errors(cmp_df2, prediction_size).items():
print(err_name, err_value)
###Output
_____no_output_____
###Markdown
So we can safely say that the quality of the model has improved. Finally, let's plot our previous performance and the latest results side by side. Note that we pass `prediction_size` so that the plots zoom in on the forecasted interval:
###Code
show_forecast(cmp_df, prediction_size, 100, "No transformations")
show_forecast(cmp_df2, prediction_size, 100, "BoxโCox transformation")
###Output
_____no_output_____
|
15. attention/Attention_Basics.ipynb
|
###Markdown
Attention Basics In this notebook, we look at how attention is implemented. We will focus on implementing attention in isolation from a larger model. That's because when implementing attention in a real-world model, a lot of the focus goes into piping the data and juggling the various vectors rather than the concepts of attention themselves. We will implement attention scoring as well as calculating an attention context vector. Attention Scoring Inputs to the scoring function Let's start by looking at the inputs we'll give to the scoring function. We will assume we're in the first step in the decoding phase. The first input to the scoring function is the hidden state of the decoder (assuming a toy RNN with three hidden nodes -- not usable in real life, but easier to illustrate):
###Code
dec_hidden_state = [5,1,20]
###Output
_____no_output_____
###Markdown
Let's visualize this vector:
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Let's visualize our decoder hidden state
plt.figure(figsize=(1.5, 4.5))
sns.heatmap(np.transpose(np.matrix(dec_hidden_state)), annot=True, cmap=sns.light_palette("purple", as_cmap=True), linewidths=1)
###Output
_____no_output_____
###Markdown
Our first scoring function will score a single annotation (encoder hidden state), which looks like this:
###Code
annotation = [3,12,45] #e.g. Encoder hidden state
# Let's visualize the single annotation
plt.figure(figsize=(1.5, 4.5))
sns.heatmap(np.transpose(np.matrix(annotation)), annot=True, cmap=sns.light_palette("orange", as_cmap=True), linewidths=1)
###Output
_____no_output_____
###Markdown
IMPLEMENT: Scoring a Single Annotation Let's calculate the dot product of a single annotation. NumPy's [dot()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html) is a good candidate for this operation.
###Code
def single_dot_attention_score(dec_hidden_state, enc_hidden_state):
# TODO: return the dot product of the two vectors
return np.dot(dec_hidden_state, enc_hidden_state)
single_dot_attention_score(dec_hidden_state, annotation)
###Output
_____no_output_____
###Markdown
Annotations Matrix Let's now look at scoring all the annotations at once. To do that, here's our annotation matrix:
###Code
annotations = np.transpose([[3,12,45], [59,2,5], [1,43,5], [4,3,45.3]])
###Output
_____no_output_____
###Markdown
And it can be visualized like this (each column is a hidden state of an encoder time step):
###Code
# Let's visualize our annotation (each column is an annotation)
ax = sns.heatmap(annotations, annot=True, cmap=sns.light_palette("orange", as_cmap=True), linewidths=1)
###Output
_____no_output_____
###Markdown
IMPLEMENT: Scoring All Annotations at Once Let's calculate the scores of all the annotations in one step using matrix multiplication. Let's continue to use the dot scoring method. To do that, we'll have to transpose `dec_hidden_state` and [matrix multiply](https://docs.scipy.org/doc/numpy/reference/generated/numpy.matmul.html) it with `annotations`.
###Code
def dot_attention_score(dec_hidden_state, annotations):
# TODO: return the product of dec_hidden_state transpose and enc_hidden_states
return np.dot(np.transpose(dec_hidden_state), annotations)
attention_weights_raw = dot_attention_score(dec_hidden_state, annotations)
attention_weights_raw
###Output
_____no_output_____
###Markdown
Looking at these scores, can you guess which of the four vectors will get the most attention from the decoder at this time step? Softmax Now that we have our scores, let's apply softmax:
###Code
def softmax(x):
    # Subtract the max before exponentiating for numerical stability (avoids overflow for large scores)
    x = np.array(x, dtype=np.float64)
    e_x = np.exp(x - np.max(x))
    return e_x / np.sum(e_x)
attention_weights = softmax(attention_weights_raw)
attention_weights
###Output
_____no_output_____
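###Markdown
As a quick aside (a small check, not part of the original exercise): the two largest raw scores, 927 and 929, differ by only 2, so after softmax the larger one should receive about $1/(1+e^{-2}) \approx 0.88$ of the attention, which matches the discussion below.
###Code
# The contributions of the two smaller scores (397 and 148) are negligible after exponentiation
print(1 / (1 + np.exp(-2)))
###Output
_____no_output_____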
###Markdown
Even knowing which annotation will get the most focus, it's interesting to see how drastically softmax sharpens the end scores. The first and last annotations had the respective scores of 927 and 929. But after softmax, the attention they'll get is 0.12 and 0.88 respectively. Applying the scores back on the annotations Now that we have our scores, let's multiply each annotation by its score to move closer to the attention context vector. This is the multiplication part of the formula (we'll tackle the summation part in the later cells).
###Code
def apply_attention_scores(attention_weights, annotations):
    # Multiply each annotation (column) by its attention weight via broadcasting
    return attention_weights * annotations
applied_attention = apply_attention_scores(attention_weights, annotations)
applied_attention
###Output
_____no_output_____
###Markdown
Let's visualize how the context vector looks now that we've applied the attention scores back on it:
###Code
# Let's visualize our annotations after applying attention to them
ax = sns.heatmap(applied_attention, annot=True, cmap=sns.light_palette("orange", as_cmap=True), linewidths=1)
###Output
_____no_output_____
###Markdown
Contrast this with the raw annotations visualized earlier in the notebook, and we can see that the second and third annotations (columns) have been nearly wiped out. The first annotation maintains some of its value, and the fourth annotation is the most pronounced. Calculating the Attention Context Vector All that remains now is to sum up the four columns to produce a single attention context vector.
###Code
def calculate_attention_vector(applied_attention):
return np.sum(applied_attention, axis=1)
attention_vector = calculate_attention_vector(applied_attention)
attention_vector
# Let's visualize the attention context vector
plt.figure(figsize=(1.5, 4.5))
sns.heatmap(np.transpose(np.matrix(attention_vector)), annot=True, cmap=sns.light_palette("Blue", as_cmap=True), linewidths=1)
###Output
_____no_output_____
|
02_Analysis_ethane_methanol/answers/Exercise_02_solutions.ipynb
|
###Markdown
Analysing Hydration Free Energies from a SOMD SimulationThis notebook will guide you through how to run an alchemical free energy analysis. We are looking at how to compute the relative hydration free energy between ethane and methanol using the Sire tool `analyse_freenrg mbar`.The notebook forms part of the CCPBio-Sim workshop **Alchemical Free Energy Simulation Analysis with analyse_freenrg** run on the 11th of April 2018 at the University of Bristol.*Author: Antonia Mey Email: [email protected]***Reading time of the document: 30 mins** Let's start with the necessary imports
###Code
%pylab inline
import glob
import seaborn as sbn
sbn.set_style("ticks")
sbn.set_context("notebook", font_scale = 2)
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
Quick reminder of what we are looking atThis notebook is designed to run an alchemical free energy analysis. We are looking at how to compute the relative hydration free energy between ethane and methanol. The idea stems from the fact that you can write down a thermodynamic cycle for the hydration process such as this:The analysis will be done using the `analyse_freenrg mbar` tool in Sire, assuming that the required alchemical free energy calculations were run with somd-freenrg. It will be assumed that you have successfully run a simulation and that the data to be analysed can be found in the directory `data`. Understanding the data When we run a SOMD free energy calculation a lot of data is generated. A good directory structure for keeping track of your data and allowing automation in the analysis is required. An example of how to structure your data is shown in the picture below and can be found in the current working directory of this notebook:The directories ranging from `lambda-0.000` to `lambda-1.000` contain the main output data. You would normally find the following set of files: ``` gradients.dat moves.dat sim_restart.s3 SYSTEM.s3 gradients.s3 simfile.dat sim_restart.s3.previous traj000000001.dcd ``` `.s3` files are Sire-related files. The file `traj000000001.dcd` contains the simulation trajectory. **1. Task -- Can you visualise the trajectory of the λ=0.0 vacuum simulation using nglview and the topology file provided?**
###Code
pwd
#trajectory file
traj_file = '../data/ethane~methanol/vacuum/run001/output/lambda-0.00/traj000000001.dcd'
#topology file
topology_file = '../data/ethane~methanol/vacuum/run001/output/lambda-0.00/SYSTEM.parm7'
##Insert code here to load your trajectory into nglview
import mdtraj as md
from nglview import NGLWidget
protein = md.load(traj_file, top=topology_file)
view = NGLWidget()
view.add_trajectory(protein)
view
###Output
_____no_output_____
###Markdown
The most important file needed for the data analysis, however, is the `simfile.dat`. You can find it in every λ directory. Let's have a look at what the file contains. We can easily access it using the `head` command:
###Code
!head -n13 ../data/ethane~methanol/free/run001/output/lambda-0.000/simfile.dat
###Output
#This file was generated on Thu Apr 5 15:36:13 2018
#Using the somd command, of the molecular library Sire version <2018.1.0>
#For more information visit: https://github.com/michellab/Sire
#
#General information on simulation parameters:
#Simulation used 10000 moves, 100 cycles and 2000 ps of simulation time
#Generating lambda is 0.00000
#Alchemical array is (0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)
#Generating temperature is 25 C
#Energy was saved every 250 steps
#
#
# [step] [potential kcal/mol] [gradient kcal/mol] [forward Metropolis] [backward Metropolis] [u_kl]
###Markdown
The file contains information on:- when it was generated, - what version of Sire/SOMD was used and - what essential simulation parameters were used. In particular you find information on - how long the simulation was run, - what the saving interval was, - what λ value was used, - what the other alchemical values were, - and the temperature of the simulation. These are all recorded in the comments, as indicated by the `#`. The data in the file is then contained in the following columns, where headings are indicated: the step, the current potential energy in kcal/mol, the gradient (used for thermodynamic integration) in kcal/mol, the forward and backward Metropolis values (not relevant for this tutorial), and the reduced potential energies (unitless) evaluated at each alchemical state. Don't worry if this is a bit confusing, you don't really have to do anything with it, but it is necessary input for MBAR. Actual data analysis using pymbar and thermodynamic integrationIn the following we will look at how we can extract the free energy differences using `analyse_freenrg mbar` to compute the relative hydration free energy of ethane and methanol. We will compute this free energy difference using the available command line interface. Using the commandline interface `analyse_freenrg mbar`You can open a terminal and compute a free energy from the simulations of the solvated molecule in water going through a perturbation, and the free energy from the vacuum simulation. You can either use the terminal along with the notebook or look at the cells below to execute a bash command. In order to get an overview of all the functionality of the tool you can simply type `analyse_freenrg mbar --help`. Solute in WaterFor this you can either use the terminal to type: ` analyse_freenrg mbar -i data/ethane~methanol/free/run001/output/lambda-*/simfile.dat -o free.dat --subsampling` or invoke the next cell. This uses pymbar and computes a single ΔG for the perturbation of ethane to methanol while solvated in water. The output file `free.dat` contains all useful data for further analysis and we will look at it in a bit.
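(Before running the analysis, if you would like to inspect the numbers in `simfile.dat` yourself, one way -- sketched here under the assumption that the column layout matches the header shown above -- is to skip the `#` comment lines and load the rest with numpy:)

```python
import numpy as np

# Comment lines in simfile.dat start with '#'; the remaining columns are the step,
# potential, gradient, forward/backward Metropolis values and the reduced
# potentials u_kl at each lambda (path taken from the head command above).
data = np.loadtxt('../data/ethane~methanol/free/run001/output/lambda-0.000/simfile.dat',
                  comments='#')
print(data.shape)    # (number of samples, number of columns)
print(data[:3, :5])  # first few rows of the leading columns
```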
###Code
!analyse_freenrg mbar -i ../data/ethane~methanol/free/run001/output/lambda-*/simfile.dat -o free.dat --subsampling
###Output
Starting analyse_freenrg: number of threads equals 8
Simulation data is analysed using the python module pymbar
----------------------------------------------------------
# Writing all output to file free.dat
#Lambda array was not given, trying to infer lambda values from simulation files...
working on input file ../data/ethane~methanol/free/run001/output/lambda-0.000/simfile.dat
working on input file ../data/ethane~methanol/free/run001/output/lambda-0.100/simfile.dat
working on input file ../data/ethane~methanol/free/run001/output/lambda-0.200/simfile.dat
working on input file ../data/ethane~methanol/free/run001/output/lambda-0.300/simfile.dat
working on input file ../data/ethane~methanol/free/run001/output/lambda-0.400/simfile.dat
working on input file ../data/ethane~methanol/free/run001/output/lambda-0.500/simfile.dat
working on input file ../data/ethane~methanol/free/run001/output/lambda-0.600/simfile.dat
working on input file ../data/ethane~methanol/free/run001/output/lambda-0.700/simfile.dat
working on input file ../data/ethane~methanol/free/run001/output/lambda-0.800/simfile.dat
working on input file ../data/ethane~methanol/free/run001/output/lambda-0.900/simfile.dat
working on input file ../data/ethane~methanol/free/run001/output/lambda-1.000/simfile.dat
#Subsampling energies according to statistical inefficiency for pymbar
#Subsampling gradients according to statistical inefficiency
#running mbar ====================================================
K (total states) = 11, total samples = 11330
N_k =
[1030 1030 1030 1030 1030 1030 1030 1030 1030 1030 1030]
There are 11 states with samples.
Initializing free energies to zero.
Initial dimensionless free energies with method zeros
f_k =
[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
Final dimensionless free energies
f_k =
[ 0. -0.58552531 -0.71294229 -0.6791611 -0.69948915 -0.90844967
-1.37313811 -2.13558111 -3.22756857 -4.67884 -6.50042645]
MBAR initialization complete.
#running mbar done ===============================================
###Markdown
Solute in VacuumNow we have to do the same analysis for the vacuum simulations. Again type on the command line:`analyse_freenrg mbar -i data/ethane~methanol/vacuum/run001/output/lambda-*/simfile.dat -o vacuum.dat --subsampling`.Note how we have now replaced the `free` directory with `vacuum` and use a different output file `vacuum.dat`. Below again is a simple way of executing it in this notebook.
###Code
!analyse_freenrg mbar -i ../data/ethane~methanol/vacuum/run001/output/lambda-*/simfile.dat -o vacuum.dat --subsampling
###Output
Starting analyse_freenrg: number of threads equals 8
Simulation data is analysed using the python module pymbar
----------------------------------------------------------
# Writing all output to file vacuum.dat
#Lambda array was not given, trying to infer lambda values from simulation files...
working on input file ../data/ethane~methanol/vacuum/run001/output/lambda-0.00/simfile.dat
working on input file ../data/ethane~methanol/vacuum/run001/output/lambda-0.10/simfile.dat
working on input file ../data/ethane~methanol/vacuum/run001/output/lambda-0.20/simfile.dat
working on input file ../data/ethane~methanol/vacuum/run001/output/lambda-0.30/simfile.dat
working on input file ../data/ethane~methanol/vacuum/run001/output/lambda-0.40/simfile.dat
working on input file ../data/ethane~methanol/vacuum/run001/output/lambda-0.50/simfile.dat
working on input file ../data/ethane~methanol/vacuum/run001/output/lambda-0.60/simfile.dat
working on input file ../data/ethane~methanol/vacuum/run001/output/lambda-0.70/simfile.dat
working on input file ../data/ethane~methanol/vacuum/run001/output/lambda-0.80/simfile.dat
working on input file ../data/ethane~methanol/vacuum/run001/output/lambda-0.90/simfile.dat
working on input file ../data/ethane~methanol/vacuum/run001/output/lambda-1.00/simfile.dat
#Subsampling energies according to statistical inefficiency for pymbar
#Subsampling gradients according to statistical inefficiency
#running mbar ====================================================
K (total states) = 11, total samples = 58256
N_k =
[5296 5296 5296 5296 5296 5296 5296 5296 5296 5296 5296]
There are 11 states with samples.
Initializing free energies to zero.
Initial dimensionless free energies with method zeros
f_k =
[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
Final dimensionless free energies
f_k =
[ 0. 0.17459461 0.56051859 1.008912 1.47343312 1.93875809
2.39987819 2.85595365 3.30813418 3.75842606 4.20938917]
MBAR initialization complete.
#running mbar done ===============================================
###Markdown
Looking at the mbar outputWe have now generated the files `free.dat` and `vacuum.dat`. At the bottom of each file we have information on the computed free energy difference for the simulation in solution and in vacuum.
###Code
!tail free.dat
!tail vacuum.dat
###Output
0.5000 1.1749
0.6000 1.4457
0.7000 1.7090
0.8000 1.9682
0.9000 2.2234
1.0000 2.4770
#MBAR free energy difference in kcal/mol:
2.492748, 0.009131
#TI free energy difference in kcal/mol:
2.477022
###Markdown
Note how the free energy was computed with both thermodynamic integration and MBAR. One way of checking that the free energy estimates are reasonable is to compare these two different estimators to each other. They arrive at the free energy in very different ways, so close agreement is usually a good indication of well-converged simulations. Now we want to calculate ΔΔG, the relative hydration free energy of ethane and methanol. It is given by this formula: Let's extract the relevant information from the files we computed and find out the hydration free energy.
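Written out in the notation of the variables below, the formula referred to above is $$\Delta\Delta G_{\mathrm{hydration}} = \Delta G_{\mathrm{solvated}} - \Delta G_{\mathrm{vacuum}},$$ which is what we will compute from the `DG_solv_*` and `DG_vac_*` values extracted next.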
###Code
#helper function to extract DG from the freenrg_analysis generated files.
def get_nth_line(fname, line):
fh = open (fname, 'r')
lineList = fh.readlines()
fh.close()
return lineList[line].split(',')
vac_mbar = get_nth_line('vacuum.dat', -3)
vac_ti = get_nth_line('vacuum.dat', -1)
solv_mbar = get_nth_line('free.dat', -3)
solv_ti = get_nth_line('free.dat', -1)
#mbar
DG_vac_mbar = float(vac_mbar[0])
DG_solv_mbar = float(solv_mbar[0])
#ti
DG_vac_ti = float(vac_ti[0])
DG_solv_ti = float(solv_ti[0])
###Output
_____no_output_____
###Markdown
**2. Task: Compute ΔΔG according to the above formula using the data in the variables `DG_vac_mbar`, `DG_solv_mbar` for the free energy estimate using MBAR**
###Code
#insert code here
DDG_mbar = DG_solv_mbar- DG_vac_mbar
###Output
_____no_output_____
###Markdown
**3. Task: Compute ΔΔG according to the above formula using the data in the variables `DG_vac_ti`, `DG_solv_ti` for the free energy estimate using TI**
###Code
#insert code here
DDG_ti =DG_solv_ti-DG_vac_ti
###Output
_____no_output_____
###Markdown
**The experimental absolute free energy of hydration of ethane is +1.8 kcal/mol. The experimental absolute free energy of hydration of methanol is -5.1 kcal/mol. How well do the experimental results compare to the computed result?** Errors and looking at more dataWe are running a simulation with limited data under the assumption that a time average over the data is a good representation of the average observable, i.e. the free energy in this case. Doing an error analysis is important. MBAR has an inbuilt error estimate and we can compute the error to go along with the computed `DDG_mbar` variable by doing some error propagation: **4. Task: Compute the error on ΔΔG using the rules of error propagation.**
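Since the solvated and vacuum estimates are independent, their uncertainties add in quadrature: $$\sigma_{\Delta\Delta G} = \sqrt{\sigma_{\Delta G_{\mathrm{vac}}}^{2} + \sigma_{\Delta G_{\mathrm{solv}}}^{2}}.$$ This is the rule applied in the solution cell below.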
###Code
#retrieving the error
DG_vac_mbar_error = float(vac_mbar[1])
DG_solv_mbar_error = float(solv_mbar[1])
#Now insert the code needed to estiamte the error:
DDG_error = np.sqrt(DG_vac_mbar_error*DG_vac_mbar_error+DG_solv_mbar_error*DG_solv_mbar_error)
print ('The relative hydration free energy of ethane and methanol '
       'is %.2f ± %.2f kcal/mol' %(DDG_mbar,DDG_error))
###Output
The relative hydration free energy of ethane and methanol is -6.34 ± 0.04 kcal/mol
###Markdown
In order to obtain reliable error estimates it is strongly recommended to have multiple independent runs of your simulations. These independent runs can then be used to get more accurate error estimates. Comparing PMF of MBAR and TIThe free energy files produced by `analyse_freenrg mbar` contain additional information, such as the potential of mean force (PMF) over the different λ windows. Again we can easily compare MBAR estimates with TI estimates this way. Furthermore we can also look at the average gradient used for TI. In the following we will explore how to plot these.
###Code
#Let's read the free.dat file
fh = open ('free.dat', 'r')
lines = fh.readlines()
fh.close()
#Now we need to find the PMF in the file
#Let's loop over the lines and extract the data
count = 0
num_lambdas = 11
pmf_mbar = []
pmf_ti = []
for line in lines:
if line.startswith('#PMF from MBAR'):
pmf_mbar = lines[(count+1):(count+1+num_lambdas)]
if line.startswith('#PMF from TI'):
pmf_ti = lines[(count+1):(count+1+num_lambdas)]
count = count +1
for i in range(len(pmf_mbar)):
temp = pmf_mbar[i].strip().split(' ')
float_temp = [float(i) for i in temp]
pmf_mbar[i] = float_temp
pmf_mbar =np.array(pmf_mbar)
for i in range(len(pmf_ti)):
temp = pmf_ti[i].strip().split(' ')
float_temp = [float(i) for i in temp]
pmf_ti[i] = float_temp
pmf_ti =np.array(pmf_ti)
###Output
_____no_output_____
###Markdown
Now we have two numpy arrays, `pmf_ti` and `pmf_mbar`, which we can use to plot the PMFs. The first column of each array is the lambda value and the second column is the PMF; for the MBAR estimate the third column is the error on the PMF. We can now plot these. Make sure you understand how to read and write a plot like this.
###Code
plot(pmf_ti[:,0], pmf_ti[:,1], marker = 'o', label='TI')
errorbar(pmf_mbar[:,0], pmf_mbar[:,1], yerr=pmf_mbar[:,2], marker = '^', label='mbar')
xlabel(r'$\lambda$')
ylabel('PMF in kcal/mol')
legend()
sbn.despine()
###Output
_____no_output_____
###Markdown
Plotting the average gradientBelow we will create a similar plot as before using the average gradient.
###Code
## extracting gradient data
count = 0
num_lambdas = 11
avg_gradient = []
for line in lines:
if line.startswith('#TI average gradients'):
avg_gradient = lines[(count+1):(count+1+num_lambdas)]
break
count = count +1
for i in range(len(avg_gradient)):
temp = avg_gradient[i].strip().split(' ')
float_temp = [float(i) for i in temp]
avg_gradient[i] = float_temp
avg_gradient =np.array(avg_gradient)
###Output
_____no_output_____
###Markdown
**5. Task: Plot `avg_gradient` using errorbar from the example above.**
###Code
#Insert code here to plot the average gradient
errorbar(avg_gradient[:,0], avg_gradient[:,1], avg_gradient[:,2])
xlabel(r'$\lambda$')
ylabel(r'$\langle\frac{\partial U}{\partial \lambda}\rangle$ [kcal/mol]')
###Output
_____no_output_____
###Markdown
Best practices:So how do we know whether the simulation is actually reliable or not? There are some rules of thumb that can be used to assess this, i.e. follow some best practices. We follow the general guidelines presented by [Klimovich, Shirts and Mobley](https://dx.doi.org/10.1007%2Fs10822-015-9840-9)The paper suggests the following:"Conceptually, we break analysis into four main stages: - subsampling the data to retain uncorrelated samples - calculating free energy differences along with the corresponding statistical errors via a variety of TI- and FEP-based methods - producing textual and graphical outputs of the computed data, inspecting: 1. for convergence and identifying the equilibrated portion of the simulation 2. good phase space overlap for all pairs of adjacent lambda states" You will find that we have already addressed most of these best practice ideas. Indeed we used the `--subsampling` flag in order to obtain uncorrelated samples from the data when running our analysis and we are comparing the free energy estimate using an integral based method (TI) and a reweighting method (MBAR). What we haven't done yet is look at phase space overlap and detect equilibration. This will be handled to some extent in the advanced tasks. In the following we will look at the phase space overlap of adjacent lambda states. The overlap matrix can be used to look at the phase space overlap of neighbouring lambdas. By adding the flag `--overlap` this matrix will be automatically computed and added to the output file. So let's look at the overlap matrix for the simulation in solution and in vacuum. This time we will write out files called `free_overlap.dat` and `vacuum_overlap.dat`
###Code
%%capture run_info_vacuum
#Let's run the analysis again with the keyword --overlap
!analyse_freenrg mbar -i ../data/ethane~methanol/vacuum/run001/output/lambda-*/simfile.dat -o vacuum_overlap.dat --subsampling --overlap
%%capture run_info_solution
!analyse_freenrg mbar -i ../data/ethane~methanol/free/run001/output/lambda-*/simfile.dat -o free_overlap.dat --subsampling --overlap
#A helper function to read the overlap matrix from file
def get_overlp_matrix(filename, n_states=11):
fh = open (filename, 'r')
lines = fh.readlines()
fh.close()
count = 0
matrix = []
for line in lines:
if line.startswith('#Overlap'):
matrix = lines[(count+1):(count+1+n_states)]
break
count = count+1
for i in range(len(matrix)):
temp = matrix[i].strip().split(' ')
float_temp = [float(j) for j in temp]
matrix[i] = float_temp
matrix =np.array(matrix)
return matrix
overlap_vacuum = get_overlp_matrix('vacuum_overlap.dat')
overlap_solv = get_overlp_matrix('free_overlap.dat')
###Output
_____no_output_____
###Markdown
Plotting the overlap matricesThe seaborn plotting library has a nice advanced heat map feature that allows you to not only plot a pictorial image of a matrix but also add the numerical values, making it easier to read the plot.
###Code
fig = figure(figsize=(12,12))
ax = sbn.heatmap(overlap_vacuum, annot=True, fmt='.2f', linewidths=.5, annot_kws={"size": 12})
ax.set_xlabel(r'$\lambda$ index')
ax.set_ylabel(r'$\lambda$ index')
ax.set_title('Overlap matrix in vacuum')
fig = figure(figsize=(12,12))
ax = sbn.heatmap(overlap_solv, annot=True, fmt='.2f', linewidths=.5, annot_kws={"size": 12})
ax.set_xlabel(r'$\lambda$ index')
ax.set_ylabel(r'$\lambda$ index')
ax.set_title('Overlap matrix in solution')
###Output
_____no_output_____
###Markdown
Example of a bad overlap matrixBelow we have the same simulation as before, but with the number of lambda windows reduced from 11 to 6. What do you observe in terms of the overlap matrix?
###Code
## insert code in order to plot the overlap matrix for the reduced number of lambda windows.
overlap_bad = get_overlp_matrix('../data/ethane~methanol/6-lambda-overlap.dat', n_states=6)
fig = figure(figsize=(12,12))
ax = sbn.heatmap(overlap_bad, annot=True, fmt='.2f', linewidths=.5, annot_kws={"size": 12})
ax.set_xlabel(r'$\lambda$ index')
ax.set_ylabel(r'$\lambda$ index')
ax.set_title('Overlap matrix with 6 lambda')
###Output
_____no_output_____
###Markdown
Advanced tasksCompare your estimate using less data. `analyse_freenrg mbar` has the option `--discard`. This will run your analysis with a number of frames discarded from the start of the simulation. Advanced tasks:1. Rerun the analysis discarding 1000 frames from the start, writing out new free.dat and vacuum.dat files (a possible invocation is sketched below). 2. Compute the relative free energy of hydration from the new analysis. How does the value differ from your original estimate? Does it compare better or worse to the experimental value?3. Plot the PMF of the original analysis and the one with the initial 1000 frames discarded for the simulations in solution. What do you observe?4. Explore the various plots and overlaps for the vacuum simulation as well.
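(For task 1, a possible invocation is sketched below; this assumes `--discard` takes the number of frames to drop as its argument, and the output file names `free_discard1000.dat` / `vacuum_discard1000.dat` are just example choices:)

```python
# Sketch only: discard the first 1000 frames of each lambda window before the analysis
!analyse_freenrg mbar -i ../data/ethane~methanol/free/run001/output/lambda-*/simfile.dat \
    -o free_discard1000.dat --subsampling --discard 1000
!analyse_freenrg mbar -i ../data/ethane~methanol/vacuum/run001/output/lambda-*/simfile.dat \
    -o vacuum_discard1000.dat --subsampling --discard 1000
```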
###Code
#Rerun with discarding 1000 frames from the vacuum simulation here:
#Rerun with discarding 1000 frames from the solution simulation here:
#Compute the hydration free energy from this new estimate:
#Plot the PMF of the MBAR analysis of the original analysis in comparison to the
#MBAR analysis where the first 1000 frames were discarded.
###Output
_____no_output_____
|
track-behavior.ipynb
|
###Markdown
Height
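The height estimate used below follows a simple pinhole-camera style relation: the apparent wingspan in pixels shrinks in proportion to 1/distance, so the height is roughly HCONST * WINGSPAN / wing_pixels (this is the `constant / wing_pixels` curve plotted a few cells further down). A minimal sketch of that relation is shown here; the actual `bf.calculate_height` used below may differ in detail.

```python
def estimate_height(mean_wing_pixels, hconst, wingspan_m):
    # For a pinhole camera the apparent size falls off as 1/distance, so
    # distance (here: flight height) = camera constant * true wingspan / pixel wingspan.
    return hconst * wingspan_m / mean_wing_pixels

# e.g. estimate_height(100, 1454.9, 0.8) is roughly 11.6 metres
```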
###Code
shift = 48
HCONST = 1454.9 # pixels
FRAME_WIDTH = 2704 - (2 * shift)
WINGSPAN = .8 # meters, max extent while flying
for date, day_tracks in all_tracks.items():
for camera, tracks in day_tracks.items():
for track in tracks:
height = bf.calculate_height(track['mean_wing'], HCONST, WINGSPAN)
track['height'] = height
constant = WINGSPAN * HCONST
print(constant)
wing_pixels = np.arange(10, 300)
plt.plot(constant / wing_pixels)
plt.xlabel('Wingspan (pixels)')
plt.ylabel('Estimated height (meters)')
title = "Wingspan in pixels versus estimated height meters"
if save:
bf.save_fig(plots_save_folder, title)
camera_names = {'16Nov':['NotChyniangale', 'Chyniangale',
'BBC', 'FibweParking', 'FibwePublic',
'MusoleTower', 'MusolePath', 'MusoleParking',
'Sunset', 'Puku'],
'17Nov': ['NotChyniangale', 'Chyniangale',
'BBC', 'FibweParking2', 'FibwePublic',
'MusoleTower', 'MusolePath2',
'Sunset', 'Puku'],
'18Nov': ['NotChyniangale', 'Chyniangale',
'BBC', 'FibweParking', 'FibwePublic',
'MusoleTower', 'MusoleParking',
'Sunset', 'Puku'],
'19Nov': ['NotChyniangale', 'Chyniangale',
'BBC', 'FibweParking', 'FibwePublic',
'MusoleTower', 'MusolePath', 'MusoleParking',
'Sunset', 'Puku'],
'20Nov': ['NotChyniangale', 'Chyniangale',
'BBC', 'FibweParking', 'FibwePublic',
'MusoleTower', 'MusoleParking',
'Sunset', 'Puku'],
}
lower_percentile=.05
upper_percentile=.95
total = 0
total_tracks = 0
for date, day_tracks in all_tracks.items():
# camera_names = []
heights = []
for t_ind, (camera, tracks) in enumerate(day_tracks.items()):
# camera_names.append(camera)
camera_heights = []
for track in tracks:
total_tracks += 1
camera_heights.append(track['height'])
camera_heights = bf.get_middle_percentiles(camera_heights,
lower_percentile,
upper_percentile)
heights.extend(camera_heights)
total += len(heights)
print(f"{date} number of tracks {len(heights)}, .9 {len(heights) * .9}")
print(f"total {total} out of {total_tracks} .9: {.9 * total_tracks}")
lower_percentile = .05
upper_percentile = .95
plt.rcParams.update({'font.size': 15})
save = False
max_height = 0
max_y = 160
for date, day_tracks in all_tracks.items():
# camera_names = []
heights = []
xs = []
colors = []
for t_ind, camera in enumerate(camera_names[date]):
tracks = day_tracks[camera]
# for t_ind, (camera, tracks) in enumerate(day_tracks.items()):
# camera_names.append(camera)
camera_heights = []
for track in tracks:
camera_heights.append(track['height'])
camera_heights = bf.get_middle_percentiles(camera_heights,
lower_percentile,
upper_percentile)
xs.extend([t_ind for _ in camera_heights])
heights.extend(camera_heights)
colors.append(camera_colors[camera])
fig, (ax1) = plt.subplots(1, 1)
if max(heights) > max_height:
max_height = max(heights)
palette = sns.color_palette(colors)
sns.violinplot(x=xs, y=heights, ax=ax1, palette=palette)
ax1.set_title(f"{date}")
ax1.set_xticklabels(camera_names[date], rotation=90)
ax1.set_ylabel("Height (meters)")
ax1.set_ylim(0, max_y)
title = (f"{date} height distributions "
+ f"lower percentile {lower_percentile} "
+ f"upper percentile {upper_percentile} "
+ f"max yaxis {max_y}")
if save:
bf.save_fig(plots_save_folder, title)
print(max_height)
np.max(heights)
colors
lower_percentile = .05
upper_percentile = .95
save = True
plt.rcParams.update({'font.size': 15})
max_freq = 0
for date, day_tracks in all_tracks.items():
freqs = []
xs = []
colors = []
for t_ind, camera in enumerate(camera_names[date]):
tracks = day_tracks[camera]
camera_freqs = []
colors.append(camera_colors[camera])
for track in tracks:
camera_freqs.append(track['peak_freq'])
camera_freqs = bf.get_middle_percentiles(camera_freqs,
lower_percentile,
upper_percentile)
if max_freq < np.max(camera_freqs):
max_freq = np.max(camera_freqs)
xs.extend([t_ind for _ in camera_freqs])
freqs.extend(camera_freqs)
fig, (ax1) = plt.subplots(1, 1)
palette = sns.color_palette(colors)
sns.violinplot(x=xs, y=freqs, ax=ax1, palette=palette)
ax1.set_title(f"{date}")
ax1.set_xticklabels(camera_names[date], rotation=90)
ax1.set_ylabel("Wingbeats per second")
ax1.set_ylim(0, 5.0)
title = (f"{date} wingbeat distributions "
+ f"lower percentile {lower_percentile} "
+ f"upper percentile {upper_percentile}"
+ f"big font")
if save:
bf.save_fig(plots_save_folder, title)
print(max_freq)
save = False
lower_percentile = .05
upper_percentile = .95
for date_ind, (date, day_tracks) in enumerate(all_tracks.items()):
colors = []
straightness_measures = []
all_cameras = []
xs = []
camera_names = []
for cam_ind, (camera, tracks) in enumerate(day_tracks.items()):
camera_names.append(camera)
camera_straightness = []
for track in tracks:
camera_straightness.append(
bf.calculate_straightness(track['track']))
camera_straightness = bf.get_middle_percentiles(camera_straightness,
lower_percentile,
upper_percentile)
straightness_measures.extend(camera_straightness)
xs.extend([cam_ind for _ in camera_straightness])
colors.append(camera_colors[camera])
fig, ax = plt.subplots(1, 1)
palette = sns.color_palette(colors)
# sns.violinplot(x=xs, y=heights, ax=ax1, )
sns.violinplot(x=xs, y=straightness_measures,
ax=ax, cut=0, palette=palette)
ax.set_title(f"{date}")
ax.set_xticklabels(camera_names, rotation=90)
ax.set_ylabel("Track Straightness")
title = (f"{date} track straightness "
+ f"lower percentile {lower_percentile} "
+ f"upper percentile {upper_percentile}")
if save:
bf.save_fig(plots_save_folder, title)
len(tracks)
for track in tracks[4::500]:
plt.figure(figsize=(5,5))
camera_straightness = bf.calculate_straightness(track['track'])
plt.title(camera_straightness)
plt.scatter(track['track'][:,0], track['track'][:,1], s=.1)
plt.gca().axis('equal')
shift = 0 # loss on each side from not padding during detection (48)
FRAME_WIDTH = 2704 - (2 * shift)
WINGSPAN = .8 # meters, max extent while flying
center_utm = {'middle': np.array([200450, 8606950]),
'right': np.array([200800, 8606900])}
camera_locations = {'FibweParking2': [-12.5903393, 30.2525047],
'FibweParking': [-12.5903393, 30.2525047],
'Chyniangale': [-12.5851284, 30.245529],
'BBC': [-12.5863538, 30.2484985],
'Sunset': [-12.585784, 30.240003],
'NotChyniangale': [-12.5849206, 30.2436135],
'MusoleParking': [-12.58787, 30.2401],
'MusolePath2': [-12.589544, 30.242488],
'MusolePath': [-12.589544, 30.242488],
'Puku': [-12.584838, 30.24137],
'FibwePublic': [-12.592537, 30.2515924],
'MusoleTower': [-12.589434, 30.244736],
}
all_camera_utms = bf.latlong_dict_to_utm(camera_locations)
root_folder = ".../kasanka-bats/processed/deep-learning"
observations_root = os.path.join(root_folder, "observations")
all_observations = {}
day_folders = sorted(glob.glob(os.path.join(observations_root, '*')))
for day_folder in day_folders:
obs_files = sorted(glob.glob(os.path.join(day_folder, '*.npy')))
date = os.path.basename(day_folder)
all_observations[date] = {}
for obs_file in obs_files:
camera = os.path.splitext(obs_file)[0].split('-')[-1]
obs = np.load(obs_file, allow_pickle=True)
# .item() to get dict from inside the array that was wrapped around
# it when using np.save()
all_observations[date][camera] = obs.item()
# Remove observations to exclude (because camera ran out of batteries etc.)
exclude=True
# Manually exclude cameras that had issues
all_observations['17Nov']['MusoleParking']['exclude'] = True
all_observations['18Nov']['MusolePath']['exclude'] = True
all_observations['20Nov']['MusolePath']['exclude'] = True
if exclude:
good_obs = {}
for date, day_obs in all_observations.items():
good_obs[date] = {}
for camera, obs in day_obs.items():
if 'exclude' in obs.keys():
if obs['exclude']:
continue
good_obs[date][camera] = obs
all_observations = good_obs
axis_labels_day_ind = 0
save=False
center = 'middle'
all_figs = []
all_axs = []
for day_ind, (date, day) in enumerate(all_observations.items()):
fig, axs = plt.subplots(1, 1, subplot_kw=dict(polar=True))
all_figs.append(fig)
all_axs.append(axs)
max_bats_per_degree = 0
max_camera = None
max_date = None
min_bats_per_degree = 100000
min_camera = None
min_date = None
max_height = 0
for day_ind, ((date, day), fig, axs) in enumerate(zip(all_observations.items(), all_figs, all_axs)):
day_total, day_total_mean = bf.get_day_total(day, center_utm[center], all_camera_utms,
FRAME_WIDTH, WINGSPAN, exclude=exclude)
fractions = []
cameras = []
totals = []
contribution = []
angles = []
colors = []
camera_utms = bf.get_camera_locations(day, all_camera_utms, exclude=True)
print(date, len(camera_utms))
day_densities = []
camera_angles = bf.get_camera_angles(camera_utms, center_utm[center])
for camera, obs in day.items():
if exclude:
if 'exclude' in obs.keys():
if obs['exclude']:
print('excluding observation...')
continue
fractions.append(obs['fraction_total'])
cameras.append(camera)
density = obs['total'] / 360
totals.append(density)
angles.append(camera_angles[camera])
colors.append(camera_colors[camera])
        day_densities.append(density)
if density > max_bats_per_degree:
max_bats_per_degree = density
max_camera = camera
max_date = date
if density < min_bats_per_degree:
min_bats_per_degree = density
min_camera = camera
min_date = date
print()
total_ind = 0
axs.set_thetalim(-np.pi, np.pi)
axs.scatter(angles, totals, color=colors)
# if axis_labels_day_ind == day_ind:
axs.set_xticks(angles)
_ = axs.set_xticklabels(cameras, fontweight='bold')
axs.set_title(f"{date}")
axs.set_rlim(0, 12000)
axs.text(np.radians(0),
3*axs.get_rmax()/4.,
'Bats per degree',
rotation=0,
ha='center', va='center')
_ = axs.set_rticks([4000, 8000])
title = f"{date} bat density "
if save:
bf.save_fig(plots_save_folder, title, fig=fig)
print(f"min camera {min_camera}, {min_date}: {min_bats_per_degree}")
print(f"max camera {max_camera}, {max_date}: {max_bats_per_degree}")
camera_angles
axis_labels_day_ind = 0
save=True
center = 'middle'
all_figs = []
all_axs = []
for day_ind, (date, day) in enumerate(all_observations.items()):
fig, axs = plt.subplots(1, 1, subplot_kw=dict(polar=True))
all_figs.append(fig)
all_axs.append(axs)
max_bats_per_degree = 0
max_bats_per_camera = 0
max_height = 0
for day_ind, ((date, day), fig, axs) in enumerate(zip(all_observations.items(), all_figs, all_axs)):
day_total, day_total_mean = bf.get_day_total(day, center_utm[center], all_camera_utms,
FRAME_WIDTH, WINGSPAN, exclude=exclude)
fractions = []
cameras = []
distances = []
angles = []
colors = []
camera_utms = bf.get_camera_locations(day, all_camera_utms, exclude=True)
print(date, len(camera_utms))
camera_angles = bf.get_camera_angles(camera_utms, center_utm[center])
camera_distances = bf.get_camera_distances(camera_utms, center_utm[center])
for camera, obs in day.items():
colors.append(camera_colors[camera])
cameras.append(camera)
distances.append(camera_distances[camera])
angles.append(camera_angles[camera])
total_ind = 0
axs.set_thetalim(-np.pi, np.pi)
axs.scatter(angles, distances, color=colors)
# if axis_labels_day_ind == day_ind:
axs.set_xticks(angles)
_ = axs.set_xticklabels(cameras, fontweight='bold')
axs.set_title(f"{date}")
# axs.set_rlim(0, 12000)
axs.text(np.radians(0),
3*axs.get_rmax()/4.,
'Meters',
rotation=0,
ha='center', va='center',
color='gray')
_ = axs.set_rticks([400, 800, 1200])
axs.tick_params(axis='x', colors='gray')
axs.tick_params(axis='y', colors='gray') #setting up Y-axis tick color to black
axs.spines['polar'].set_color('gray')
title = f"{date} camera distance gray"
if save:
bf.save_fig(plots_save_folder, title, fig=fig)
axis_labels_day_ind = 0
save=True
center = 'middle'
all_figs = []
all_axs = []
for day_ind, (date, day) in enumerate(all_observations.items()):
fig, axs = plt.subplots(1, 1, subplot_kw=dict(polar=True))
all_figs.append(fig)
all_axs.append(axs)
max_bats = 0
max_camera = None
max_date = None
min_bats = 100000
min_camera = None
min_date = None
max_height = 0
for day_ind, ((date, day), fig, axs) in enumerate(zip(all_observations.items(), all_figs, all_axs)):
day_total, day_total_mean = bf.get_day_total(day, center_utm[center], all_camera_utms,
FRAME_WIDTH, WINGSPAN, exclude=exclude)
fractions = []
cameras = []
totals = []
contribution = []
angles = []
colors = []
camera_utms = bf.get_camera_locations(day, all_camera_utms, exclude=True)
print(date, len(camera_utms))
day_density = []
camera_angles = bf.get_camera_angles(camera_utms, center_utm[center])
for camera, obs in day.items():
if exclude:
if 'exclude' in obs.keys():
if obs['exclude']:
print('excluding observation...')
continue
fractions.append(obs['fraction_total'])
cameras.append(camera)
camera_total = obs['total'] / 360 #* obs['fraction_total']
totals.append(camera_total)
angles.append(camera_angles[camera])
colors.append(camera_colors[camera])
        day_density.append(camera_total)
if camera_total > max_bats:
max_bats = camera_total
max_camera = camera
max_date = date
if camera_total < min_bats:
min_bats = camera_total
min_camera = camera
min_date = date
total_ind = 0
axs.set_thetalim(-np.pi, np.pi)
# axs.scatter(angles, totals, color=colors)
axs.bar(angles, totals)
# if axis_labels_day_ind == day_ind:
axs.set_xticks(angles)
angles = np.array(angles)
angles_compass = (90-angles*180/np.pi).astype(int)
angles_compass = np.where(angles_compass < 0, angles_compass+360, angles_compass)
angles_compass = [str(a) + u"\u00b0" for a in angles_compass]
_ = axs.set_xticklabels(angles_compass, fontweight='bold')
axs.set_title(f"{date}")
axs.set_rlim(0, max_bats+500)
axs.text(np.radians(50),
3*axs.get_rmax()/4.,
'Bats per degree',
rotation=50,
ha='center', va='center')
_ = axs.set_rticks([3000, 6000, 9000])
title = f"{date} bats per degree per camera radial histogram degree labels"
if save:
bf.save_fig(plots_save_folder, title, fig=fig)
print(f"min camera {min_camera}, {min_date}: {min_bats_per_degree}")
print(f"max camera {max_camera}, {max_date}: {max_bats_per_degree}")
axis_labels_day_ind = 0
save=True
center = 'middle'
all_figs = []
all_axs = []
for day_ind, (date, day) in enumerate(all_observations.items()):
fig, axs = plt.subplots(1, 1, subplot_kw=dict(polar=True))
all_figs.append(fig)
all_axs.append(axs)
max_bats = 0
max_camera = None
max_date = None
min_bats = 100000
min_camera = None
min_date = None
max_height = 0
for day_ind, ((date, day), fig, axs) in enumerate(zip(all_observations.items(), all_figs, all_axs)):
day_total, day_total_mean = bf.get_day_total(day, center_utm[center], all_camera_utms,
FRAME_WIDTH, WINGSPAN, exclude=exclude)
    densities = np.ones(360)
    fractions = []  # re-initialised here; appended to below but not otherwise used in this cell
    cameras = []
camera_utms = bf.get_camera_locations(day, all_camera_utms, exclude=True)
camera_borders = bf.get_camera_borders(camera_utms, center_utm[center])
print(date, len(camera_utms))
camera_angles = bf.get_camera_angles(camera_utms, center_utm[center])
for camera, obs in day.items():
if exclude:
if 'exclude' in obs.keys():
if obs['exclude']:
print('excluding observation...')
continue
fractions.append(obs['fraction_total'])
cameras.append(camera)
camera_total = obs['total'] / 360 #* obs['fraction_total']
lower = camera_angles[camera] - camera_borders[camera]['clock_angle']
upper = camera_angles[camera] - camera_borders[camera]['cclock_angle']
lower = lower * 180 / np.pi
upper = upper * 180 / np.pi
lower = (lower).astype(int)
lower = np.where(lower < 0, lower+360, lower)
upper = (upper).astype(int)
upper = np.where(upper < 0, upper+360, upper)
if lower > upper:
densities[int(lower):] = camera_total
densities[:int(upper)] = camera_total
else:
densities[int(lower):int(upper)+1] = camera_total
# print(lower, upper)
# break
axs.set_thetalim(0, 2*np.pi)
# axs.scatter(angles, totals, color=colors)
size = 5
axs.bar((np.arange(360)*(np.pi/180))[::size], densities[::size]/np.max(densities[::size]),
width=size*np.pi/180, edgecolor = "blue", color='blue')
if axis_labels_day_ind == day_ind:
angles = [90, 45, 0, 315, 270, 225, 180, 135]
angles = np.array(angles)
angles_compass = [str(a) + u"\u00b0" for a in angles]
_ = axs.set_xticklabels(angles_compass, fontweight='bold')
axs.set_title(f"{date}")
_ = axs.set_rticks([.25, .5, .75, 1])
axs.set_yticklabels([])
# axs.set_rlim(0, max_bats+500)
# axs.text(np.radians(50),
# 3*axs.get_rmax()/4.,
# 'Bats per degree',
# rotation=50,
# ha='center', va='center')
# _ = axs.set_rticks([])
title = f"{date} fly out fractions"
if save:
bf.save_fig(plots_save_folder, title, fig=fig)
# print(f"min camera {min_camera}, {min_date}: {min_bats_per_degree}")
# print(f"max camera {max_camera}, {max_date}: {max_bats_per_degree}")
fig, axs = plt.subplots(1, 1, subplot_kw=dict(polar=True), figsize=(20,20))
plt.scatter(np.arange(360), densities, s=1)
axis_labels_day_ind = 0
save=False
center = 'middle'
all_figs = []
all_axs = []
for day_ind, (date, day) in enumerate(all_observations.items()):
fig, axs = plt.subplots(1, 1, subplot_kw=dict(polar=True))
all_figs.append(fig)
all_axs.append(axs)
max_bats_per_degree = 0
max_bats_per_camera = 0
max_height = 0
zipped = zip(all_observations.items(), all_figs, all_axs)
for day_ind, ((date, day), fig, axs) in enumerate(zipped):
day_total, day_total_mean = bf.get_day_total(day, center_utm[center], all_camera_utms,
FRAME_WIDTH, WINGSPAN, exclude=exclude)
fractions = []
cameras = []
totals = []
contribution = []
angles = []
colors = []
polarizations = []
camera_utms = bf.get_camera_locations(day, all_camera_utms, exclude=True)
print(date, len(camera_utms))
camera_angles = bf.get_camera_angles(camera_utms, center_utm[center])
for camera, obs in day.items():
fractions.append(obs['fraction_total'])
cameras.append(camera)
totals.append(obs['total']/360)
angles.append(camera_angles[camera])
colors.append(camera_colors[camera])
polarizations.append(bf.calculate_polarization(all_tracks[date][camera]))
max_bats_per_degree_day = np.max(totals)
if max_bats_per_degree_day > max_bats_per_degree:
max_bats_per_degree = max_bats_per_degree_day
total_ind = 0
axs.set_thetalim(-np.pi, np.pi)
axs.scatter(angles, polarizations, color=colors)
# if axis_labels_day_ind == day_ind:
axs.set_xticks(angles)
_ = axs.set_xticklabels(cameras, fontweight='bold')
axs.set_title(f"{date}")
axs.set_rlim(0, 1.0)
axs.text(np.radians(0),
3*axs.get_rmax()/4.,
'Polarization',
rotation=0,
ha='center', va='center')
title = f'{date} bat polarization center {center}'
if save:
bf.save_fig(plots_save_folder, title, fig=fig)
polarizations = []
days = []
for date_ind, (date, day_tracks) in enumerate(all_tracks.items()):
for camera, tracks in day_tracks.items():
polarizations.append(bf.calculate_polarization(tracks))
days.append(camera)
plt.scatter(days, polarizations)
date = '19Nov'
num_tracks = len(all_tracks[date]['BBC'])
draw_tracks = True
print(f"{num_tracks} tracks")
for camera, camera_tracks in all_tracks[date].items():
if camera != "MusolePath":
continue
num_tracks = len(camera_tracks)
p_vals = []
for ind in range(num_tracks // 1000):
if draw_tracks:
plt.figure()
tracks = []
for track_ind in range(ind*1000, (ind+1)*1000, 50):
track = camera_tracks[track_ind]['track']
tracks.append(camera_tracks[track_ind])
if draw_tracks:
plt.scatter( track[:, 0], track[:,1], s=1)
plt.plot([track[0, 0], track[-1, 0]], [track[0,1], track[-1, 1]])
plt.scatter([track[0, 0], track[-1, 0]],
[track[0,1], track[-1, 1]], c=['red', 'blue'])
plt.gca().set_aspect('equal')
p_vals.append(bf.calculate_polarization(tracks))
if draw_tracks:
plt.title(p_vals[-1])
plt.figure()
plt.plot(p_vals)
plt.title(camera)
len(all_tracks['19Nov']['Chyniangale'])
for ind in range(9):
tracks = all_tracks['19Nov']['Chyniangale'][ind*1000:(ind+1)*1000]
print(bf.calculate_polarization(tracks))
for camera, tracks in day_tracks.items():
camera_straightness = []
for track in tracks:
camera_straightness.append(
straightness(track['track']))
plt.figure()
plt.scatter(np.arange(len(camera_straightness)), camera_straightness, alpha=.1)
np.stack([x, y], 1).shape
track_ind = 1000
track = tracks[track_ind]
track = np.copy(track['track'])
plt.figure(figsize=(20,20))
plt.scatter(track[:, 0], track[:, 1], s=1)
x = np.convolve(track[:, 0], kernel, mode='valid')
y = np.convolve(track[:, 1], kernel, mode='valid')
plt.scatter(x, y, s=1)
diff / np.linalg.norm(diff)
root_folder = ".../kasanka-bats/processed/deep-learning"
root_frame_folder = ".../Elements/bats"
frame_files = sorted(
glob.glob(os.path.join(root_frame_folder,
date,
camera,
"*/*.jpg"
)
)
)
frame_ind = 10000
frame = plt.imread(frame_files[frame_ind])
print(frame.shape)
bf.draw_tracks_on_frame(frame, frame_ind, tracks,
positions=None, figure_scale=60,
track_width=2, position_alpha=.5,
draw_whole_track=False, shift=0)
plt.figure(figsize=(20, 20))
plt.imshow(frame)
track_ind = 10002
tracks[track_ind]['track'][-1] - tracks[track_ind]['track'][0]
tracks[track_ind]['track'][0]
tracks[track_ind]['track'][-1]
###Output
_____no_output_____
|
sagemaker-debugger/tensorflow_nlp_sentiment_analysis/sentiment-analysis-tf-distributed-training-bringyourownscript.ipynb
|
###Markdown
Profile machine learning training with Amazon SageMaker Debugger Gain high-precision insights into Horovod-based distributed machine learning training jobsThis notebook demonstrates how to: * Execute distributed training on Amazon SageMaker using the Horovod framework. * Execute distributed training using script mode, which allows you to use a training script similar to one you would use outside SageMaker.* Execute SageMaker Debugger profiling rules against training jobs in progress.* Visualize the system and framework metrics using the SMDebug client library.* Analyze the autogenerated profiling report and implement recommendations suggested by SageMaker Debugger. **Table of Contents** 1. [Introduction](#intro)2. [Section 1 - Setup](#setup)3. [Section 2 - Train sentiment analysis CNN model with custom Debugger profiling configuration](#train)4. [Section 3 - Interactive analysis using the SMDebug visualization tools](#analysis)5. [Section 4 - Analyze report generated by Debugger](#profiler-report)6. [Section 5 - Analyze recommendations from the report](#analyze-profiler-recommendations)7. [Section 6 - Implement recommendations from the report](#implement-profiler-recommendations)8. [Conclusion](#conclusion) Introduction Training machine learning models is a time- and compute-intensive process requiring multiple training runs with different hyperparameters before a model yields acceptable accuracy. CPU- and GPU-based distributed training with frameworks such as Horovod and Parameter Servers addresses this issue by allowing training to be easily scalable to a cluster of resources. However, distributed training makes it harder to identify and debug resource bottleneck problems. Gaining insights into the training in progress, both at the machine learning framework level and the underlying compute resources level, is a critical step towards understanding the resource usage patterns and reducing resource wastage. Analyzing bottleneck issues is necessary to maximize the utilization of compute resources and optimize model training performance to deliver state-of-the-art machine learning models with target accuracy.Amazon SageMaker is a fully managed service that enables developers and data scientists to quickly and easily build, train, and deploy ML models at scale. Amazon SageMaker Debugger is a feature of SageMaker training that makes it easy to train machine learning (ML) models faster by capturing real-time metrics such as learning gradients and weights, providing transparency into the training process, so you can correct anomalies such as losses, overfitting, and overtraining. With the newly introduced profiling capability, SageMaker Debugger now automatically monitors system resources such as CPU, GPU, network, IO, and memory, providing a complete resource utilization view of training jobs.In this notebook, we demonstrate the Amazon SageMaker Debugger profiling capabilities using the sentiment analysis use case. Use case - Sentiment Analysis with TensorFlow and KerasSentiment analysis is a very common text analytics task that involves determining whether a text sample is positive or negative about its subject. There are several different algorithms for performing this task, including statistical algorithms and deep learning algorithms. With respect to deep learning, a Convolutional Neural Net (CNN) is sometimes used for this purpose.
In this notebook we'll use a CNN built with TensorFlow to perform sentiment analysis in Amazon SageMaker on the IMDB dataset, which consists of movie reviews labeled as having positive or negative sentiment. Step 0 - Install and check the SageMaker Python SDK versionTo use the new Debugger profiling features, ensure that you have the right versions of SageMaker and SMDebug SDKs installed. Check the library versions.
###Code
import sagemaker
sagemaker.__version__
###Output
_____no_output_____
###Markdown
**Important**: If the SageMaker version is less than 2.19.0 and if you are using an existing SageMaker Studio or Notebook instance, you must update the environment to use the latest SageMaker Python SDK. Follow instructions at [Update Amazon SageMaker Studio](https://docs.aws.amazon.com/sagemaker/latest/dg/studio-tasks-update.html) and [Notebook Instance Software Updates](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-software-updates.html) in the [Amazon SageMaker developer guide](https://docs.aws.amazon.com/sagemaker/latest/dg/whatis.html). Section 1 - Setup In this section, you will import the necessary libraries, set up variables, and examine data to train the sentiment analysis model.Let's start by specifying:* The AWS region used to host your model.* The IAM role associated with this SageMaker notebook instance.* The S3 bucket used to store the data used to train your model, any additional model data, and the data captured from model invocations. 1.1 Import necessary libraries
###Code
import pandas as pd
import numpy as np
import os
import boto3
import time
# import debugger libraries
import sagemaker
from sagemaker.tensorflow import TensorFlow
from sagemaker.debugger import ProfilerConfig, FrameworkProfile
from tensorflow.keras.preprocessing import sequence
from tensorflow.python.keras.datasets import imdb
###Output
_____no_output_____
###Markdown
1.2 AWS region and IAM Role
###Code
import sagemaker
region = sagemaker.Session().boto_region_name
print("AWS Region: {}".format(region))
role = sagemaker.get_execution_role()
print("RoleArn: {}".format(role))
###Output
_____no_output_____
###Markdown
1.3 S3 bucket and prefixes
###Code
s3_prefix = "tf-hvd-sentiment-silent"
traindata_s3_prefix = "{}/data/train".format(s3_prefix)
testdata_s3_prefix = "{}/data/test".format(s3_prefix)
sagemaker_session = sagemaker.Session()
###Output
_____no_output_____
###Markdown
1.4 Process training data We'll begin by loading the reviews dataset, and padding the reviews, so all reviews have the same length. Each review is represented as an array of numbers, where each number represents an indexed word. Training data for both Local Mode and Hosted Training must be saved as files, so we'll also save the transformed data to files.
###Code
max_features = 20000
maxlen = 400
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), "train sequences")
print(len(x_test), "test sequences")
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print("x_train shape:", x_train.shape)
print("x_test shape:", x_test.shape)
# Each review is an array of numbers where each number is an indexed word
print(x_train[:10])
data_dir = os.path.join(os.getcwd(), "data")
os.makedirs(data_dir, exist_ok=True)
train_dir = os.path.join(os.getcwd(), "data/train")
os.makedirs(train_dir, exist_ok=True)
test_dir = os.path.join(os.getcwd(), "data/test")
os.makedirs(test_dir, exist_ok=True)
csv_test_dir = os.path.join(os.getcwd(), "data/csv-test")
os.makedirs(csv_test_dir, exist_ok=True)
np.save(os.path.join(train_dir, "x_train.npy"), x_train)
np.save(os.path.join(train_dir, "y_train.npy"), y_train)
np.save(os.path.join(test_dir, "x_test.npy"), x_test)
np.save(os.path.join(test_dir, "y_test.npy"), y_test)
np.savetxt(
os.path.join(csv_test_dir, "csv-test.csv"),
np.array(x_test[:100], dtype=np.int32),
fmt="%d",
delimiter=",",
)
train_s3 = sagemaker_session.upload_data(path="./data/train/", key_prefix=traindata_s3_prefix)
test_s3 = sagemaker_session.upload_data(path="./data/test/", key_prefix=testdata_s3_prefix)
inputs = {"train": train_s3, "test": test_s3}
print(inputs)
###Output
_____no_output_____
###Markdown
Section 2 - Train sentiment analysis CNN model with custom profiler configuration In this section we use SageMaker's hosted training using Uber's Horovod framework, which uses compute resources separate from this notebook instance. Hosted training spins up one or more instances (a cluster) for training, and then tears the cluster down when training is complete. Horovod is a distributed deep learning training framework for TensorFlow, Keras, PyTorch, and Apache MXNet. The objective is to take a single-GPU training script and successfully scale it to train across many GPUs in parallel. Once a training script has been written to scale with Horovod, it can run on a single GPU, multiple GPUs, or even multiple hosts without any further code changes.With the SageMaker Python SDK, you can train and host TensorFlow models on Amazon SageMaker. For more information, see [Use TensorFlow with the SageMaker Python SDK](https://sagemaker.readthedocs.io/en/stable/frameworks/tensorflow/using_tf.html) in the SageMaker Python SDK documentation.For our training, we will use two p3.8xlarge instances to begin with and change our training configuration based on profiling recommendations from Amazon SageMaker Debugger. Amazon EC2 P3 instances deliver high performance compute in the cloud with up to 8 NVIDIA® V100 Tensor Core GPUs and up to 100 Gbps of networking throughput for machine learning and HPC applications. The p3.8xlarge instance comes with 4 GPUs and 32 vCPU cores with 10 Gbps networking performance. Please refer to the EC2 Instance Types page for more details. 2.1 Setup training job We will use the standard SageMaker Estimator API for TensorFlow to create training jobs. Profiling configuration will be enabled by default to emit framework and system metrics for our analysis. Define hyperparameters such as the number of epochs, batch size, and data augmentation. * You can increase the batch size to increase system utilization, but it may result in CPU bottleneck problems. Data preprocessing of a large batch size with augmentation requires heavy computation. * You can disable `data_augmentation` to see the impact on the system utilization.* We've set the number of epochs low to enable training to run quicker; please adjust this accordingly for your use case.
###Code
hyperparameters = {"epoch": 1, "batch_size": 256, "data_augmentation": True}
###Output
_____no_output_____
###Markdown
Take your AWS account limits into consideration while setting up the `instance_type` and `instance_count` of the cluster.
###Code
distributions = {
"mpi": {
"enabled": True,
"processes_per_host": 2,
"custom_mpi_options": "-verbose -x HOROVOD_TIMELINE=./hvd_timeline.json -x NCCL_DEBUG=INFO -x OMPI_MCA_btl_vader_single_copy_mechanism=none",
}
}
model_dir = "/opt/ml/model"
train_instance_type = "ml.p3.8xlarge"
instance_count = 2
###Output
_____no_output_____
###Markdown
2.2 Define profiler configuration With the following **`profiler_config`** parameter configuration, Debugger uses the default monitoring settings, collecting system metrics every 500 milliseconds. For collecting framework metrics, you can set target steps and target time intervals in detail.
###Code
profiler_config = ProfilerConfig(
framework_profile_params=FrameworkProfile(start_step=2, num_steps=7)
)
###Output
_____no_output_____
###Markdown
With these `profiler_config` settings, Debugger will collect system metrics every 500 milliseconds and framework metrics on the specified steps (7 steps starting from step 2). For a complete list of parameters and profiling configurations, see [Configure Debugger Using Amazon SageMaker Python SDK](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-configuration.html) in the [Amazon SageMaker Debugger developer guide](https://docs.aws.amazon.com/sagemaker/latest/dg/train-debugger.html). 2.3 Configure training job using TensorFlow estimator and pass in the profiler configuration.While constructing a SageMaker estimator, specify the TensorFlow framework version and a supported Python version. For a complete list of the supported framework versions and the corresponding Python version to use, see [Supported Frameworks and Algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/train-debugger.html#debugger-supported-frameworks) in the [Amazon SageMaker Debugger developer guide](https://docs.aws.amazon.com/sagemaker/latest/dg/train-debugger.html).**Note**: In the following estimator, the latest AWS TensorFlow deep learning container image is used. For a complete list of AWS deep learning containers, see [General Framework Containers](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#general-framework-containers) in the [AWS Deep Learning Containers](https://github.com/aws/deep-learning-containers/) repository. The Debugger's new profiling features are available for TensorFlow 2.3.1 and PyTorch 1.6.0.
###Code
estimator = TensorFlow(
role=sagemaker.get_execution_role(),
base_job_name="tf-keras-silent",
model_dir=model_dir,
instance_count=instance_count,
instance_type=train_instance_type,
entry_point="sentiment-distributed.py",
source_dir="./tf-sentiment-script-mode",
framework_version="2.3.1",
py_version="py37",
profiler_config=profiler_config,
script_mode=True,
hyperparameters=hyperparameters,
distribution=distributions,
)
###Output
_____no_output_____
###Markdown
We then simply call `fit` to start the actual hosted training
###Code
estimator.fit(inputs, wait=False)
###Output
_____no_output_____
###Markdown
Section 3 - Interactive analysis using the SMDebug visualization tools In this section, we introduce interactive analysis of the data captured by SageMaker Debugger. It is organized in order of training phases: initialization, training, and finalization. The profiling data results are categorized as System Metrics and Algorithm (Framework) Metrics. Once the training job initiates, SageMaker Debugger starts collecting system and framework metrics. The smdebug library provides profiler analysis tools that enable you to access and analyze the profiling data. The following code cells set up a metrics reader to retrieve the system and framework metrics when they become available in the default S3 bucket. Once the metrics are available, you can query, plot, and analyze the profiling metrics data throughout this notebook. Let's check the profiler artifact path where the system metrics and framework metrics are stored.
###Code
estimator.latest_job_profiler_artifacts_path()
###Output
_____no_output_____
###Markdown
3.1 Read profiling data: system metrics Once the training job is running, SageMaker collects system and framework metrics. The following code cell waits for the system metrics to become available in S3. Once they are available, you will be able to query and plot those metrics.
###Code
from smdebug.profiler.system_metrics_reader import S3SystemMetricsReader
path = estimator.latest_job_profiler_artifacts_path()
system_metrics_reader = S3SystemMetricsReader(path)
sagemaker_client = boto3.client("sagemaker")
training_job_name = estimator.latest_training_job.name
print(f"Training job name: {training_job_name}")
training_job_status = ""
training_job_secondary_status = ""
while system_metrics_reader.get_timestamp_of_latest_available_file() == 0:
system_metrics_reader.refresh_event_file_list()
client = sagemaker_client.describe_training_job(TrainingJobName=training_job_name)
if "TrainingJobStatus" in client:
training_job_status = f"TrainingJobStatus: {client['TrainingJobStatus']}"
if "SecondaryStatus" in client:
training_job_secondary_status = f"TrainingJobSecondaryStatus: {client['SecondaryStatus']}"
print(
f"Profiler data from system not available yet. {training_job_status}. {training_job_secondary_status}."
)
time.sleep(20)
print("\n\nProfiler data from system is available")
###Output
_____no_output_____
###Markdown
Helper function to convert timestamps into UTC:
###Code
from datetime import datetime
def timestamp_to_utc(timestamp):
utc_dt = datetime.utcfromtimestamp(timestamp)
return utc_dt.strftime("%Y-%m-%d %H:%M:%S")
###Output
_____no_output_____
###Markdown
Now that the data is available we can query and inspect it. We get the latest available timestamp and query all the events within the given time range:
###Code
system_metrics_reader.refresh_event_file_list()
last_timestamp = system_metrics_reader.get_timestamp_of_latest_available_file()
events = system_metrics_reader.get_events(0, last_timestamp * 1000000) # UTC time in micro seconds
print(
"Found",
len(events),
"recorded system metric events. Latest recorded event:",
timestamp_to_utc(last_timestamp / 1000000),
) # UTC time in seconds to datetime
###Output
_____no_output_____
###Markdown
We can iterate over the list of recorded events. Let's have a look at the first event.
###Code
print(
"Event name:",
events[0].name,
"\nTimestamp:",
timestamp_to_utc(events[0].timestamp),
"\nValue:",
events[0].value,
)
###Output
_____no_output_____
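###Markdown
As an optional next step, the recorded system metric events can be collapsed into a pandas DataFrame for a quick per-metric summary. This is only a convenience sketch built on the `name`, `timestamp`, and `value` attributes used above.
###Code
import pandas as pd

# Build a DataFrame from the system metric events and summarize each metric.
system_df = pd.DataFrame(
    [{"name": e.name, "timestamp": e.timestamp, "value": e.value} for e in events]
)
# Mean and max value per metric name (e.g. per CPU core or GPU utilization).
print(system_df.groupby("name")["value"].agg(["mean", "max"]))
###Output
_____no_output_____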
###Markdown
3.2 GPU and CPU usage MetricsHistogram computes a histogram on GPU and CPU utilization values. Bins are between 0 and 100. Good system utilization means that the center of the distribution should be between 80 and 90. In case of multi-GPU training: if the distributions of GPU utilization values are not similar, it indicates an issue with workload distribution. The following cell will plot the histograms per metric. In order to only plot specific metrics, define the lists `select_dimensions` and `select_events`. A dimension can be CPUUtilization, GPUUtilization, GPUMemoryUtilization, or IOPS. With the CPUUtilization dimension, a CPU utilization histogram for each single core and for total CPU usage will be plotted. In case of GPU, it will visualize utilization and memory for each GPU. In case of IOPS, it will plot IO wait time per CPU. If `select_events` is specified, then only metrics that match a name in `select_events` will be shown. If neither `select_dimensions` nor `select_events` is specified, all available metrics will be visualized. One can also specify a start and end time.
###Code
from smdebug.profiler.analysis.notebook_utils.metrics_histogram import MetricsHistogram
system_metrics_reader.refresh_event_file_list()
metrics_histogram = MetricsHistogram(system_metrics_reader)
metrics_histogram.plot()
###Output
_____no_output_____
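###Markdown
If you only want to inspect a subset of the metrics described above, a filtered plot can be requested. The keyword arguments below (`starttime`, `endtime`, `select_dimensions`, `select_events`) follow the smdebug notebook utilities and mirror the Heatmap cell later in this notebook; treat them as an assumption and fall back to the plain `plot()` call above if your smdebug version differs.
###Code
# Hedged sketch: plot only CPU- and GPU-related histograms for the "total" events.
system_metrics_reader.refresh_event_file_list()
metrics_histogram.plot(
    starttime=0,
    endtime=system_metrics_reader.get_timestamp_of_latest_available_file(),
    select_dimensions=["CPU", "GPU"],  # assumed dimension filter
    select_events=["total"],  # assumed event filter
)
###Output
_____no_output_____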
###Markdown
3.3 Read profiling data: framework annotations
###Code
from smdebug.profiler.algorithm_metrics_reader import S3AlgorithmMetricsReader
framework_metrics_reader = S3AlgorithmMetricsReader(path)
events = []
while framework_metrics_reader.get_timestamp_of_latest_available_file() == 0 or len(events) == 0:
framework_metrics_reader.refresh_event_file_list()
last_timestamp = framework_metrics_reader.get_timestamp_of_latest_available_file()
events = framework_metrics_reader.get_events(0, last_timestamp)
print("Profiler data from framework not available yet")
time.sleep(20)
print("\n\n Profiler data from framework is available")
###Output
_____no_output_____
###Markdown
The following code cell retrieves all recorded events from Amazon S3.
###Code
framework_metrics_reader.refresh_event_file_list()
last_timestamp = framework_metrics_reader.get_timestamp_of_latest_available_file()
events = framework_metrics_reader.get_events(0, last_timestamp)
print(
"Found",
len(events),
"recorded framework annotations. Latest event recorded ",
timestamp_to_utc(last_timestamp / 1000000),
)
###Output
_____no_output_____
###Markdown
Like before, we can inspect the recorded events. Since we are reading framework metrics, there is now a start and end time for each event.
###Code
print(
"Event name:",
events[0].event_name,
"\nStart time:",
timestamp_to_utc(events[0].start_time / 1000000000),
"\nEnd time:",
timestamp_to_utc(events[0].end_time / 1000000000),
"\nDuration:",
events[0].duration,
"nanosecond",
)
###Output
_____no_output_____
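###Markdown
To see which framework annotations dominate the recorded time range, the events can again be summarized with pandas. This is a convenience sketch that relies only on the `event_name` and `duration` attributes shown above (durations are reported in nanoseconds).
###Code
import pandas as pd

framework_df = pd.DataFrame(
    [{"event_name": e.event_name, "duration_ms": e.duration / 1e6} for e in events]
)
# Total and mean duration per annotation, largest contributors first.
summary = (
    framework_df.groupby("event_name")["duration_ms"]
    .agg(["count", "sum", "mean"])
    .sort_values("sum", ascending=False)
)
print(summary.head(10))
###Output
_____no_output_____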
###Markdown
3.4 Outliers in step duration StepHistogram creates histograms of step duration values. Significant outliers are an indication of system bottlenecks. In contrast to StepTimelineChart, it helps identify clusters of step duration values. As a simple example: the time spent during the training phase (forward and backward pass) will likely differ from the time spent during the validation phase (forward pass), so we would expect at least two clusters.
###Code
from smdebug.profiler.analysis.notebook_utils.step_histogram import StepHistogram
framework_metrics_reader.refresh_event_file_list()
step_histogram = StepHistogram(framework_metrics_reader)
step_histogram.plot()
###Output
_____no_output_____
###Markdown
3.5 Heatmap The following code cell creates a heatmap where each row corresponds to one metric (CPU core and GPU utilizations) and the x-axis is the duration of the training job. It allows you to more easily spot CPU bottlenecks (utilization on GPU is low but the utilization of one or more CPU cores is high).
###Code
from smdebug.profiler.analysis.notebook_utils.heatmap import Heatmap
view_heatmap = Heatmap(
system_metrics_reader,
framework_metrics_reader,
select_dimensions=["CPU", "GPU"], # optional - comment this line out to see all dimensions.
# select_events=["total"], # optional - comment this line out to see all events.
plot_height=900,
)
###Output
_____no_output_____
###Markdown
3.6 Run loop to fetch latest profiler data and update charts The following code cell runs while your training job is in progress and refreshes the plots in the previous sections.
###Code
from bokeh.io import push_notebook
import time
last_timestamp = system_metrics_reader.get_timestamp_of_latest_available_file()
description = sagemaker_client.describe_training_job(TrainingJobName=training_job_name)
while description["TrainingJobStatus"] == "InProgress":
system_metrics_reader.refresh_event_file_list()
framework_metrics_reader.refresh_event_file_list()
current_timestamp = system_metrics_reader.get_timestamp_of_latest_available_file()
description = sagemaker_client.describe_training_job(TrainingJobName=training_job_name)
if current_timestamp > last_timestamp:
print(
"New data available, updating dashboards. Current timestamp is",
timestamp_to_utc(current_timestamp / 1000000),
)
view_heatmap.update_data(current_timestamp)
push_notebook(handle=view_heatmap.target)
metrics_histogram.update_data(current_timestamp)
push_notebook(handle=metrics_histogram.target)
step_histogram.update_data(current_timestamp)
push_notebook(handle=step_histogram.target)
last_timestamp = current_timestamp
time.sleep(30)
###Output
_____no_output_____
###Markdown
Section 4 - Analyze report generated by Debugger In this section we will analyze the report generated by the profiler rule processing job. We will showcase a few sections of the report. For complete details, please download the report from the S3 bucket and review it. Also note that the exact details in the report generated for your training job may be different from what you see in this section. 4.1 View the location of the report generated.
###Code
rule_output_path = estimator.output_path + estimator.latest_training_job.job_name + "/rule-output"
print(
f"You will find the profiler report under `{rule_output_path}/` after the training has finished"
)
###Output
_____no_output_____
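###Markdown
The next cells use the AWS CLI to list and download the rule output. If you prefer to stay in Python, the same listing can be done with boto3; the parsing below simply splits the `s3://bucket/prefix` string printed above and is only a convenience sketch.
###Code
import boto3
from urllib.parse import urlparse

# Split "s3://bucket/prefix" into its bucket and prefix parts.
parsed = urlparse(rule_output_path)
bucket, prefix = parsed.netloc, parsed.path.lstrip("/")

s3 = boto3.client("s3")
response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix)
for obj in response.get("Contents", []):
    print(obj["Key"])
###Output
_____no_output_____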
###Markdown
To check if the report is generated, list directories and files recursively
###Code
! aws s3 ls {rule_output_path} --recursive
###Output
_____no_output_____
###Markdown
Download the report and rule output files recursively using `aws s3 cp` The following command saves all of the rule output files to the **ProfilerReport-1234567890** folder under your current working directory.
###Code
! aws s3 cp {rule_output_path} ./ --recursive
###Output
_____no_output_____
###Markdown
The following script automatically finds the **ProfilerReport** folder name and returns a link to the downloaded report.
###Code
from IPython.display import FileLink
profiler_report_name = [
rule["RuleConfigurationName"]
for rule in estimator.latest_training_job.rule_job_summary()
if "Profiler" in rule["RuleConfigurationName"]
][0]
profiler_report_name
display(
"Click link below to view the profiler report",
FileLink(profiler_report_name + "/profiler-output/profiler-report.html"),
)
###Output
_____no_output_____
###Markdown
For more information about how to find, download, and browse Debugger profiling reports, see [SageMaker Debugger Profiling Report](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-profiling-report.html) in the [Amazon SageMaker Debugger developer guide](https://docs.aws.amazon.com/sagemaker/latest/dg/train-debugger.html). 4.2 Profile Report - Framework metrics summary In this section of the report, you will see a pie chart similar to the one below, which shows how much time the training job spent in the "training" phase, the "validation" phase, or "others". 4.3 Profile Report - Identify most expensive CPU operator The table in this section of the report shows a list of operators that your training job ran on CPU. The most expensive operator on CPU was "ExecutorState::Process" at 16%. 4.4 Profile Report - Identify most expensive GPU operator The table shows the percentage of the time and the absolute cumulative time spent on the most frequently called GPU operators. 4.5 Access Debugger Insights in Amazon SageMaker Studio In addition to interactive analysis of the Debugger output data and analyzing the autogenerated profiling report, you can also access the Debugger insights dashboard from Amazon SageMaker Studio. To get started with Amazon SageMaker Studio using Debugger, see [Debugger on Studio](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-on-studio.html) in the [Amazon SageMaker Debugger developer guide](https://docs.aws.amazon.com/sagemaker/latest/dg/train-debugger.html). Section 5 - Analyze recommendations from the report The **Rules Summary** section of the report aggregates all of the rule evaluation results, analysis, rule descriptions, and suggestions. The following table shows a summary of the executed profiler rules. The table is sorted by the rules that triggered most frequently. In this training job, this was the case for the LowGPUUtilization rule: it processed 1001 datapoints and triggered 8 times. You may see a different rule summary based on the data and the training configuration you use. From the analysis so far and the top recommendations from the table above, there seems to be scope for improving resource utilization and making our training more efficient. Based on this, change the training configuration settings and rerun the training. Section 6 - Implement recommendations from the report In this section, we will rerun the training job with the changed configuration. The training instances are changed from p3.8xlarge to p3.2xlarge, the number of instances is reduced to 2, and only one MPI process per host is configured to increase the number of data loaders. The batch size is also changed to 512. We will use the same profiling configuration as the previous job. After the second training job with the new settings is complete, there are new system metrics, new framework metrics, and a new report.
###Code
hyperparameters = {"epoch": 5, "batch_size": 512, "data_augmentation": True}
distributions = {
"mpi": {
"enabled": True,
"processes_per_host": 1,
"custom_mpi_options": "-verbose -x HOROVOD_TIMELINE=./hvd_timeline.json -x NCCL_DEBUG=INFO -x OMPI_MCA_btl_vader_single_copy_mechanism=none",
}
}
model_dir = "/opt/ml/model"
train_instance_type = "ml.p3.2xlarge"
instance_count = 2
estimator_new = TensorFlow(
role=sagemaker.get_execution_role(),
base_job_name="tf-keras-silent",
model_dir=model_dir,
instance_count=instance_count,
instance_type=train_instance_type,
entry_point="sentiment-distributed.py",
source_dir="./tf-sentiment-script-mode",
framework_version="2.3.1",
py_version="py37",
profiler_config=profiler_config,
script_mode=True,
hyperparameters=hyperparameters,
distribution=distributions,
)
estimator_new.fit(inputs, wait=False)
###Output
_____no_output_____
###Markdown
Call to action To understand the impact of the training configuration changes, compare the report analysis from the two training jobs. Repeat the process of analyzing the profiler report, implementing the recommendations, and comparing with the previous run, until you are satisfied.
###Code
rule_output_path = (
estimator_new.output_path + estimator_new.latest_training_job.job_name + "/rule-output"
)
print(
f"You will find the profiler report under {rule_output_path}/ after the training has finished"
)
###Output
_____no_output_____
###Markdown
Download the new report and files recursively using `aws s3 cp`
###Code
! aws s3 cp {rule_output_path} ./ --recursive
###Output
_____no_output_____
###Markdown
Retrieve a file link to the new profiling report.
###Code
from IPython.display import FileLink
profiler_report_name = [
rule["RuleConfigurationName"]
for rule in estimator_new.latest_training_job.rule_job_summary()
if "Profiler" in rule["RuleConfigurationName"]
][0]
profiler_report_name
display(
"Click link below to view the profiler report",
FileLink(profiler_report_name + "/profiler-output/profiler-report.html"),
)
###Output
_____no_output_____
###Markdown
Profile machine learning training with Amazon SageMaker Debugger Gain high-precision insights into Horovod-based distributed machine learning training jobs This notebook demonstrates how to: * Execute distributed training on Amazon SageMaker using the Horovod framework. * Execute distributed training using script mode, which allows you to use a training script similar to one you would use outside SageMaker. * Execute SageMaker Debugger profiling rules against training jobs in process. * Visualize the system and framework metrics using the SMDebug client library. * Analyze the autogenerated profiling report and implement recommendations suggested by SageMaker Debugger. **Table of Contents** 1. [Introduction](intro) 2. [Section 1 - Setup](setup) 3. [Section 2 - Train sentiment analysis CNN model with custom Debugger profiling configuration](train) 4. [Section 3 - Interactive analysis using the SMDebug visualization tools](analysis) 5. [Section 4 - Analyze report generated by Debugger](profiler-report) 6. [Section 5 - Analyze recommendations from the report](analyze-profiler-recommendations) 7. [Section 6 - Implement recommendations from the report](implement-profiler-recommendations) 8. [Conclusion](conclusion) Introduction Training machine learning models is a time- and compute-intensive process requiring multiple training runs with different hyperparameters before a model yields acceptable accuracy. CPU and GPU based distributed training with frameworks such as Horovod and Parameter Servers address this issue by allowing training to be easily scalable to a cluster of resources. However, distributed training makes it harder to identify and debug resource bottleneck problems. Gaining insights into the training in progress, both at the machine learning framework level and the underlying compute resources level, is a critical step towards understanding the resource usage patterns and reducing resource wastage. Analyzing bottleneck issues is necessary to maximize the utilization of compute resources and optimize model training performance to deliver state-of-the-art machine learning models with target accuracy. Amazon SageMaker is a fully managed service that enables developers and data scientists to quickly and easily build, train, and deploy ML models at scale. Amazon SageMaker Debugger is a feature of SageMaker training that makes it easy to train machine learning (ML) models faster by capturing real-time metrics such as learning gradients and weights, providing transparency into the training process, so you can correct anomalies such as losses, overfitting, and overtraining. With the newly introduced profiling capability, SageMaker Debugger now automatically monitors system resources such as CPU, GPU, network, IO, and memory, providing a complete resource utilization view of training jobs. In this notebook, we demonstrate the Amazon SageMaker Debugger profiling capabilities using the sentiment analysis use case. Use case - Sentiment Analysis with TensorFlow and Keras Sentiment analysis is a very common text analytics task that involves determining whether a text sample is positive or negative about its subject. There are several different algorithms for performing this task, including statistical algorithms and deep learning algorithms. With respect to deep learning, a Convolutional Neural Net (CNN) is sometimes used for this purpose.
In this notebook we'll use a CNN built with TensorFlow to perform sentiment analysis in Amazon SageMaker on the IMDB dataset, which consists of movie reviews labeled as having positive or negative sentiment. Step 0 - Install and check the SageMaker Python SDK version To use the new Debugger profiling features, ensure that you have the right versions of SageMaker and SMDebug SDKs installed. Check the library versions.
###Code
import sagemaker
sagemaker.__version__
###Output
_____no_output_____
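###Markdown
The note below mentions a minimum SageMaker SDK version of 2.19.0. If you prefer to verify this programmatically rather than by eye, a small assertion can help; using the `packaging` library here is just one possible way to compare version strings.
###Code
from packaging import version

import sagemaker

required = "2.19.0"  # minimum version referenced in the note below
assert version.parse(sagemaker.__version__) >= version.parse(required), (
    f"SageMaker SDK {sagemaker.__version__} found, please upgrade to >= {required}"
)
print(f"SageMaker SDK {sagemaker.__version__} satisfies the >= {required} requirement")
###Output
_____no_output_____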
###Markdown
**Important**: If the SageMaker version is less than 2.19.0 and if you are using an existing SageMaker Studio or Notebook instance, you must update the environment to use the latest SageMaker Python SDK. Follow instructions at [Update Amazon SageMaker Studio](https://docs.aws.amazon.com/sagemaker/latest/dg/studio-tasks-update.html) and [Notebook Instance Software Updates](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-software-updates.html) in the [Amazon SageMaker developer guide](https://docs.aws.amazon.com/sagemaker/latest/dg/whatis.html). Section 1 - Setup In this section, you will import the necessary libraries, set up variables, and examine data to train the sentiment analysis model. Let's start by specifying: * The AWS region used to host your model. * The IAM role associated with this SageMaker notebook instance. * The S3 bucket used to store the data used to train your model, any additional model data, and the data captured from model invocations. 1.1 Import necessary libraries
###Code
import pandas as pd
import numpy as np
import os
import boto3
import time
# import debugger libraries
import sagemaker
from sagemaker.tensorflow import TensorFlow
from sagemaker.debugger import ProfilerConfig, FrameworkProfile
from tensorflow.keras.preprocessing import sequence
from tensorflow.python.keras.datasets import imdb
###Output
_____no_output_____
###Markdown
1.2 AWS region and IAM Role
###Code
import sagemaker
region = sagemaker.Session().boto_region_name
print("AWS Region: {}".format(region))
role = sagemaker.get_execution_role()
print("RoleArn: {}".format(role))
###Output
_____no_output_____
###Markdown
1.3 S3 bucket and prefixes
###Code
s3_prefix = "tf-hvd-sentiment-silent"
traindata_s3_prefix = "{}/data/train".format(s3_prefix)
testdata_s3_prefix = "{}/data/test".format(s3_prefix)
sagemaker_session = sagemaker.Session()
###Output
_____no_output_____
###Markdown
1.4 Process training data We'll begin by loading the reviews dataset, and padding the reviews, so all reviews have the same length. Each review is represented as an array of numbers, where each number represents an indexed word. Training data for both Local Mode and Hosted Training must be saved as files, so we'll also save the transformed data to files.
###Code
max_features = 20000
maxlen = 400
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), "train sequences")
print(len(x_test), "test sequences")
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print("x_train shape:", x_train.shape)
print("x_test shape:", x_test.shape)
# Each review is an array of numbers where each number is an indexed word
print(x_train[:10])
data_dir = os.path.join(os.getcwd(), "data")
os.makedirs(data_dir, exist_ok=True)
train_dir = os.path.join(os.getcwd(), "data/train")
os.makedirs(train_dir, exist_ok=True)
test_dir = os.path.join(os.getcwd(), "data/test")
os.makedirs(test_dir, exist_ok=True)
csv_test_dir = os.path.join(os.getcwd(), "data/csv-test")
os.makedirs(csv_test_dir, exist_ok=True)
np.save(os.path.join(train_dir, "x_train.npy"), x_train)
np.save(os.path.join(train_dir, "y_train.npy"), y_train)
np.save(os.path.join(test_dir, "x_test.npy"), x_test)
np.save(os.path.join(test_dir, "y_test.npy"), y_test)
np.savetxt(
os.path.join(csv_test_dir, "csv-test.csv"),
np.array(x_test[:100], dtype=np.int32),
fmt="%d",
delimiter=",",
)
train_s3 = sagemaker_session.upload_data(path="./data/train/", key_prefix=traindata_s3_prefix)
test_s3 = sagemaker_session.upload_data(path="./data/test/", key_prefix=testdata_s3_prefix)
inputs = {"train": train_s3, "test": test_s3}
print(inputs)
###Output
_____no_output_____
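###Markdown
Before uploading, it can be reassuring to reload one of the saved arrays and confirm that the shapes round-trip correctly. This is purely an optional sanity check.
###Code
# Reload the saved training arrays and confirm the shapes match what was written.
x_train_check = np.load(os.path.join(train_dir, "x_train.npy"))
y_train_check = np.load(os.path.join(train_dir, "y_train.npy"))
print("x_train on disk:", x_train_check.shape, "y_train on disk:", y_train_check.shape)
assert x_train_check.shape == x_train.shape
###Output
_____no_output_____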
###Markdown
Section 2 - Train sentiment analysis CNN model with custom profiler configuration In this section we use SageMaker's hosted training using Uber's Horovod framework, which uses compute resources separate from this notebook instance. Hosted training spins up one or more instances (a cluster) for training, and then tears the cluster down when training is complete. Horovod is a distributed deep learning training framework for TensorFlow, Keras, PyTorch, and Apache MXNet. The objective is to take a single-GPU training script and successfully scale it to train across many GPUs in parallel. Once a training script has been written for scale with Horovod, it can run on a single GPU, multiple GPUs, or even multiple hosts without any further code changes. With the SageMaker Python SDK, you can train and host TensorFlow models on Amazon SageMaker. For more information, see [Use TensorFlow with the SageMaker Python SDK](https://sagemaker.readthedocs.io/en/stable/frameworks/tensorflow/using_tf.html) in the SageMaker Python SDK documentation. For our training, we will use three p3.8xlarge instances to begin with and change our training configuration based on profiling recommendations from Amazon SageMaker Debugger. Amazon EC2 P3 instances deliver high performance compute in the cloud with up to 8 NVIDIA® V100 Tensor Core GPUs and up to 100 Gbps of networking throughput for machine learning and HPC applications. The p3.8xlarge instance comes with 4 GPUs and 32 vCPU cores with 10 Gbps networking performance. Please refer to the EC2 Instance Types page for more details. 2.1 Setup training job We will use the standard SageMaker Estimator API for TensorFlow to create training jobs. Profiling configuration will be enabled by default to emit framework and system metrics for our analysis. Define hyperparameters such as the number of epochs, batch size, and data augmentation. * You can increase the batch size to increase system utilization, but it may result in CPU bottleneck problems. Data preprocessing of a large batch size with augmentation requires heavy computation. * You can disable `data_augmentation` to see the impact on the system utilization. * We've set the number of epochs to enable training to run quicker; please adjust this accordingly for your use case.
###Code
hyperparameters = {"epoch": 1, "batch_size": 256, "data_augmentation": True}
###Output
_____no_output_____
###Markdown
Take your AWS account limits into consideration while setting up the `instance_type` and `instance_count` of the cluster.
###Code
distributions = {
"mpi": {
"enabled": True,
"processes_per_host": 2,
"custom_mpi_options": "-verbose -x HOROVOD_TIMELINE=./hvd_timeline.json -x NCCL_DEBUG=INFO -x OMPI_MCA_btl_vader_single_copy_mechanism=none",
}
}
model_dir = "/opt/ml/model"
train_instance_type = "ml.p3.8xlarge"
instance_count = 2
###Output
_____no_output_____
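###Markdown
As a quick back-of-the-envelope check of the cluster size, you can relate the MPI settings above to the hardware described earlier (a p3.8xlarge has 4 GPUs). The numbers below are a rough sketch based on that description; the profiler report later in the notebook is the authoritative view of how well the hardware is actually used.
###Code
gpus_per_instance = 4  # p3.8xlarge, as described above
total_gpus = instance_count * gpus_per_instance
total_mpi_processes = instance_count * distributions["mpi"]["processes_per_host"]
print(f"GPUs in the cluster: {total_gpus}")
print(f"Horovod/MPI processes launched: {total_mpi_processes}")
###Output
_____no_output_____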
###Markdown
2.2 Define profiler configuration With the following **`profiler_config`** parameter configuration, Debugger calls the default settings of monitoring, collecting system metrics every 500 milliseconds. For collecting framework metrics, you can set target steps and target time intervals in detail.
###Code
profiler_config = ProfilerConfig(
framework_profile_params=FrameworkProfile(start_step=2, num_steps=7)
)
###Output
_____no_output_____
###Markdown
With these `profiler_config` settings, Debugger will collect system metrics every 500 milliseconds and framework metrics on the specified steps (from step 2 to 9). For a complete list of parameters and profiling configurations, see [Configure Debugger Using Amazon SageMaker Python SDK](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-configuration.html) in the [Amazon SageMaker Debugger developer guide](https://docs.aws.amazon.com/sagemaker/latest/dg/train-debugger.html). 2.3 Configure training job using TensorFlow estimator and pass in the profiler configuration. While constructing a SageMaker estimator, specify the TensorFlow framework version and supported Python version. For a complete list of the supported framework versions and the corresponding Python version to use, see [Supported Frameworks and Algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/train-debugger.html#debugger-supported-frameworks) in the [Amazon SageMaker Debugger developer guide](https://docs.aws.amazon.com/sagemaker/latest/dg/train-debugger.html). **Note**: In the following estimator, the `framework_version` and `py_version` parameters resolve to the corresponding AWS TensorFlow deep learning container image; alternatively, you can point an exact `image_uri` at the latest container. For a complete list of AWS deep learning containers, see [General Framework Containers](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#general-framework-containers) in the [AWS Deep Learning Containers](https://github.com/aws/deep-learning-containers/) repository. Debugger's new profiling features are available for TensorFlow 2.3.1 and PyTorch 1.6.0.
###Code
estimator = TensorFlow(
role=sagemaker.get_execution_role(),
base_job_name="tf-keras-silent",
model_dir=model_dir,
instance_count=instance_count,
instance_type=train_instance_type,
entry_point="sentiment-distributed.py",
source_dir="./tf-sentiment-script-mode",
framework_version="2.3.1",
py_version="py37",
profiler_config=profiler_config,
script_mode=True,
hyperparameters=hyperparameters,
distribution=distributions,
)
###Output
_____no_output_____
###Markdown
We then simply call `fit` to start the actual hosted training.
###Code
estimator.fit(inputs, wait=False)
###Output
_____no_output_____
###Markdown
Section 3 - Interactive analysis using the SMDebug visualization tools In this section, we introduce interactive analysis of the data captured by SageMaker Debugger. It is organized in order of training phases: initialization, training, and finalization. The profiling data results are categorized as System Metrics and Algorithm (Framework) Metrics. Once the training job initiates, SageMaker Debugger starts collecting system and framework metrics. The smdebug library provides profiler analysis tools that enable you to access and analyze the profiling data. The following code cells set up a TrainingJob object to retrieve the system and framework metrics when they become available in the default S3 bucket. Once the metrics are available, you can query, plot, and analyze the profiling metrics data throughout this notebook. Let's check the profiler artifact path where the system metrics and framework metrics are stored.
###Code
estimator.latest_job_profiler_artifacts_path()
###Output
_____no_output_____
###Markdown
3.1 Read profiling data: system metrics Once the training job is running, SageMaker collects system and framework metrics. The following code cell waits for the system metrics to become available in S3. Once they are available, you will be able to query and plot those metrics.
###Code
from smdebug.profiler.system_metrics_reader import S3SystemMetricsReader
path = estimator.latest_job_profiler_artifacts_path()
system_metrics_reader = S3SystemMetricsReader(path)
sagemaker_client = boto3.client("sagemaker")
training_job_name = estimator.latest_training_job.name
print(f"Training job name: {training_job_name}")
training_job_status = ""
training_job_secondary_status = ""
while system_metrics_reader.get_timestamp_of_latest_available_file() == 0:
system_metrics_reader.refresh_event_file_list()
client = sagemaker_client.describe_training_job(TrainingJobName=training_job_name)
if "TrainingJobStatus" in client:
training_job_status = f"TrainingJobStatus: {client['TrainingJobStatus']}"
if "SecondaryStatus" in client:
training_job_secondary_status = f"TrainingJobSecondaryStatus: {client['SecondaryStatus']}"
print(
f"Profiler data from system not available yet. {training_job_status}. {training_job_secondary_status}."
)
time.sleep(20)
print("\n\nProfiler data from system is available")
###Output
_____no_output_____
###Markdown
Helper function to convert timestamps into UTC:
###Code
from datetime import datetime
def timestamp_to_utc(timestamp):
utc_dt = datetime.utcfromtimestamp(timestamp)
return utc_dt.strftime("%Y-%m-%d %H:%M:%S")
###Output
_____no_output_____
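###Markdown
Elsewhere in this notebook the reader timestamps are expressed in microseconds and divided by 1,000,000 before being passed to this helper. A thin wrapper can make that conversion explicit; this is only a convenience sketch.
###Code
def us_timestamp_to_utc(timestamp_us):
    """Convert a microsecond timestamp (as returned by the metric readers) to a UTC string."""
    return timestamp_to_utc(timestamp_us / 1_000_000)

# Example: format the latest available system-metrics timestamp.
print(us_timestamp_to_utc(system_metrics_reader.get_timestamp_of_latest_available_file()))
###Output
_____no_output_____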
###Markdown
Now that the data is available we can query and inspect it. We get the latest available timestamp and query all the events within the given time range:
###Code
system_metrics_reader.refresh_event_file_list()
last_timestamp = system_metrics_reader.get_timestamp_of_latest_available_file()
events = system_metrics_reader.get_events(0, last_timestamp * 1000000) # UTC time in micro seconds
print(
"Found",
len(events),
"recorded system metric events. Latest recorded event:",
timestamp_to_utc(last_timestamp / 1000000),
) # UTC time in seconds to datetime
###Output
_____no_output_____
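###Markdown
A quick way to sanity-check the query above is to look at the time span the recorded events actually cover. The sketch below relies only on the `timestamp` attribute of the events, formatted with the same helper used in the next cell.
###Code
if events:
    first_ts = min(e.timestamp for e in events)
    last_ts = max(e.timestamp for e in events)
    print("First recorded event:", timestamp_to_utc(first_ts))
    print("Last recorded event: ", timestamp_to_utc(last_ts))
###Output
_____no_output_____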
###Markdown
We can iterate over the list of recorded events. Let's have a look at the first event.
###Code
print(
"Event name:",
events[0].name,
"\nTimestamp:",
timestamp_to_utc(events[0].timestamp),
"\nValue:",
events[0].value,
)
###Output
_____no_output_____
###Markdown
3.2 GPU and CPU usage MetricsHistogram computes a histogram on GPU and CPU utilization values. Bins are between 0 and 100. Good system utilization means that the center of the distribution should be between 80 and 90. In case of multi-GPU training: if the distributions of GPU utilization values are not similar, it indicates an issue with workload distribution. The following cell will plot the histograms per metric. In order to only plot specific metrics, define the lists `select_dimensions` and `select_events`. A dimension can be CPUUtilization, GPUUtilization, GPUMemoryUtilization, or IOPS. With the CPUUtilization dimension, a CPU utilization histogram for each single core and for total CPU usage will be plotted. In case of GPU, it will visualize utilization and memory for each GPU. In case of IOPS, it will plot IO wait time per CPU. If `select_events` is specified, then only metrics that match a name in `select_events` will be shown. If neither `select_dimensions` nor `select_events` is specified, all available metrics will be visualized. One can also specify a start and end time.
###Code
from smdebug.profiler.analysis.notebook_utils.metrics_histogram import MetricsHistogram
system_metrics_reader.refresh_event_file_list()
metrics_histogram = MetricsHistogram(system_metrics_reader)
metrics_histogram.plot()
###Output
_____no_output_____
###Markdown
3.3 Read profiling data: framework annotations
###Code
from smdebug.profiler.algorithm_metrics_reader import S3AlgorithmMetricsReader
framework_metrics_reader = S3AlgorithmMetricsReader(path)
events = []
while framework_metrics_reader.get_timestamp_of_latest_available_file() == 0 or len(events) == 0:
framework_metrics_reader.refresh_event_file_list()
last_timestamp = framework_metrics_reader.get_timestamp_of_latest_available_file()
events = framework_metrics_reader.get_events(0, last_timestamp)
print("Profiler data from framework not available yet")
time.sleep(20)
print("\n\n Profiler data from framework is available")
###Output
_____no_output_____
###Markdown
The following code cell retrieves all recorded events from Amazon S3.
###Code
framework_metrics_reader.refresh_event_file_list()
last_timestamp = framework_metrics_reader.get_timestamp_of_latest_available_file()
events = framework_metrics_reader.get_events(0, last_timestamp)
print(
"Found",
len(events),
"recorded framework annotations. Latest event recorded ",
timestamp_to_utc(last_timestamp / 1000000),
)
###Output
_____no_output_____
###Markdown
Like before, we can inspect the recorded events. Since we are reading framework metrics, there is now a start and end time for each event.
###Code
print(
"Event name:",
events[0].event_name,
"\nStart time:",
timestamp_to_utc(events[0].start_time / 1000000000),
"\nEnd time:",
timestamp_to_utc(events[0].end_time / 1000000000),
"\nDuration:",
events[0].duration,
"nanosecond",
)
###Output
_____no_output_____
###Markdown
3.4 Outliers in step duration StepHistogram creates histograms of step duration values. Significant outliers are an indication of system bottlenecks. In contrast to StepTimelineChart, it helps identify clusters of step duration values. As a simple example: the time spent during the training phase (forward and backward pass) will likely differ from the time spent during the validation phase (forward pass), so we would expect at least two clusters.
###Code
from smdebug.profiler.analysis.notebook_utils.step_histogram import StepHistogram
framework_metrics_reader.refresh_event_file_list()
step_histogram = StepHistogram(framework_metrics_reader)
step_histogram.plot()
###Output
_____no_output_____
###Markdown
3.5 Heatmap The following code cell creates a heatmap where each row corresponds to one metric (CPU core and GPU utilizations) and the x-axis is the duration of the training job. It allows you to more easily spot CPU bottlenecks (utilization on GPU is low but the utilization of one or more CPU cores is high).
###Code
from smdebug.profiler.analysis.notebook_utils.heatmap import Heatmap
view_heatmap = Heatmap(
system_metrics_reader,
framework_metrics_reader,
select_dimensions=["CPU", "GPU"], # optional - comment this line out to see all dimensions.
# select_events=["total"], # optional - comment this line out to see all events.
plot_height=900,
)
###Output
_____no_output_____
###Markdown
3.6 Run loop to fetch latest profiler data and update charts The following code cell runs while your training job is in progress and refreshes the plots in the previous sections.
###Code
from bokeh.io import push_notebook
import time
last_timestamp = system_metrics_reader.get_timestamp_of_latest_available_file()
description = sagemaker_client.describe_training_job(TrainingJobName=training_job_name)
while description["TrainingJobStatus"] == "InProgress":
system_metrics_reader.refresh_event_file_list()
framework_metrics_reader.refresh_event_file_list()
current_timestamp = system_metrics_reader.get_timestamp_of_latest_available_file()
description = sagemaker_client.describe_training_job(TrainingJobName=training_job_name)
if current_timestamp > last_timestamp:
print(
"New data available, updating dashboards. Current timestamp is",
timestamp_to_utc(current_timestamp / 1000000),
)
view_heatmap.update_data(current_timestamp)
push_notebook(handle=view_heatmap.target)
metrics_histogram.update_data(current_timestamp)
push_notebook(handle=metrics_histogram.target)
step_histogram.update_data(current_timestamp)
push_notebook(handle=step_histogram.target)
last_timestamp = current_timestamp
time.sleep(30)
###Output
_____no_output_____
###Markdown
Section 4 - Analyze report generated by Debugger In this section we will analyze the report generated by the profiler rule processing job. We will showcase a few sections of the report. For complete details, please download the report from the S3 bucket and review it. Also note that the exact details in the report generated for your training job may be different from what you see in this section. 4.1 View the location of the report generated.
###Code
rule_output_path = estimator.output_path + estimator.latest_training_job.job_name + "/rule-output"
print(
f"You will find the profiler report under `{rule_output_path}/` after the training has finished"
)
###Output
_____no_output_____
###Markdown
To check if the report is generated, list directories and files recursively
###Code
! aws s3 ls {rule_output_path} --recursive
###Output
_____no_output_____
###Markdown
Download the report and rule output files recursively using `aws s3 cp` The following command saves all of the rule output files to the **ProfilerReport-1234567890** folder under your current working directory.
###Code
! aws s3 cp {rule_output_path} ./ --recursive
###Output
_____no_output_____
###Markdown
The following script automatically finds the **ProfilerReport** folder name and returns a link to the downloaded report.
###Code
from IPython.display import FileLink
profiler_report_name = [
rule["RuleConfigurationName"]
for rule in estimator.latest_training_job.rule_job_summary()
if "Profiler" in rule["RuleConfigurationName"]
][0]
profiler_report_name
display(
"Click link below to view the profiler report",
FileLink(profiler_report_name + "/profiler-output/profiler-report.html"),
)
###Output
_____no_output_____
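###Markdown
If the rule processing job has not finished yet, the HTML file may not exist locally even though the link renders. A small existence check, shown below as an optional convenience, avoids following a dead link.
###Code
import os

report_file = os.path.join(profiler_report_name, "profiler-output", "profiler-report.html")
if os.path.exists(report_file):
    print("Report downloaded to:", report_file)
else:
    print("Report not found locally yet - re-run the `aws s3 cp` cell once the rule job has finished.")
###Output
_____no_output_____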
###Markdown
For more information about how to find, download, and browse Debugger profiling reports, see [SageMaker Debugger Profiling Report](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-profiling-report.html) in the [Amazon SageMaker Debugger developer guide](https://docs.aws.amazon.com/sagemaker/latest/dg/train-debugger.html). 4.2 Profile Report - Framework metrics summary In this section of the report, you will see a pie chart similar to the one below, which shows how much time the training job spent in the "training" phase, the "validation" phase, or "others". 4.3 Profile Report - Identify most expensive CPU operator The table in this section of the report shows a list of operators that your training job ran on CPU. The most expensive operator on CPU was "ExecutorState::Process" at 16%. 4.4 Profile Report - Identify most expensive GPU operator The table shows the percentage of the time and the absolute cumulative time spent on the most frequently called GPU operators. 4.5 Access Debugger Insights in Amazon SageMaker Studio In addition to interactive analysis of the Debugger output data and analyzing the autogenerated profiling report, you can also access the Debugger insights dashboard from Amazon SageMaker Studio. To get started with Amazon SageMaker Studio using Debugger, see [Debugger on Studio](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-on-studio.html) in the [Amazon SageMaker Debugger developer guide](https://docs.aws.amazon.com/sagemaker/latest/dg/train-debugger.html). Section 5 - Analyze recommendations from the report The **Rules Summary** section of the report aggregates all of the rule evaluation results, analysis, rule descriptions, and suggestions. The following table shows a summary of the executed profiler rules. The table is sorted by the rules that triggered most frequently. In this training job, this was the case for the LowGPUUtilization rule: it processed 1001 datapoints and triggered 8 times. You may see a different rule summary based on the data and the training configuration you use. From the analysis so far and the top recommendations from the table above, there seems to be scope for improving resource utilization and making our training more efficient. Based on this, change the training configuration settings and rerun the training. Section 6 - Implement recommendations from the report In this section, we will rerun the training job with the changed configuration. The training instances are changed from p3.8xlarge to p3.2xlarge, the number of instances is reduced to 2, and only one MPI process per host is configured to increase the number of data loaders. The batch size is also changed to 512. We will use the same profiling configuration as the previous job. After the second training job with the new settings is complete, there are new system metrics, new framework metrics, and a new report.
###Code
hyperparameters = {"epoch": 5, "batch_size": 512, "data_augmentation": True}
distributions = {
"mpi": {
"enabled": True,
"processes_per_host": 1,
"custom_mpi_options": "-verbose -x HOROVOD_TIMELINE=./hvd_timeline.json -x NCCL_DEBUG=INFO -x OMPI_MCA_btl_vader_single_copy_mechanism=none",
}
}
model_dir = "/opt/ml/model"
train_instance_type = "ml.p3.2xlarge"
instance_count = 2
estimator_new = TensorFlow(
role=sagemaker.get_execution_role(),
base_job_name="tf-keras-silent",
model_dir=model_dir,
instance_count=instance_count,
instance_type=train_instance_type,
entry_point="sentiment-distributed.py",
source_dir="./tf-sentiment-script-mode",
framework_version="2.3.1",
py_version="py37",
profiler_config=profiler_config,
script_mode=True,
hyperparameters=hyperparameters,
distribution=distributions,
)
estimator_new.fit(inputs, wait=False)
###Output
_____no_output_____
###Markdown
Call to action To understand the impact of the training configuration changes, compare the report analysis from the two training jobs. Repeat the process of analyzing the profiler report, implementing the recommendations, and comparing with the previous run, until you are satisfied.
###Code
rule_output_path = (
estimator_new.output_path + estimator_new.latest_training_job.job_name + "/rule-output"
)
print(
f"You will find the profiler report under {rule_output_path}/ after the training has finished"
)
###Output
_____no_output_____
###Markdown
Download the new report and files recursively using `aws s3 cp`
###Code
! aws s3 cp {rule_output_path} ./ --recursive
###Output
_____no_output_____
###Markdown
Retrieve a file link to the new profiling report.
###Code
from IPython.display import FileLink
profiler_report_name = [
rule["RuleConfigurationName"]
for rule in estimator_new.latest_training_job.rule_job_summary()
if "Profiler" in rule["RuleConfigurationName"]
][0]
profiler_report_name
display(
"Click link below to view the profiler report",
FileLink(profiler_report_name + "/profiler-output/profiler-report.html"),
)
###Output
_____no_output_____
###Markdown
Profile machine learning training with Amazon SageMaker Debugger Gain high-precision insights into Horovod-based distributed machine learning training jobs This notebook demonstrates how to: * Execute distributed training on Amazon SageMaker using the Horovod framework. * Execute distributed training using script mode, which allows you to use a training script similar to one you would use outside SageMaker. * Execute SageMaker Debugger profiling rules against training jobs in process. * Visualize the system and framework metrics using the SMDebug client library. * Analyze the autogenerated profiling report and implement recommendations suggested by SageMaker Debugger. **Table of Contents** 1. [Introduction](intro) 2. [Section 1 - Setup](setup) 3. [Section 2 - Train sentiment analysis CNN model with custom Debugger profiling configuration](train) 4. [Section 3 - Interactive analysis using the SMDebug visualization tools](analysis) 5. [Section 4 - Analyze report generated by Debugger](profiler-report) 6. [Section 5 - Analyze recommendations from the report](analyze-profiler-recommendations) 7. [Section 6 - Implement recommendations from the report](implement-profiler-recommendations) 8. [Conclusion](conclusion) Introduction Training machine learning models is a time- and compute-intensive process requiring multiple training runs with different hyperparameters before a model yields acceptable accuracy. CPU and GPU based distributed training with frameworks such as Horovod and Parameter Servers address this issue by allowing training to be easily scalable to a cluster of resources. However, distributed training makes it harder to identify and debug resource bottleneck problems. Gaining insights into the training in progress, both at the machine learning framework level and the underlying compute resources level, is a critical step towards understanding the resource usage patterns and reducing resource wastage. Analyzing bottleneck issues is necessary to maximize the utilization of compute resources and optimize model training performance to deliver state-of-the-art machine learning models with target accuracy. Amazon SageMaker is a fully managed service that enables developers and data scientists to quickly and easily build, train, and deploy ML models at scale. Amazon SageMaker Debugger is a feature of SageMaker training that makes it easy to train machine learning (ML) models faster by capturing real-time metrics such as learning gradients and weights, providing transparency into the training process, so you can correct anomalies such as losses, overfitting, and overtraining. With the newly introduced profiling capability, SageMaker Debugger now automatically monitors system resources such as CPU, GPU, network, IO, and memory, providing a complete resource utilization view of training jobs. In this notebook, we demonstrate the Amazon SageMaker Debugger profiling capabilities using the sentiment analysis use case. Use case - Sentiment Analysis with TensorFlow and Keras Sentiment analysis is a very common text analytics task that involves determining whether a text sample is positive or negative about its subject. There are several different algorithms for performing this task, including statistical algorithms and deep learning algorithms. With respect to deep learning, a Convolutional Neural Net (CNN) is sometimes used for this purpose.
In this notebook we'll use a CNN built with TensorFlow to perform sentiment analysis in Amazon SageMaker on the IMDB dataset, which consists of movie reviews labeled as having positive or negative sentiment. Step 0 - Install and check the SageMaker Python SDK version To use the new Debugger profiling features, ensure that you have the latest versions of SageMaker and SMDebug SDKs installed. The following cell updates the libraries and restarts the Jupyter kernel to apply the updates.
###Code
import sys
import IPython
install_needed = True # should only be True once
if install_needed:
print("installing deps and restarting kernel")
!{sys.executable} -m pip install -U sagemaker
!{sys.executable} -m pip install -U smdebug
IPython.Application.instance().kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Check the SageMaker version.
###Code
import sagemaker
sagemaker.__version__
###Output
_____no_output_____
###Markdown
**Important**: If the SageMaker version is less than 2.19.0 and if you are using an existing SageMaker Studio or Notebook instance, you must update the environment to use the latest SageMaker Python SDK. Follow instructions at [Update Amazon SageMaker Studio](https://docs.aws.amazon.com/sagemaker/latest/dg/studio-tasks-update.html) and [Notebook Instance Software Updates](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-software-updates.html) in the [Amazon SageMaker developer guide](https://docs.aws.amazon.com/sagemaker/latest/dg/whatis.html). Section 1 - Setup In this section, you will import the necessary libraries, set up variables, and examine data to train the sentiment analysis model. Let's start by specifying: * The AWS region used to host your model. * The IAM role associated with this SageMaker notebook instance. * The S3 bucket used to store the data used to train your model, any additional model data, and the data captured from model invocations. 1.1 Import necessary libraries
###Code
import pandas as pd
import numpy as np
import os
import boto3
import time
# import debugger libraries
import sagemaker
from sagemaker.tensorflow import TensorFlow
from sagemaker.debugger import ProfilerConfig, FrameworkProfile
from tensorflow.keras.preprocessing import sequence
from tensorflow.python.keras.datasets import imdb
###Output
_____no_output_____
###Markdown
1.2 AWS region and IAM Role
###Code
#Get Execution role
role=sagemaker.get_execution_role()
print("RoleArn:", role)
session=boto3.session.Session()
region=session.region_name
print("Region:", region)
###Output
_____no_output_____
###Markdown
1.3 S3 bucket and prefixes
###Code
s3_prefix='tf-hvd-sentiment-silent'
traindata_s3_prefix='{}/data/train'.format(s3_prefix)
testdata_s3_prefix='{}/data/test'.format(s3_prefix)
sagemaker_session=sagemaker.Session()
###Output
_____no_output_____
###Markdown
1.4 Process training data We'll begin by loading the reviews dataset, and padding the reviews, so all reviews have the same length. Each review is represented as an array of numbers, where each number represents an indexed word. Training data for both Local Mode and Hosted Training must be saved as files, so we'll also save the transformed data to files.
###Code
max_features=20000
maxlen=400
(x_train, y_train), (x_test, y_test)=imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
x_train=sequence.pad_sequences(x_train, maxlen=maxlen)
x_test=sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
# Each review is an array of numbers where each number is an indexed word
print(x_train[:10])
data_dir=os.path.join(os.getcwd(), 'data')
os.makedirs(data_dir, exist_ok=True)
train_dir=os.path.join(os.getcwd(), 'data/train')
os.makedirs(train_dir, exist_ok=True)
test_dir=os.path.join(os.getcwd(), 'data/test')
os.makedirs(test_dir, exist_ok=True)
csv_test_dir=os.path.join(os.getcwd(), 'data/csv-test')
os.makedirs(csv_test_dir, exist_ok=True)
np.save(os.path.join(train_dir, 'x_train.npy'), x_train)
np.save(os.path.join(train_dir, 'y_train.npy'), y_train)
np.save(os.path.join(test_dir, 'x_test.npy'), x_test)
np.save(os.path.join(test_dir, 'y_test.npy'), y_test)
np.savetxt(os.path.join(csv_test_dir, 'csv-test.csv'), np.array(x_test[:100], dtype=np.int32), fmt='%d', delimiter=",")
train_s3=sagemaker_session.upload_data(path='./data/train/', key_prefix=traindata_s3_prefix)
test_s3=sagemaker_session.upload_data(path='./data/test/', key_prefix=testdata_s3_prefix)
inputs={'train':train_s3, 'test': test_s3}
print(inputs)
###Output
_____no_output_____
###Markdown
Section 2 - Train sentiment analysis CNN model with custom profiler configuration In this section we use SageMaker's hosted training using Uber's Horovod framework, which uses compute resources separate from this notebook instance. Hosted training spins up one or more instances (a cluster) for training, and then tears the cluster down when training is complete. Horovod is a distributed deep learning training framework for TensorFlow, Keras, PyTorch, and Apache MXNet. The objective is to take a single-GPU training script and successfully scale it to train across many GPUs in parallel. Once a training script has been written for scale with Horovod, it can run on a single GPU, multiple GPUs, or even multiple hosts without any further code changes. With the SageMaker Python SDK, you can train and host TensorFlow models on Amazon SageMaker. For more information, see [Use TensorFlow with the SageMaker Python SDK](https://sagemaker.readthedocs.io/en/stable/frameworks/tensorflow/using_tf.html) in the SageMaker Python SDK documentation. For our training, we will use three p3.8xlarge instances to begin with and change our training configuration based on profiling recommendations from Amazon SageMaker Debugger. Amazon EC2 P3 instances deliver high performance compute in the cloud with up to 8 NVIDIA® V100 Tensor Core GPUs and up to 100 Gbps of networking throughput for machine learning and HPC applications. The p3.8xlarge instance comes with 4 GPUs and 32 vCPU cores with 10 Gbps networking performance. Please refer to the EC2 Instance Types page for more details. 2.1 Setup training job We will use the standard SageMaker Estimator API for TensorFlow to create training jobs. Profiling configuration will be enabled by default to emit framework and system metrics for our analysis. Define hyperparameters such as the number of epochs, batch size, and data augmentation. * You can increase the batch size to increase system utilization, but it may result in CPU bottleneck problems. Data preprocessing of a large batch size with augmentation requires heavy computation. * You can disable `data_augmentation` to see the impact on the system utilization. * We've set the number of epochs to enable training to run quicker; please adjust this accordingly for your use case.
###Code
hyperparameters={
'epoch': 25,
'batch_size': 256,
'data_augmentation': True
}
###Output
_____no_output_____
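###Markdown
To relate the batch size to the profiled steps configured in section 2.2, a rough steps-per-epoch estimate can help. The sketch below assumes the IMDB training split loaded in section 1.4 and ignores how Horovod shards the data across workers, so treat it as an approximation only.
###Code
train_samples = len(x_train)  # IMDB training split loaded in section 1.4
steps_per_epoch = train_samples // hyperparameters['batch_size']
print("Approximate steps per epoch on a single worker:", steps_per_epoch)
###Output
_____no_output_____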
###Markdown
Take your AWS account limits into consideration while setting up the `instance_type` and `instance_count` of the cluster.
###Code
distributions={
"mpi": {
"enabled": True,
"processes_per_host": 3,
"custom_mpi_options": "-verbose -x HOROVOD_TIMELINE=./hvd_timeline.json -x NCCL_DEBUG=INFO -x OMPI_MCA_btl_vader_single_copy_mechanism=none",
}
}
model_dir='/opt/ml/model'
train_instance_type='ml.p3.8xlarge'
instance_count=3
###Output
_____no_output_____
###Markdown
2.2 Define profiler configuration With the following **`profiler_config`** parameter configuration, Debugger calls the default settings of monitoring, collecting system metrics every 500 milliseconds. For collecting framework metrics, you can set target steps and target time intervals in detail.
###Code
profiler_config=ProfilerConfig(
framework_profile_params=FrameworkProfile(start_step=2, num_steps=7)
)
###Output
_____no_output_____
###Markdown
With these `profiler_config` settings, Debugger will collect system metrics every 500 milliseconds and framework metrics on the specified steps (from step 2 to 9). For a complete list of parameters and profiling configurations, see [Configure Debugger Using Amazon SageMaker Python SDK](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-configuration.html) in the [Amazon SageMaker Debugger developer guide](https://docs.aws.amazon.com/sagemaker/latest/dg/train-debugger.html). 2.3 Configure training job using TensorFlow estimator and pass in the profiler configuration. While constructing a SageMaker estimator, specify the TensorFlow framework version and supported Python version. For a complete list of the supported framework versions and the corresponding Python version to use, see [Supported Frameworks and Algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/train-debugger.html#debugger-supported-frameworks) in the [Amazon SageMaker Debugger developer guide](https://docs.aws.amazon.com/sagemaker/latest/dg/train-debugger.html). **Note**: In the following estimator, the exact `image_uri` points to the latest AWS TensorFlow deep learning container image. For a complete list of AWS deep learning containers, see [General Framework Containers](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#general-framework-containers) in the [AWS Deep Learning Containers](https://github.com/aws/deep-learning-containers/) repository. Debugger's new profiling features are available for TensorFlow 2.3.1 and PyTorch 1.6.0.
###Code
estimator=TensorFlow(
role=sagemaker.get_execution_role(),
base_job_name= 'tf-keras-silent',
model_dir=model_dir,
instance_count=instance_count,
instance_type=train_instance_type,
entry_point= 'sentiment-distributed.py',
source_dir='./tf-sentiment-script-mode',
image_uri=f"763104351884.dkr.ecr.{region}.amazonaws.com/tensorflow-training:2.3.1-gpu-py37-cu110-ubuntu18.04",
#framework_version="2.3.1",
#py_version='py37',
profiler_config=profiler_config,
script_mode=True,
hyperparameters=hyperparameters,
distribution=distributions,
)
###Output
_____no_output_____
###Markdown
We then simply call `fit` to start the actual hosted training
###Code
estimator.fit(inputs, wait=False)
###Output
_____no_output_____
###Markdown
Section 3 - Interactive analysis using the SMDebug visualization tools In this section, we introduce interactive analysis of the data captured by SageMaker Debugger. It is organized in order of training phases: initialization, training, and finalization. The profiling data results are categorized as System Metrics and Algorithm (Framework) Metrics. Once the training job initiates, SageMaker Debugger starts collecting system and framework metrics. The smdebug library provides profiler analysis tools that enable you to access and analyze the profiling data. The following code cells set up readers that retrieve the system and framework metrics when they become available in the default S3 bucket. Once the metrics are available, you can query, plot, and analyze the profiling metrics data throughout this notebook. Let's import and check the SMDebug version.
###Code
import smdebug
smdebug.__version__
###Output
_____no_output_____
###Markdown
3.1 Read profiling data: system metrics Once the training job is running, SageMaker collects system and framework metrics. The following code cell waits for the system metrics to become available in S3. Once they are available, you will be able to query and plot those metrics.
###Code
from smdebug.profiler.system_metrics_reader import S3SystemMetricsReader
path=estimator.latest_job_profiler_artifacts_path()
system_metrics_reader=S3SystemMetricsReader(path)
sagemaker_client=boto3.client('sagemaker')
training_job_name=estimator.latest_training_job.name
print(f"Training job name: {training_job_name}")
training_job_status=''
training_job_secondary_status=''
while system_metrics_reader.get_timestamp_of_latest_available_file() == 0:
system_metrics_reader.refresh_event_file_list()
client=sagemaker_client.describe_training_job(
TrainingJobName=training_job_name
)
if 'TrainingJobStatus' in client:
training_job_status=f"TrainingJobStatus: {client['TrainingJobStatus']}"
if 'SecondaryStatus' in client:
training_job_secondary_status=f"TrainingJobSecondaryStatus: {client['SecondaryStatus']}"
print(f"Profiler data from system not available yet. {training_job_status}. {training_job_secondary_status}.")
time.sleep(20)
print("\n\nProfiler data from system is available")
###Output
_____no_output_____
###Markdown
Helper function to convert timestamps into UTC:
###Code
from datetime import datetime
def timestamp_to_utc(timestamp):
utc_dt=datetime.utcfromtimestamp(timestamp)
return utc_dt.strftime('%Y-%m-%d %H:%M:%S')
###Output
_____no_output_____
###Markdown
Now that the data is available we can query and inspect it. We get the latest available timestamp and query all the events within the given time range:
###Code
system_metrics_reader.refresh_event_file_list()
last_timestamp=system_metrics_reader.get_timestamp_of_latest_available_file()
events=system_metrics_reader.get_events(0, last_timestamp)
print("Found", len(events), "recorded system metric events. Latest recorded event:",
timestamp_to_utc(last_timestamp/1000000))
###Output
_____no_output_____
###Markdown
We can iterate over the list of recorded events. Let's have a look at the first event.
###Code
print("Event name:", events[0].name,
"\nTimestamp:", timestamp_to_utc(events[0].timestamp),
"\nValue:", events[0].value)
###Output
_____no_output_____
###Markdown
3.2 GPU and CPU usage MetricsHistogram computes a histogram of GPU and CPU utilization values. Bins are between 0 and 100. Good system utilization means that the center of the distribution should be between 80 and 90. In case of multi-GPU training: if the distributions of GPU utilization values are not similar, it indicates an issue with workload distribution. The following cell will plot the histograms per metric. In order to plot only specific metrics, define the lists `select_dimensions` and `select_events`. A dimension can be CPUUtilization, GPUUtilization, GPUMemoryUtilization, or IOPS. With the CPUUtilization dimension, the CPU utilization histogram for each single core and total CPU usage will be plotted. In case of GPU, it will visualize utilization and memory for each GPU. In case of IOPS, it will plot IO wait time per CPU. If `select_events` is specified, then only metrics that match a name in `select_events` will be shown. If neither `select_dimensions` nor `select_events` is specified, all available metrics will be visualized. One can also specify a start and end time.
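For example, once the `metrics_histogram` object from the next cell exists, you could narrow the plot to the GPU dimensions and the aggregate "total" events. A sketch; the keyword names follow the smdebug notebook utilities and may vary between versions:
```python
# restrict the histograms to GPU metrics and the aggregate "total" events
metrics_histogram.plot(
    starttime=0,
    endtime=system_metrics_reader.get_timestamp_of_latest_available_file(),
    select_dimensions=["GPU"],
    select_events=["total"],
)
```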
###Code
from smdebug.profiler.analysis.notebook_utils.metrics_histogram import MetricsHistogram
system_metrics_reader.refresh_event_file_list()
metrics_histogram=MetricsHistogram(system_metrics_reader)
metrics_histogram.plot()
###Output
_____no_output_____
###Markdown
3.3 Read profiling data: framework annotations
###Code
from smdebug.profiler.algorithm_metrics_reader import S3AlgorithmMetricsReader
framework_metrics_reader=S3AlgorithmMetricsReader(path)
events=[]
while framework_metrics_reader.get_timestamp_of_latest_available_file() == 0 or len(events) == 0:
framework_metrics_reader.refresh_event_file_list()
last_timestamp=framework_metrics_reader.get_timestamp_of_latest_available_file()
events=framework_metrics_reader.get_events(0, last_timestamp)
print("Profiler data from framework not available yet")
time.sleep(20)
print("\n\n Profiler data from framework is available")
###Output
_____no_output_____
###Markdown
The following code cell retrieves all recorded events from Amazon S3.
###Code
framework_metrics_reader.refresh_event_file_list()
last_timestamp=framework_metrics_reader.get_timestamp_of_latest_available_file()
events=framework_metrics_reader.get_events(0, last_timestamp)
print("Found", len(events), "recorded framework annotations. Latest event recorded ",
timestamp_to_utc(last_timestamp/1000000))
###Output
_____no_output_____
###Markdown
Like before we can inspect the recorded events. Since we are reading framework metrics there is now a start and end time for each event.
###Code
print("Event name:", events[0].event_name,
"\nStart time:", timestamp_to_utc(events[0].start_time/1000000000),
"\nEnd time:", timestamp_to_utc(events[0].end_time/1000000000),
"\nDuration:", events[0].duration, "nanosecond")
###Output
_____no_output_____
###Markdown
3.4 Outliers in step duration StepHistogram creates a histogram of step duration values. Significant outliers are an indication of a bottleneck. In contrast to StepTimelineChart, it allows you to more easily identify clusters of step duration values. As a simple example: time spent during the training phase (forward and backward pass) will likely differ from time spent during the validation phase (forward pass), so we would expect at least two clusters.
###Code
from smdebug.profiler.analysis.notebook_utils.step_histogram import StepHistogram
framework_metrics_reader.refresh_event_file_list()
step_histogram=StepHistogram(framework_metrics_reader)
step_histogram.plot()
###Output
_____no_output_____
###Markdown
3.5 Heatmap The following code cell creates a heatmap where each row corresponds to one metric (CPU core and GPU utilizations) and the x-axis is the duration of the training job. It allows you to more easily spot CPU bottlenecks (for example, GPU utilization is low while utilization of one or more CPU cores is high).
###Code
from smdebug.profiler.analysis.notebook_utils.heatmap import Heatmap
view_heatmap=Heatmap(
system_metrics_reader,
framework_metrics_reader,
select_dimensions=["CPU", "GPU"], # optional - comment this line out to see all dimensions.
# select_events=["total"], # optional - comment this line out to see all events.
plot_height=900
)
###Output
_____no_output_____
###Markdown
3.6 Run loop to fetch latest profiler data and update charts The following code cell runs while your training job is in progress and refreshes the plots in the previous sections.
###Code
from bokeh.io import push_notebook
import time
last_timestamp=system_metrics_reader.get_timestamp_of_latest_available_file()
description=sagemaker_client.describe_training_job(TrainingJobName=training_job_name)
while description['TrainingJobStatus'] == "InProgress":
    system_metrics_reader.refresh_event_file_list()
    framework_metrics_reader.refresh_event_file_list()
    current_timestamp=system_metrics_reader.get_timestamp_of_latest_available_file()
    description=sagemaker_client.describe_training_job(TrainingJobName=training_job_name)
if current_timestamp > last_timestamp:
print("New data available, updating dashboards. Current timestamp is",
timestamp_to_utc(current_timestamp/1000000))
view_heatmap.update_data(current_timestamp)
push_notebook(handle=view_heatmap.target)
metrics_histogram.update_data(current_timestamp)
push_notebook(handle=metrics_histogram.target)
step_histogram.update_data(current_timestamp)
push_notebook(handle=step_histogram.target)
last_timestamp=current_timestamp
time.sleep(10)
###Output
_____no_output_____
###Markdown
Section 4 - Analyze report generated by Debugger In this section we will analyze the report generated by the profiler rule processing job. We will showcase a few sections of the report. For complete details, please download the report from the S3 bucket and review it. Also note that the exact details in the report generated for your training job may be different from what you see in this section. 4.1 View the location of the generated report.
###Code
rule_output_path=estimator.output_path + estimator.latest_training_job.job_name + "/rule-output"
print(f"You will find the profiler report under `{rule_output_path}/` after the training has finished")
###Output
_____no_output_____
###Markdown
To check whether the report has been generated, list the directories and files recursively:
###Code
! aws s3 ls {rule_output_path} --recursive
###Output
_____no_output_____
###Markdown
Download the report and rule output files recursively using `aws s3 cp`. The following command saves all of the rule output files to the ProfilerReport-1234567890 folder under your current working directory.
###Code
! aws s3 cp {rule_output_path} ./ --recursive
###Output
_____no_output_____
###Markdown
For more information about how to find, download, and browse Debugger profiling reports, see [SageMaker Debugger Profiling Report](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-profiling-report.html) in the [Amazon SageMaker Debugger developer guide](https://docs.aws.amazon.com/sagemaker/latest/dg/train-debugger.html). 4.2 Profile Report - Framework metrics summary In this section of the report, you will see a pie chart similar to the one below, which shows how much time the training job spent in the "training" phase, the "validation" phase, or "others". 4.3 Profile Report - Identify most expensive CPU operator The table in this section of the report shows a list of operators that your training job ran on CPU. The most expensive operator on CPU was "ExecutorState::Process" at 16%. 4.4 Profile Report - Identify most expensive GPU operator The table shows the percentage of the time and the absolute cumulative time spent on the most frequently called GPU operators. 4.5 Access Debugger Insights in Amazon SageMaker Studio In addition to interactive analysis of the Debugger output data and analyzing the autogenerated profiling report, you can also access the Debugger insights dashboard from Amazon SageMaker Studio. To get started with Amazon SageMaker Studio using Debugger, see [Debugger on Studio](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-on-studio.html) in the [Amazon SageMaker Debugger developer guide](https://docs.aws.amazon.com/sagemaker/latest/dg/train-debugger.html). Section 5 - Analyze recommendations from the report The **Rules Summary** section of the report aggregates all of the rule evaluation results, analysis, rule descriptions, and suggestions. The following table shows a summary of the executed profiler rules. The table is sorted by the rules that triggered most frequently. In this training job, that was the case for the LowGPUUtilization rule: it processed 1001 datapoints and triggered 8 times. You may see a different rule summary based on the data and the training configuration you use. From the analysis so far and the top recommendations in the table above, there seems to be scope for improving resource utilization and making our training more efficient. Based on this, change the training configuration settings and re-run the training. Section 6 - Implement recommendations from the report In this section, we will rerun the training job with the changed configuration. The training instances are changed from p3.8xlarge to p3.2xlarge, the number of instances is reduced to 2, and MPI is configured with only one process per host so that more CPU capacity is available for data loading. The batch size is also changed to 512. We will use the same profiling configuration as the previous job. After the second training job with the new settings completes, there are new system metrics, framework metrics, and a new report generated.
###Code
hyperparameters={
'epoch': 25,
'batch_size': 512,
'data_augmentation': True}
distributions={
"mpi": {
"enabled": True,
"processes_per_host": 1,
"custom_mpi_options": "-verbose -x HOROVOD_TIMELINE=./hvd_timeline.json -x NCCL_DEBUG=INFO -x OMPI_MCA_btl_vader_single_copy_mechanism=none",
}
}
model_dir='/opt/ml/model'
train_instance_type='ml.p3.2xlarge'
instance_count=2
estimator_new=TensorFlow(
role=sagemaker.get_execution_role(),
base_job_name='tf-keras-silent',
model_dir=model_dir,
instance_count=instance_count,
instance_type=train_instance_type,
entry_point= 'sentiment-distributed.py',
source_dir='./tf-sentiment-script-mode',
image_uri=f"763104351884.dkr.ecr.{region}.amazonaws.com/tensorflow-training:2.3.1-gpu-py37-cu110-ubuntu18.04",
#framework_version="2.3.1",
#py_version='py37',
profiler_config=profiler_config,
script_mode=True,
hyperparameters=hyperparameters,
distribution=distributions,
)
estimator_new.fit(inputs)
###Output
_____no_output_____
###Markdown
Call to action To understand the impact of the training configuration changes, compare the report analysis from the two training jobs. Repeat the process of analyzing the profiler report, implementing the recommendations, and comparing with the previous run until you are satisfied.
###Code
rule_output_path=estimator_new.output_path + estimator_new.latest_training_job.job_name + "/rule-output"
print(f"You will find the profiler report under {rule_output_path}/ after the training has finished")
###Output
_____no_output_____
###Markdown
Download the new report and files recursively using `aws s3 cp`
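After the download in the next cell completes, you can link to the HTML reports directly from the notebook. A minimal sketch; the folder layout is an assumption, so adjust the glob pattern to whatever was actually downloaded:
```python
import glob
from IPython.display import FileLink, display

# look for any downloaded profiler report under the current working directory
for report in glob.glob("**/profiler-output/profiler-report.html", recursive=True):
    display(FileLink(report))
```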
###Code
! aws s3 cp {rule_output_path} ./ --recursive
###Output
_____no_output_____
|
01-Computer-Vision-Fundamentals/Quiz-3.1-Canny-Edges.ipynb
|
###Markdown
Quiz 3: Canny Edges. Try using Canny on your own and fiddle with the parameters for the Gaussian smoothing and Edge Detection to optimize for detecting the lane lines well without detecting a lot of other stuff. Your result should look like the example shown below.
###Code
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
image = mpimg.imread('../img/exit-ramp.jpg')
#plt.imshow(image)
gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
#plt.imshow(gray, cmap= 'gray')
#Define Kernel Size
kernel_size = 3
blur_gray = cv2.GaussianBlur(gray,(kernel_size,kernel_size),0)
#Change below thresholds to get matching Image.
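# A common heuristic is a low:high threshold ratio of roughly 1:2 to 1:3 (e.g., 50 and 150)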
low_threshold = 1
high_threshold = 10
edges = cv2.Canny(blur_gray,low_threshold,high_threshold)
plt.imshow(edges, cmap='Greys_r')
###Output
_____no_output_____
|
exercises/Customization/Magics.ipynb
|
###Markdown
Customizing IPython - Magics IPython extends Python by adding shell-like commands called **magics**.
###Code
%lsmagic
import numpy
%timeit A=numpy.random.random((1000,1000))
%%timeit -n 1
A=numpy.random.random((1000,1000))
b = A.sum()
###Output
1 loops, best of 3: 22.2 ms per loop
###Markdown
Defining your own magic As we have seen already, IPython has cell and line magics. You can define your own magics using any Python function and the `register_magic_function` method:
###Code
ip = get_ipython()
import time
def sleep_magic(line):
"""A simple function for sleeping"""
t = float(line)
time.sleep(t)
ip.register_magic_function?
ip.register_magic_function(sleep_magic, "line", "sleep")
%sleep 2
%sleep?
###Output
_____no_output_____
###Markdown
Exercise Define `%tic` and `%toc` magics, which can be used for simple timings, e.g. where
```python
for p in range(1, 4):
    N = 10**p
    print("N=%i" % N)
    %tic
    A = np.random.random((N, N))
    np.linalg.eigvals(A)
    %toc
```
each `%toc` will print the time since the last `%tic`. Create separate `tic` and `toc` functions that read and write a global time variable.
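One minimal sketch of such magics (the provided `soln/tictocf.py` may differ):
```python
import time

_tic_time = None

def tic_magic(line):
    """%tic -- record the current time in a global variable."""
    global _tic_time
    _tic_time = time.time()

def toc_magic(line):
    """%toc -- print the time elapsed since the last %tic."""
    print("%.3g s" % (time.time() - _tic_time))

ip = get_ipython()
ip.register_magic_function(tic_magic, "line", "tic")
ip.register_magic_function(toc_magic, "line", "toc")
```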
###Code
%load soln/tictocf.py
import numpy as np
import sys
for p in range(1,4):
N = 10**p
print("N=%i" % N)
sys.stdout.flush()
%tic
A = np.random.random((N,N))
np.linalg.eigvals(A)
%toc
###Output
N=10
644 ยตs
N=100
15.8 ms
N=1000
9.05 s
###Markdown
Cell Magic **Cell magics** take two args: 1. the **line** on the same line as the magic, and 2. the **cell**, the multiline body of the cell after the first line.
###Code
def dummy_cell_magic(line, cell):
"""dummy cell magic for displaying the line and cell it is passed"""
print("line: %r" % line)
print("cell: %r" % cell)
ip.register_magic_function(dummy_cell_magic, "cell", "dummy")
%%dummy this is the line
this
is the
cell
def parse_magic_line(line):
"""parse a magic line into a name and eval'd expression"""
name, values_s = line.split(None, 1)
values = eval(values_s, get_ipython().user_ns)
return name, values
parse_magic_line("x range(5)")
###Output
_____no_output_____
###Markdown
Exercise Can you write and register a **cell magic** that automates the outer iteration, timing a block for various values of a particular variable:
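A possible sketch, building on `parse_magic_line` from above (the provided `soln/scalemagic.py` may differ):
```python
import time

def scale_magic(line, cell):
    """%%scale <name> <values>: run the cell body once per value, timing each run."""
    ip = get_ipython()
    name, values = parse_magic_line(line)
    for v in values:
        ip.user_ns[name] = v  # inject the current value into the user namespace
        print("%s=%s" % (name, v))
        t0 = time.time()
        ip.run_cell(cell, silent=True)
        print("%.3g s" % (time.time() - t0))

get_ipython().register_magic_function(scale_magic, "cell", "scale")
```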
###Code
%load soln/scalemagic.py
%%scale N [ int(10**p) for p in range(1,4) ]
A = np.random.random((N,N))
np.linalg.eigvals(A)
%%scale N [ int(2**p) for p in np.linspace(6, 11, 11) ]
A = np.random.random((N,N))
np.linalg.eigvals(A)
###Output
N=64
7.77 ms
N=90
10.4 ms
N=128
23 ms
N=181
59.6 ms
N=256
170 ms
N=362
351 ms
N=512
1.46 s
N=724
2.98 s
N=1024
16.9 s
N=1448
37.2 s
N=2048
234 s
###Markdown
Executing Notebooks We can load a notebook into memory using `IPython.nbformat`.
###Code
import io
import os
import IPython.nbformat as nbf
def load_notebook(filename):
"""load a notebook object from a filename"""
if not os.path.exists(filename) and not filename.endswith(".ipynb"):
filename = filename + ".ipynb"
with io.open(filename) as f:
return nbf.read(f, as_version=4)
nb = load_notebook("_Sample")
###Output
_____no_output_____
###Markdown
**A notebook is just a dictionary** with attribute access for convenience.
###Code
nb.keys()
cells = nb.cells
cells
###Output
_____no_output_____
###Markdown
We can see all the cells and their type
###Code
for cell in cells:
print()
print('----- %s -----' % cell.cell_type)
print(cell.source)
###Output
----- markdown -----
# A sample notebook
----- code -----
print('hello')
----- code -----
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
plt.plot(np.random.random(100))
----- markdown -----
A function for displaying the summary of a notebook object.
It prints a simple summary, such as:
```
1 markdown cells, total: 4 lines
5 code cells, total: 4 lines
1 heading cells, total: 1 lines
```
----- code -----
def nb_info(nb):
"""display a summary of the contents of a notebook"""
cell_counts = {}
cell_lines = {}
for cell in nb.cells:
cell_type = cell.cell_type
count = cell_counts.setdefault(cell_type, 0)
lines = cell_counts.setdefault(cell_type, 0)
cell_counts[cell_type] = count + 1
try:
content = cell.source
except AttributeError:
content = cell.input
cell_lines[cell_type] = lines + len(content.splitlines())
for cell_type in cell_counts:
print("%3i %10s cells, total: %3i lines" % (cell_counts[cell_type], cell_type, cell_lines[cell_type]))
###Markdown
Now I can run all of the **code cells** with `get_ipython().run_cell`
###Code
for cell in cells:
ip = get_ipython()
if cell.cell_type == 'code':
ip.run_cell(cell.source, silent=True)
###Output
hello
###Markdown
And we can now use the function that was defined in that notebook:
###Code
nb_info(nb)
###Output
2 markdown cells, total: 9 lines
3 code cells, total: 20 lines
###Markdown
Exercise Can you write and register an `%nbrun` line magic to run a notebook?
```python
%nbrun Sample
```
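One possible sketch, reusing `load_notebook` from above (the provided `soln/nbrun.py` may differ):
```python
def nbrun_magic(line):
    """%nbrun <name> -- run all code cells of the named notebook."""
    ip = get_ipython()
    nb = load_notebook(line.strip())
    for cell in nb.cells:
        if cell.cell_type == 'code':
            ip.run_cell(cell.source, silent=True)

get_ipython().register_magic_function(nbrun_magic, "line", "nbrun")
```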
###Code
%load soln/nbrun.py
%nbrun _Sample
###Output
hello
|
fdtd/fd_ac2d_heterogeneous_solution.ipynb
|
###Markdown
Computational Seismology Finite Differences Method - Acoustic Waves in 2D --- This notebook is part of the supplementary material to [Computational Seismology: A Practical Introduction](https://global.oup.com/academic/product/computational-seismology-9780198717416?cc=de&lang=en&), Oxford University Press, 2016. Authors: * Heiner Igel ([@heinerigel](https://github.com/heinerigel)) * Florian Wölfl ([@flo-woelfl](https://github.com/flo-woelft)) * Lion Krischer ([@krischer](https://github.com/krischer)) --- This exercise covers the following aspects: * presenting you with an implementation of the 2D acoustic wave equation * allowing you to explore the benefits of using high-order finite-difference operators * understanding the concepts of stability (Courant criterion) * exploration of numerical dispersion and numerical grid anisotropy * changing the earth model and exploring some effects of structural heterogeneities (e.g., fault zones) --- Basic Equations The acoustic wave equation in 2D is $$\ddot{p}(x,z,t) \ = \ c(x,z)^2 (\partial_x^2 p(x,z,t) + \partial_z^2 p(x,z,t)) \ + s(x,z,t)$$ and we replace the time-dependent (upper index time, lower indices space) part by $$ \frac{p_{j,k}^{n+1} - 2 p_{j,k}^n + p_{j,k}^{n-1}}{\mathrm{d}t^2} \ = \ c_j^2 ( \partial_x^2 p + \partial_z^2 p) \ + s_{j,k}^n$$ solving for $p_{j,k}^{n+1}$. The extrapolation scheme is $$p_{j,k}^{n+1} \ = \ c_j^2 \mathrm{d}t^2 \left[ \partial_x^2 p + \partial_z^2 p \right] + 2p_{j,k}^n - p_{j,k}^{n-1} + \mathrm{d}t^2 s_{j,k}^n$$ The space derivatives are determined by $$\partial_x^2 p \ = \ \frac{p_{j+1,k}^{n} - 2 p_{j,k}^n + p_{j-1,k}^{n}}{\mathrm{d}x^2}$$ $$\partial_z^2 p \ = \ \frac{p_{j,k+1}^{n} - 2 p_{j,k}^n + p_{j,k-1}^{n}}{\mathrm{d}z^2}$$ --- Exercises 1. Getting started Before you start it is good practice to immediately make a copy of the original notebook (e.g., X_orig.ipynb). Run the simulation code. Relate the time extrapolation loop with the numerical algorithm we developed in the course. Understand the input parameters for the simulation and the plots that are generated. Modify source and receiver locations and observe the effects on the seismograms. 2. Stability Introduce a new parameter (e.g., eps) and calculate the Courant criterion. Determine numerically the stability limit of the code as accurately as possible by increasing the time step. Print the max value of the pressure field at each time step and observe its evolution in the case of stable and unstable simulations. (Hint: The Courant criterion is defined as $eps = (velocity * dt) / dx$. With this information you can calculate the maximum possible, stable time step.) 3. High-order operators Extend the code by adding the option to use a 5-point difference operator (see problem 1 of the exercise sheet). Compare simulations with the 3-point and 5-point operator. Is the stability limit still the same? Make it an option to change between the 3-pt and 5-pt operator. Estimate the number of points per wavelength and investigate the accuracy of the simulation by looking for signs of numerical dispersion in the resulting seismograms. The 5-pt weights are: $[-1/12, 4/3, -5/2, 4/3, -1/12]/dx^2$. 4. Numerical anisotropy Increase the frequency of the wavefield by varying f0. Investigate the angular dependence of the wavefield. Why does the wavefield look anisotropic? Which direction is the most accurate and why? What happens if you set the source time function to a spike (zero everywhere except one element with value 1)? 5.
Heterogeneous models Now let us explore the power of the finite-difference method by varying the internal structure of the model. Here we can only modify the velocity c, which can vary at each grid point (any restrictions?). Here are some suggestions. Investigate the influence of the structure by analysing the snapshots and the seismograms. * Add a low (or high) velocity layer near the surface. Put the source at zs=2. * Add a vertical low velocity zone (fault zone) of a certain width (e.g. 10 grid points), and discuss the resulting wavefield. * Simulate topography by setting the pressure to 0 above the surface. Use a Gaussian hill shape or a random topography. * etc. 6. Source-receiver reciprocity Initialize a strongly heterogeneous 2D velocity model of your choice and simulate waves propagating from an internal source point ($x_s, z_s$) to an internal receiver ($x_r, z_r$). Show that by reversing source and receiver you obtain the same seismogram. 7. Time reversal Define in an arbitrary 2D velocity model a source at the centre of the domain and a receiver circle at an appropriate distance around the source. Simulate a wavefield, record it at the receiver ring and store the results. Reverse the synthetic seismograms and inject them as sources at the receiver points. What happens? Do you know examples where this principle is used? ---
###Code
# This is a configuration step for the exercise. Please run it before the simulation code!
%matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Below is the 2D acoustic simulation code: Solutions:
###Code
# Simple finite difference solver
# Acoustic wave equation p_tt = c^2 p_xx + src
# 2-D regular grid
nx = 200 # grid points in x
nz = 200 # grid points in z
nt = 750 # number of time steps
dx = 10.0 # grid increment in x
dt = 0.001 # Time step
c0 = 3000.0 # velocity (can be an array)
isx = nx // 2 # source index x
isz = nz // 2 # source index z
ist = 100 # shifting of source time function
f0 = 100.0 # dominant frequency of source (Hz)
isnap = 10 # snapshot frequency
T = 1.0 / f0 # dominant period
nop = 3 # length of operator
# Model type, available are "homogeneous", "fault_zone",
# "surface_low_velocity_zone", "random", "topography",
# "slab"
model_type = "slab"
# Receiver locations
irx = np.array([60, 80, 100, 120, 140])
irz = np.array([5, 5, 5, 5, 5])
seis = np.zeros((len(irx), nt))
# Initialize pressure at different time steps and the second
# derivatives in each direction
p = np.zeros((nz, nx))
pold = np.zeros((nz, nx))
pnew = np.zeros((nz, nx))
pxx = np.zeros((nz, nx))
pzz = np.zeros((nz, nx))
# Initialize velocity model
c = np.zeros((nz, nx))
if model_type == "homogeneous":
c += c0
elif model_type == "fault_zone":
c += c0
c[:, nx // 2 - 5: nx // 2 + 5] *= 0.8
elif model_type == "surface_low_velocity_zone":
c += c0
c[1:10,:] *= 0.8
elif model_type == "random":
pert = 0.4
r = 2.0 * (np.random.rand(nz, nx) - 0.5) * pert
c += c0 * (1 + r)
elif model_type == "topography":
c += c0
c[0 : 10, 10 : 50] = 0
c[0 : 10, 105 : 115] = 0
c[0 : 30, 145 : 170] = 0
c[10 : 40, 20 : 40] = 0
c[0 : 15, 50 : 105] *= 0.8
elif model_type == "slab":
c += c0
c[110 : 125, 0 : 125] = 1.4 * c0
for i in range(110, 180):
c[i , i-5 : i + 15 ] = 1.4 * c0
else:
raise NotImplementedError
cmax = c.max()
# Source time function Gaussian, nt + 1 as we lose the last one by diff
src = np.empty(nt + 1)
for it in range(nt):
src[it] = np.exp(-1.0 / T ** 2 * ((it - ist) * dt) ** 2)
# Take the first derivative
src = np.diff(src) / dt
src[nt - 1] = 0
v = max([np.abs(src.min()), np.abs(src.max())])
# Initialize animated plot
image = plt.imshow(pnew, interpolation='nearest', animated=True,
vmin=-v, vmax=+v, cmap=plt.cm.RdBu)
# Plot the receivers
for x, z in zip(irx, irz):
plt.text(x, z, '+')
plt.text(isx, isz, 'o')
plt.colorbar()
plt.xlabel('ix')
plt.ylabel('iz')
plt.ion()
plt.show(block=False)
# required for seismograms
ir = np.arange(len(irx))
# Output Courant criterion
print("Courant Criterion eps :")
print(cmax*dt/dx)
# Time extrapolation
for it in range(nt):
if nop==3:
# calculate partial derivatives, be careful around the boundaries
for i in range(1, nx - 1):
pzz[:, i] = p[:, i + 1] - 2 * p[:, i] + p[:, i - 1]
for j in range(1, nz - 1):
pxx[j, :] = p[j - 1, :] - 2 * p[j, :] + p[j + 1, :]
if nop==5:
# calculate partial derivatives, be careful around the boundaries
for i in range(2, nx - 2):
pzz[:, i] = -1./12*p[:,i+2]+4./3*p[:,i+1]-5./2*p[:,i]+4./3*p[:,i-1]-1./12*p[:,i-2]
for j in range(2, nz - 2):
pxx[j, :] = -1./12*p[j+2,:]+4./3*p[j+1,:]-5./2*p[j,:]+4./3*p[j-1,:]-1./12*p[j-2,:]
pxx /= dx ** 2
pzz /= dx ** 2
# Time extrapolation
pnew = 2 * p - pold + dt ** 2 * c ** 2 * (pxx + pzz)
# Add source term at isx, isz
pnew[isz, isx] = pnew[isz, isx] + src[it]
# Plot every isnap-th iteration
if it % isnap == 0: # you can change the speed of the plot by increasing the plotting interval
plt.title("Max P: %.2f" % p.max())
image.set_data(pnew)
plt.gcf().canvas.draw()
pold, p = p, pnew
# Save seismograms
seis[ir, it] = p[irz[ir], irx[ir]]
###Output
_____no_output_____
###Markdown
The cell below allows you to plot the source time function, seismic velocities, and the resulting seismograms in windows inside the notebook. Remember to rerun it after you run the simulation again!
###Code
# Plot the source time function and the seismograms
plt.ioff()
plt.figure(figsize=(12, 12))
plt.subplot(221)
time = np.arange(nt) * dt
plt.plot(time, src)
plt.title('Source time function')
plt.xlabel('Time (s) ')
plt.ylabel('Source amplitude ')
plt.subplot(222)
ymax = seis.ravel().max()
for ir in range(len(seis)):
plt.plot(time, seis[ir, :] + ymax * ir)
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')
plt.subplot(223)
ymax = seis.ravel().max()
for ir in range(len(seis)):
plt.plot(time, seis[ir, :] + ymax * ir)
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')
plt.subplot(224)
# The velocity model is influenced by the Earth model above
plt.title('Velocity Model')
plt.imshow(c)
plt.xlabel('ix')
plt.ylabel('iz')
plt.colorbar()
plt.show()
###Output
_____no_output_____
|
2018/Mapping Examples/Ipyleaflet_test.ipynb
|
###Markdown
Testing of ipyleaflet. An alternative for mapping is Folium. Installed for: - python>=3.6 - primarily relying on conda install - ipyleaflet was installed via pip as conda broke with numpy exceptions - nodejs installed via conda - all other install steps were followed via the [install instructions](https://ipyleaflet.readthedocs.io/en/latest/installation.html#using-conda) - multiple conda kernels are on the development machine, so ipykernel needs to be installed too
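A rough sketch of the corresponding install commands, reconstructed from the notes above and the ipyleaflet documentation of this era; adapt them to your environment:
```python
# run from a notebook cell in the py36 conda environment
!pip install ipyleaflet                                    # conda install broke with numpy exceptions
!conda install -y -c conda-forge nodejs                    # needed for the JupyterLab extension
!jupyter nbextension enable --py --sys-prefix ipyleaflet   # classic notebook extension
!python -m ipykernel install --user --name py36            # expose this environment as a kernel
```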
###Code
import warnings
warnings.filterwarnings(action='ignore', message="numpy.dtype size changed,")
from ipyleaflet import Map, basemaps, basemap_to_tiles, Circle
import pandas as pd
import cmocean
import numpy as np
import datetime
import time
###Output
_____no_output_____
###Markdown
Setup the view. The user will need to set the following parameters: `network = True` - the base image is drawn from MODIS retrievals from the most recent day (internet must be available); `center=(lat,lon)` - (latN, lonE); `zoom=4` - level of zoom, from 1 (far) to 9 (close).
###Code
#setup basemap and view that will be updated
def set_basemap():
network=True
center=(65, 200)
zoom=4
if network:
m = Map(
layers=(basemap_to_tiles(basemaps.NASAGIBS.ModisTerraTrueColorCR,
(datetime.datetime.today()-datetime.timedelta(1)).strftime('%Y-%m-%d')), ),
center=center,
zoom=zoom
)
else:
m = Map(
center=center,
zoom=zoom
)
return m
image_date=(datetime.datetime.today()-datetime.timedelta(1))
print(f'The base image is from: {image_date:%B %d, %Y}')
###Output
The base image is from: August 21, 2018
###Markdown
Adding Data Locations Color-Coded by value. We add a circle whose outline and fill are color-coded by the value at each location. Currently the size of the circle is fixed, but it could encode another variable (see the sketch below).
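For example, the radius could be scaled by the normalized value instead of being fixed. A sketch in which the radius bounds are arbitrary assumptions:
```python
from ipyleaflet import Circle

def add_scaled_circle(lat, lon, value, color, min_radius=300, max_radius=1500):
    """Like add_circle below, but the radius (in metres) also encodes the value."""
    circle = Circle()
    circle.location = (lat, 360 + lon)
    circle.radius = int(min_radius + value * (max_radius - min_radius))
    circle.color = color
    circle.fill_color = color
    return circle
```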
###Code
def add_circle(lat, lon, value, color):
    # build a new circle marker at (lat, lon); longitudes are shifted to the 0-360 east range
    circle = Circle()
    circle.location = (lat, 360 + lon)
    circle.radius = 500
    circle.value = value  # stored for reference; the visual encoding is the color
    circle.color = color
    circle.fill_color = color
    return circle
###Output
_____no_output_____
###Markdown
Since the leaflet routines take hex values, we have to convert the cmocean scale to hex (since i like this scale)
###Code
#use cmocean colormaps
def cmocean_to_hex(cmap, pl_entries):
h = 1.0/(pl_entries-1)
pl_colorscale = []
for k in range(pl_entries):
C = list(map(np.uint8, np.array(cmap(k*h)[:3])*255))
pl_colorscale.append([k*h, '#{:02x}{:02x}{:02x}'.format(C[0],C[1],C[2])],)
return pl_colorscale
cm_thermal = cmocean_to_hex(cmocean.cm.thermal,256)
cm_val = [a for a,b in iter(cm_thermal)]
cm_hex = [b for a,b in iter(cm_thermal)]
###Output
_____no_output_____
###Markdown
Read in IWG data file to be parsed/plotted
###Code
iwg_file="C:\\Users\\pmelctd\\Documents\\2018 Downloads\\20180525_221008_IWG.clean.csv"
###Output
_____no_output_____
###Markdown
Normalize SST to min and max values specified below Downsample the input data for speed and clarityresample values are strings eg:- '30s': 30 seconds- '1t': 1 minute- '90s': 90 seconds
###Code
def load_data():
df = pd.read_csv(iwg_file,
parse_dates=['TIME'],
index_col='TIME')
#set max and min ranges to scale to
maxval=10
minval=-2
    df['norm'] = df['SST'].apply(lambda x: (x - minval) / (maxval - minval))
    # clip values outside of max/min to the ends of the normalized range
    df.loc[df['norm'] >= 1, 'norm'] = 1
    df.loc[df['norm'] < 0, 'norm'] = 0
df_downsample = df.resample('60s').median()
return df_downsample
m = set_basemap()
m
df_downsample=load_data()
for count in range(len(df_downsample)):
    circle = add_circle(df_downsample['LAT'][count],
df_downsample['LON'][count],
df_downsample['norm'][count],
color=cm_hex[np.searchsorted(cm_val, df_downsample['norm'][count], side="left")])
m.add_layer(circle)
###Output
C:\Users\pmelctd\Anaconda2\envs\py36\lib\site-packages\ipykernel_launcher.py:13: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
del sys.path[0]
C:\Users\pmelctd\Anaconda2\envs\py36\lib\site-packages\ipykernel_launcher.py:14: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
|