```
import matplotlib.pyplot as plt
import numpy as np
import scipy.special
import copy
def empty_mask(size):
return np.zeros((size,size))
def circular_mask(size):
y,x = np.mgrid[:size, :size]
M = np.zeros((size,size))
x0 = y0 = (size-1)/2
r = size/4
M[(x-x0)**2+(y-y0)**2<=r**2]=1
return M
def rectangle_mask(size):
y,x = np.mgrid[:size, :size]
M = np.zeros((size,size))
x0 = y0 = (size-1)/2
r = size/4
M[((x-x0)**2<=r**2)*((y-y0)**2<=r**2)]=1
return M
def get_plane_wave(E0,k,size):
y,x = np.mgrid[:size, :size]
a = np.pi*0/180
E = E0*np.exp(-1j*k*(x*np.cos(a)+y*np.sin(a)))
return(E)
def get_greenfun(r,k):
return (1j/4)*scipy.special.hankel1(0,k*r)
def get_green_matrix(k,size):
j,i = np.mgrid[:size, :size]
ij_block = np.sqrt((i-1/2)**2+j**2)
green_mat = get_greenfun(ij_block,k)
return green_mat
# def get_toeplitz_mat(ij_block):
# ij_block = copy.deepcopy(ij_block)
# T = np.block([[ij_block,ij_block[:,:0:-1]],
# [ij_block[:0:-1,:],ij_block[:0:-1,:0:-1]]])
# return T
def get_toeplitz_mat(ij_block):
ij_block = copy.deepcopy(ij_block)
T1 = np.hstack((ij_block,ij_block[:,:0:-1]))
T2 = np.hstack((ij_block[:0:-1,:],ij_block[:0:-1,:0:-1]))
T = np.vstack((T1,T2))
return T
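# G_matvec applies the discretized Green's-function (convolution) operator to a
# flattened size*size field without forming a dense matrix: the Green's block is
# extended to a (2*size-1)x(2*size-1) array (circulant embedding of the Toeplitz
# structure), the input is zero-padded to the same shape, and the convolution is
# evaluated as an element-wise product of 2-D FFTs, then cropped back to size x size.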
def G_matvec(vec,k):
size = int(np.sqrt(vec.shape[0]))
G_block = get_green_matrix(k,size)
G = get_toeplitz_mat(G_block)
mat = np.zeros((2*size-1,2*size-1),dtype = np.complex64)
mat_block = vec.reshape((-1,size))
mat[:size,:size] = mat_block
out_mat = np.fft.ifft2(np.fft.fft2(G)*np.fft.fft2(mat))
out = out_mat[:size,:size].reshape((-1,1))
return out
def get_eps_from_mask(e,mask):
return (e-1)*mask.reshape((-1,1))+1
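# matvec applies the scattering operator x -> x - G(chi*x) with chi = k^2*(eps - 1),
# i.e. a discretized Lippmann-Schwinger-type volume integral equation; GMRES is
# used further below to solve matvec(x) = incident field for the total field x.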
def matvec(x,eps,k):
x = x.reshape((-1,1))
#print(x)
size = x.shape[0]
chi = k**2*(eps - 1)
return x-G_matvec(x*chi,k)
def old_matvec(x,mask,k,e):
eps = get_eps_from_mask(e,mask)
return matvec(x,eps,k)
def visualize(data,title = "",cmap='jet',):
plt.title(title)
neg = plt.imshow(data, cmap=cmap, interpolation='none')
plt.colorbar(neg)
plt.show()
def solve(E,eps0,eps1):
return E
size = 16
e =1.5# 2.25
k = 2*np.pi/(size/1)
F = get_plane_wave(1,k,size)
#mask = empty_mask(size)
#mask = rectangle_mask(size)
mask = circular_mask(size)
eps = get_eps_from_mask(e,mask)
visualize(F.real,"Initial field (real part)")
visualize(mask,"Mask","gray")
import scipy.sparse.linalg as spla
import inspect
import time
x_last = get_plane_wave(1,k,size).reshape(-1,1)
def plot__solution_re_im_abs_mask(solution, size):
solution_re = solution.real.reshape(-1,size)
solution_im = solution.imag.reshape(-1,size)
solution_abs = np.abs(solution).reshape(-1,size)
solution_abs_mask = np.abs(solution).reshape(-1,size)*(1-mask)
visualize(solution_re,"Real")
visualize(solution_im,"Imag")
visualize(solution_abs,"Abs","gray")
visualize(solution_abs_mask,"Abs with mask")
return solution_re, solution_im, solution_abs, solution_abs_mask
def plot_relative_residuals_norms(t, residuals, relative_vector):
plt.semilogy(t, residuals/np.linalg.norm(relative_vector), 'x-', label="Generalized Minimal RESidual iterations")
plt.legend()
plt.title('Relative residual (depends on time), number of iterations = %i' % len(residuals))
plt.xlabel('Seconds')
plt.ylabel('Relative residual norm')
plt.show()
plt.semilogy(np.arange(len(residuals), 0, -1), residuals/np.linalg.norm(relative_vector), label="Generalized Minimal RESidual iterations")
plt.legend()
plt.title('Relative residual (depends on number of step), number of iterations = %i' % len(residuals))
plt.xlabel('Number of step')
plt.ylabel('Relative residual norm')
plt.show()
def gmres_solver(A, b, x0, maxiter, tol,
draw_graph_flag = False,
convergence_info = False,
display_convergence_info = False,
display_achieved_tolerance = False):
gmres_residuals_with_t = []
t0 = time.time()
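# The callback below reaches into SciPy's GMRES frame via inspect to read the
# current residual norm ('resid') and records (residual, wall-clock time) pairs.
# Note: this relies on a private local variable and may break in other SciPy versions.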
solution, info = spla.gmres(A, b, x0=x0, maxiter = maxiter, tol = tol, restart = maxiter, callback = lambda x:
gmres_residuals_with_t.append([(inspect.currentframe().f_back).f_locals['resid'], time.time()])
)
if len(gmres_residuals_with_t)>1:
gmres_residuals_with_t = np.array(gmres_residuals_with_t).T
gmres_residuals_with_t[1] = gmres_residuals_with_t[1]-t0
gmres_t, gmres_residuals = gmres_residuals_with_t
else:
gmres_t, gmres_residuals = [],[]
if (display_convergence_info == True):
if (info == 0):
print("Status: Converged, successful exit")
else:
if (info > 0):
print("Status: Convergence to tolerance not achieved, number of iterations")
else:
print("Status: Illegal input or breakdown")
if ( draw_graph_flag == True ):
plot_relative_residuals_norms(gmres_t, gmres_residuals, b)
if ( display_achieved_tolerance == True):
print('Achieved tolerance = ', np.linalg.norm(A.dot(solution.reshape(-1,1))-b)/np.linalg.norm(b))
if (convergence_info == True):
return solution, info
return solution
def launch_solver(eps, k, x0 = None ,maxiter=300, tol = 1e-6):
global x_last
size = int(np.sqrt(eps.shape[0]))
A = spla.LinearOperator(shape = (size**2, size**2), matvec = lambda x: matvec(x,eps,k))
b = get_plane_wave(1,k,size).reshape(-1,1)
if x0 is None:
x0 = x_last
solution, info = gmres_solver(A, b, x0,
maxiter=maxiter,
tol=tol,
convergence_info = True)
x_last = solution.reshape(-1,1)
return solution, info
def show_residuals(eps, k, maxiter=300, tol = 1e-6):
size = int(np.sqrt(eps.shape[0]))
A = spla.LinearOperator(shape = (size**2, size**2), matvec = lambda x: matvec(x,eps,k))
b = get_plane_wave(1,k,size).reshape(-1,1)
x0 = np.ones(size**2).reshape(-1,1)
gmres_solver(A, b, x0,
maxiter=maxiter,
tol=tol,
draw_graph_flag = True)
t = time.time()
solution, info = launch_solver(eps=eps, k=k)
print(t-time.time())
show_residuals(eps=eps, k=k)
solution_re, solution_im, solution_abs, solution_abs_mask = plot__solution_re_im_abs_mask(solution, size)
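# choose_direction builds a sparse complex vector supported only on the entries
# where |x| attains its maximum; get_grad below then perturbs the solution along
# those entries one at a time and re-solves to estimate the corresponding sensitivities.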
def choose_direction(eps, k, maxiter=300, tol=1e-6, x=None):
if x is None:
x, info = launch_solver(eps=eps, k=k, maxiter=maxiter, tol=tol)
x_abs = np.abs(x)
x_max = np.max(x_abs)
indeces = np.argwhere( x_abs == x_max )
choose_direction = np.zeros(x.shape[0], dtype = np.complex64)
choose_direction[indeces] = (np.sign(x.real)/2+1j*np.sign(x.imag)/2)[indeces]/indeces.shape[0]
return choose_direction
def get_Jacobi_diagonal(mask, e, k, eps = None, x0 = None , maxiter=300, tol = 1e-6):
if eps is None:
eps = get_eps_from_mask(e,mask)
solution, info = launch_solver(eps=eps, x0=x0, k=k, maxiter=maxiter, tol = tol)
solution_with_coeff = k**2*(e-1)*solution
zero_vector = np.zeros(solution_with_coeff.shape[0], dtype = np.complex64)
Jacobi_diagonal = np.zeros(solution.shape[0], dtype = np.complex64 )
for i in range(solution.shape[0]):
solution_sparse_column = zero_vector.copy()
solution_sparse_column[i] = solution_with_coeff[i]
A = spla.LinearOperator(shape = (size**2, size**2), matvec = lambda x: matvec(x,eps,k))
b = G_matvec(solution_sparse_column, k)
Jacobi_diagonal[i] = gmres_solver(A=A, b=b, x0=solution, maxiter=maxiter, tol=tol)[i]
return Jacobi_diagonal
def get_grad(mask, e=e, k=k, x = None, eps = None, x0 = None , maxiter=300, tol = 1e-6):
if eps is None:
eps = get_eps_from_mask(e,mask)
solution, info = launch_solver(eps=eps, x0=x0, k=k, maxiter=maxiter, tol = tol)
direction = choose_direction(eps=eps, k=k, maxiter=maxiter, tol=tol, x=solution)
solution_with_coeff = k**2*(e-1)*solution
zero_vector = np.zeros(solution_with_coeff.shape[0], dtype = np.complex64)
Jacobi_diagonal = np.zeros(solution.shape[0], dtype = np.complex64 )
for i in np.argwhere(direction!=0):
solution_sparse_column = zero_vector.copy()
solution_sparse_column[i] = solution_with_coeff[i]
A = spla.LinearOperator(shape = (size**2, size**2), matvec = lambda x: matvec(x,eps,k))
b = G_matvec(solution_sparse_column, k)
Jacobi_diagonal[i] = gmres_solver(A=A, b=b, x0=solution, maxiter=maxiter, tol=tol)[i]
return np.abs(Jacobi_diagonal)
print(get_grad(mask, e, k, maxiter=300, tol = 1e-6))
from scipy.optimize import minimize
def plot_solution(y):
mask = get_fild_value(y,20)
print(np.min(mask))
print(np.max(mask))
eps = get_eps_from_mask(e,mask).reshape((-1,1))
print(np.min(eps))
print(np.max(eps))
field, info = launch_solver(eps=eps, k=k)
visualize(mask,"Mask","gray")
#visualize(field.real.reshape(-1,size),"Field (Real part)")
visualize(np.abs(field).reshape(-1,size),"Field (Abs)")
print(objective(y))
print(np.max(np.abs(field)))
i=0
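# get_fild_value ("field value") smoothly maps the unconstrained optimization
# variable y into (0, 1) via a scaled tanh; larger p pushes the result towards a
# nearly binary 0/1 mask.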
def get_fild_value(y,p):
x = (np.tanh(p*y)+1)/2
return x
def callback(x):
global i
i+=1
print(i)
def penalty(x,p):
return np.sum(1-x**p-(1-x)**p)
#return np.sum(x*(1-x))
#obj = 0
def objective(y):
mask = get_fild_value(y,4)
eps = get_eps_from_mask(e,mask).reshape((-1,1))
field, info = launch_solver(eps=eps, k=k)
#global obj
mask = get_fild_value(y,20)
eps = get_eps_from_mask(e,mask).reshape((-1,1))
field, info = launch_solver(eps=eps, k=k)
if info !=0:
raise RuntimeError()
obj = -np.max(np.abs(field))#+penalty(mask,20)*1
#print(obj)
return obj
# x_empty_ind = np.argwhere((-0.1<mask)*(mask<0.1))
# x_empty = x[x_empty_ind]
# x_empty = x
# if info != 0:
# raise RuntimeError()
# if x_empty.shape[0]!=0:
# #print(np.max(x_empty.imag))
# obj = -np.max(np.abs(x_empty))+penalty(mask,20)*0.001
# else:
# obj = penalty(mask,20)*0.001
# #print(obj)
# return obj
def get_random_mask(size):
mask = np.random.rand(size,size)
return mask
# def search_with_restarts(num):
#y = np.random.random(size,size)
# mask =circular_mask(size)
noize = (get_random_mask(size)-0.5)*10
# mask = (mask + noize)/np.max(noize+0.001)
y = circular_mask(size)-0.5+noize
obj0 = objective(y)
mask = get_fild_value(y,20)
plot_solution(y)
#bns = tuple((0,1) for _ in range(size**2))
sol = minimize(objective,y,method = "BFGS",options={'maxiter': 10, 'gtol':1e-9}, callback = callback)
best_y = sol.x.reshape(-1,size)
plot_solution(best_y)
print(obj0)
# import cvxpy as cvx
# size = 2
# k = 2*np.pi/(size/7)
# F = get_plane_wave(1,k,size)
# x = cvx.Variable(size**2)
# eps = cvx.Variable(size**2)
# y = cvx.Variable(1)
# # lambda val: matvec2(val,eps,k,e
# obj = cvx.Maximize(y)
# #A = spla.LinearOperator(shape = (size**2, size**2), matvec = lambda val: val)
# #print(A.dot([1,1,0,0]))
# costrs = [x>F.reshape(-1,1),y>=x]
# prob = cvx.Problem(obj,costrs)
# prob.solve()
# print(prob.value)
```
| github_jupyter |
# DASHBOARD LINK
https://public.tableau.com/profile/altaf.lakhi2442#!/vizhome/UnbankedExploration/Dashboard1
```
import pandas as pd
import seaborn as sns
CPS_df = pd.read_csv("../data/processed/CPS_2009_2017_clean.csv")
ACS_df = pd.read_csv("../data/processed/ACS_2011_2017_clean.csv")
NFCS_df = pd.read_csv("../data/processed/NFCS_2009_2018_clean.csv")
frames = [CPS_df, ACS_df, NFCS_df]
#declaring STATE list
STATES = ["Alabama","Alaska","Arizona","Arkansas","California","Colorado",
"Connecticut","Delaware","District of Columbia", "Florida","Georgia","Hawaii",
"Idaho","Illinois", "Indiana","Iowa","Kansas","Kentucky","Louisiana","Maine",
"Maryland","Massachusetts","Michigan","Minnesota","Mississippi","Missouri","Montana",
"Nebraska","Nevada","New Hampshire","New Jersey","New Mexico","New York",
"North Carolina","North Dakota","Ohio","Oklahoma","Oregon","Pennsylvania",
"Rhode Island","South Carolina","South Dakota","Tennessee","Texas","Utah",
"Vermont","Virginia","Washington","West Virginia","Wisconsin","Wyoming"]
#generating state:state_number dictionary
STATE_FIPS = list(frames[0].STATEFIP.unique())
STATE = {}
for state, name in zip(STATE_FIPS, STATES):
STATE[state] = name
#generating STATE column for pertinent dfs
CPS_df["STATE"] = CPS_df.STATEFIP.map(STATE)
ACS_df["STATE"] = ACS_df.STATEFIP.map(STATE)
counties = pd.read_csv("../data/external/county_fips_master.csv", engine='python')
```
# Aggregating CPS Data
```
pop_prop = pd.read_csv("../data/interim/population_proportions")
pop_prop.head()
pop_prop = pop_prop[["YEAR", "BUNBANKED", "STATEFIP"]]
pop_prop
state_year_agg = []
for year in pop_prop.YEAR.unique():
holder = pop_prop[pop_prop.YEAR == year]
state_year_agg.append(holder)
#national_agg_sums = [pop_prop[pop_prop.STATEFIP == state].BUNBANKED.sum() for state in pop_prop.STATEFIP.unique()]
#print(f"{year}")
#display(holder)
state_survey_pop_agg = pd.concat(state_year_agg)
state_survey_pop_agg["STATE"] = state_survey_pop_agg.STATEFIP.map(STATE)
state_survey_pop_agg
state_survey_pop_agg.rename(columns = {"BUNBANKED": "SURVEY_POP"}, inplace = True)
state_survey_pop_agg
CPS_agg = pd.DataFrame()
CPS_agg["STATE"] = CPS_df.STATE
CPS_agg["UNDERBANKED"] = CPS_df.BUNBANKED
CPS_agg["YEAR"] = CPS_df.YEAR
#copying aggregation before grouping for additional breakdowns
CPS_reason_agg = CPS_agg.copy(deep=True)
CPS_agg = CPS_agg.groupby(["YEAR", "STATE"]).count()
CPS_agg = CPS_agg.reset_index()
CPS_agg
state_survey_pop_agg = state_survey_pop_agg[state_survey_pop_agg.YEAR.isin(CPS_agg.YEAR.unique())].reset_index()
state_survey_pop_agg
CPS_agg["SURVEY_POP"] = state_survey_pop_agg.SURVEY_POP
CPS_agg
CPS_agg.to_csv("../data/processed/Dashboard_Data/CPS_STATE_Aggregate.csv")
#Isolating the Pacific Northwest states
PNW = ["Washington", "Oregon", "Wyoming", "Montana", "Idaho"]
PNW_CPS_agg = CPS_agg[CPS_agg.STATE.isin(PNW)]
PNW_CPS_agg
PNW_CPS_agg.to_csv("../data/processed/Dashboard_Data/CPS_PNW_STATE_Aggregate.csv")
```
----------------------------------------------------------------------------------------------
# Aggregating ACS Data
```
#ACS_df = pd.read_csv("../data/processed/ACS_2011_2017_clean")
#ACS_df["STATE"] = ACS_df.STATEFIP.map(STATE)
ACS_df.head()
ACS_df.HHWT
ACS_df = ACS_df.drop(columns = ['Unnamed: 0'])
filtering_columns = ACS_df.columns
filtering_columns = filtering_columns.drop(["STATE", "YEAR", "SAMPLE", "REGION", 'STATEFIP'])
filtering_columns
pivot_df = ACS_df.copy(deep=True)
#using filter to generate multiple pivot tables for data visualization
for _filter in filtering_columns:
pivot_df[f"{_filter}_COUNTS"] = pivot_df[_filter]
pivot_df_final = pivot_df[["YEAR", "REGION", "STATE", _filter, f"{_filter}_COUNTS"]].groupby(["YEAR", "REGION", "STATE", _filter]).count()
#display(pivot_df[["YEAR", "REGION", "STATE", _filter, f"{_filter}_COUNTS"]].groupby(["YEAR", "REGION", "STATE", _filter]).count())
#display(pivot_df_final)
pivot_df_final.to_csv(f"../data/processed/Dashboard_Data/{_filter}_ACS_AGG.csv")
ACS_df.groupby(["YEAR", "REGION", "STATE", "CINETHH"]).count()#.value_counts()
ACS_df.columns
```
* HHINCOME = Household Income
* MARST = Marital Status
* OCC2010 = Occupation
* CINETHH = Access to Internet
* CILAPTOP = Laptop, desktop, or notebook computer
* CISMRTPHN = Smartphone
* CITABLET = Tablet or other portable wireless computer
* CIHAND = Handheld Computer
* CIHISPEED = Broadband (high speed) Internet service such as cable, fiber optic, or DSL service
* CISAT = Satellite internet service
* CIDIAL = Dial-up Service
* CIOTHSVC = Other Internet Service
```
ACS_agg = pd.DataFrame()
ACS_agg["STATE"] = ACS_df.STATE
ACS_agg["OCC2010"] = ACS_df.OCC2010
ACS_agg["CINETHH"] = ACS_df.CINETHH
ACS_agg["CILAPTOP"] = ACS_df.CILAPTOP
ACS_agg["CISMRTPHN"] = ACS_df.CISMRTPHN
ACS_agg["CITABLET"] = ACS_df.CITABLET
ACS_agg["CIHAND"] = ACS_df.CIHAND
ACS_agg["CIHISPEED"] = ACS_df.CIHISPEED
ACS_agg["CISAT"] = ACS_df.CISAT
ACS_agg["CIDIAL"] = ACS_df.CIDIAL
ACS_agg["CIOTHSVC"] = ACS_df.CIOTHSVC
ACS_agg["YEAR"] = ACS_df.YEAR
ACS_agg = ACS_agg.groupby(["STATE", "YEAR"]).count()
ACS_agg = ACS_agg.reset_index()
ACS_agg
ACS_agg.to_csv("../data/processed/Dashboard_Data/ACS_STATE_Aggregate.csv")
```
----------------------------------------------------------------------------------------------
# Aggregating NFCS
```
NFCS_df.head()
NFCS_df.drop("Unnamed: 0", axis=1,inplace=True)
#declaring STATE list
STATES = ["Alabama","Alaska","Arizona","Arkansas","California","Colorado",
"Connecticut","Delaware","District of Columbia", "Florida","Georgia","Hawaii",
"Idaho","Illinois", "Indiana","Iowa","Kansas","Kentucky","Louisiana","Maine",
"Maryland","Massachusetts","Michigan","Minnesota","Mississippi","Missouri","Montana",
"Nebraska","Nevada","New Hampshire","New Jersey","New Mexico","New York",
"North Carolina","North Dakota","Ohio","Oklahoma","Oregon","Pennsylvania",
"Rhode Island","South Carolina","South Dakota","Tennessee","Texas","Utah",
"Vermont","Virginia","Washington","West Virginia","Wisconsin","Wyoming"]
#generating state:state_number dictionary
STATE_NFCS = list(NFCS_df.STATE.unique())
STATE_NFCS.sort()
STATE = {}
for state, name in zip(STATE_NFCS, STATES):
STATE[state] = name
NFCS_df.STATE = NFCS_df.STATE.map(STATE)
NFCS_df.STATE
NFCS_agg = NFCS_df.groupby(["STATE", "YEAR"]).count()
NFCS_agg
factors = list(NFCS_df.columns)
factors.remove("STATE")
factors.remove("YEAR")
#using filter to generate multiple pivot tables for data visualization
pivot_df = NFCS_df.copy(deep=True)
for factor in factors:
pivot_df[f"{factor}_COUNTS"] = pivot_df[factor]
pivot_df_final = pivot_df[["YEAR", "STATE", factor, f"{factor}_COUNTS"]].groupby(["YEAR", "STATE", factor]).count()
#display(pivot_df[["YEAR", "REGION", "STATE", factor, f"{factor}_COUNTS"]].groupby(["YEAR", "REGION", "STATE", factor]).count())
display(pivot_df_final)
pivot_df_final.to_csv(f"../data/processed/Dashboard_Data/{factor}_NFCS_AGG.csv")
NFCS_agg.to_csv("../data/processed/Dashboard_Data/NFCS_STATE_Aggregate.csv")
```
| github_jupyter |
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

# Configuration
_**Setting up your Azure Machine Learning services workspace and configuring your notebook library**_
---
---
## Table of Contents
1. [Introduction](#Introduction)
1. What is an Azure Machine Learning workspace
1. [Setup](#Setup)
1. Azure subscription
1. Azure ML SDK and other library installation
1. Azure Container Instance registration
1. [Configure your Azure ML Workspace](#Configure%20your%20Azure%20ML%20workspace)
1. Workspace parameters
1. Access your workspace
1. Create a new workspace
1. Create compute resources
1. [Next steps](#Next%20steps)
---
## Introduction
This notebook configures your library of notebooks to connect to an Azure Machine Learning (ML) workspace. In this case, a library contains all of the notebooks in the current folder and any nested folders. You can configure this notebook library to use an existing workspace or create a new workspace.
Typically you will need to run this notebook only once per notebook library as all other notebooks will use connection information that is written here. If you want to redirect your notebook library to work with a different workspace, then you should re-run this notebook.
In this notebook you will
* Learn about getting an Azure subscription
* Specify your workspace parameters
* Access or create your workspace
* Add a default compute cluster for your workspace
### What is an Azure Machine Learning workspace
An Azure ML Workspace is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an Azure ML Workspace coordinates storage, databases, and compute resources providing added functionality for machine learning experimentation, deployment, inference, and the monitoring of deployed models.
## Setup
This section describes activities required before you can access any Azure ML services functionality.
### 1. Azure Subscription
In order to create an Azure ML Workspace, first you need access to an Azure subscription. An Azure subscription allows you to manage storage, compute, and other assets in the Azure cloud. You can [create a new subscription](https://azure.microsoft.com/en-us/free/) or access existing subscription information from the [Azure portal](https://portal.azure.com). Later in this notebook you will need information such as your subscription ID in order to create and access AML workspaces.
### 2. Azure ML SDK and other library installation
If you are running in your own environment, follow [SDK installation instructions](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-environment). If you are running in Azure Notebooks or another Microsoft managed environment, the SDK is already installed.
Also install the following libraries in your environment. Many of the example notebooks depend on them:
```
(myenv) $ conda install -y matplotlib tqdm scikit-learn
```
Once installation is complete, the following cell checks the Azure ML SDK version:
```
import azureml.core
print("This notebook was created using version 1.0.74.1 of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
```
## Configure your Azure ML workspace
### Workspace parameters
To use an AML Workspace, you will need to import the Azure ML SDK and supply the following information:
* Your subscription id
* A resource group name
* (optional) The region that will host your workspace
* A name for your workspace
You can get your subscription ID from the [Azure portal](https://portal.azure.com).
You will also need access to a [_resource group_](https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-overview#resource-groups), which organizes Azure resources and provides a default region for the resources in a group. You can see which resource groups you have access to, or create a new one, in the [Azure portal](https://portal.azure.com). If you don't have a resource group, the create workspace command will create one for you using the name you provide.
The region to host your workspace will be used if you are creating a new workspace. You do not need to specify this if you are using an existing workspace. You can find the list of supported regions [here](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=machine-learning-service). You should pick a region that is close to your location or that contains your data.
The name for your workspace is unique within the subscription and should be descriptive enough to discern among other AML Workspaces. The subscription may be used only by you, or it may be used by your department or your entire enterprise, so choose a name that makes sense for your situation.
The following cell allows you to specify your workspace parameters. This cell uses the python method `os.getenv` to read values from environment variables which is useful for automation. If no environment variable exists, the parameters will be set to the specified default values.
If you ran the Azure Machine Learning [quickstart](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-get-started) in Azure Notebooks, you already have a configured workspace! You can go to your Azure Machine Learning Getting Started library, view *config.json* file, and copy-paste the values for subscription ID, resource group and workspace name below.
Replace the default values in the cell below with your workspace parameters
```
import os
subscription_id = os.getenv("SUBSCRIPTION_ID", default="<my-subscription-id>")
resource_group = os.getenv("RESOURCE_GROUP", default="<my-resource-group>")
workspace_name = os.getenv("WORKSPACE_NAME", default="<my-workspace-name>")
workspace_region = os.getenv("WORKSPACE_REGION", default="eastus2")
```
### Access your workspace
The following cell uses the Azure ML SDK to attempt to load the workspace specified by your parameters. If this cell succeeds, your notebook library will be configured to access the workspace from all notebooks using the `Workspace.from_config()` method. The cell can fail if the specified workspace doesn't exist or you don't have permissions to access it.
```
from azureml.core import Workspace
try:
ws = Workspace(subscription_id = subscription_id, resource_group = resource_group, workspace_name = workspace_name)
# write the details of the workspace to a configuration file to the notebook library
ws.write_config()
print("Workspace configuration succeeded. Skip the workspace creation steps below")
except:
print("Workspace not accessible. Change your parameters or create a new workspace below")
```
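Once `ws.write_config()` succeeds, other notebooks in this library can load the same workspace from the saved configuration file. A minimal sketch of that pattern (assuming the configuration file was written to the default location):
```
from azureml.core import Workspace

# Reads subscription id, resource group and workspace name from the config file
# written by ws.write_config() above
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, sep='\n')
```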
| github_jupyter |
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# Challenge Notebook
## Problem: Given an array of (unix_timestamp, num_people, EventType.ENTER or EventType.EXIT), find the busiest period.
* [Constraints](#Constraints)
* [Test Cases](#Test-Cases)
* [Algorithm](#Algorithm)
* [Code](#Code)
* [Unit Test](#Unit-Test)
* [Solution Notebook](#Solution-Notebook)
## Constraints
* Can we assume the input array is valid?
* Check for None
* Can we assume the elements of the input array are valid?
* Yes
* Is the input sorted by time?
* No
* Can you have enter and exit elements for the same timestamp?
* Yes you can, order of enter and exit is not guaranteed
* Could we have multiple enter events (or multiple exit events) for the same timestamp?
* No
* What is the format of the output?
* An array of timestamps [t1, t2]
* Can we assume the starting number of people is zero?
* Yes
* Can we assume the inputs are valid?
* No
* Can we assume this fits memory?
* Yes
## Test Cases
* None -> TypeError
* [] -> None
* General case
<pre>
timestamp num_people event_type
1 2 EventType.ENTER
3 1 EventType.ENTER
3 2 EventType.EXIT
7 3 EventType.ENTER
8 2 EventType.EXIT
9 2 EventType.EXIT
result = Period(7, 8)
</pre>
## Algorithm
Refer to the [Solution Notebook](). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
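If you want to see the general shape of one possible approach before opening the solution notebook: sort the events by timestamp, keep a running head count, and track the interval over which the count is largest. The sketch below is only one way to do it (it assumes the `Data`, `Period`, and `EventType` classes defined in the Code cell that follows) and is not necessarily the solution notebook's implementation.
```
def find_busiest_period_sketch(data):
    if data is None:
        raise TypeError('data cannot be None')
    if not data:
        return None
    data = sorted(data)  # Data defines __lt__ on timestamp
    count, max_count, busiest = 0, 0, None
    for i, event in enumerate(data):
        if event.event_type == EventType.ENTER:
            count += event.num_people
        else:
            count -= event.num_people
        # Only compare after all events sharing this timestamp have been applied
        last = (i == len(data) - 1)
        if last or data[i + 1].timestamp != event.timestamp:
            if not last and count > max_count:
                max_count = count
                busiest = Period(event.timestamp, data[i + 1].timestamp)
    return busiest
```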
## Code
```
from enum import Enum
class Data(object):
def __init__(self, timestamp, num_people, event_type):
self.timestamp = timestamp
self.num_people = num_people
self.event_type = event_type
def __lt__(self, other):
return self.timestamp < other.timestamp
class Period(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __eq__(self, other):
return self.start == other.start and self.end == other.end
def __repr__(self):
return str(self.start) + ', ' + str(self.end)
class EventType(Enum):
ENTER = 0
EXIT = 1
class Solution(object):
def find_busiest_period(self, data):
# TODO: Implement me
pass
```
## Unit Test
**The following unit test is expected to fail until you solve the challenge.**
```
# %load test_find_busiest_period.py
import unittest
class TestSolution(unittest.TestCase):
def test_find_busiest_period(self):
solution = Solution()
self.assertRaises(TypeError, solution.find_busiest_period, None)
self.assertEqual(solution.find_busiest_period([]), None)
data = [
Data(3, 2, EventType.EXIT),
Data(1, 2, EventType.ENTER),
Data(3, 1, EventType.ENTER),
Data(7, 3, EventType.ENTER),
Data(9, 2, EventType.EXIT),
Data(8, 2, EventType.EXIT),
]
self.assertEqual(solution.find_busiest_period(data), Period(7, 8))
print('Success: test_find_busiest_period')
def main():
test = TestSolution()
test.test_find_busiest_period()
if __name__ == '__main__':
main()
```
## Solution Notebook
Review the [Solution Notebook]() for a discussion on algorithms and code solutions.
| github_jupyter |
<a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a>
$ \newcommand{\bra}[1]{\langle #1|} $
$ \newcommand{\ket}[1]{|#1\rangle} $
$ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
$ \newcommand{\dot}[2]{ #1 \cdot #2} $
$ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
$ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
$ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
$ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
$ \newcommand{\mypar}[1]{\left( #1 \right)} $
$ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
$ \newcommand{\onehalf}{\frac{1}{2}} $
$ \newcommand{\donehalf}{\dfrac{1}{2}} $
$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
$ \newcommand{\vzero}{\myvector{1\\0}} $
$ \newcommand{\vone}{\myvector{0\\1}} $
$ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $
$ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
$ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
$ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $
$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
$ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
$ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $
$ \newcommand{\greenbit}[1] {\mathbf{{\color{green}#1}}} $
$ \newcommand{\bluebit}[1] {\mathbf{{\color{blue}#1}}} $
$ \newcommand{\redbit}[1] {\mathbf{{\color{red}#1}}} $
$ \newcommand{\brownbit}[1] {\mathbf{{\color{brown}#1}}} $
$ \newcommand{\blackbit}[1] {\mathbf{{\color{black}#1}}} $
<font style="font-size:28px;" align="left"><b>Probabilistic States </b></font>
<br>
_prepared by Abuzer Yakaryilmaz_
<br><br>
[<img src="../qworld/images/watch_lecture.jpg" align="left">](https://youtu.be/tJjrF7WgT1g)
<br><br><br>
Suppose that Asja tosses a fair coin secretly.
As we do not see the result, our information about the outcome will be probabilistic:
$\rightarrow$ The outcome will be heads with probability $0.5$ and tails with probability $0.5$.
If the coin has a bias $ \dfrac{Pr(Head)}{Pr(Tail)} = \dfrac{3}{1}$, then our information about the outcome will be as follows:
$\rightarrow$ The outcome will be heads with probability $ 0.75 $ and the outcome will be tails with probability $ 0.25 $.
<i><u>Explanation</u>: The probability of getting heads is three times the probability of getting tails.
<ul>
<li>The total probability is 1. </li>
<li> We divide the whole probability 1 into four parts (three parts for heads and one part for tails), </li>
<li> each part is $ \dfrac{1}{4} = 0.25$,</li>
<li> and then give three parts to heads ($0.75$) and one part to tails ($0.25$).</li>
</ul></i>
<h3> Listing probabilities as a column </h3>
We have two different outcomes: heads (0) and tails (1).
We use a column of size 2 to show the probabilities of getting heads and getting tails.
For the fair coin, our information after the coin-flip will be $ \myvector{0.5 \\ 0.5} $.
For the biased coin, it will be $ \myvector{0.75 \\ 0.25} $.
The first entry shows the probability of getting heads, and the second entry shows the probability of getting tails.
$ \myvector{0.5 \\ 0.5} $ and $ \myvector{0.75 \\ 0.25} $ are two examples of 2-dimensional (column) vectors.
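As a quick numerical check (a small sketch; NumPy is used only for convenience here):
```
import numpy as np

fair_coin = np.array([[0.5], [0.5]])          # [Pr(heads), Pr(tails)] as a column vector
biased_coin = np.array([[3.0], [1.0]]) / 4    # bias 3:1 normalized to [0.75, 0.25]

print(biased_coin.flatten())                  # [0.75 0.25]
print(fair_coin.sum(), biased_coin.sum())     # both columns sum to 1
```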
<h3> Task 1 </h3>
Suppose that Balvis secretly flips a coin having the bias $ \dfrac{Pr(Heads)}{Pr(Tails)} = \dfrac{1}{4}$.
Represent your information about the outcome as a column vector.
<h3> Task 2 </h3>
Suppose that Fyodor secretly rolls a loaded (tricky) dice with the bias
$$ Pr(1):Pr(2):Pr(3):Pr(4):Pr(5):Pr(6) = 7:5:4:2:6:1 . $$
Represent your information about the result as a column vector. Remark that the size of your column vector should be 6.
You may use python for your calculations.
```
#
# your code is here
#
```
<a href="CS16_Probabilistic_States_Solutions.ipynb#task2">click for our solution</a>
<h3> Vector representation </h3>
Suppose that we have a system with 4 distinguishable states: $ s_1 $, $s_2 $, $s_3$, and $s_4$.
We expect the system to be in one of them at any moment.
In terms of probabilities, we say that the system is in one of these states with probability 1, and in any other state with probability 0.
By using our column representation, we can show each state as a column vector (by using the vectors in standard basis of $ \mathbb{R}^4 $):
$
e_1 = \myvector{1\\ 0 \\ 0 \\ 0}, e_2 = \myvector{0 \\ 1 \\ 0 \\ 0}, e_3 = \myvector{0 \\ 0 \\ 1 \\ 0},
\mbox{ and } e_4 = \myvector{0 \\ 0 \\ 0 \\ 1}.
$
This representation helps us to represent our information on a system when it is in more than one state with certain probabilities.
Remember the case in which the coins are tossed secretly.
For example, suppose that the system is in states $ s_1 $, $ s_2 $, $ s_3 $, and $ s_4 $ with probabilities $ 0.20 $, $ 0.25 $, $ 0.40 $, and $ 0.15 $, respectively.
(<i>The total probability must be 1, i.e., $ 0.20+0.25+0.40+0.15 = 1.00 $</i>)
Then, we can say that the system is in the following probabilistic state:
$ 0.20 \cdot e_1 + 0.25 \cdot e_2 + 0.40 \cdot e_3 + 0.15 \cdot e_4 $
$ = 0.20 \cdot \myvector{1\\ 0 \\ 0 \\ 0} + 0.25 \cdot \myvector{0\\ 1 \\ 0 \\ 0} + 0.40 \cdot \myvector{0\\ 0 \\ 1 \\ 0} + 0.15 \cdot \myvector{0\\ 0 \\ 0 \\ 1} $
$ = \myvector{0.20\\ 0 \\ 0 \\ 0} + \myvector{0\\ 0.25 \\ 0 \\ 0} + \myvector{0\\ 0 \\0.40 \\ 0} + \myvector{0\\ 0 \\ 0 \\ 0.15 } = \myvector{ 0.20 \\ 0.25 \\ 0.40 \\ 0.15 }, $
where the summation of entries must be 1.
<h3> Probabilistic state </h3>
A probabilistic state is a linear combination of the vectors in the standard basis.
Here coefficients (scalars) must satisfy certain properties:
<ol>
<li> Each coefficient is non-negative </li>
<li> The summation of coefficients is 1 </li>
</ol>
Alternatively, we can say that a probabilistic state is a probability distribution over deterministic states.
We can show all of this information as a single mathematical object, which is called a stochastic vector.
<i> Remark that the state of any linear system is a linear combination of the vectors in the basis. </i>
<h3> Task 3 </h3>
For a system with 4 states, randomly create a probabilistic state, and print its entries, e.g., $ 0.16~~0.17~~0.02~~0.65 $.
<i>Hint: You may pick your random numbers between 0 and 100 (or 1000), and then normalize each value by dividing the summation of all numbers.</i>
```
#
# your solution is here
#
```
<a href="CS16_Probabilistic_States_Solutions.ipynb#task3">click for our solution</a>
<h3> Task 4 [extra] </h3>
As given in the hint for Task 3, you may pick your random numbers between 0 and $ 10^k $. For better precision, you may take bigger values of $ k $.
Write a function that randomly creates a probabilistic state of size $ n $ with a precision up to $ k $ digits.
Test your function.
```
#
# your solution is here
#
```
| github_jupyter |
# Sentiment Analysis: Data Gathering 1 (Vader)
The original sentiments of the domain dataset are unclean, especially for the neutral sentiment. Instead of manually going through and correcting sentiments by hand, certain techniques are employed to assist this process. This notebook implements the first data annotation pipeline for the sentiment analysis task, which utilizes NLTK's VADER sentiment classifier in order to quickly get a different baseline sentiment to compare with the original. This process has been performed iteratively by manually inspecting the results and modifying VADER's internal lexicon, which contains pre-defined weights towards certain sentiments.
Data used here are texts that have been cleaned of stopwords (see ma_eda_all.ipynb)
since certain words / phrases affect the results negatively, e.g. "kind regards", "good day", etc.
Data used is also the normalized version in order to better target certain words and update the weights within VADER's vocabulary, since some words, e.g. "worn", "hole", etc., are considered more negative in this domain than how VADER would normally classify them.
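As a minimal illustration of this tuning mechanism (the words and weights below are only an example; the actual weights used in this notebook are defined in the code cells further down):
```
import nltk
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer

sia_demo = SentimentIntensityAnalyzer()
text = "the belt is worn and has a hole"

print(sia_demo.polarity_scores(text))                  # scores with the default lexicon
sia_demo.lexicon.update({"worn": -3.0, "hole": -3.0})  # example domain weights
print(sia_demo.polarity_scores(text))                  # compound becomes (more) negative
```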
### Notes
* Data: feedback_39k
* Certain stopwords that might skew VADER's results have been removed from the texts
* Using normalized words to better target words
* Tuned by updating vocabulary of VADER
### Goal
* Add additional column for VADER sentiments pos/neu/neg
### Results
* Passable results to help with manual tasks
* Very different sentiment distributions than original sentiments
* Not good if too few words
```
import nltk
nltk.download('vader_lexicon')
nltk.download('punkt')
import re
import pandas as pd
import seaborn as sns; sns.set()
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sns.set(style='white', context='notebook', palette='deep')
from google.colab import drive
drive.mount('/content/drive')
PROJECT_PATH = '/content/drive/MyDrive/Colab/data/ma_data/'
DATA = PROJECT_PATH + 'feedback_all_normalized.csv'
DATA_EXPORT = PROJECT_PATH + 'feedback_all_vader_1.csv'
sia = SentimentIntensityAnalyzer()
print(sia.lexicon)
domain_words = {"bruise": -3.0, "pity": -3.0, "thanks": 0.0, "glue": -2.0, "shortcoming": -3.0, "break": -3.0, "inflamed": -2.0, "reminder": -1.0, "reliable": 3.0, "uncomplicated": 2.0, "fast": 2.0, "kindly": 0.0, "confuse": -2.0, "blister": -3.0, "flaw": -3.0, "stain": -3.0, "complain": -2.0, "dissolve": -3.0, "apalled": -4.0, "discolor": -3.0, "spot": -2.0, "big": -1.5, "small": -1.5, "broken": -3.0, "worn": -3.0, "torn": -3.0, "hole": -3.0, "dirt": -3.0}
sia.lexicon.update(domain_words)
df_raw = pd.read_csv(DATA)
df_raw[6:11]
df = df_raw.copy()
%%time
pos_treshold = 0.8
neg_treshold = -0.25
df['vader'] = df['normalized_with_stopwords'].apply(lambda x: 'POSITIVE' if sia.polarity_scores(str(x))['compound'] >= pos_treshold
else ('NEGATIVE' if sia.polarity_scores(str(x))['compound'] <= neg_treshold
else 'NONE'))
df['vader score'] = df['normalized_with_stopwords'].apply(lambda x: sia.polarity_scores(str(x))['compound'])
df.iloc[idx, 8]
# Original sentiment distribution
df["sentiment"].value_counts(normalize=True)
# Vader initial predictions
df["vader"].value_counts(normalize=True)
# No including stopwords
df["vader"].value_counts(normalize=True)
# With more stopwords v2
df["vader"].value_counts(normalize=True)
# With more stopwords v3
df["vader"].value_counts(normalize=True)
test_sia = "material error on the belt loop leather color flake off"
sia.polarity_scores(test_sia)
df_export = df[["feedback_text_en", "sentiment", "vader", "vader score", "delivery", "feedback_return", "product", "monetary", "one_hot_labels", "feedback_normalized", "normalized_with_stopwords"]]
df_export.to_csv(DATA_EXPORT)
```
| github_jupyter |
# Datasets and Neural Networks
This notebook will step through the process of loading an arbitrary dataset in PyTorch, and creating a simple neural network for regression.
# Datasets
We will first work through loading an arbitrary dataset in PyTorch. For this project, we chose the <a href="http://www.cs.toronto.edu/~delve/data/abalone/desc.html">delve abalone dataset</a>.
First, download and unzip the dataset from the link above, then unzip `Dataset.data.gz` and move `Dataset.data` into `hackpack-ml/models/data`.
We are given the following attribute information in the spec:
```
Attributes:
1 sex u M F I # Gender or Infant (I)
2 length u (0,Inf] # Longest shell measurement (mm)
3 diameter u (0,Inf] # perpendicular to length (mm)
4 height u (0,Inf] # with meat in shell (mm)
5 whole_weight u (0,Inf] # whole abalone (gr)
6 shucked_weight u (0,Inf] # weight of meat (gr)
7 viscera_weight u (0,Inf] # gut weight (after bleeding) (gr)
8 shell_weight u (0,Inf] # after being dried (gr)
9 rings u 0..29 # +1.5 gives the age in years
```
```
import math
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
import torch.nn.functional as F
import pandas as pd
from torch.utils.data import Dataset, DataLoader
```
Pandas is a data manipulation library that works really well with structured data. We can use Pandas DataFrames to load the dataset.
```
col_names = ['sex', 'length', 'diameter', 'height', 'whole_weight',
'shucked_weight', 'viscera_weight', 'shell_weight', 'rings']
abalone_df = pd.read_csv('../data/Dataset.data', sep=' ', names=col_names)
abalone_df.head(n=3)
```
We define a subclass of PyTorch Dataset for our Abalone dataset.
```
class AbaloneDataset(data.Dataset):
"""Abalone dataset. Provides quick iteration over rows of data."""
def __init__(self, csv):
"""
Args: csv (string): Path to the Abalone dataset.
"""
self.features = ['sex', 'length', 'diameter', 'height', 'whole_weight',
'shucked_weight', 'viscera_weight', 'shell_weight']
self.y = ['rings']
self.abalone_df = pd.read_csv(csv, sep=' ', names=(self.features + self.y))
# Turn categorical data into machine interpretable format (one hot)
self.abalone_df['sex'] = pd.get_dummies(self.abalone_df['sex'])
def __len__(self):
return len(self.abalone_df)
def __getitem__(self, idx):
"""Return (x,y) pair where x are abalone features and y is age."""
features = self.abalone_df.iloc[idx][self.features].values
y = self.abalone_df.iloc[idx][self.y]
return torch.Tensor(features).float(), torch.Tensor(y).float()
```
# Neural Networks
The task is to predict the age (number of rings) of abalone from physical measurements. We build a simple neural network with one hidden layer to model the regression.
```
class Net(nn.Module):
def __init__(self, feature_size):
super(Net, self).__init__()
# feature_size input channels (8), 1 output channels
self.fc1 = nn.Linear(feature_size, 4)
self.fc2 = nn.Linear(4, 1)
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
```
We instantiate an Abalone dataset instance and create DataLoaders for train and test sets.
```
dataset = AbaloneDataset('../data/Dataset.data')
train_split, test_split = math.floor(len(dataset) * 0.8), math.ceil(len(dataset) * 0.2)
trainset = [dataset[i] for i in range(train_split)]
testset = [dataset[train_split + j] for j in range(test_split)]
batch_sz = len(trainset) # Compact data allows for big batch size
trainloader = data.DataLoader(trainset, batch_size=batch_sz, shuffle=True, num_workers=4)
testloader = data.DataLoader(testset, batch_size=batch_sz, shuffle=False, num_workers=4)
```
Now, we can initialize our network and define train and test functions
```
net = Net(len(dataset.features))
loss_fn = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=0.1)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
gpu_ids = [0] # On Colab, we have access to one GPU. Change this value as you see fit
def train(epoch):
"""
Trains our net on data from the trainloader for a single epoch
"""
net.train()
with tqdm(total=len(trainloader.dataset)) as progress_bar:
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad() # Clear any stored gradients for new step
outputs = net(inputs.float())
loss = loss_fn(outputs, targets) # Calculate loss between prediction and label
loss.backward() # Backpropagate gradient updates through net based on loss
optimizer.step() # Update net weights based on gradients
progress_bar.set_postfix(loss=loss.item())
progress_bar.update(inputs.size(0))
def test(epoch):
"""
Run net in inference mode on test data.
"""
net.eval()
# Ensures the net will not update weights
with torch.no_grad():
with tqdm(total=len(testloader.dataset)) as progress_bar:
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device).float(), targets.to(device).float()
outputs = net(inputs)
loss = loss_fn(outputs, targets)
progress_bar.set_postfix(testloss=loss.item())
progress_bar.update(inputs.size(0))
```
Now that everything is prepared, it's time to train!
```
test_freq = 5 # Frequency to run model on validation data
for epoch in range(0, 200):
train(epoch)
if epoch % test_freq == 0:
test(epoch)
```
We use the network's eval mode to do a sample prediction to see how well it does.
```
net.eval()
sample = testset[0]
predicted_age = net(sample[0])
true_age = sample[1]
print(f'Input features: {sample[0]}')
print(f'Predicted age: {predicted_age.item()}, True age: {true_age[0]}')
```
Congratulations! You now know how to load your own datasets into PyTorch and run models on it. For an example of Computer Vision, check out the DenseNet notebook. Happy hacking!
| github_jupyter |
# Optimization with equality constraints
```
import math
import numpy as np
from scipy import optimize as opt
```
maximize $.4\,\log(x_1)+.6\,\log(x_2)$ s.t. $x_1+3\,x_2=50$.
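For this Cobb-Douglas-style log utility the optimum is also available in closed form, which gives a useful check on the numerical solution below: with expenditure shares $0.4$ and $0.6$,

$$ x_1^* = \frac{0.4\,I}{p_1} = \frac{0.4\cdot 50}{1} = 20, \qquad x_2^* = \frac{0.6\,I}{p_2} = \frac{0.6\cdot 50}{3} = 10. $$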
```
I = 50
p = np.array([1, 3])
U = lambda x: (.4*math.log(x[0])+.6*math.log(x[1]))
x0 = (I/len(p))/np.array(p)
budget = ({'type': 'eq', 'fun': lambda x: I-np.sum(np.multiply(x, p))})
opt.minimize(lambda x: -U(x), x0, method='SLSQP', constraints=budget, tol=1e-08,
options={'disp': True, 'ftol': 1e-08})
def consumer(U, p, I):
budget = ({'type': 'eq', 'fun': lambda x: I-np.sum(np.multiply(x, p))})
x0 = (I/len(p))/np.array(p)
sol = opt.minimize(lambda x: -U(x), x0, method='SLSQP', constraints=budget, tol=1e-08,
options={'disp': False, 'ftol': 1e-08})
if sol.status == 0:
return {'x': sol.x, 'V': -sol.fun, 'MgU': -sol.jac, 'mult': -sol.jac[0]/p[0]}
else:
return 0
consumer(U, p, I)
delta=.01
(consumer(U, p, I+delta)['V']-consumer(U, p, I-delta)['V'])/(2*delta)
delta=.001
numerador = (consumer(U,p+np.array([delta, 0]), I)['V']-consumer(U,p+np.array([-delta, 0]), I)['V'])/(2*delta)
denominador = (consumer(U, p, I+delta)['V']-consumer(U, p, I-delta)['V'])/(2*delta)
-numerador/denominador
```
## Cost function
```
# Production function
F = lambda x: (x[0]**.8)*(x[1]**.2)
w = np.array([5, 4])
y = 1
constraint = ({'type': 'eq', 'fun': lambda x: y-F(x)})
x0 = np.array([.5, .5])
cost = opt.minimize(lambda x: w@x, x0, method='SLSQP', constraints=constraint, tol=1e-08,
options={'disp': True, 'ftol': 1e-08})
F(cost.x)
cost
```
## Exercise
```
a = 2
u = lambda c: -np.exp(-a*c)
R = 2
Z2 = np.array([.72, .92, 1.12, 1.32])
Z3 = np.array([.86, .96, 1.06, 1.16])
def U(x):
states = len(Z2)*len(Z3)
U = u(x[0])
for z2 in Z2:
for z3 in Z3:
U += (1/states)*u(x[1]*R+x[2]*z2+x[3]*z3)
return U
p = np.array([1, 1, .5, .5])
I = 4
# a=1
consumer(U, p, I)
# a=5
consumer(U, p, I)
# a=2
consumer(U, p, I)
import matplotlib.pyplot as plt
x = np.arange(0.0, 2.0, 0.01)
a = 2
u = lambda c: -np.exp(-a*c)
plt.plot(x, u(x))
a = -2
plt.plot(x, u(x))
```
# Optimization with inequality constraints
```
f = lambda x: -x[0]**3+x[1]**2-2*x[0]*(x[2]**2)
constraints =({'type': 'eq', 'fun': lambda x: 2*x[0]+x[1]**2+x[2]-5},
{'type': 'ineq', 'fun': lambda x: 5*x[0]**2-x[1]**2-x[2]-2})
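# Note: the next line redefines `constraints`, replacing the equality/inequality
# pair above, so only the constraint x[0]**3 - x[1] = 0 is enforced in the solve below.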
constraints =({'type': 'eq', 'fun': lambda x: x[0]**3-x[1]})
x0 = np.array([.5, .5, 2])
opt.minimize(f, x0, method='SLSQP', constraints=constraints, tol=1e-08,
options={'disp': True, 'ftol': 1e-08})
```
| github_jupyter |
```
import sys
sys.path.append('../')
%load_ext autoreload
%autoreload 2
import sklearn
import copy
import numpy as np
import seaborn as sns
sns.set()
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
# from viz import viz
from bokeh.plotting import figure, show, output_notebook, output_file, save
from functions import merge_data
from sklearn.model_selection import RandomizedSearchCV
import load_data
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from fit_and_predict import fit_and_predict
```
## Params:
```
aggregate_by_state = False
outcome_type = 'cases'
```
## Basic Data Visualization
```
# Just something to quickly summarize the number of cases and distributions each day
# 'deaths' and 'cases' contain the time-series of the outbreak
df = load_data.load_county_level(data_dir = '../data/')
df = df.sort_values('#Deaths_3/30/2020', ascending=False)
# outcome_cases = load_data.outcome_cases # most recent day
# outcome_deaths = load_data.outcome_deaths
important_vars = load_data.important_keys(df)
very_important_vars = ['PopulationDensityperSqMile2010',
# 'MedicareEnrollment,AgedTot2017',
'PopulationEstimate2018',
'#ICU_beds',
'MedianAge2010',
'Smokers_Percentage',
'DiabetesPercentage',
'HeartDiseaseMortality',
'#Hospitals'
# 'PopMale60-642010',
# 'PopFmle60-642010',
# 'PopMale65-742010',
# 'PopFmle65-742010',
# 'PopMale75-842010',
# 'PopFmle75-842010',
# 'PopMale>842010',
# 'PopFmle>842010'
]
def sum_lists(list_of_lists):
arr = np.array(list(list_of_lists))
sum_arr = np.sum(arr,0)
return list(sum_arr)
if aggregate_by_state:
# Aggregate by State
state_deaths_df = df.groupby('StateNameAbbreviation').deaths.agg(sum_lists).to_frame()
state_cases_df = df.groupby('StateNameAbbreviation').cases.agg(sum_lists).to_frame()
df = pd.concat([state_cases_df,state_deaths_df],axis =1 )
# Distribution of the maximum number of cases
_cases = list(df['cases'])
max_cases = []
for i in range(len(df)):
max_cases.append(max(_cases[i]))
print('Number of counties with non-zero cases')
print(sum([v >0 for v in max_cases]))
# cases truncated below 20 and above 1000 for plot readability
plt.hist([v for v in max_cases if v > 20 and v < 1000],bins = 100)
sum(max_cases)
print(sum([v > 50 for v in max_cases]))
np.quantile(max_cases,.5)
# Distribution of the maximum number of cases
_deaths = list(df['deaths'])
max_deaths = []
for i in range(len(df)):
max_deaths.append(max(_deaths[i]))
print('Number of counties with non-zero deaths')
print(sum([v > 0 for v in max_deaths]))
# plt.hist(max_cases)
# print(sum([v >0 for v in max_cases]))
plt.hist([v for v in max_deaths if v > 5],bins=30)
sum(max_deaths)
max(max_deaths)
np.quantile(max_deaths,.7)
```
### Clean data
```
# Remove counties with zero cases
max_cases = [max(v) for v in df['cases']]
df['max_cases'] = max_cases
max_deaths = [max(v) for v in df['deaths']]
df['max_deaths'] = max_deaths
df = df[df['max_cases'] > 0]
```
## Predict data from model:
```
method_keys = []
# clear predictions
for m in method_keys:
del df[m]
# target_day = np.array([1])
# # Trains model on train_df and produces predictions for the final day for test_df and writes prediction
# # to a new column for test_df
# # fit_and_predict(df, method='exponential', outcome=outcome_type, mode='eval_mode',target_day=target_day)
# # fit_and_predict(df,method='shared_exponential', outcome=outcome_type, mode='eval_mode',target_day=target_day)
# # fit_and_predict(train_df, test_df,'shared_exponential', mode='eval_mode',demographic_vars=important_vars)
# # fit_and_predict(df,method='shared_exponential', outcome=outcome_type, mode='eval_mode',demographic_vars=very_important_vars,target_day=target_day)
# fit_and_predict(df, outcome=outcome_type, mode='eval_mode',demographic_vars=[],
# method='ensemble',target_day=target_day)
# fit_and_predict(df, outcome=outcome_type, mode='eval_mode',demographic_vars=[],
# method='ensemble',target_day=np.array([1,2,3]))
# # fit_and_predict(train_df, test_d f,method='exponential',mode='eval_mode',target_day = np.array([1,2]))
# # Finds the names of all the methods
# method_keys = [c for c in df if 'predicted' in c]
# method_keys
# for days_ahead in [1, 2, 3]:
# for method in ['exponential', 'shared_exponential', 'ensemble']:
# fit_and_predict(df, method=method, outcome=outcome_type, mode='eval_mode',target_day=np.array([days_ahead]))
# if method == 'shared_exponential':
# fit_and_predict(df,method='shared_exponential',
# outcome=outcome_type,
# mode='eval_mode',
# demographic_vars=very_important_vars,
# target_day=np.array([days_ahead]))
# method_keys = [c for c in df if 'predicted' in c]
# geo = ['countyFIPS', 'CountyNamew/StateAbbrev']
# method_keys = [c for c in df if 'predicted' in c]
# df_preds = df[method_keys + geo + ['deaths']]
# df_preds.to_pickle("multi_day_6.pkl")
```
## Ensemble predictions
```
exponential = {'model_type':'exponential'}
shared_exponential = {'model_type':'shared_exponential'}
demographics = {'model_type':'shared_exponential', 'demographic_vars':very_important_vars}
linear = {'model_type':'linear'}
# import fit_and_predict
# for d in [1, 2, 3]:
# df = fit_and_predict.fit_and_predict_ensemble(df,
# target_day=np.array([d]),
# mode='eval_mode',
# outcome=outcome_type,
# output_key=f'predicted_{outcome_type}_ensemble_{d}'
# )
import fit_and_predict
for d in [1, 3, 5, 7]:
df = fit_and_predict.fit_and_predict_ensemble(df,
target_day=np.array(range(1, d+1)),
mode='eval_mode',
outcome=outcome_type,
methods=[exponential,
shared_exponential,
demographics,
linear
],
output_key=f'predicted_{outcome_type}_ensemble_{d}_with_exponential'
)
method_keys = [c for c in df if 'predicted' in c]
# df = fit_and_predict.fit_and_predict_ensemble(df)
method_keys
```
## Evaluate and visualize models
### Compute MSE and log MSE on relevant cases
```
# TODO: add average rank as metric
# Computes the mse in log space and non-log space for all columns
def l1(arr1,arr2,norm=True):
"""
arr2 ground truth
arr1 predictions
"""
if norm:
sum_percent_dif = 0
for i in range(len(arr1)):
sum_percent_dif += np.abs(arr2[i]-arr1[i])/arr1[i]
return sum_percent_dif/len(arr1)
return sum([np.abs(a1-a2) for (a1,a2) in zip(arr1,arr2)])/len(arr1)
mse = sklearn.metrics.mean_squared_error
# Only evaluate points that exceed this number of deaths
# lower_threshold, upper_threshold = 10, 100000
lower_threshold, upper_threshold = 10, np.inf
# Log scaled
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [np.log(p[-1] + 1) for p in df[key][(outcome > lower_threshold)]] # * (outcome < upper_threshold)]]
print('Log scale MSE for '+key)
print(mse(np.log(outcome[(outcome > lower_threshold) * (outcome < upper_threshold)] + 1),preds))
# Log scaled
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [np.log(p[-1] + 1) for p in df[key][outcome > lower_threshold]]
print('Log scale l1 for '+key)
print(l1(np.log(outcome[outcome > lower_threshold] + 1),preds))
# No log scale
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [p[-1] for p in df[key][outcome > lower_threshold]]
print('Raw MSE for '+key)
print(mse(outcome[outcome > lower_threshold],preds))
# No log scale
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [p[-1] for p in df[key][outcome > lower_threshold]]
print('Raw l1 for '+key)
print(l1(outcome[outcome > lower_threshold],preds))
# No log scale
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [p[-1] for p in df[key][outcome > lower_threshold]]
print('Raw l1 for '+key)
print(l1(outcome[outcome > lower_threshold],preds,norm=False))
```
### Plot residuals
```
# TODO: Create bounds automatically, create a plot function and call it instead of copying code, figure out way
# to plot more than two things at once cleanly
# Creates residual plots log scaled and raw
# We only look at cases with number of deaths greater than 5
def method_name_to_pretty_name(key):
# TODO: hacky, fix
words = key.split('_')
words2 = []
for w in words:
if not w.isnumeric():
words2.append(w)
else:
num = w
model_name = ' '.join(words2[2:])
# model_name = 'model'
if num == '1':
model_name += ' predicting 1 day ahead'
else:
model_name += ' predicting ' +w+' days ahead'
return model_name
# Make log plots:
bounds = [1.5, 7]
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [np.log(p[-1]) for p in df[key][outcome > 5]]
plt.scatter(np.log(outcome[outcome > 5]),preds,label=method_name_to_pretty_name(key))
plt.xlabel('actual '+outcome_type)
plt.ylabel('predicted '+outcome_type)
plt.xlim(bounds)
plt.ylim(bounds)
plt.legend()
plt.plot(bounds, bounds, ls="--", c=".3")
plt.show()
# Make log plots zoomed in for the counties that have a fewer number of deaths
bounds = [1.5, 4]
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [np.log(p[-1]) for p in df[key][outcome > 5]]
plt.scatter(np.log(outcome[outcome > 5]),preds,label=method_name_to_pretty_name(key))
plt.xlabel('actual '+outcome_type)
plt.ylabel('predicted '+outcome_type)
plt.xlim(bounds)
plt.ylim(bounds)
plt.legend()
plt.plot(bounds, bounds, ls="--", c=".3")
plt.show()
# Make non-log plots zoomed in for the counties that have a fewer number of deaths# We set bounds
bounds = [10,400]
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [p[-1] for p in df[key][outcome > 5]]
plt.scatter(outcome[outcome > 5],preds,label=method_name_to_pretty_name(key))
plt.xlabel('actual '+outcome_type)
plt.ylabel('predicted '+outcome_type)
plt.xlim(bounds)
plt.ylim(bounds)
plt.legend()
plt.plot(bounds, bounds, ls="--", c=".3")
plt.show()
```
### Graph Visualizations
```
# Here we visualize predictions on a per county level.
# The blue lines are the true number of deaths, and the dots are our predictions for each model for those days.
def plot_prediction(row):
"""
Plots model predictions vs actual
row: dataframe row
window: autoregressive window size
"""
gold_key = outcome_type
for i,val in enumerate(row[gold_key]):
if val > 0:
start_point = i
break
# plt.plot(row[gold_key][start_point:], label=gold_key)
if len(row[gold_key][start_point:]) < 3:
return
sns.lineplot(list(range(len(row[gold_key][start_point:]))),row[gold_key][start_point:], label=gold_key)
for key in method_keys:
preds = row[key]
sns.scatterplot(list(range(len(row[gold_key][start_point:])))[-len(preds):],preds,label=method_name_to_pretty_name(key))
# plt.scatter(list(range(len(row[gold_key][start_point:])))[-len(preds):],preds,label=key)
# plt.legend()
# plt.show()
# sns.legend()
plt.title(row['CountyName']+' in '+row['StateNameAbbreviation'])
plt.ylabel(outcome_type)
plt.xlabel('Days since first death')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.figure(dpi=500)
plt.show()
# feature_vals = {
# 'PopulationDensityperSqMile2010' : 1.1525491065255939e-05,
# "MedicareEnrollment,AgedTot2017" : -2.119520577282583e-06,
# 'PopulationEstimate2018' : 2.8898343032154275e-07,
# '#ICU_beds' : -0.000647030727828718,
# 'MedianAge2010' : 0.05032666600339253,
# 'Smokers_Percentage' : -0.013410742818946319,
# 'DiabetesPercentage' : 0.04395318355581005,
# 'HeartDiseaseMortality' : 0.0015473771787186525,
# '#Hospitals': 0.019248102357644396,
# 'log(deaths)' : 0.8805209010821442,
# 'bias' : -1.871552103871495
# }
df = df.sort_values(by='max_deaths',ascending=False)
for i in range(len(df)):
row = df.iloc[i]
# If number of deaths greater than 10
if max(row['deaths']) > 10:
print(row['CountyName']+' in '+row['StateNameAbbreviation'])
plot_prediction(row)
for v in very_important_vars:
print(v+ ': '+str(row[v])) #+';\t contrib: '+ str(feature_vals[v]*float(row[v])))
print('\n')
```
# 0) Loading the libraries
```
# Show multiple results in a single output:
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
from IPython.display import Math
import pandas as pd
import numpy as np
import geopandas as gpd
import os
import pysal
from pyproj import CRS
from shapely.geometry import Point, MultiPoint, Polygon, mapping
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import pickle
```
# 1) Reading the databases:
**(a) SIH 2019 data:**
```
df = pd.read_csv("NT02 - Bahia/SIH/sih_17-19.csv")
#pickle.dump(df, open('sih_2019', 'wb'))
#df = pickle.load(open('sih_2019','rb'))
df.info()
df.head()
df.rename(columns={'MES_CMPT':'Mes','DT_INTER':'DT_Inter','DT_SAIDA':'DT_Saida','MUNIC_RES':'Cod_Municipio_Res',
'MUNIC_MOV':'Cod_Municipio','DIAG_PRINC':'Diagnostico','PROC_REA':'Procedimento','COMPLEX':'Complexidade',
'QT_DIARIAS':'Quantidade Diarias'}, inplace=True)
df = df.astype({'Cod_Municipio_Res': 'str','Cod_Municipio':'str','DT_Inter':'str','DT_Saida':'str',
'Complexidade':'str','Procedimento':'str'})
df.info()
df['Complexidade'] = df['Complexidade'].replace(['2','3'],['Média','Alta'])
df.head()
```
* **Date formatting:**
```
from datetime import datetime
df['DT_Inter'] = df['DT_Inter'].apply(lambda x: pd.to_datetime(x, format = '%Y%m%d'))
df['DT_Saida'] = df['DT_Saida'].apply(lambda x: pd.to_datetime(x, format = '%Y%m%d'))
pickle.dump(df, open('sih', 'wb'))
df = pickle.load(open('sih','rb'))
df2 = df.drop_duplicates(subset ="N_AIH",keep = 'last')
len(df2) # Total admissions to Bahia hospitals
len(df2[df2['Cod_Municipio_Res'].str.startswith('29')]) # Admissions to Bahia hospitals of individuals who live in Bahia
2550223/2579967 # share of admissions from Bahia residents (hard-coded from the two counts above)
```
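The bare division above uses hard-coded counts; a small sketch that derives the same in-state share directly from `df2`:
```
# Share of admissions in Bahia hospitals whose patients also live in Bahia,
# computed from the deduplicated admissions table instead of hard-coded totals.
share_ba_residents = len(df2[df2['Cod_Municipio_Res'].str.startswith('29')]) / len(df2)
share_ba_residents
```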
**(b) Municipality shapefile:**
```
mun = gpd.read_file("NT02 - Bahia/mun_br.shp")
mun = mun.to_crs(CRS("WGS84"));
mun.crs
mun.info()
mun.head()
mun.plot();
plt.show();
mun_ba = mun[mun['GEOCODIGO'].str.startswith('29')].copy()
mun_ba.head()
mun_ba[mun_ba['GEOCODIGO'].str.startswith('290160')]
mun_ba[mun_ba['NOME']=='Sítio do Quinto']
mun_ba[mun_ba['NOME']=='Antas']
mun_ba.plot();
plt.show();
```
**Adding the 2019 population estimates (IBGE):**
```
pop = gpd.read_file('NT02 - Bahia/IBGE - Estimativa popul 2019.shp')
pop.head()
mun_ba['Pop'] = 0
for i, row in mun_ba.iterrows():
mun_ba.loc[i,'Pop'] = pop[pop['Codigo']==row['GEOCODIGO']]['p_pop_2019'].values[0]
```
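The row-by-row `iterrows()` loop above works but is slow for larger tables; a vectorized sketch of the same join, assuming the `Codigo` and `p_pop_2019` column names shown above:
```
# Hypothetical vectorized alternative to the loop above:
# map the 2019 population estimate onto mun_ba by municipality code.
pop_by_code = pop.set_index('Codigo')['p_pop_2019']
mun_ba['Pop'] = mun_ba['GEOCODIGO'].map(pop_by_code)
```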
**Adding confirmed cases up to 2020-04-24:**
```
casos = gpd.read_file('NT02 - Bahia/Evolução/data_shape_ba_mod(1).shp')
casos.info()
mun_ba['c20200424'] = 0
for i, row in mun_ba.iterrows():
mun_ba.loc[i,'c20200424'] = casos[casos['Codigo']==row['GEOCODIGO']]['2020-04-24'].values[0]
mun_ba['c20200424'] = mun_ba['c20200424'].fillna(0)
```
**Computing prevalence (based on 2020-04-24):**
```
mun_ba['prev'] = (mun_ba['c20200424']/mun_ba['Pop'])*100000
mun_ba.sort_values(by='prev', ascending = False)
```
# (2) Admissions to Bahia (BA) hospitals
**(a) Number of individuals:**
```
mun_ba['Qtd_Tot'] = 0
mun_ba['Qtd_Fora'] = 0
mun_ba['Qtd_CplxM'] = 0
mun_ba['Qtd_CplxA'] = 0
mun_ba['Dia_Tot'] = 0
mun_ba['Dia_CplxM'] = 0
mun_ba['Dia_CplxA'] = 0
```
**Period from 2018-07-01 to 2019-06-30:**
```
from datetime import date
per = pd.date_range(date(2018,7,1), periods=365).tolist()
per[0]
per[-1]
# Admitted on any date up to 2019-06-30 and discharged between 2018-07-01 and 2019-06-30
df_BA = df2[(df2['DT_Inter'] <= per[-1]) & (df2['DT_Saida'] >= per[0]) & (df2['DT_Saida'] <= per[-1])]
#df_BA = df2[(df2['Cod_Municipio'].str.startswith('29')) & (df2['Cod_Municipio_Res'].str.startswith('29'))].copy()
df_BA.head()
for i, row in mun_ba.iterrows():
mun_ba.loc[i,'Qtd_Tot'] = len(df_BA[df_BA['Cod_Municipio']==row['GEOCODIGO'][:-1]])
mun_ba.loc[i,'Qtd_Fora'] = len(df_BA[(df_BA['Cod_Municipio']==row['GEOCODIGO'][:-1]) & (df2['Cod_Municipio_Res']!=row['GEOCODIGO'][:-1])])
mun_ba.loc[i,'Qtd_CplxM'] = len(df_BA[(df_BA['Cod_Municipio']==row['GEOCODIGO'][:-1]) &
(df_BA['Complexidade']=='Média')])
mun_ba.loc[i,'Qtd_CplxA'] = len(df_BA[(df_BA['Cod_Municipio']==row['GEOCODIGO'][:-1]) &
(df_BA['Complexidade']=='Alta')])
mun_ba.loc[i,'Dia_Tot'] = df_BA[df_BA['Cod_Municipio']==row['GEOCODIGO'][:-1]]['Quantidade Diarias'].sum()
mun_ba.loc[i,'Dia_CplxM'] = df_BA[(df_BA['Cod_Municipio']==row['GEOCODIGO'][:-1]) &
(df_BA['Complexidade']=='Média')]['Quantidade Diarias'].sum()
mun_ba.loc[i,'Dia_CplxA'] = df_BA[(df_BA['Cod_Municipio']==row['GEOCODIGO'][:-1]) &
(df_BA['Complexidade']=='Alta')]['Quantidade Diarias'].sum()
fig, ax = plt.subplots(figsize=(15,15));
mun_ba.plot(ax = ax, column = 'Qtd_Tot');
mun_ba.to_file('NT02 - Bahia/intern_ba.shp')
mun_ba = gpd.read_file('NT02 - Bahia/intern_ba.shp')
```
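As a design note, the per-municipality counts above can also be obtained with a single `groupby` instead of filtering inside the loop; a sketch for `Qtd_Tot` (the other columns follow the same pattern with an extra mask):
```
# Hypothetical vectorized alternative for the total count: count admissions once per
# destination municipality, then map onto mun_ba (the SIH code is the 7-digit GEOCODIGO
# without its final verification digit).
tot_by_mun = df_BA.groupby('Cod_Municipio').size()
mun_ba['Qtd_Tot'] = mun_ba['GEOCODIGO'].str[:-1].map(tot_by_mun).fillna(0).astype(int)
```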
# (3) Admissions per day in each municipality
```
from datetime import date
datas = pd.date_range(date(2018,7,1), periods=365).tolist()
lst_mun_ba = list(mun_ba['GEOCODIGO'].apply(lambda x: x[:-1]).values)
datas[0]
datas[-1]
# Admitted on any date up to 2019-06-30 and discharged between 2018-07-01 and 2019-06-30
df2[(df2['DT_Inter'] <= datas[-1]) & (df2['DT_Saida'] >= datas[0]) & (df2['DT_Saida'] <= datas[-1]) & (df2['Cod_Municipio'] == '292740')]
ssa = []
for dt in datas:
ssa.append(len(df2[(df2['DT_Inter'] <= dt) & (df2['DT_Saida'] >= dt) & (df2['Cod_Municipio'] == '292740')]))
pd_ssa = pd.DataFrame(zip(ssa,datas), columns = ['intern', 'data'])
pd_ssa['datas'] = pd.to_datetime(pd_ssa['data'])
pd_ssa['intern'].plot(figsize = (20,10), style = 'o--', markersize = 5);
plt.ylim(0,max(pd_ssa['intern'])+1000);
plt.xlim(-1,365);
plt.show();
max(ssa)
min(ssa)
```
* **Time series for all municipalities:**
```
ba_int = pd.DataFrame(index=datas, columns=mun_ba['GEOCODIGO'].apply(lambda x: x[:-1]).values)
list_mun = list(mun_ba['GEOCODIGO'].apply(lambda x: x[:-1]).values)
for i, row in ba_int.iterrows():
for mun in list_mun:
row[mun] = len(df2[(df2['DT_Inter'] <= i) & (df2['DT_Saida'] >= i) & (df2['Cod_Municipio'] == mun)])
ba_int
ba_int.to_excel('NT02 - Bahia/ba_int_dia.xlsx')
```
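The doubly nested loop above scans the whole admissions table once per day and per municipality; also note that assigning to `row` inside `iterrows()` does not write back into the DataFrame. A sketch of a faster and safer variant, with the same column names as above:
```
# Filter the admissions table once per municipality, then count, for each day,
# the stays that overlap it (admitted on or before, discharged on or after),
# and assign the resulting list directly to the column.
for mun in list_mun:
    sub = df2[df2['Cod_Municipio'] == mun]
    ba_int[mun] = [((sub['DT_Inter'] <= d) & (sub['DT_Saida'] >= d)).sum() for d in datas]
```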
# (4) Origin-Destination pattern of admissions
```
df.info()
per = pd.date_range(date(2018,7,1), periods=365).tolist()
per[0]
per[-1]
# Admitted on any date up to 2019-06-30 and discharged between 2018-07-01 and 2019-06-30
df_BA = df2[(df2['DT_Inter'] <= per[-1]) & (df2['DT_Saida'] >= per[0]) & (df2['DT_Saida'] <= per[-1]) & (df2['Cod_Municipio_Res'].str.startswith('29'))]
#df_BA = df2[(df2['Cod_Municipio'].str.startswith('29')) & (df2['Cod_Municipio_Res'].str.startswith('29'))].copy()
df_BA['Quantidade'] = 1
df_BA.groupby(['Cod_Municipio_Res','Cod_Municipio']).sum()
df_BA['Quantidade'].sum()
tab = df_BA.groupby(['Cod_Municipio_Res','Cod_Municipio']).sum()
tab_OD = pd.DataFrame(columns = ['ORI','DES','Qtd','Dia','Qtd_Dia'])
tab_OD
tab.index[0][1]
for i in np.arange(len(tab)):
ORI = tab.index[i][0]
DES = tab.index[i][1]
Qtd = tab.loc[tab.index[i],'Quantidade']
Dia = tab.loc[tab.index[i],'Quantidade Diarias']
Qtd_Dia = tab.loc[tab.index[i],'Quantidade']*tab.loc[tab.index[i],'Quantidade Diarias']
tab_OD.loc[i] = [ORI, DES, Qtd, Dia, Qtd_Dia]
tab_OD
tab_OD['ORI_GC'] = 0
tab_OD['DES_GC'] = 0
for i, el in enumerate(zip(tab_OD['ORI'],tab_OD['DES'])):
tab_OD.loc[i,'ORI_GC'] = mun_ba[mun_ba['GEOCODIGO'].str.startswith(str(el[0]))]['GEOCODIGO'].values[0]
tab_OD.loc[i,'DES_GC'] = mun_ba[mun_ba['GEOCODIGO'].str.startswith(str(el[1]))]['GEOCODIGO'].values[0]
tab_OD['Qtd'] = pd.to_numeric(tab_OD['Qtd'])
tab_OD['Dia'] = pd.to_numeric(tab_OD['Dia'])
tab_OD['Qtd_Dia'] = pd.to_numeric(tab_OD['Qtd_Dia'])
tab_OD.head()
tab_OD.info()
tab_OD.to_excel('NT02 - Bahia/tab_OD.xlsx', index = False)
tab_OD = pd.read_excel('NT02 - Bahia/tab_OD.xlsx')
tab_OD_dif = tab_OD[tab_OD['ORI'] != tab_OD['DES']].copy()
tab_OD_dif.to_excel('NT02 - Bahia/tab_OD_dif.xlsx', index = False)
tab_OD_dif.sort_values(by='Qtd', ascending = False).head(20)[['ORI_GC','DES_GC','Qtd','Dia','Qtd_Dia']]
```
### (4.1) Main hospital admission centers (most in-demand destinations)
```
tab_OD.groupby(['DES_GC']).sum().sort_values(by='Qtd', ascending = False)['Qtd'].sum()
tab_OD.groupby(['DES_GC']).sum().sort_values(by='Qtd', ascending = False)[:20]
```
Proportion:
```
tab_OD.groupby(['DES_GC']).sum().sort_values(by='Qtd', ascending = False)[:50]['Qtd']/tab_OD.groupby(['DES_GC']).sum().sort_values(by='Qtd', ascending = False)['Qtd'].sum()
(tab_OD.groupby(['DES_GC']).sum().sort_values(by='Qtd', ascending = False)[:50]['Qtd']/tab_OD.groupby(['DES_GC']).sum().sort_values(by='Qtd', ascending = False)['Qtd'].sum()).sum()
```
### (4.2) Municipalities most served by the main hospital admission centers
```
mun_ba.loc[mun_ba['GEOCODIGO'].isin(tab_OD['DES_GC'].astype(str))][['NOME','NOMEABREV','geometry']]
idx = list(tab_OD.groupby(['DES_GC']).sum().sort_values(by='Qtd', ascending = False)[:10]['Qtd'].index)
```
The 20 most-served municipalities for each of the 10 largest admission centers
```
for k in np.arange(len(idx)):
    mun_ba[mun_ba['GEOCODIGO']==idx[k]]['NOME'].values[0] # Name
    tab_OD[tab_OD['DES_GC']==idx[k]].sort_values(by='Qtd', ascending = False)['Qtd'].sum() # Number of admissions
tab_OD[tab_OD['DES_GC']==idx[k]].sort_values(by='Qtd', ascending = False)['Qtd'][:20].sum() \
    /tab_OD[tab_OD['DES_GC']==idx[k]].sort_values(by='Qtd', ascending = False)['Qtd'].sum() # Share of admissions represented by these 20
mun_ba[mun_ba['GEOCODIGO']==idx[0]]['NOME']
tab_OD[tab_OD['DES_GC']==idx[0]].sort_values(by='Qtd', ascending = False)['ORI_GC'][:20].values
atend = []
for k in np.arange(len(idx)):
idx_mun = tab_OD[tab_OD['DES_GC']==idx[k]].sort_values(by='Qtd', ascending = False)['ORI_GC'][:20].values
int_mun = tab_OD[tab_OD['DES_GC']==idx[k]].sort_values(by='Qtd', ascending = False)['Qtd'][:20].values
nome_mun = list(map(lambda x: mun_ba[mun_ba['GEOCODIGO']==x]['NOME'].values[0], idx_mun))
#pd.DataFrame(zip(idx_mun,nome_mun,int_mun), columns = ['Geocódigo','Município','Internações'])
for i in idx_mun:
atend.append(i)
len(atend)
len(list(set(atend)))
atend = list(set(atend))
mun_ba[mun_ba['GEOCODIGO'].isin(atend)]['Pop'].sum()
mun_ba[mun_ba['GEOCODIGO'].isin(atend)]['Pop'].sum()/mun_ba['Pop'].sum()
```
### (4.3) Analysis of the pandemic in the South NRS (Sul):
**Regional Health Centers (Núcleos Regionais de Saúde, NRS):**
```
nrs = gpd.read_file('NT02 - Bahia/Oferta Hospitalar/SESAB - NUCLEO REG SAUDE - 20190514 - SIRGAS2000.shp')
nrs = nrs.to_crs(CRS("WGS84"));
nrs.crs
mun_ba.crs == nrs.crs
nrs
mun_ba['NRS'] = 0
for i in list(nrs.index):
mun_ba.loc[mun_ba['geometry'].apply(lambda x: x.centroid.within(nrs.loc[i,'geometry'])),'NRS'] = nrs.loc[i,'NM_NRS']
mun_ba.plot(column = 'NRS');
plt.show();
```
Population
```
for i in nrs['NM_NRS'].values:
print(i,mun_ba[mun_ba['NRS']==i]['Pop'].sum())
mun_ba['Qtd_Tot'].sum()
nrs.to_file('NT02 - Bahia/nrs.shp')
```
**Municipalities with the highest prevalence:**
```
fig, ax = plt.subplots(figsize=(10,10));
mun_ba.plot(ax = ax, column = 'prev');
plt.show();
# 20 highest in the state:
mun_ba.sort_values(by='prev', ascending = False)[['GEOCODIGO','NOME','Pop','prev','NRS']][:20]
# Number of municipalities in the South NRS with confirmed cases as of 2020-04-24
len(mun_ba[(mun_ba['NRS']=='Sul') & (mun_ba['c20200424']>0)])
# 10 highest in the South region:
mun_ba[mun_ba['NRS']=='Sul'].sort_values(by='prev', ascending = False)[['GEOCODIGO','NOME','prev']][:14]
```
### (4.4) Hospital supply in the South NRS
**Conventional beds:**
```
leitos = pd.read_excel('NT02 - Bahia/Oferta Hospitalar/leitos.xlsx')
leitos.info()
leitos.head(2)
```
**Complementary beds:**
```
leitos_c = pd.read_excel('NT02 - Bahia/Oferta Hospitalar/leitos_comp.xlsx')
leitos_c.info()
leitos_c.head(2)
```
**Beds added after COVID:**
```
leitos_add = pd.read_excel('NT02 - Bahia/Oferta Hospitalar/leitos_add.xlsx')
leitos_add.info()
leitos_add.head(2)
```
**Ventilators:**
```
resp = pd.read_excel('NT02 - Bahia/Oferta Hospitalar/respiradores.xlsx')
resp.info()
resp.head(2)
```
**Health professionals:**
```
prof = pd.read_excel('NT02 - Bahia/Oferta Hospitalar/profissionais.xlsx')
prof.info()
prof.head(2)
```
**Adding to `mun_ba`:**
```
mun_ba['L_Clin'] = 0
mun_ba['L_UTI_Adu'] = 0
mun_ba['L_UTI_Ped'] = 0
mun_ba['L_CInt_Adu'] = 0
mun_ba['L_CInt_Ped'] = 0
mun_ba['LA_Clin'] = 0
mun_ba['LA_UTI_Adu'] = 0
mun_ba['Resp'] = 0
mun_ba['M_Pneumo'] = 0
mun_ba['M_Familia'] = 0
mun_ba['M_Intens'] = 0
mun_ba['Enferm'] = 0
mun_ba['Fisiot'] = 0
mun_ba['Nutric'] = 0
for i, row in mun_ba.iterrows():
try:
mun_ba.loc[i,'L_Clin'] = leitos[leitos['GEOCODIGO']==int(row['GEOCODIGO'][:-1])]['Clínicos'].values[0]
except:
pass
try:
mun_ba.loc[i,'L_UTI_Adu'] = leitos_c[leitos_c['GEOCODIGO']==int(row['GEOCODIGO'][:-1])]['UTI adulto I'].values[0] + leitos_c[leitos_c['GEOCODIGO']==int(row['GEOCODIGO'][:-1])]['UTI adulto II'].values[0] + leitos_c[leitos_c['GEOCODIGO']==int(row['GEOCODIGO'][:-1])]['UTI adulto III'].values[0]
except:
pass
try:
mun_ba.loc[i,'L_UTI_Ped'] = leitos_c[leitos_c['GEOCODIGO']==int(row['GEOCODIGO'][:-1])]['UTI pediátrica I'].values[0] + leitos_c[leitos_c['GEOCODIGO']==int(row['GEOCODIGO'][:-1])]['UTI pediátrica II'].values[0] + leitos_c[leitos_c['GEOCODIGO']==int(row['GEOCODIGO'][:-1])]['UTI pediátrica III'].values[0]
except:
pass
try:
mun_ba.loc[i,'L_CInt_Adu'] = leitos_c[leitos_c['GEOCODIGO']==int(row['GEOCODIGO'][:-1])]['Unidade de cuidados intermed adulto'].values[0]
except:
pass
try:
mun_ba.loc[i,'L_CInt_Ped'] = leitos_c[leitos_c['GEOCODIGO']==int(row['GEOCODIGO'][:-1])]['Unidade de cuidados intermed pediatrico'].values[0]
except:
pass
try:
mun_ba.loc[i,'LA_Clin'] = leitos_add[leitos_add['MUNICIPIO']==row['NOME']]['L_Clin'].values[0]
except:
pass
try:
mun_ba.loc[i,'LA_UTI_Adu'] = leitos_add[leitos_add['MUNICIPIO']==row['NOME']]['L_UTI_Adu'].values[0]
except:
pass
try:
mun_ba.loc[i,'Resp'] = resp[resp['GEOCODIGO']==int(row['GEOCODIGO'][:-1])]['Equipamentos_Existentes'].values[0]
except:
pass
try:
mun_ba.loc[i,'M_Pneumo'] = prof[prof['GEOCODIGO']==int(row['GEOCODIGO'][:-1])]['Médico pneumologista'].values[0]
except:
pass
try:
mun_ba.loc[i,'M_Familia'] = prof[prof['GEOCODIGO']==int(row['GEOCODIGO'][:-1])]['Médico da Família'].values[0]
except:
pass
try:
mun_ba.loc[i,'M_Intens'] = prof[prof['GEOCODIGO']==int(row['GEOCODIGO'][:-1])]['Médico em Medicina Intensiva'].values[0]
except:
pass
try:
mun_ba.loc[i,'Enferm'] = prof[prof['GEOCODIGO']==int(row['GEOCODIGO'][:-1])]['Enfermeiro'].values[0]
except:
pass
try:
mun_ba.loc[i,'Fisiot'] = prof[prof['GEOCODIGO']==int(row['GEOCODIGO'][:-1])]['Fisioterapeuta'].values[0]
except:
pass
try:
mun_ba.loc[i,'Nutric'] = prof[prof['GEOCODIGO']==int(row['GEOCODIGO'][:-1])]['Nutricionista'].values[0]
except:
pass
mun_ba[mun_ba['NRS']=='Sul'].sort_values(by='prev', ascending = False)[['NOME','Pop','prev','L_Clin','LA_Clin','L_UTI_Adu','LA_UTI_Adu','Resp','M_Pneumo','M_Intens','Fisiot','Enferm']][:14]
mun_ba.to_file('NT02 - Bahia/saude_mun_ba.shp')
```
### (4.5) Dynamics of the admission flow in the South NRS
**(a) Resources:**
```
#.isin(mun_ba[mun_ba['NRS']=='Sul']['NOME'].values)
nrs_rec = mun_ba[['NRS','Pop','L_Clin','L_UTI_Adu','L_UTI_Ped','L_CInt_Adu','L_CInt_Ped','LA_Clin','LA_UTI_Adu','Resp','M_Pneumo','M_Familia','M_Intens','Enferm','Fisiot','Nutric']].groupby(['NRS']).sum()
pd.DataFrame(zip(10000*nrs_rec['L_Clin']/nrs_rec['Pop'],10000*nrs_rec['L_UTI_Adu']/nrs_rec['Pop'],10000*nrs_rec['L_UTI_Ped']/nrs_rec['Pop'],
10000*nrs_rec['Resp']/nrs_rec['Pop'],10000*nrs_rec['M_Pneumo']/nrs_rec['Pop'],
10000*nrs_rec['M_Intens']/nrs_rec['Pop'],10000*nrs_rec['Fisiot']/nrs_rec['Pop'],
10000*nrs_rec['Enferm']/nrs_rec['Pop']),
index = (10000*nrs_rec['Enferm']/nrs_rec['Pop']).index, columns = ['L_Clin','L_UTI_Adu','L_UTI_Ped','Resp','M_Pneumo',
'M_Intens','Fisiot','Enferm'])
pd.DataFrame(zip(nrs_rec['L_UTI_Adu'],nrs_rec['Resp'],nrs_rec['M_Intens'],nrs_rec['Fisiot']),
index = (100000*nrs_rec['Enferm']/nrs_rec['Pop']).index, columns = ['L_UTI_Adu','Resp','M_Intens',
'Fisiot'])
```
**(b) Hospital admissions:**
**Interdependence between NRSs (OD matrix):**
```
nrs_names = list(nrs['NM_NRS'].values)
nrs_OD = np.zeros([len(nrs_names),len(nrs_names)])
for i, nrs_o in enumerate(nrs_names):
muns_o = list(mun_ba[mun_ba['NRS']==nrs_o]['GEOCODIGO'].values)
for j, nrs_d in enumerate(nrs_names):
muns_d = list(mun_ba[mun_ba['NRS']==nrs_d]['GEOCODIGO'].values)
nrs_OD[i,j] = tab_OD[tab_OD['ORI_GC'].isin(muns_o) & tab_OD['DES_GC'].isin(muns_d)]['Qtd'].sum()
nrs_od_df = pd.DataFrame(nrs_OD, columns = nrs_names, index = nrs_names).astype(int)
nrs_od_df
from itertools import product
nrs_tab_od = pd.DataFrame(list(product(nrs_names,nrs_names)))
nrs_tab_od['flux'] = 0
nrs_tab_od.rename(columns={0:'ORI',1:'DES'}, inplace = True)
nrs_tab_od
for i, row in nrs_od_df.iterrows():
nrs_tab_od.loc[(nrs_tab_od['ORI']==i),'flux'] = list(row.values)
nrs_tab_od
nrs_tab_od.to_csv('NT02 - Bahia/nrs_tab_od.csv')
```
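The `product` loop above flattens the OD matrix into long format; for reference, a sketch of the same reshaping done with `melt`, using the column names above (row order may differ):
```
# Hypothetical one-step equivalent of the product/loop construction above:
# reshape the wide OD matrix into a long ORI/DES/flux table.
nrs_tab_od_alt = (nrs_od_df.reset_index()
                  .melt(id_vars='index', var_name='DES', value_name='flux')
                  .rename(columns={'index': 'ORI'}))
nrs_tab_od_alt
```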
**For each NRS:**
```
# Municipalities of each NRS
for i in list(nrs['NM_NRS'].values):
    muns = list(mun_ba[mun_ba['NRS']==i]['NOME'].values)
    muns_gc = list(mun_ba[mun_ba['NRS']==i]['GEOCODIGO'].values)
    print("NRS "+i+":")
    print("Total admissions: {}".format(tab_OD[tab_OD['DES_GC'].isin(muns_gc)]['Qtd'].sum()))
    print("Share of admissions relative to the state total: {:.3f}".format(tab_OD[tab_OD['DES_GC'].isin(muns_gc)]['Qtd'].sum()/tab_OD['Qtd'].sum()))
    print("Total admissions of NRS residents treated within the NRS itself: {}".format(tab_OD[tab_OD['ORI_GC'].isin(muns_gc) & tab_OD['DES_GC'].isin(muns_gc)]['Qtd'].sum()))
    print("Ratio of admissions of NRS residents treated within the NRS to all admissions of NRS residents across the state: {:.3f}".format(tab_OD[tab_OD['ORI_GC'].isin(muns_gc) & tab_OD['DES_GC'].isin(muns_gc)]['Qtd'].sum() \
        / tab_OD[tab_OD['ORI_GC'].isin(muns_gc)]['Qtd'].sum()))
    print("Total admissions in the NRS of residents from outside the NRS: {}".format(tab_OD[~tab_OD['ORI_GC'].isin(muns_gc) & tab_OD['DES_GC'].isin(muns_gc)]['Qtd'].sum()))
    print("Share of admissions in the NRS of residents from outside the NRS relative to all admissions in the NRS: {:.3f}".format(tab_OD[~tab_OD['ORI_GC'].isin(muns_gc) & tab_OD['DES_GC'].isin(muns_gc)]['Qtd'].sum() \
        /tab_OD[tab_OD['DES_GC'].isin(muns_gc)]['Qtd'].sum()))
```
**Dependence on the East NRS (Leste):**
```
muns = [i for i in list(nrs['NM_NRS'].values) if i!='Leste']
for i in muns:
    muns_gc = list(mun_ba[mun_ba['NRS']==i]['GEOCODIGO'].values)
    muns_le = list(mun_ba[mun_ba['NRS']=='Leste']['GEOCODIGO'].values)
    print("Admissions in the East NRS of residents of the {} NRS = {}".format(i,tab_OD[tab_OD['ORI_GC'].isin(muns_gc) & tab_OD['DES_GC'].isin(muns_le)]['Qtd'].sum()))
    print("Proportion relative to admissions served within the {} NRS itself = {}".format(i,tab_OD[tab_OD['ORI_GC'].isin(muns_gc) & tab_OD['DES_GC'].isin(muns_le)]['Qtd'].sum() \
        /tab_OD[tab_OD['ORI_GC'].isin(muns_gc) & tab_OD['DES_GC'].isin(muns_gc)]['Qtd'].sum()))
```
**Analysis of the South NRS (largest number of accumulated cases):**
```
# Municipalities of the South NRS
mun_sul = list(mun_ba[mun_ba['NRS']=='Sul']['NOME'].values)
mun_sul_gc = list(mun_ba[mun_ba['NRS']=='Sul']['GEOCODIGO'].values)
# All admissions demanded by municipalities of the South NRS
tab_OD[tab_OD['ORI_GC'].isin(mun_sul_gc)].sort_values(by='Qtd', ascending = False)
# All admissions demanded by South NRS municipalities that were served within the South NRS
tab_OD[tab_OD['ORI_GC'].isin(mun_sul_gc) & tab_OD['DES_GC'].isin(mun_sul_gc)].sort_values(by='Qtd', ascending = False)
# All admissions served in the South NRS coming from municipalities outside the South NRS
tab_OD[~tab_OD['ORI_GC'].isin(mun_sul_gc) & tab_OD['DES_GC'].isin(mun_sul_gc)].sort_values(by='Qtd', ascending = False)
# Total admissions in Bahia:
tab_OD['Qtd'].sum()
# Total admissions in the South NRS:
tab_OD[tab_OD['DES_GC'].isin(mun_sul_gc)]['Qtd'].sum()
# Percentage of admissions in the South NRS relative to the state total:
tab_OD[tab_OD['DES_GC'].isin(mun_sul_gc)]['Qtd'].sum()/tab_OD['Qtd'].sum()
# Total admissions in the South NRS from municipalities within the South NRS:
tab_OD[tab_OD['ORI_GC'].isin(mun_sul_gc) & tab_OD['DES_GC'].isin(mun_sul_gc)]['Qtd'].sum()
# Ratio of admissions served in the South NRS to the total demanded by the South NRS
tab_OD[tab_OD['ORI_GC'].isin(mun_sul_gc) & tab_OD['DES_GC'].isin(mun_sul_gc)]['Qtd'].sum() \
/ tab_OD[tab_OD['ORI_GC'].isin(mun_sul_gc)]['Qtd'].sum()
# Total admissions in the South NRS from municipalities outside the South NRS:
tab_OD[~tab_OD['ORI_GC'].isin(mun_sul_gc) & tab_OD['DES_GC'].isin(mun_sul_gc)]['Qtd'].sum()
# Total admissions of South NRS residents served outside the South NRS:
tab_OD[tab_OD['ORI_GC'].isin(mun_sul_gc) & ~tab_OD['DES_GC'].isin(mun_sul_gc)]['Qtd'].sum()
# Municipalities that served the most admissions in the South NRS:
tab_OD[tab_OD['DES_GC'].isin(mun_sul_gc)].sort_values(by='Qtd', ascending = False)
# Percentage of admissions served by the top 10:
tab_OD[tab_OD['DES_GC'].isin(mun_sul_gc)].sort_values(by='Qtd', ascending = False)[:10]['Qtd'].sum() \
/ tab_OD[tab_OD['DES_GC'].isin(mun_sul_gc)]['Qtd'].sum()
muns_10sul = list(map(str,tab_OD[tab_OD['DES_GC'].isin(mun_sul_gc)].sort_values(by='Qtd', ascending = False)[:10]['DES_GC'].values))
# Material resources
mun_ba[mun_ba['GEOCODIGO'].isin(muns_10sul)][['NOME','Pop','Qtd_Tot','L_Clin','LA_Clin','L_UTI_Adu','LA_UTI_Adu','Resp']].sort_values(by = 'Qtd_Tot', ascending = False)
mun_ba[mun_ba['NOME'].isin(['Ilhéus','Itabuna','Jequié'])]['Pop'].sum() \
/mun_ba[mun_ba['NRS']=='Sul']['Pop'].sum()
mun_ba[mun_ba['NRS']=='Sul']['Pop'].sum()
# Material resources of Itabuna, Ilhéus and Jequié relative to the whole NRS
mun_ba[mun_ba['GEOCODIGO'].isin(muns_10sul)][['Qtd_Tot','L_Clin','LA_Clin','L_UTI_Adu','LA_UTI_Adu','Resp']].sort_values(by = 'Qtd_Tot', ascending = False)[:3].sum() \
/ mun_ba[mun_ba['NRS']=='Sul'][['Qtd_Tot','L_Clin','LA_Clin','L_UTI_Adu','LA_UTI_Adu','Resp']].sum()
# Human resources
mun_ba[mun_ba['GEOCODIGO'].isin(muns_10sul)][['NOME','Qtd_Tot','M_Pneumo','M_Intens','Fisiot','Enferm']].sort_values(by = 'Qtd_Tot', ascending = False)
# Human resources of Itabuna, Ilhéus and Jequié relative to the whole NRS
mun_ba[mun_ba['GEOCODIGO'].isin(muns_10sul)][['Qtd_Tot','M_Pneumo','M_Intens','Fisiot','Enferm']].sort_values(by = 'Qtd_Tot', ascending = False)[:3].sum() \
/ mun_ba[mun_ba['NRS']=='Sul'][['Qtd_Tot','M_Pneumo','M_Intens','Fisiot','Enferm']].sum()
```
### (4.6) Admission flows of the 10 most prevalent municipalities in the South NRS:
```
mun_sul = list(mun_ba[mun_ba['NRS']=='Sul'].sort_values(by='prev', ascending = False)['GEOCODIGO'].values)
for i in mun_sul[:10]:
orig = []
lst_orig = tab_OD[tab_OD['DES_GC']==i].sort_values(by = 'Qtd', ascending = False)['ORI_GC'].values
    if len(lst_orig) == 0:
        print("{} did not receive patients".format(mun_ba[mun_ba['GEOCODIGO']==i]['NOME'].values[0]))
        continue
for k, j in enumerate(lst_orig):
if k < len(lst_orig) - 1:
orig.append(mun_ba[mun_ba['GEOCODIGO']==j]['NOME'].values[0])
else:
orig.append(mun_ba[mun_ba['GEOCODIGO']==j]['NOME'].values[0])
    print('Admissions with destination ' + mun_ba[mun_ba['GEOCODIGO']==i]['NOME'].values[0] + ':')
qtd = tab_OD[tab_OD['DES_GC']==i].sort_values(by = 'Qtd', ascending = False)['Qtd']
perc = qtd/tab_OD[tab_OD['DES_GC']==i].sort_values(by = 'Qtd', ascending = False)['Qtd'].sum()
    display(pd.DataFrame(zip(orig,qtd,perc), columns = ['Mun_orig','Qtd','Distr_perc']))  # display() so the table shows inside the loop
for i in mun_sul[:10]:
dest = []
lst_dest = tab_OD[tab_OD['ORI_GC']==i].sort_values(by = 'Qtd', ascending = False)['DES_GC'].values
if len(lst_dest) == 0:
continue
for k, j in enumerate(lst_dest):
if k < len(lst_dest) - 1:
dest.append(mun_ba[mun_ba['GEOCODIGO']==j]['NOME'].values[0])
else:
dest.append(mun_ba[mun_ba['GEOCODIGO']==j]['NOME'].values[0])
    print('Admissions originating in ' + mun_ba[mun_ba['GEOCODIGO']==i]['NOME'].values[0] + ':')
qtd = tab_OD[tab_OD['ORI_GC']==i].sort_values(by = 'Qtd', ascending = False)['Qtd']
perc = qtd/tab_OD[tab_OD['ORI_GC']==i].sort_values(by = 'Qtd', ascending = False)['Qtd'].sum()
    display(pd.DataFrame(zip(dest,qtd,perc), columns = ['Mun_dest','Qtd','Distr_perc']))  # display() so the table shows inside the loop
```
<a href="https://colab.research.google.com/github/arindamdeyofficial/Amazon_Review_Sentiment_Analysys/blob/main/Group_037_SEC_3_Assignment_2_Image_Captioning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Assignment 2 Set 5
Image Captioning
### Deep Learning (S1-21_DSECLZG524) - DL Group 037 - SEC-3
* Arindam Dey - 2020FC04251
* Kaushik Dubey - 2020FC04245
* Mohammad Attaullah - 2020FC04274
1. Import Libraries/Dataset (0 mark)
1. Import the required libraries
2. Check the GPU availability (recommended: use the free GPU provided by Google Colab)
```
import os
#COLAB_GPU
#print(os.environ )
isCollab = os.getenv('COLAB_GPU', False) and os.getenv('OS', True)
print('Collab' if isCollab else 'Local')
#libraries
import numpy as np
import pandas as pd
import random
# folder
import os
# Imports packages to view data
#pip install opencv-python
#pip install opencv-contrib-python
import cv2
#pip install glob2
from glob2 import glob
#pip install matplotlib
import matplotlib.pyplot as plt
from PIL import Image
#cv2_imshow is needed because cv2.imshow() does not work directly in Google Colab
if isCollab:
from google.colab.patches import cv2_imshow
#pip install prettytable
from prettytable import PrettyTable
# visu
import matplotlib.pyplot as plt
%matplotlib inline
#pip install seaborn
import seaborn as sns
plt.rc('image', cmap='gray')
# sklearn
#pip install scikit-learn
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
#tensorflow and keras
#pip install tensorflow
#pip install keras
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, GRU, Embedding
from tensorflow.keras.applications import VGG16
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
#google drive
#doesn't work in local
import pickle
if isCollab:
from google.colab import drive
drive.mount('/content/drive')
import sklearn.metrics as metrics
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.metrics import classification_report
print(tf.__version__)
isCollab
```
2. Data Processing (1 mark)
### Read the pickle file
```
if isCollab:
drivemasterpath = '/content/drive/My Drive/Colab Notebooks/AutoImageCaptioning'
else:
drivemasterpath = 'D:/OneDrive/Certification/Bits Pilani Data Science/3rd Sem/Deep Learning (S1-21_DSECLZG524)/Assignment 2'
imgDatasetPath = drivemasterpath+"/Flicker8k_Dataset"
pklFilePath = drivemasterpath+'/set_0.pkl'
print(imgDatasetPath,pklFilePath)
infile = open(pklFilePath,'rb')
best_model = pickle.load(infile)
#keep dataobj into file
#import pickle
# dump : put the data of the object in a file
#pickle.dump(obj, open(file_path, "wb"))
# dumps : return the object in bytes
#data = pickle.dump(obj)
```
### Plot at least two samples and their captions (use matplotlib/seaborn/any other library).
```
pics = os.listdir(imgDatasetPath)[25:30] # for 5 images we are showing
pic_address = [imgDatasetPath + '/' + pic for pic in pics]
pic_address
for i in range(0,5):
# Load the images
norm_img = Image.open(pic_address[i])
#Let's plt these images
## plot normal picture
f = plt.figure(figsize= (10,6))
a1 = f.add_subplot(1,2,1)
img_plot = plt.imshow(norm_img)
a1.set_title(f'Normal {pics[i]}')
def load_image(path, size=None):
"""
Load the image from the given file-path and resize it
to the given size if not None.
"""
# Load the image using PIL.
img = Image.open(path)
# Resize image if desired.
if not size is None:
img = img.resize(size=size, resample=Image.LANCZOS)
# Convert image to numpy array.
img = np.array(img)
# Scale image-pixels so they fall between 0.0 and 1.0
img = img / 255.0
# Convert 2-dim gray-scale array to 3-dim RGB array.
if (len(img.shape) == 2):
img = np.repeat(img[:, :, np.newaxis], 3, axis=2)
return img
def show_image(idx, train):
"""
Load and plot an image from the training- or validation-set
with the given index.
"""
if train:
# Use an image from the training-set.
dir = coco.train_dir
filename = filenames_train[idx]
captions = captions_train[idx]
else:
# Use an image from the validation-set.
dir = coco.val_dir
filename = filenames_val[idx]
captions = captions_val[idx]
# Path for the image-file.
path = os.path.join(dir, filename)
# Print the captions for this image.
for caption in captions:
print(caption)
# Load the image and plot it.
img = load_image(path)
plt.imshow(img)
plt.show()
```
3. Model Building (4 marks)
1. Use a pretrained VGG-16 model trained on the ImageNet dataset (publicly available) for image feature extraction.
2. Create a 3-layer LSTM model and other relevant layers for image-caption generation (a hedged sketch follows this list).
3. Add L2 regularization to all the LSTM layers.
4. Add one dropout layer at an appropriate position and give reasons.
5. Choose appropriate activation functions for all the layers.
6. Print the model summary.
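As referenced in item 2, here is a minimal sketch of such a decoder, not the notebook's final model: three stacked LSTM layers with L2 regularization, one dropout layer after the embedding (it regularizes the word representations before they enter the recurrent stack), and the 4096-dimensional VGG16 `fc2` transfer-values mapped to the decoder's initial state. The vocabulary size, embedding size, state size, and regularization strength below are illustrative assumptions.
```
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, LSTM, Embedding, Dropout
from tensorflow.keras.regularizers import l2

transfer_values_size = 4096   # size of the VGG16 'fc2' layer (computed later in the notebook)
state_size = 512              # illustrative
embedding_size = 128          # illustrative
num_words = 10000             # illustrative vocabulary size

# Map the image transfer-values to the initial state of the decoder.
transfer_values_input = Input(shape=(transfer_values_size,), name='transfer_values_input')
decoder_transfer_map = Dense(state_size, activation='tanh', name='decoder_transfer_map')

# Word-token input and embedding.
decoder_input = Input(shape=(None,), name='decoder_input')
decoder_embedding = Embedding(input_dim=num_words, output_dim=embedding_size, name='decoder_embedding')

# Dropout right after the embedding layer.
decoder_dropout = Dropout(0.5)

# Three stacked LSTM layers, each with L2 weight regularization.
decoder_lstm1 = LSTM(state_size, return_sequences=True, kernel_regularizer=l2(1e-4))
decoder_lstm2 = LSTM(state_size, return_sequences=True, kernel_regularizer=l2(1e-4))
decoder_lstm3 = LSTM(state_size, return_sequences=True, kernel_regularizer=l2(1e-4))

# Softmax over the vocabulary for next-word prediction.
decoder_dense = Dense(num_words, activation='softmax', name='decoder_output')

initial_state = decoder_transfer_map(transfer_values_input)
net = decoder_embedding(decoder_input)
net = decoder_dropout(net)
net = decoder_lstm1(net, initial_state=[initial_state, initial_state])
net = decoder_lstm2(net)
net = decoder_lstm3(net)
decoder_output = decoder_dense(net)

decoder_model = Model(inputs=[transfer_values_input, decoder_input], outputs=[decoder_output])
decoder_model.summary()
```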
Using a pretrained VGG-16 model (trained on the ImageNet dataset and publicly available) for image feature extraction.
VGG16 is a convolutional neural network (CNN) architecture that was used to win the ILSVRC (ImageNet) competition in 2014, and it is still regarded as one of the strongest vision architectures. Its most distinctive feature is that, instead of a large number of hyper-parameters, it relies on convolution layers with 3x3 filters and stride 1 with 'same' padding, and max-pooling layers with 2x2 filters and stride 2. This arrangement of convolution and max-pooling layers is repeated consistently throughout the whole architecture. At the end there are two fully connected (FC) layers followed by a softmax output. The 16 in VGG16 refers to the 16 layers that have weights. It is a fairly large network with roughly 138 million parameters.

Pre-Trained Image Model (VGG16)
The following creates an instance of the VGG16 model using the Keras API. This automatically downloads the required files if you don't have them already.
The VGG16 model was pre-trained on the ImageNet data-set for classifying images. The VGG16 model contains a convolutional part and a fully-connected (or dense) part which is used for the image classification.
If include_top=True then the whole VGG16 model is downloaded which is about 528 MB. If include_top=False then only the convolutional part of the VGG16 model is downloaded which is just 57 MB.
We will use some of the fully-connected layers in this pre-trained model, so we have to download the full model, but if you have a slow internet connection, then you can try and modify the code below to use the smaller pre-trained model without the classification layers.
```
image_model = VGG16(include_top=True, weights='imagenet')
image_model.summary()
transfer_layer = image_model.get_layer('fc2')
image_model_transfer = Model(inputs=image_model.input,
outputs=transfer_layer.output)
```
The model expects input images to be of this size:
```
img_size = K.int_shape(image_model.input)[1:3]
img_size
transfer_values_size = K.int_shape(transfer_layer.output)[1]
transfer_values_size
```
Process All Images
We now make functions for processing all images in the data-set using the pre-trained image-model and saving the transfer-values in a cache-file so they can be reloaded quickly.
We effectively create a new data-set of the transfer-values. This is because it takes a long time to process an image in the VGG16 model. We will not be changing all the parameters of the VGG16 model, so every time it processes an image, it gives the exact same result. We need the transfer-values to train the image-captioning model for many epochs, so we save a lot of time by calculating the transfer-values once and saving them in a cache-file.
This is a helper-function for printing the progress.
```
import sys  # needed by print_progress below
import keras, os
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D , Flatten
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
trdata = ImageDataGenerator()
traindata = trdata.flow_from_directory(directory="data",target_size=(224,224))
tsdata = ImageDataGenerator()
testdata = tsdata.flow_from_directory(directory="test", target_size=(224,224))
model = Sequential()
model.add(Conv2D(input_shape=(224,224,3),filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(Conv2D(filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
def print_progress(count, max_count):
# Percentage completion.
pct_complete = count / max_count
# Status-message. Note the \r which means the line should
# overwrite itself.
msg = "\r- Progress: {0:.1%}".format(pct_complete)
# Print it.
sys.stdout.write(msg)
sys.stdout.flush()
```
This is the function for processing the given files using the VGG16-model and returning their transfer-values.
```
def process_images(data_dir, filenames, batch_size=32):
"""
Process all the given files in the given data_dir using the
pre-trained image-model and return their transfer-values.
Note that we process the images in batches to save
memory and improve efficiency on the GPU.
"""
# Number of images to process.
num_images = len(filenames)
# Pre-allocate input-batch-array for images.
shape = (batch_size,) + img_size + (3,)
image_batch = np.zeros(shape=shape, dtype=np.float16)
# Pre-allocate output-array for transfer-values.
# Note that we use 16-bit floating-points to save memory.
shape = (num_images, transfer_values_size)
transfer_values = np.zeros(shape=shape, dtype=np.float16)
# Initialize index into the filenames.
start_index = 0
# Process batches of image-files.
while start_index < num_images:
# Print the percentage-progress.
print_progress(count=start_index, max_count=num_images)
# End-index for this batch.
end_index = start_index + batch_size
# Ensure end-index is within bounds.
if end_index > num_images:
end_index = num_images
# The last batch may have a different batch-size.
current_batch_size = end_index - start_index
# Load all the images in the batch.
for i, filename in enumerate(filenames[start_index:end_index]):
# Path for the image-file.
path = os.path.join(data_dir, filename)
# Load and resize the image.
# This returns the image as a numpy-array.
img = load_image(path, size=img_size)
# Save the image for later use.
image_batch[i] = img
# Use the pre-trained image-model to process the image.
# Note that the last batch may have a different size,
# so we only use the relevant images.
transfer_values_batch = \
image_model_transfer.predict(image_batch[0:current_batch_size])
# Save the transfer-values in the pre-allocated array.
transfer_values[start_index:end_index] = \
transfer_values_batch[0:current_batch_size]
# Increase the index for the next loop-iteration.
start_index = end_index
# Print newline.
print()
return transfer_values
```
Helper-function for processing all images in the training-set. This saves the transfer-values in a cache-file for fast reloading.
```
def process_images_train():
print("Processing {0} images in training-set ...".format(len(filenames_train)))
# Path for the cache-file.
cache_path = os.path.join(coco.data_dir,
"transfer_values_train.pkl")
# If the cache-file already exists then reload it,
# otherwise process all images and save their transfer-values
# to the cache-file so it can be reloaded quickly.
transfer_values = cache(cache_path=cache_path,
fn=process_images,
data_dir=coco.train_dir,
filenames=filenames_train)
return transfer_values
```
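The `cache()` helper used in the functions above and below, like the `coco` dataset wrapper, comes from the tutorial code this notebook follows and is not defined in this excerpt; a minimal pickle-based sketch with the same calling convention (an assumption, not the original implementation) would be:
```
import os
import pickle

def cache(cache_path, fn, *args, **kwargs):
    # If the cache-file exists, reload the saved result; otherwise call fn(*args, **kwargs),
    # save the result to the cache-file, and return it.
    if os.path.exists(cache_path):
        with open(cache_path, 'rb') as f:
            obj = pickle.load(f)
        print("- Data loaded from cache-file: " + cache_path)
    else:
        obj = fn(*args, **kwargs)
        with open(cache_path, 'wb') as f:
            pickle.dump(obj, f)
        print("- Data saved to cache-file: " + cache_path)
    return obj
```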
Helper-function for processing all images in the validation-set.
```
def process_images_val():
print("Processing {0} images in validation-set ...".format(len(filenames_val)))
# Path for the cache-file.
cache_path = os.path.join(coco.data_dir, "transfer_values_val.pkl")
# If the cache-file already exists then reload it,
# otherwise process all images and save their transfer-values
# to the cache-file so it can be reloaded quickly.
transfer_values = cache(cache_path=cache_path,
fn=process_images,
data_dir=coco.val_dir,
filenames=filenames_val)
return transfer_values
```
Process all images in the training-set and save the transfer-values to a cache-file. This took about 30 minutes to process on a GTX 1070 GPU.
```
%%time
transfer_values_train = process_images_train()
print("dtype:", transfer_values_train.dtype)
print("shape:", transfer_values_train.shape)
```
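The validation-set transfer-values would presumably be computed the same way in a following cell; the corresponding call mirrors the one above:
```
%%time
transfer_values_val = process_images_val()
print("dtype:", transfer_values_val.dtype)
print("shape:", transfer_values_val.shape)
```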
```
# IMPORT ALL LIBRARIES
# IMPORT THE PANDAS LIBRARY
import pandas as pd
# IMPORT THE LIBRARIES FOR POSTGRESQL
from sqlalchemy import create_engine
import psycopg2
# IMPORT THE CHART LIBRARIES
from matplotlib import pyplot as plt
from matplotlib import style
# IMPORT THE BASE-PATH / IO LIBRARIES
import os
import io
# IMPORT THE PDF LIBRARY
from fpdf import FPDF
# IMPORT THE LIBRARY FOR ENCODING CHARTS TO BASE64
import base64
# IMPORT THE EXCEL LIBRARY
import xlsxwriter
# FUNCTION TO UPLOAD DATA FROM CSV TO POSTGRESQL
def uploadToPSQL(columns, table, filePath, engine):
    # READ THE CSV FILE
    df = pd.read_csv(
        os.path.abspath(filePath),
        names=columns,
        keep_default_na=False
    )
    # EMPTY FIELDS ARE FILTERED HERE
    df.fillna('')
    # DROP THE COLUMNS THAT ARE NOT USED
    del df['kategori']
    del df['jenis']
    del df['pengiriman']
    del df['satuan']
    # MOVE THE DATA FROM THE CSV INTO POSTGRESQL
    df.to_sql(
        table,
        engine,
        if_exists='replace'
    )
    # IF THE UPLOADED DATA IS NOT EMPTY, RETURN TRUE; OTHERWISE RETURN FALSE
    if len(df) == 0:
        return False
    else:
        return True
# FUNCTION TO BUILD THE CHARTS; THE DATA IS TAKEN FROM THE DATABASE, ORDERED BY DATE AND LIMITED
# IT ALSO CALLS THE MAKEEXCEL AND MAKEPDF FUNCTIONS
def makeChart(host, username, password, db, port, table, judul, columns, filePath, name, subjudul, limit, negara, basePath):
#TEST KONEKSI DATABASE
try:
#KONEKSI KE DATABASE
connection = psycopg2.connect(user=username,password=password,host=host,port=port,database=db)
cursor = connection.cursor()
#MENGAMBL DATA DARI TABLE YANG DIDEFINISIKAN DIBAWAH, DAN DIORDER DARI TANGGAL TERAKHIR
#BISA DITAMBAHKAN LIMIT SUPAYA DATA YANG DIAMBIL TIDAK TERLALU BANYAK DAN BERAT
postgreSQL_select_Query = "SELECT * FROM "+table+" ORDER BY tanggal ASC LIMIT " + str(limit)
cursor.execute(postgreSQL_select_Query)
mobile_records = cursor.fetchall()
uid = []
lengthx = []
lengthy = []
#MELAKUKAN LOOPING ATAU PERULANGAN DARI DATA YANG SUDAH DIAMBIL
#KEMUDIAN DATA TERSEBUT DITEMPELKAN KE VARIABLE DIATAS INI
for row in mobile_records:
uid.append(row[0])
lengthx.append(row[1])
if row[2] == "":
lengthy.append(float(0))
else:
lengthy.append(float(row[2]))
#FUNGSI UNTUK MEMBUAT CHART
#bar
style.use('ggplot')
fig, ax = plt.subplots()
#MASUKAN DATA ID DARI DATABASE, DAN JUGA DATA TANGGAL
ax.bar(uid, lengthy, align='center')
#UNTUK JUDUL CHARTNYA
ax.set_title(judul)
ax.set_ylabel('Total')
ax.set_xlabel('Tanggal')
ax.set_xticks(uid)
#TOTAL DATA YANG DIAMBIL DARI DATABASE, DIMASUKAN DISINI
ax.set_xticklabels((lengthx))
b = io.BytesIO()
#CHART DISIMPAN KE FORMAT PNG
plt.savefig(b, format='png', bbox_inches="tight")
#CHART YANG SUDAH DIJADIKAN PNG, DISINI DICONVERT KE BASE64
barChart = base64.b64encode(b.getvalue()).decode("utf-8").replace("\n", "")
#CHART DITAMPILKAN
plt.show()
#line
#MASUKAN DATA DARI DATABASE
plt.plot(lengthx, lengthy)
plt.xlabel('Tanggal')
plt.ylabel('Total')
#UNTUK JUDUL CHARTNYA
plt.title(judul)
plt.grid(True)
l = io.BytesIO()
#CHART DISIMPAN KE FORMAT PNG
plt.savefig(l, format='png', bbox_inches="tight")
#CHART YANG SUDAH DIJADIKAN PNG, DISINI DICONVERT KE BASE64
lineChart = base64.b64encode(l.getvalue()).decode("utf-8").replace("\n", "")
#CHART DITAMPILKAN
plt.show()
#pie
#UNTUK JUDUL CHARTNYA
plt.title(judul)
#MASUKAN DATA DARI DATABASE
plt.pie(lengthy, labels=lengthx, autopct='%1.1f%%',
shadow=True, startangle=180)
plt.axis('equal')
p = io.BytesIO()
#CHART DISIMPAN KE FORMAT PNG
plt.savefig(p, format='png', bbox_inches="tight")
#CHART YANG SUDAH DIJADIKAN PNG, DISINI DICONVERT KE BASE64
pieChart = base64.b64encode(p.getvalue()).decode("utf-8").replace("\n", "")
#CHART DITAMPILKAN
plt.show()
#MENGAMBIL DATA DARI CSV YANG DIGUNAKAN SEBAGAI HEADER DARI TABLE UNTUK EXCEL DAN JUGA PDF
header = pd.read_csv(
os.path.abspath(filePath),
names=columns,
keep_default_na=False
)
#MENGHAPUS COLUMN YANG TIDAK DIGUNAKAN
header.fillna('')
del header['tanggal']
del header['total']
#MEMANGGIL FUNGSI EXCEL
makeExcel(mobile_records, header, name, limit, basePath)
#MEMANGGIL FUNGSI PDF
makePDF(mobile_records, header, judul, barChart, lineChart, pieChart, name, subjudul, limit, basePath)
#JIKA GAGAL KONEKSI KE DATABASE, MASUK KESINI UNTUK MENAMPILKAN ERRORNYA
except (Exception, psycopg2.Error) as error :
print (error)
#KONEKSI DITUTUP
finally:
if(connection):
cursor.close()
connection.close()
# THE MAKEEXCEL FUNCTION TURNS THE DATA COMING FROM THE DATABASE INTO AN EXCEL TABLE (FORMAT F2)
# THE LIBRARY USED IS XLSXWRITER
def makeExcel(datarow, dataheader, name, limit, basePath):
#MEMBUAT FILE EXCEL
workbook = xlsxwriter.Workbook(basePath+'jupyter/BLOOMBERG/SektorEksternal/excel/'+name+'.xlsx')
#MENAMBAHKAN WORKSHEET PADA FILE EXCEL TERSEBUT
worksheet = workbook.add_worksheet('sheet1')
#SETINGAN AGAR DIBERIKAN BORDER DAN FONT MENJADI BOLD
row1 = workbook.add_format({'border': 2, 'bold': 1})
row2 = workbook.add_format({'border': 2})
#MENJADIKAN DATA MENJADI ARRAY
data=list(datarow)
isihead=list(dataheader.values)
header = []
body = []
#LOOPING ATAU PERULANGAN, KEMUDIAN DATA DITAMPUNG PADA VARIABLE DIATAS
for rowhead in dataheader:
header.append(str(rowhead))
for rowhead2 in datarow:
header.append(str(rowhead2[1]))
for rowbody in isihead[1]:
body.append(str(rowbody))
for rowbody2 in data:
body.append(str(rowbody2[2]))
#MEMASUKAN DATA DARI VARIABLE DIATAS KE DALAM COLUMN DAN ROW EXCEL
for col_num, data in enumerate(header):
worksheet.write(0, col_num, data, row1)
for col_num, data in enumerate(body):
worksheet.write(1, col_num, data, row2)
#FILE EXCEL DITUTUP
workbook.close()
# FUNCTION TO BUILD THE PDF REPORT FROM THE DATABASE DATA (SAME F2 TABLE FORMAT)
# THE LIBRARY USED IS FPDF
def makePDF(datarow, dataheader, judul, bar, line, pie, name, subjudul, lengthPDF, basePath):
#FUNGSI UNTUK MENGATUR UKURAN KERTAS, DISINI MENGGUNAKAN UKURAN A4 DENGAN POSISI LANDSCAPE
pdf = FPDF('L', 'mm', [210,297])
#MENAMBAHKAN HALAMAN PADA PDF
pdf.add_page()
#PENGATURAN UNTUK JARAK PADDING DAN JUGA UKURAN FONT
pdf.set_font('helvetica', 'B', 20.0)
pdf.set_xy(145.0, 15.0)
#MEMASUKAN JUDUL KE DALAM PDF
pdf.cell(ln=0, h=2.0, align='C', w=10.0, txt=judul, border=0)
#PENGATURAN UNTUK UKURAN FONT DAN JUGA JARAK PADDING
pdf.set_font('arial', '', 14.0)
pdf.set_xy(145.0, 25.0)
#MEMASUKAN SUB JUDUL KE PDF
pdf.cell(ln=0, h=2.0, align='C', w=10.0, txt=subjudul, border=0)
#MEMBUAT GARIS DI BAWAH SUB JUDUL
pdf.line(10.0, 30.0, 287.0, 30.0)
pdf.set_font('times', '', 10.0)
pdf.set_xy(17.0, 37.0)
#PENGATURAN UNTUK UKURAN FONT DAN JUGA JARAK PADDING
pdf.set_font('Times','',10.0)
#MENGAMBIL DATA HEADER PDF YANG SEBELUMNYA SUDAH DIDEFINISIKAN DIATAS
datahead=list(dataheader.values)
pdf.set_font('Times','B',12.0)
pdf.ln(0.5)
th1 = pdf.font_size
#MEMBUAT TABLE PADA PDF, DAN MENAMPILKAN DATA DARI VARIABLE YANG SUDAH DIKIRIM
pdf.cell(100, 2*th1, "Kategori", border=1, align='C')
pdf.cell(177, 2*th1, datahead[0][0], border=1, align='C')
pdf.ln(2*th1)
pdf.cell(100, 2*th1, "Jenis", border=1, align='C')
pdf.cell(177, 2*th1, datahead[0][1], border=1, align='C')
pdf.ln(2*th1)
pdf.cell(100, 2*th1, "Pengiriman", border=1, align='C')
pdf.cell(177, 2*th1, datahead[0][2], border=1, align='C')
pdf.ln(2*th1)
pdf.cell(100, 2*th1, "Satuan", border=1, align='C')
pdf.cell(177, 2*th1, datahead[0][3], border=1, align='C')
pdf.ln(2*th1)
#PENGATURAN PADDING
pdf.set_xy(17.0, 75.0)
#PENGATURAN UNTUK UKURAN FONT DAN JUGA JARAK PADDING
pdf.set_font('Times','B',11.0)
data=list(datarow)
epw = pdf.w - 2*pdf.l_margin
col_width = epw/(lengthPDF+1)
#PENGATURAN UNTUK JARAK PADDING
pdf.ln(0.5)
th = pdf.font_size
#MEMASUKAN DATA HEADER YANG DIKIRIM DARI VARIABLE DIATAS KE DALAM PDF
pdf.cell(50, 2*th, str("Negara"), border=1, align='C')
for row in data:
pdf.cell(40, 2*th, str(row[1]), border=1, align='C')
pdf.ln(2*th)
#MEMASUKAN DATA ISI YANG DIKIRIM DARI VARIABLE DIATAS KE DALAM PDF
pdf.set_font('Times','B',10.0)
pdf.set_font('Arial','',9)
pdf.cell(50, 2*th, negara, border=1, align='C')
for row in data:
pdf.cell(40, 2*th, str(row[2]), border=1, align='C')
pdf.ln(2*th)
#MENGAMBIL DATA CHART, KEMUDIAN CHART TERSEBUT DIJADIKAN PNG DAN DISIMPAN PADA DIRECTORY DIBAWAH INI
#BAR CHART
bardata = base64.b64decode(bar)
barname = basePath+'jupyter/BLOOMBERG/SektorEksternal/img/'+name+'-bar.png'
with open(barname, 'wb') as f:
f.write(bardata)
#LINE CHART
linedata = base64.b64decode(line)
linename = basePath+'jupyter/BLOOMBERG/SektorEksternal/img/'+name+'-line.png'
with open(linename, 'wb') as f:
f.write(linedata)
#PIE CHART
piedata = base64.b64decode(pie)
piename = basePath+'jupyter/BLOOMBERG/SektorEksternal/img/'+name+'-pie.png'
with open(piename, 'wb') as f:
f.write(piedata)
#PENGATURAN UNTUK UKURAN FONT DAN JUGA JARAK PADDING
pdf.set_xy(17.0, 75.0)
col = pdf.w - 2*pdf.l_margin
widthcol = col/3
#MEMANGGIL DATA GAMBAR DARI DIREKTORY DIATAS
pdf.image(barname, link='', type='',x=8, y=100, w=widthcol)
pdf.set_xy(17.0, 75.0)
col = pdf.w - 2*pdf.l_margin
pdf.image(linename, link='', type='',x=103, y=100, w=widthcol)
pdf.set_xy(17.0, 75.0)
col = pdf.w - 2*pdf.l_margin
pdf.image(piename, link='', type='',x=195, y=100, w=widthcol)
pdf.ln(2*th)
#MEMBUAT FILE PDF
pdf.output(basePath+'jupyter/BLOOMBERG/SektorEksternal/pdf/'+name+'.pdf', 'F')
# THIS IS WHERE THE VARIABLES ARE DEFINED BEFORE BEING PASSED TO THE FUNCTIONS
# FIRST CALL THE UPLOADTOPSQL FUNCTION; ONLY IF IT SUCCEEDS IS MAKECHART CALLED,
# AND MAKECHART IN TURN CALLS THE MAKEEXCEL AND MAKEPDF FUNCTIONS
# DEFINE THE COLUMNS ACCORDING TO THE CSV FIELDS
columns = [
    "kategori",
    "jenis",
    "tanggal",
    "total",
    "pengiriman",
    "satuan",
]
# OUTPUT FILE NAME
name = "SektorEksternal2_1"
# VARIABLES FOR THE DATABASE CONNECTION
host = "localhost"
username = "postgres"
password = "1234567890"
port = "5432"
database = "bloomberg_sektoreksternal"
table = name.lower()
# TITLE AND SUBTITLE FOR THE PDF AND EXCEL OUTPUT
judul = "Data Sektor Eksternal"
subjudul = "Badan Perencanaan Pembangunan Nasional"
# ROW LIMIT FOR THE SELECT QUERY IN THE DATABASE
limitdata = int(8)
# COUNTRY NAME TO SHOW IN THE EXCEL AND PDF OUTPUT
negara = "Indonesia"
# BASE PATH DIRECTORY
basePath = 'C:/Users/ASUS/Documents/bappenas/'
# CSV FILE
filePath = basePath + 'data mentah/BLOOMBERG/SektorEksternal/' + name + '.csv'
# DATABASE CONNECTION
engine = create_engine('postgresql://'+username+':'+password+'@'+host+':'+port+'/'+database)
# CALL THE UPLOAD-TO-PSQL FUNCTION
checkUpload = uploadToPSQL(columns, table, filePath, engine)
# CHECK THE RESULT OF THE UPLOAD; IF IT SUCCEEDED, BUILD THE CHARTS, OTHERWISE SHOW AN ERROR MESSAGE
if checkUpload == True:
    makeChart(host, username, password, database, port, table, judul, columns, filePath, name, subjudul, limitdata, negara, basePath)
else:
    print("Error When Upload CSV")
```
```
import numpy as np
import pandas as pd
src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/"
#src_name = ["Results1-5-116.csv", "Results1-54-109-20.csv", "Results2-125-215-20.csv", "Results2-160-210.csv",
# "Results3-1-74-20.csv", "Results3-75-120.csv", "Results4-60-100.csv", "Results4-248-370-20.csv",
# "Results5-1-100-20.csv", "Results6-380-485-20.csv", "Results7-250-310-20.csv", "Results8-1-105-20.csv",
# "Results9-464-555-20.csv", "Results10-665-733-20.csv", "Results11-249-315-20.csv"]
# 20 points = the anterior edges of T3, and A1-A9
src_name = ["Results1-54-109-20.csv", "Results2-125-215-20.csv", "Results3-1-74-20.csv", "Results4-248-370-20.csv",
"Results5-1-100-20.csv", "Results6-380-485-20.csv", "Results7-250-310-20.csv", "Results8-1-105-20.csv",
"Results9-464-555-20.csv", "Results10-665-733-20.csv", "Results11-249-315-20.csv"]
src = []
for elem in src_name:
src.append(pd.read_csv(src_path + elem))
print("Number of files:", len(src))
print("Frames:")
for i in range(len(src)):
print("file{0:2d}: {1:d}".format(i, int(len(src[i])/20)))
print(src[0].iloc[0])
print(src[0].iloc[0,1])
print(src[0].iloc[0,2])
print(src[0].iloc[18,1])
print(src[0].iloc[18,2])
# xy coordinates of all
xy_all = []
label_num = 20
for src_dat in src:
xy = []
if len(src_dat)%label_num != 0:
print("Invalid data.")
else:
for frame in range(len(src_dat)//label_num):
xy0 = []
for segment in range(label_num//2):
xy00 = []
xy00_LR = []
xy00_LR.append([src_dat.iloc[frame*label_num + segment*2, 1],
src_dat.iloc[frame*label_num + segment*2, 2]] )
xy00_LR.append([src_dat.iloc[frame*label_num + segment*2+1, 1],
src_dat.iloc[frame*label_num + segment*2+1, 2]] )
xy0.append(xy00_LR)
xy.append(xy0)
xy = np.array(xy)
xy_all.append(xy)
print("file:", len(xy_all))
print("frames:", len(xy_all[0]))
print("segments:", len(xy_all[0][0]))
print("LR:", len(xy_all[0][0][0]))
print("xy:", len(xy_all[0][0][0][0]))
print("shape of xy_all[0]:", xy_all[0].shape)
import matplotlib.pyplot as plt
file = 0
seg = 0 # 0: A9, 9: T3
LR = 0 # 0: right, 1: left
plt.plot(xy_all[0][:,seg,LR,0], xy_all[0][:,seg,LR,1])
plt.plot(xy_all[0][:,seg,LR+1,0], xy_all[0][:,seg,LR+1,1])
plt.plot(xy_all[0][:,seg+9,LR,0], xy_all[0][:,seg+9,LR,1])
plt.plot(xy_all[0][:,seg+9,LR+1,0], xy_all[0][:,seg+9,LR+1,1])
plt.show()
frame = 0
print("seg0_Right")
print("x:", xy_all[0][frame,seg,LR,0])
print("y:", xy_all[0][frame,seg,LR,1])
print("seg0_Left")
print("x:", xy_all[0][frame,seg,LR+1,0])
print("y:", xy_all[0][frame,seg,LR+1,1])
seg0_mid_x = (xy_all[0][frame,seg,LR,0] + xy_all[0][frame,seg,LR+1,0])/2
seg0_mid_y = (xy_all[0][frame,seg,LR,1] + xy_all[0][frame,seg,LR+1,1])/2
print("seg9_Right")
print("x:", xy_all[0][frame,seg+9,LR,0])
print("y:", xy_all[0][frame,seg+9,LR,1])
print("seg9_Left")
print("x:", xy_all[0][frame,seg+9,LR+1,0])
print("y:", xy_all[0][frame,seg+9,LR+1,1])
seg9_mid_x = (xy_all[0][frame,seg+9,LR,0] + xy_all[0][frame,seg+9,LR+1,0])/2
seg9_mid_y = (xy_all[0][frame,seg+9,LR,1] + xy_all[0][frame,seg+9,LR+1,1])/2
mm_per_pixel = 0.011
v0 = np.array([seg0_mid_x, seg0_mid_y])
v1 = np.array([seg9_mid_x, seg9_mid_y])
print(v0)
print(v1)
d = np.linalg.norm(v0-v1)
print("Distance between seg0_mid and seg9_mid, pixel:", d, "mm:", d*mm_per_pixel)
xy_all_mid = []
for i in range(len(xy_all)):
xy_mid0 = []
for frame in range(len(xy_all[i])):
xy_mid00 = []
for seg in range(len(xy_all[i][0])):
midx = (xy_all[i][frame,seg,0,0] + xy_all[i][frame,seg,1,0])/2
midy = (xy_all[i][frame,seg,0,1] + xy_all[i][frame,seg,1,1])/2
xy_mid00.append([midx, midy])
xy_mid0.append(xy_mid00)
xy_mid0 = np.array(xy_mid0)
xy_all_mid.append(xy_mid0)
print("file:", len(xy_all_mid))
print("xy_all_mid[0].shape (frame, seg, xy):", xy_all_mid[0].shape)
initial_disp_all = []
for file_id in range(len(xy_all_mid)):
initial_disp = []
dat = xy_all_mid[file_id]
for seg in range(10):
v0 = dat[0,0,:]
v1 = dat[0,seg,:]
initial_disp.append(np.linalg.norm(v0-v1)*mm_per_pixel)
initial_disp_all.append(initial_disp)
initial_disp_all = np.array(initial_disp_all)
print(initial_disp_all[:,-1])
i = 0
for elm in range(10):
plt.plot(xy_all_mid[i][:,elm,0], xy_all_mid[i][:,elm,1])
plt.title(src_name[i])
plt.xlabel("x axis (pixel)")
plt.ylabel("y axis (pixel)")
plt.show()
for i in range(len(xy_all_mid)):
for elm in range(10):
plt.plot(xy_all_mid[i][:,elm,0], xy_all_mid[i][:,elm,1])
plt.title(src_name[i])
plt.xlabel("x axis (pixel)")
plt.ylabel("y axis (pixel)")
plt.savefig(src_path + "img/201102_midpoint_plot_" + src_name[i] + ".png")
plt.close()
print("file:", len(xy_all_mid))
print("xy_all_mid[0].shape (frame, seg, xy):", xy_all_mid[0].shape)
# constants
mm_per_pixel = 0.011
sec_per_frame = 0.03333
initial_disp_all = []
disp_rel_all = []
disp_abs_all = []
seg_len_all = []
body_len_all = []
for file_id in range(len(xy_all_mid)):
# initial position
initial_disp = []
dat = xy_all_mid[file_id]
for seg in range(10):
v0 = dat[0,0,:]
v1 = dat[0,seg,:]
initial_disp.append(np.linalg.norm(v0-v1)*mm_per_pixel)
initial_disp_all.append(initial_disp)
# displacement_rel
disp_rel = []
dat = xy_all_mid[file_id]
for seg in range(10):
disp_seg = []
for frame in range(len(dat)):
t = frame * sec_per_frame
v0 = dat[0,seg,:]
v1 = dat[frame,seg,:]
disp_seg.append([t, np.linalg.norm(v0-v1)*mm_per_pixel])
disp_rel.append(disp_seg)
disp_rel = np.array(disp_rel)
disp_rel_all.append(disp_rel)
# displacement_abs
disp_abs = []
for seg in range(10):
disp_abs0 = []
for frame in range(len(disp_rel[0])):
t = disp_rel[seg,frame,0]
disp_abs00 = disp_rel[seg,frame,1] + initial_disp[seg]
disp_abs0.append([t, disp_abs00])
disp_abs.append(disp_abs0)
disp_abs = np.array(disp_abs)
disp_abs_all.append(disp_abs)
# segment length
seg_len = []
dat = xy_all_mid[file_id]
for seg in range(9):
seg_len0 = []
for frame in range(len(dat)):
t = frame * sec_per_frame
v0 = dat[frame,seg,:]
v1 = dat[frame,seg+1,:]
seg_len0.append([t, np.linalg.norm(v0-v1)*mm_per_pixel])
seg_len.append(seg_len0)
seg_len = np.array(seg_len)
seg_len_all.append(seg_len)
# body length
body_len = []
dat = xy_all_mid[file_id]
for frame in range(len(dat)):
t = frame * sec_per_frame
v0 = dat[frame,0,:] # posterior end
v1 = dat[frame,9,:] # anterior end
body_len.append([t, np.linalg.norm(v0-v1)*mm_per_pixel])
body_len_all.append(np.array(body_len))
print("len(initial_disp_all):", len(initial_disp_all))
print("len(initial_disp_all[0]) (seg number):", len(initial_disp_all[0]))
print("len(disp_rel_all):", len(disp_rel_all))
print("disp_rel_all[0].shape:", disp_rel_all[0].shape)
print("len(disp_abs_all):", len(disp_abs_all))
print("disp_abs_all[0].shape:", disp_abs_all[0].shape)
print("len(seg_len_all):", len(seg_len_all))
print("seg_len_all[0].shape:", seg_len_all[0].shape)
print("len(body_len_all):", len(body_len_all))
print("body_len_all[0].shape:", body_len_all[0].shape)
print(initial_disp_all)
for file_id in range(11):
for seg in range(10):
plt.plot(disp_abs_all[file_id][seg,:,0], disp_abs_all[file_id][seg,:,1])
plt.title("Displacement of file {0}".format(src_name[file_id]))
plt.xlabel("Time (sec)")
plt.ylabel("Displacement (mm)")
plt.xlim([0,4.2])
plt.ylim([0,6.2])
plt.xticks([0,1,2,3,4])
plt.savefig(src_path + "img/201102_displacement_plot_" + src_name[file_id] + ".png")
plt.close()
file_id = 0
for seg in range(10):
plt.plot(disp_abs_all[file_id][seg,:,0], disp_abs_all[file_id][seg,:,1])
plt.title("Displacement of file {0}".format(src_name[file_id]))
plt.xlabel("Time (sec)")
plt.ylabel("Displacement (mm)")
plt.xlim([0,4.2])
plt.ylim([0,6.2])
plt.xticks([0,1,2,3,4])
plt.show()
for file_id in range(11):
plt.figure(figsize = (10,6))
for seg in range(9):
plt.plot(seg_len_all[file_id][seg,:,0], seg_len_all[file_id][seg,:,1])
plt.title("Segment length of file {0}".format(src_name[file_id]))
plt.xlabel("Time (sec)")
plt.ylabel("Segment length (mm)")
plt.xlim([0,4.2])
plt.ylim([0,0.6])
plt.xticks([0,1,2,3,4])
plt.savefig(src_path + "img/201102_segment_length_plot_" + src_name[file_id] + ".png")
plt.close()
file_id = 0
plt.figure(figsize = (10,6))
for seg in range(9):
plt.plot(seg_len_all[file_id][seg,:,0], seg_len_all[file_id][seg,:,1])
plt.title("Segment length of file {0}".format(src_name[file_id]))
plt.xlabel("Time (sec)")
plt.ylabel("Segment length (mm)")
plt.xlim([0,4.2])
plt.ylim([0,0.6])
plt.xticks([0,1,2,3,4])
plt.show()
import matplotlib.pyplot as plt
src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/"
for file_id in range(len(body_len_all)):
plt.figure(figsize = (10,6))
plt.plot(body_len_all[file_id][:,0], body_len_all[file_id][:,1])
plt.title("Body length of file {0}".format(src_name[file_id]))
plt.xlabel("Time (sec)")
plt.ylabel("Segment length (mm)")
plt.xlim([0,4.2])
plt.ylim([2,4])
plt.xticks([0,1,2,3,4])
plt.savefig(src_path + "img/201104_body_length_plot_" + src_name[file_id] + ".png")
plt.close()
import matplotlib.pyplot as plt
file_id = 0
plt.figure(figsize = (10,6))
plt.plot(body_len_all[file_id][:,0], body_len_all[file_id][:,1])
plt.title("Body length of file {0}".format(src_name[file_id]))
plt.xlabel("Time (sec)")
plt.ylabel("Segment length (mm)")
plt.xlim([0,4.2])
plt.ylim([2,4])
plt.xticks([0,1,2,3,4])
plt.show()
```
# Parameter extraction
```
# Stride length and stride duration
print("len(disp_abs_all):", len(disp_abs_all))
print("disp_abs_all[0].shape:", disp_abs_all[0].shape)
import copy
from scipy import signal
disp_abs_all_savgol = copy.deepcopy(disp_abs_all)
file_id = 0
seg = 0
disp_abs_all_savgol[file_id][seg][:,1] = signal.savgol_filter(disp_abs_all[file_id][seg][:,1], 11,2)
plt.figure()
plt.plot(disp_abs_all[file_id][seg,:,0], disp_abs_all[file_id][seg,:,1], color='g')
plt.plot(disp_abs_all_savgol[file_id][seg,:,0], disp_abs_all_savgol[file_id][seg,:,1], color='m')
plt.show()
import copy
from scipy import signal
disp_abs_all_savgol = copy.deepcopy(disp_abs_all)
for file_id in range(len(disp_abs_all)):
savgol0 = []
for seg in range(len(disp_abs_all[0])):
disp_abs_all_savgol[file_id][seg][:,1] = signal.savgol_filter(disp_abs_all[file_id][seg][:,1], 11,2)
plt.figure()
plt.plot(disp_abs_all[file_id][seg,:,0], disp_abs_all[file_id][seg,:,1], color='g')
plt.plot(disp_abs_all_savgol[file_id][seg,:,0], disp_abs_all_savgol[file_id][seg,:,1], color='m')
plt.show()
import peakutils
from scipy.signal import argrelmax
xmin = 0
xmax = 6
bins = 120
width = (xmax-xmin)/bins
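# Strategy: the smoothed displacement trace forms a plateau during each stance phase, so a
# histogram of its values shows one peak per plateau. Each histogram peak is mapped back onto
# the raw trace and summarised by the median time and displacement of the samples falling in
# that displacement bin (of width `width`).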
stride_all = []
for file_id in range(len(disp_abs_all)):
stride_seg = []
for seg in range(10):
stride_seg0 = []
hist_dat = np.histogram(disp_abs_all_savgol[file_id][seg,:,1], bins=120,range=(0,6))
#peaks = hist_dat[1][argrelmax(hist_dat[0], order=4)]
peaks_id = peakutils.indexes(hist_dat[0], thres=0.2, min_dist=5)
peaks_id = np.sort(peaks_id)
peaks = hist_dat[1][peaks_id]
for peak_id in range(len(peaks)):
dat0 = disp_abs_all[file_id][seg]
disp_peak = [dat0[i,1] for i in range(len(dat0))
if dat0[i,1] > peaks[peak_id] and dat0[i,1] < peaks[peak_id] + width]
time_peak = [dat0[i,0] for i in range(len(dat0))
if dat0[i,1] > peaks[peak_id] and dat0[i,1] < peaks[peak_id] + width]
disp_peak_med = np.median(disp_peak)
time_peak_med = np.median(time_peak)
stride_seg0.append([time_peak_med, disp_peak_med])
stride_seg.append(np.array(stride_seg0))
stride_all.append(stride_seg)
plt.figure()
for seg in range(10):
plt.plot(disp_abs_all[file_id][seg,:,0], disp_abs_all[file_id][seg,:,1])
plt.plot(stride_all[file_id][seg][:,0], stride_all[file_id][seg][:,1], 'o')
plt.title("Displacement of file {0}".format(src_name[file_id]))
plt.xlabel("Time (sec)")
plt.ylabel("Displacement (mm)")
plt.xlim([0,4.2])
plt.ylim([0,6.2])
plt.xticks([0,1,2,3,4])
plt.savefig(src_path + "img/201102_stride_length_detection_" + src_name[file_id] + ".png")
plt.close()
import pickle
with open(src_path + "pickle/initial_disp_all_201102.pickle", "wb") as f1:
pickle.dump(initial_disp_all, f1)
with open(src_path + "pickle/disp_rel_all_201102.pickle", "wb") as f2:
pickle.dump(disp_rel_all, f2)
with open(src_path + "pickle/disp_abs_all_201102.pickle", "wb") as f3:
pickle.dump(disp_abs_all, f3)
with open(src_path + "pickle/seg_len_all_201102.pickle", "wb") as f4:
pickle.dump(seg_len_all, f4)
with open(src_path + "pickle/stride_all_201102.pickle", "wb") as f5:
pickle.dump(stride_all, f5)
with open(src_path + "pickle/body_len_all_201104.pickle", "wb") as f6:
pickle.dump(body_len_all, f6)
print("len(initial_disp_all):", len(initial_disp_all))
print("len(initial_disp_all[0]) (seg number):", len(initial_disp_all[0]))
print("len(disp_rel_all):", len(disp_rel_all))
print("disp_rel_all[0].shape:", disp_rel_all[0].shape)
print("len(disp_abs_all):", len(disp_abs_all))
print("disp_abs_all[0].shape:", disp_abs_all[0].shape)
print("len(seg_len_all):", len(seg_len_all))
print("seg_len_all[0].shape:", seg_len_all[0].shape)
print("len(stride_all)(movie number):", len(stride_all))
print("len(stride_all[0])(seg number):", len(stride_all[0]))
print("len(stride_all[0][0])(peak number):", len(stride_all[0][0]))
print("len(stride_all[0][0][0])(time, displacement):", len(stride_all[0][0][0]))
import pickle
src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/"
with open(src_path + "pickle/stride_all_201102.pickle", "rb") as f5:
stride_all = pickle.load(f5)
import numpy as np
stride_length_all = []
for mov_id in range(len(stride_all)):
dst1 = []
for seg_id in range(10):
dat_stride = stride_all[mov_id][seg_id]
dst0 = []
for i in range(len(dat_stride)-1):
dst0.append(dat_stride[i+1,1]-dat_stride[i,1])
dst1.append(np.median(dst0))
stride_length_all.append(dst1)
print(stride_length_all)
import pickle
src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/"
with open(src_path + "pickle/stride_length_all_201104.pickle", "wb") as f7:
pickle.dump(stride_length_all, f7)
import numpy as np
import pickle
src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/"
with open(src_path + "pickle/stride_length_all_201104.pickle", "rb") as f6:
stride_length_all = np.array(pickle.load(f6))
print("stride_length_all.shape", stride_length_all.shape)
stride_len_med = []
for i in range(len(stride_length_all)):
stride_len_med.append(np.median(stride_length_all[i]))
print("median stride length of movie{0}: {1:3f}".format(i, np.median(stride_length_all[i])))
with open(src_path + "pickle/body_len_all_201104.pickle", "rb") as f6:
body_len_all = pickle.load(f6)
body_len_max = []
for file_id in range(len(body_len_all)):
body_len_max.append(body_len_all[file_id][:,1].max())
print("body_len_max:", body_len_max)
print("stride_length_med:", stride_len_med)
import matplotlib.pyplot as plt
from scipy import stats
plt.plot(body_len_max, stride_len_med, 'go')
plt.xlim([2,5])
plt.xlabel("Body length (mm)")
plt.ylim([0.5,1.0])
plt.ylabel("Stride length (mm)")
plt.show()
print("Body length average (mm):{0:4.2f}±{1:4.2f}".format(np.mean(body_len_max), stats.sem(body_len_max)))
print("Stride length average (mm):{0:4.2f}±{1:4.2f}".format(np.mean(stride_len_med), stats.sem(stride_len_med)))
print("len(seg_len_all):", len(seg_len_all))
print("seg_len_all[0].shape: (seg, frame, time/length)", seg_len_all[0].shape)
import copy
import matplotlib.pyplot as plt
import peakutils
from scipy import signal
seg_len_savgol = []
seg_len_peaks = []
for file_id in range(len(seg_len_all)):
seg_len_savgol0 = []
seg_len_peaks0 = []
for seg in range(len(seg_len_all[file_id])):
dat = seg_len_all[file_id][seg]
dat_savgol = copy.deepcopy(dat)
dat_savgol[:,1] = signal.savgol_filter(dat[:,1],11,2)
peaks_id_p = peakutils.indexes(dat_savgol[:,1], thres=0.2, min_dist=20)
peaks_id_n = peakutils.indexes(-dat_savgol[:,1], thres=0.2, min_dist=20)
seg_len_savgol0.append(dat_savgol)
seg_len_peaks0.append([peaks_id_p, peaks_id_n])
seg_len_savgol.append(seg_len_savgol0)
seg_len_peaks.append(seg_len_peaks0)
file_id = 0
seg = 0
dat_src = seg_len_all[file_id][seg]
dat_sav = seg_len_savgol[file_id][seg]
dat_peaks = seg_len_peaks[file_id][seg]
plt.plot(dat_src[:,0], dat_src[:,1])
plt.plot(dat_sav[:,0], dat_sav[:,1])
plt.plot(dat_sav[dat_peaks[0],0], dat_sav[dat_peaks[0],1], 'go')
plt.plot(dat_sav[dat_peaks[1],0], dat_sav[dat_peaks[1],1], 'mo')
plt.savefig(src_path + "img/201104_segment_length_{0}_seg{1}.png".format(src_name[file_id], seg))
plt.show()
seg_len_range_all = []
for file_id in range(len(seg_len_all)):
dst = []
for seg in range(len(seg_len_all[file_id])):
dat_src = seg_len_all[file_id][seg]
dat_sav = seg_len_savgol[file_id][seg]
dat_peaks = seg_len_peaks[file_id][seg]
dst_p = [dat_sav[dat_peaks[0],0], dat_sav[dat_peaks[0],1]]
dst_n = [dat_sav[dat_peaks[1],0], dat_sav[dat_peaks[1],1]]
dst.append([dst_p, dst_n])
plt.plot(dat_src[:,0], dat_src[:,1])
plt.plot(dat_sav[:,0], dat_sav[:,1])
plt.plot(dat_sav[dat_peaks[0],0], dat_sav[dat_peaks[0],1], 'go')
plt.plot(dat_sav[dat_peaks[1],0], dat_sav[dat_peaks[1],1], 'mo')
plt.savefig(src_path + "img/201104_segment_length_{0}_seg{1}.png".format(src_name[file_id], seg))
plt.close()
seg_len_range_all.append(dst)
import pickle
with open(src_path + "pickle/seg_len_range_all_201104.pickle", "wb") as f:
pickle.dump(seg_len_range_all, f)
import pickle
with open(src_path + "pickle/seg_len_range_all_201104.pickle", "rb") as f:
seg_len_range_all = pickle.load(f)
print("len(seg_len_range_all) (file_id):", len(seg_len_range_all))
print("len(seg_len_range_all[0])(seg):", len(seg_len_range_all[0]))
print("len(seg_len_range_all[0][0])(peak/valley)", len(seg_len_range_all[0][0]))
print("len(seg_len_range_all[0][0][0])(time/length)", len(seg_len_range_all[0][0][0]))
file_id = 0
seg_id = 0
peak = 0
valley = 1
print("seg_len_range_all[file_id][seg][peak]:(time/length)", seg_len_range_all[file_id][seg_id][peak])
print("seg_len_range_all[file_id][seg][valley]:(time/length)", seg_len_range_all[file_id][seg_id][valley])
import numpy as np
import peakutils
# signal:
seg0 = 0
seg1 = 4
sig0 = seg_len_savgol[0][seg0][:,1]
sig1 = seg_len_savgol[0][seg1][:,1]
# centralization
sig0 = sig0 - sig0.mean()
sig1 = sig1 - sig1.mean()
corr = np.correlate(sig1, sig0, "full")
peaks_id = peakutils.indexes(corr[len(corr)-len(sig0):], thres=0.2, min_dist=20)
estimated_delay = peaks_id[0]
print("estimated delay is {}".format(estimated_delay))
print(peaks_id)
fig, ax = plt.subplots(2,1, figsize = (10,8))
ax[0].plot(sig0, label="sig0")
ax[0].plot(sig1, label="sig1")
ax[0].legend()
ax[1].set_ylabel("corr")
ax[1].plot(np.arange(len(corr))-len(sig0)+1, corr)
ax[1].plot(peaks_id, corr[peaks_id+len(sig0)-1], 'ro')
ax[1].set_xlim([0, len(sig1)])
plt.show()
print(len(corr))
import numpy as np
import peakutils
fig_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/img/correlation/"
# segmental delay
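# The intersegmental delay is estimated from the cross-correlation between the (mean-centred)
# segment-length traces of adjacent segments: the lag (in frames) of the first cross-correlation
# peak is taken as the delay; t_margin allows slightly negative lags to be detected.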
seg_len_delay_all = []
for file_id in range(len(seg_len_savgol)):
dst0 = []
for seg_id in range(len(seg_len_savgol[file_id])-1):
sig0 = seg_len_savgol[file_id][seg_id][:,1]
sig1 = seg_len_savgol[file_id][seg_id+1][:,1]
# centralization
sig0 = sig0 - sig0.mean()
sig1 = sig1 - sig1.mean()
corr = np.correlate(sig1, sig0, "full")
t_margin = 2
peaks_id = peakutils.indexes(corr[len(corr)-len(sig0)-t_margin:], thres=0.2, min_dist=20)
peaks_id = peaks_id - t_margin
estimated_delay = peaks_id[0]
dst0.append(estimated_delay)
fig, ax = plt.subplots(2,1, figsize = (10,8))
ax[0].plot(sig0, label="sig0")
ax[0].plot(sig1, label="sig1")
ax[0].legend()
ax[1].set_ylabel("corr")
ax[1].plot(np.arange(len(corr))-len(sig0)+1, corr)
ax[1].plot(peaks_id, corr[peaks_id+len(sig0)-1], 'ro')
ax[1].set_xlim([0, len(sig1)])
plt.savefig(fig_path + "intersegmental_corr_{0}_seg{1}.png".format(src_name[file_id], seg_id))
plt.close()
seg_len_delay_all.append(dst0)
# stride duration
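# The stride duration is estimated from the autocorrelation of each (mean-centred) segment-length
# trace: the lag (in frames) of the first autocorrelation peak after lag 0 gives the period of
# the segmental contraction cycle.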
stride_duration_all = []
for file_id in range(len(seg_len_savgol)):
dst0 = []
for seg_id in range(len(seg_len_savgol[file_id])):
sig0 = seg_len_savgol[file_id][seg_id][:,1]
sig1 = seg_len_savgol[file_id][seg_id][:,1]
# centralization
sig0 = sig0 - sig0.mean()
sig1 = sig1 - sig1.mean()
corr = np.correlate(sig1, sig0, "full")
peaks_id = peakutils.indexes(corr[len(corr)-len(sig0):], thres=0.2, min_dist=20)
estimated_delay = peaks_id[0]
dst0.append(estimated_delay)
fig, ax = plt.subplots(2,1, figsize = (10,8))
ax[0].plot(sig0, label="sig0")
ax[0].plot(sig1, label="sig1")
ax[0].legend()
ax[1].set_ylabel("corr")
ax[1].plot(np.arange(len(corr))-len(sig0)+1, corr)
ax[1].plot(peaks_id, corr[peaks_id+len(sig0)-1], 'ro')
ax[1].set_xlim([0, len(sig1)])
plt.savefig(fig_path + "auto_corr_{0}_seg{1}.png".format(src_name[file_id], seg_id))
plt.close()
stride_duration_all.append(dst0)
import pickle
src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/"
with open(src_path + "pickle/seg_len_delay_all_201104.pickle", "wb") as f8:
pickle.dump(seg_len_delay_all, f8)
with open(src_path + "pickle/stride_duration_all_201104.pickle", "wb") as f9:
pickle.dump(stride_duration_all, f9)
import pickle
with open(src_path + "pickle/seg_len_delay_all_201104.pickle", "rb") as f8:
seg_len_delay_all = pickle.load(f8)
with open(src_path + "pickle/stride_duration_all_201104.pickle", "rb") as f9:
stride_duration_all = pickle.load(f9)
print("From cross-correlation")
print("len(seg_len_delay_all):", len(seg_len_delay_all))
print("len(seg_len_delay_all[0])(seg):", len(seg_len_delay_all[0]))
print("seg_len_delay_all[0]:", seg_len_delay_all[0])
print("From auto-correlation")
print("len(stride_duration_all):", len(stride_duration_all))
print("len(stride_duration_all[0])(seg):", len(stride_duration_all[0]))
print("stride_duration_all[0]:", stride_duration_all[0])
# boundary stride duration 201119
import pickle
src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/"
with open(src_path + "pickle/disp_abs_all_201102.pickle", "rb") as f:
disp_abs_all = pickle.load(f)
import copy
from scipy import signal
disp_abs_all_savgol = copy.deepcopy(disp_abs_all)
for file_id in range(len(disp_abs_all)):
savgol0 = []
for seg in range(len(disp_abs_all[0])):
disp_abs_all_savgol[file_id][seg][:,1] = signal.savgol_filter(disp_abs_all[file_id][seg][:,1], 11,2)
import matplotlib.pyplot as plt
file_id = 0
seg = 0
plt.figure()
plt.plot(disp_abs_all[file_id][seg,:,0], disp_abs_all[file_id][seg,:,1], color='g')
plt.plot(disp_abs_all_savgol[file_id][seg,:,0], disp_abs_all_savgol[file_id][seg,:,1], color='m')
plt.show()
import numpy as np
diff = np.diff(disp_abs_all_savgol[file_id][seg,:,1])
plt.plot(diff)
plt.show()
import numpy as np
import peakutils
# signal:
sig0 = diff
sig1 = diff
# centralization
sig0 = sig0 - sig0.mean()
sig1 = sig1 - sig1.mean()
corr = np.correlate(sig1, sig0, "full")
peaks_id = peakutils.indexes(corr[len(corr)-len(sig0):], thres=0.2, min_dist=20)
estimated_delay = peaks_id[0]
print("estimated delay is {}".format(estimated_delay))
print(peaks_id)
fig, ax = plt.subplots(2,1, figsize = (10,8))
ax[0].plot(sig0, label="sig0")
ax[0].plot(sig1, label="sig1")
ax[0].legend()
ax[1].set_ylabel("corr")
ax[1].plot(np.arange(len(corr))-len(sig0)+1, corr)
ax[1].plot(peaks_id, corr[peaks_id+len(sig0)-1], 'ro')
ax[1].set_xlim([0, len(sig1)])
plt.show()
print(len(corr))
import copy
from scipy import signal
disp_abs_all_savgol = copy.deepcopy(disp_abs_all)
for file_id in range(len(disp_abs_all)):
savgol0 = []
for seg in range(len(disp_abs_all[0])):
disp_abs_all_savgol[file_id][seg][:,1] = signal.savgol_filter(disp_abs_all[file_id][seg][:,1], 11,2)
import numpy as np
diff = np.diff(disp_abs_all_savgol[file_id][seg,:,1])
plt.plot(diff)
plt.show()
import numpy as np
import peakutils
# source: disp_abs_all_savgol
fig_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/img/correlation/"
src_name = ["Results1-54-109-20.csv", "Results2-125-215-20.csv", "Results3-1-74-20.csv", "Results4-248-370-20.csv",
"Results5-1-100-20.csv", "Results6-380-485-20.csv", "Results7-250-310-20.csv", "Results8-1-105-20.csv",
"Results9-464-555-20.csv", "Results10-665-733-20.csv", "Results11-249-315-20.csv"]
# boundary motion delay
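# Same cross-correlation procedure as for the segment lengths above, but applied to the
# frame-to-frame velocity (np.diff of the smoothed displacement) of each segment boundary.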
boundary_motion_delay_all = []
for file_id in range(len(disp_abs_all_savgol)):
dst0 = []
for seg_id in range(len(disp_abs_all_savgol[file_id])-1):
sig0 = np.diff(disp_abs_all_savgol[file_id][seg_id][:,1])
sig1 = np.diff(disp_abs_all_savgol[file_id][seg_id+1][:,1])
# centralization
sig0 = sig0 - sig0.mean()
sig1 = sig1 - sig1.mean()
corr = np.correlate(sig1, sig0, "full")
t_margin = 2
peaks_id = peakutils.indexes(corr[len(corr)-len(sig0)-t_margin:], thres=0.2, min_dist=20)
peaks_id = peaks_id - t_margin
estimated_delay = peaks_id[0]
dst0.append(estimated_delay)
fig, ax = plt.subplots(2,1, figsize = (10,8))
ax[0].plot(sig0, label="sig0")
ax[0].plot(sig1, label="sig1")
ax[0].legend()
ax[1].set_ylabel("corr")
ax[1].plot(np.arange(len(corr))-len(sig0)+1, corr)
ax[1].plot(peaks_id, corr[peaks_id+len(sig0)-1], 'ro')
ax[1].set_xlim([0, len(sig1)])
plt.savefig(fig_path + "201119_boundary_motion_interseg_corr_{0}_seg{1}.png".format(src_name[file_id], seg_id))
plt.close()
boundary_motion_delay_all.append(dst0)
# boundary stride duration
boundary_stride_duration_all = []
for file_id in range(len(disp_abs_all_savgol)):
dst0 = []
for seg_id in range(len(disp_abs_all_savgol[file_id])):
sig0 = np.diff(disp_abs_all_savgol[file_id][seg_id][:,1])
sig1 = np.diff(disp_abs_all_savgol[file_id][seg_id][:,1])
# centralization
sig0 = sig0 - sig0.mean()
sig1 = sig1 - sig1.mean()
corr = np.correlate(sig1, sig0, "full")
peaks_id = peakutils.indexes(corr[len(corr)-len(sig0):], thres=0.2, min_dist=20)
estimated_delay = peaks_id[0]
dst0.append(estimated_delay)
fig, ax = plt.subplots(2,1, figsize = (10,8))
ax[0].plot(sig0, label="sig0")
ax[0].plot(sig1, label="sig1")
ax[0].legend()
ax[1].set_ylabel("corr")
ax[1].plot(np.arange(len(corr))-len(sig0)+1, corr)
ax[1].plot(peaks_id, corr[peaks_id+len(sig0)-1], 'ro')
ax[1].set_xlim([0, len(sig1)])
plt.savefig(fig_path + "201119_boundary_auto_corr_{0}_seg{1}.png".format(src_name[file_id], seg_id))
plt.close()
boundary_stride_duration_all.append(dst0)
import pickle
src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/"
with open(src_path + "pickle/boundary_motion_delay_all_201119.pickle", "wb") as f1:
pickle.dump(boundary_motion_delay_all, f1)
with open(src_path + "pickle/boundary_stride_duration_all_201119.pickle", "wb") as f2:
pickle.dump(boundary_stride_duration_all, f2)
boundary_stride_duration_all = np.array(boundary_stride_duration_all)
print("boundary_stride_duration_all", boundary_stride_duration_all.shape)
print(boundary_stride_duration_all)
boundary_motion_delay_all = np.array(boundary_motion_delay_all)
print("boundary_motion_delay_all", boundary_motion_delay_all.shape)
print(boundary_motion_delay_all)
# Calculate speed
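# The crawling speed of each segment boundary is obtained as the slope of a linear regression
# fitted to its smoothed displacement-vs-time trace.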
import copy
from scipy import signal
import pickle
src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/"
with open(src_path + "pickle/disp_abs_all_201102.pickle", "rb") as f:
disp_abs_all = pickle.load(f)
disp_abs_all_savgol = copy.deepcopy(disp_abs_all)
for file_id in range(len(disp_abs_all)):
savgol0 = []
for seg in range(len(disp_abs_all[0])):
disp_abs_all_savgol[file_id][seg][:,1] = signal.savgol_filter(disp_abs_all[file_id][seg][:,1], 11,2)
import matplotlib.pyplot as plt
file_id = 0
seg = 0
plt.figure()
plt.plot(disp_abs_all[file_id][seg,:,0], disp_abs_all[file_id][seg,:,1], color='g')
plt.plot(disp_abs_all_savgol[file_id][seg,:,0], disp_abs_all_savgol[file_id][seg,:,1], color='m')
plt.show()
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
file_id = 0
seg = 0
X = disp_abs_all_savgol[file_id][seg,:,0].reshape(-1,1)
Y = disp_abs_all_savgol[file_id][seg,:,1].reshape(-1,1)
lr.fit(X, Y)
plt.scatter(X, Y, color='green')
plt.plot(X, lr.predict(X), color='magenta')
plt.show()
print("coefficient:", lr.coef_[0])
print(X)
print(Y)
print(Y.reshape(-1,1))
# Calculate all speed
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression
fig_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/img/"
src_name = ["Results1-54-109-20.csv", "Results2-125-215-20.csv", "Results3-1-74-20.csv", "Results4-248-370-20.csv",
"Results5-1-100-20.csv", "Results6-380-485-20.csv", "Results7-250-310-20.csv", "Results8-1-105-20.csv",
"Results9-464-555-20.csv", "Results10-665-733-20.csv", "Results11-249-315-20.csv"]
speed_all = []
for file_id in range(len(disp_abs_all_savgol)):
dst = []
for seg_id in range(len(disp_abs_all_savgol[file_id])):
lr = LinearRegression()
X = disp_abs_all_savgol[file_id][seg_id,:,0].reshape(-1,1)
Y = disp_abs_all_savgol[file_id][seg_id,:,1].reshape(-1,1)
lr.fit(X, Y)
plt.plot(X, Y, color='green')
plt.plot(X, lr.predict(X), color='magenta')
plt.savefig(fig_path + "201120_speed_{0}_seg{1}.png".format(src_name[file_id], seg_id))
plt.close()
dst.append(lr.coef_[0][0])
speed_all.append(dst)
speed_all = np.array(speed_all)
print("speed_all.shape:", speed_all.shape)
print(speed_all)
import pickle
src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/"
#with open(src_path + "pickle/speed_all_201120.pickle", "wb") as f:
# pickle.dump(speed_all, f)
import pickle
src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/"
with open(src_path + "pickle/speed_all_201120.pickle", "rb") as f:
speed_all = pickle.load(f)
speed_larvae = speed_all.mean(axis=1)
print("speed_larvae.shape:", speed_larvae.shape)
print(speed_larvae)
# Scatter plot of speed vs stride duration/length
# data of speed: speed_all
# data of stride duration: boundary_stride_duration_all
# data of stride length: stride_length_all
import numpy as np
import pickle
src_path = "C:/Users/h1006/Documents/Research/Sun/Data/1_Kinematics/"
sec_per_frame = 0.03333
with open(src_path + "pickle/speed_all_201120.pickle", "rb") as f1:
speed_all = pickle.load(f1)
with open(src_path + "pickle/boundary_stride_duration_all_201119.pickle", "rb") as f2:
stride_duration_all = pickle.load(f2)
stride_duration_all = np.array(stride_duration_all) * sec_per_frame
with open(src_path + "pickle/stride_length_all_201104.pickle", "rb") as f3:
stride_length_all = pickle.load(f3)
stride_length_all = np.array(stride_length_all)
print("speed_all:", speed_all.shape)
print("stride_duration_all:", stride_duration_all.shape)
print("stride_length_all:", stride_length_all.shape)
import matplotlib.pyplot as plt
dst_path = "C:/Users/h1006/Documents/Research/Sun/Images/"
speed = speed_all.reshape(11*10)
duration = stride_duration_all.reshape(11*10)
length = stride_length_all.reshape(11*10)
plt.figure(figsize = (8,9))
ax = plt.gca()
plt.plot(duration, speed, 'o', color = "k", markersize = 10)
plt.xlim([0.7, 1.45])
plt.ylim([0.45, 1.0])
plt.xlabel("Stride duration (sec)", fontsize = 28)
plt.ylabel("Speed (mm/sec)", fontsize = 28)
plt.xticks([0.7,0.8,0.9,1.0,1.1,1.2,1.3,1.4],fontsize = 20)
plt.yticks([0.5,0.6,0.7,0.8,0.9,1.0], fontsize = 20)
ax.spines["top"].set_color("none")
ax.spines["right"].set_color("none")
plt.savefig(dst_path + "Speed_vs_stride_duration_201120.png", bbox_inches = "tight", facecolor="white")
plt.show()
plt.close()
plt.figure(figsize = (8,9))
ax = plt.gca()
plt.plot(length, speed, 'o', color = "k", markersize = 10)
plt.xlim([0.5, 0.9])
plt.ylim([0.45, 1.0])
plt.xlabel("Stride length (mm)", fontsize = 28)
plt.ylabel("Speed (mm/sec)", fontsize = 28)
plt.xticks([0.5,0.6,0.7,0.8,0.9], fontsize = 20)
plt.yticks([0.5,0.6,0.7,0.8,0.9,1.0], fontsize = 20)
ax.spines["top"].set_color("none")
ax.spines["right"].set_color("none")
plt.savefig(dst_path + "Speed_vs_stride_length_201120.png", bbox_inches = "tight", facecolor="white")
plt.show()
plt.close()
import pandas as pd
speed_series = pd.Series(speed)
duration_series = pd.Series(duration)
length_series = pd.Series(length)
Corr_duration = speed_series.corr(duration_series)
Corr_length = speed_series.corr(length_series)
print("Correlation speed vs duration:", Corr_duration)
print("Correlation speed vs length:", Corr_length)
# Calculate maximum and minimum segment length
# seg_len_all: file_id, seg_id, frame [time, length]; 11 x 9 x frames x 2
# seg_len_range_all: file_id, seg_id, peak/valley, point number: 11 x 9 x 2 x point number
import pickle
with open(src_path + "pickle/seg_len_range_all_201104.pickle", "rb") as f1:
seg_len_range_all = pickle.load(f1)
with open(src_path + "pickle/seg_len_all_201102.pickle", "rb") as f2:
seg_len_all = pickle.load(f2)
file_id = 0
seg_id = 4
dat = seg_len_range_all[file_id][seg_id]
seg_max = dat[0][1].max()
seg_min = dat[1][1].min()
print("seg_len_range_all[file_id][seg_Id]:", dat)
print("dat[0][1].max():", dat[0][1].max())
print("dat[1][1].min():", dat[1][1].min())
import numpy as np
max_len_all = []
min_len_all = []
for file_id in range(len(seg_len_range_all)):
dst_max = []
dst_min = []
for seg_id in range(len(seg_len_range_all[file_id])):
dat = seg_len_range_all[file_id][seg_id]
dst_max.append(dat[0][1].max())
dst_min.append(dat[1][1].min())
max_len_all.append(dst_max)
min_len_all.append(dst_min)
max_len_all = np.array(max_len_all)
min_len_all = np.array(min_len_all)
print(max_len_all)
print(min_len_all)
import matplotlib.pyplot as plt
import matplotlib.cm as cm
plt.figure(0, figsize=(6,10))
plot_shift = 0.5
for seg in range(9):
plt.plot(max_len_all[:,seg],[seg+plot_shift]*11, color=cm.jet((seg+1)/10), marker='^', linestyle='None', markersize=15)
plt.plot(min_len_all[:,seg],[seg]*11, color=cm.jet((seg+1)/10), marker='v', linestyle='None', markersize=15)
plt.plot([max_len_all[:,seg], min_len_all[:,seg]], [seg+plot_shift, seg], color=cm.jet((seg+1)/10), linewidth=1, linestyle="dotted")
plt.title("Segment length range")
plt.xlabel("Segment length (mm)", fontsize=30)
plt.xlim([0,0.6])
#plt.ylim([0,6])
#plt.xticks([0,1,2,3])
plt.yticks([])
plt.tick_params(labelsize=24)
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
#plt.legend()
plt.savefig(dst_path + "Segment_length_range_201120.png", facecolor="white", bbox_inches = "tight")
plt.show()
import pickle
with open(src_path + "pickle/max_len_all_201120.pickle", "wb") as f1:
#pickle.dump(max_len_all, f1)
with open(src_path + "pickle/min_len_all_201120.pickle", "wb") as f2:
#pickle.dump(min_len_all, f2)
# Calculate contraction duration
import pickle
with open(src_path + "pickle/seg_len_range_all_201104.pickle", "rb") as f1:
seg_len_range_all = pickle.load(f1)
with open(src_path + "pickle/seg_len_all_201102.pickle", "rb") as f2:
seg_len_all = pickle.load(f2)
with open(src_path + "pickle/max_len_all_201120.pickle", "rb") as f3:
max_len_all = pickle.load(f3)
with open(src_path + "pickle/min_len_all_201120.pickle", "rb") as f4:
min_len_all = pickle.load(f4)
# Check max and min in segment length data
# seg0 (A8) - seg8 (T3)
# select valleys
# Result1: 1,1,0,0,0,0,0,0,0
# Result2: 1,1,1,1,1,1,1,1,1
# Result3: 1,1,1,1,1,1,0,0,0
# Result4: 3,2,2,2,2,2,2,2,3
# Result5: 2,2,2,2,2,2,2,2,2
# Result6: 0,1,1,1,1,1,1,1,1
# Result7: 1,1,1,1,1,1,1,1,1
# Result8: 1,1,1,1,1,1,1,1,1
# Result9: 1,1,1,1,1,1,1,1,1
# Result10: 1,1,1,1,1,1,1,1,1
# Result11: 1,1,1,1,1,0,0,0,0
valleys = np.array([[1,1,0,0,0,0,0,0,1],
[1,1,1,1,1,1,1,1,1],
[1,1,1,1,1,1,0,0,1],
[3,2,2,2,2,2,2,2,3],
[2,2,2,2,2,2,2,2,2],
[0,1,1,1,1,1,1,1,1],
[1,1,1,1,1,1,1,1,1],
[1,1,1,1,1,1,1,1,1],
[1,1,1,1,1,1,1,1,1],
[1,1,1,1,1,1,1,1,1],
[1,1,1,1,1,0,0,0,0]])
# Calculate contraction duration
# seg_len_all: file_id, seg_id, frame [time, length]; 11 x 9 x frames x 2
# seg_len_range_all: file_id, seg_id, peak/valley, point number: 11 x 9 x 2 x point number
import matplotlib.pyplot as plt
from scipy import signal
file_id = 0
seg_id = 2
t = seg_len_all[file_id][seg_id][:,0]
length = signal.savgol_filter(seg_len_all[file_id][seg_id][:,1], 11, 2)
peaks = seg_len_range_all[file_id][seg_id]
plt.plot(t, length)
plt.plot(peaks[0][0], peaks[0][1], 'go')
plt.plot(peaks[1][0], peaks[1][1], 'mo')
plt.show()
from scipy import signal
file_id = 0
seg_id = 2
dat_t = seg_len_all[file_id][seg_id][:,0]
dat_l = signal.savgol_filter(seg_len_all[file_id][seg_id][:,1],11,2)
valley_point = seg_len_range_all[file_id][seg_id][1][0][valleys[file_id][seg_id]]
idx = np.where(dat_t == valley_point)[0]
thrd = (max_len_all[file_id][seg_id] - min_len_all[file_id][seg_id])*0.5 + min_len_all[file_id][seg_id]
# search for left idx
left_ = 0
while(dat_l[idx-left_]<thrd):
left_ += 1
idx_left = idx - left_
# search for right idx
right_ = 0
while(dat_l[idx+right_]<thrd):
right_ += 1
idx_right = idx + right_
time_left = dat_t[idx_left]
time_right = dat_t[idx_right]
dst0 = [[time_left, time_right], [idx_left, idx_right]]
print(dst0)
plt.plot(dat_t, dat_l)
plt.plot(dat_t[idx_left], dat_l[idx_left], "go")
plt.plot(dat_t[idx_right], dat_l[idx_right], "go")
plt.show()
print("thrd:", thrd)
print("left side:", dat_l[idx_left-1], dat_l[idx_left], dat_l[idx_left+1])
print("right side:", dat_l[idx_right-1], dat_l[idx_right], dat_l[idx_right+1])
# Calculate contraction duration
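# The contraction duration is a full width at half depth: around the selected valley of the
# smoothed segment-length trace, scan left and right until the trace rises back above the
# midpoint between that segment's maximum and minimum length; the time between the two
# crossings is the contraction duration.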
from scipy import signal
FWHM_segment_length_all = []
for file_id in range(11):
dst = []
for seg_id in range(9):
dat_t = seg_len_all[file_id][seg_id][:,0]
dat_l = signal.savgol_filter(seg_len_all[file_id][seg_id][:,1],11,2)
valley_point = seg_len_range_all[file_id][seg_id][1][0][valleys[file_id][seg_id]]
idx = np.where(dat_t == valley_point)[0]
thrd = (max_len_all[file_id][seg_id] - min_len_all[file_id][seg_id])*0.5 + min_len_all[file_id][seg_id]
# search for left idx
left_ = 0
while(dat_l[idx-left_]<thrd):
left_ += 1
idx_left = idx - left_
# search for right idx
right_ = 0
while(dat_l[idx+right_]<thrd):
right_ += 1
idx_right = idx + right_
time_left = dat_t[idx_left]
time_right = dat_t[idx_right]
dst0 = [[time_left[0], time_right[0]], [int(idx_left[0]), int(idx_right[0])]]
dst.append(dst0)
FWHM_segment_length_all.append(dst)
FWHM_segment_length_all = np.array(FWHM_segment_length_all)
FWHM_segment_length_all.shape
contraction_duration_all = []
for file_id in range(11):
dst = []
for seg_id in range(9):
dat = FWHM_segment_length_all[file_id][seg_id]
dst.append(dat[0,1] - dat[0,0])
contraction_duration_all.append(dst)
contraction_duration_all = np.array(contraction_duration_all)
print("contraction_duration_all", contraction_duration_all)
import matplotlib.pyplot as plt
import matplotlib.cm as cm
plt.figure(0, figsize=(6,10))
plot_shift = 0.5
for seg in range(1,9):
plt.plot(contraction_duration_all[:,seg], np.array([seg-1]*11) + np.random.randn(11)*0.07, color=cm.jet((seg+1)/10),
marker='o', linestyle='None', markersize=10)
plt.plot([0,0.7], [seg-1, seg-1], color=cm.jet((seg+1)/10), linestyle='dotted')
plt.title("Contraction duration")
plt.xlabel("Contraction duration (sec)", fontsize=30)
plt.xlim([0,0.7])
#plt.ylim([0,6])
plt.xticks([0,0.2, 0.4, 0.6])
plt.yticks([])
plt.tick_params(labelsize=24)
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
#plt.legend()
plt.savefig(dst_path + "Contraction_duration_201120.png", facecolor="white", bbox_inches = "tight")
plt.show()
```
# Thematic Reports
Thematic reports run historical analyses on the exposure of a portfolio to various Goldman Sachs Flagship Thematic baskets over a specified date range.
### Prerequisite
To execute all the code in this tutorial, you will need the following application scopes:
- **read_product_data**
- **read_financial_data**
- **modify_financial_data** (must be requested)
- **run_analytics** (must be requested)
If you are not yet permissioned for these scopes, please request them on your [My Applications Page](https://developer.gs.com/go/apps/view).
If you have any other questions please reach out to the [Marquee sales team](mailto:[email protected]).
## Step 1: Authenticate and Initialize Your Session
First you will import the necessary modules and add your client id and client secret.
```
import datetime as dt
from time import sleep
from gs_quant.markets.baskets import Basket
from gs_quant.markets.report import ThematicReport
from gs_quant.session import GsSession, Environment
client = None
secret = None
scopes = None
## External users must fill in their client ID and secret by uncommenting and completing the three lines below
#client = 'ENTER CLIENT ID'
#secret = 'ENTER CLIENT SECRET'
#scopes = ('read_product_data read_financial_data modify_financial_data run_analytics',)
GsSession.use(
Environment.PROD,
client_id=client,
client_secret=secret,
scopes=scopes
)
print('GS Session initialized.')
```
## Step 2: Create a New Thematic Report
#### Already have a thematic report?
<i>If you want to skip creating a new report and continue this tutorial with an existing thematic report, run the following and skip to Step 3:</i>
```
thematic_report_id = 'ENTER THEMATIC REPORT ID'
thematic_report = ThematicReport.get(thematic_report_id)
```
The only parameter necessary in creating a new thematic report is the unique Marquee identifier of the portfolio on which you would like to run thematic analytics.
```
portfolio_id = 'ENTER PORTFOLIO ID'
thematic_report = ThematicReport(position_source_id=portfolio_id)
thematic_report.save()
print(f'A new thematic report for portfolio "{portfolio_id}" has been made with ID "{thematic_report.id}".')
```
## Step 3: Schedule the Report
When scheduling reports, you have two options:
- Backcast the report: the report runs on the positions held on the earliest position date in the portfolio/basket, with a start date before the earliest position date and an end date equal to the earliest position date
- Do not backcast the report: set the start date to a date on which the portfolio/basket has positions and the end date to a later date (best practice is to set it to T-1). In this case the report runs on the positions held as of each day in the date range
In this case, let's try scheduling the report without backcasting:
```
start_date = dt.date(2021, 1, 4)
end_date = dt.date(2021, 8, 4)
thematic_report.schedule(
start_date=start_date,
end_date=end_date,
backcast=False
)
print(f'Report "{thematic_report.id}" has been scheduled.')
```
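For comparison, here is a minimal sketch of the backcast variant. The dates below are illustrative assumptions: the end date should equal the portfolio's earliest position date and the start date should precede it.
```
backcast_start = dt.date(2020, 1, 4)  # assumed: any date before the earliest position date
backcast_end = dt.date(2021, 1, 4)    # assumed: the portfolio's earliest position date

thematic_report.schedule(
    start_date=backcast_start,
    end_date=backcast_end,
    backcast=True
)
print(f'Report "{thematic_report.id}" has been scheduled with backcasting.')
```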
## Alternative Step 3: Run the Report
Depending on the size of your portfolio and the length of the schedule range, it usually takes anywhere from a couple seconds to half a minute for your report to finish executing.
Only after that can you successfully pull the results from that report. If you would rather run the report and pull the results immediately after they are ready, you can leverage the `run`
function.
You can run a report synchronously or asynchronously.
- Synchronous: the Python script will stall at the `run` function line and wait for the report to finish. The `run` function will then return a dataframe with the report results
- Asynchronous: the Python script will not stall at the `run` function line. The `run` function will return a `ReportJobFuture` object that will contain the report results when they are ready.
In this example, let's run the report asynchronously and wait for the results:
```
start_date = dt.date(2021, 1, 4)
end_date = dt.date(2021, 8, 4)
report_result_future = thematic_report.run(
start_date=start_date,
end_date=end_date,
backcast=False,
is_async=True
)
while not report_result_future.done():
print('Waiting for report results...')
sleep(5)
print('\nReport results done! Here they are...')
print(report_result_future.result())
```
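If you prefer the synchronous behavior described above, a minimal sketch (assuming `is_async=False` selects the synchronous mode) is to let `run` block until the report finishes and return the results dataframe directly:
```
report_results = thematic_report.run(
    start_date=start_date,
    end_date=end_date,
    backcast=False,
    is_async=False
)
print(report_results)
```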
### Step 4: Pull Report Results
Now that we have our thematic report, we can leverage the unique functionalities of the `ThematicReport` class to pull exposure and PnL data. Let's get the historical changes in thematic exposure and beta to the GS Asia Stay at Home basket:
```
basket = Basket.get('GSXASTAY')
thematic_exposures = thematic_report.get_thematic_data(
start_date=start_date,
end_date=end_date,
basket_ids=[basket.get_marquee_id()]
)
print(f'Thematic Exposures: \n{thematic_exposures.__str__()}')
thematic_exposures.plot(title='Thematic Data Breakdown')
```
### You're all set; Congrats!
*Other questions? Reach out to the [Portfolio Analytics team](mailto:[email protected])!*
```
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
# Set random seed for reproducibility
manualSeed = 999
print("Random Seed: ", manualSeed)
random.seed(manualSeed)
torch.manual_seed(manualSeed)
# Root directory for dataset
dataroot = "./data"
# Number of workers for dataloader
workers = 2
# Batch size during training
batch_size = 64
# Spatial size of training images. All images will be resized to this
# size using a transformer.
image_size = 32
# Number of channels in the training images. For color images this is 3
nc = 3
# Size of z latent vector (i.e. size of generator input)
nz = 100
# Size of feature maps in generator
ngf = 64
# Size of feature maps in discriminator
ndf = 64
# Number of training epochs
num_epochs = 20
# Learning rate for optimizers
lr = 0.0002
# Beta1 hyperparam for Adam optimizers
beta1 = 0.5
# Number of GPUs available. Use 0 for CPU mode.
ngpu = 1
# Create the dataset
dataset = dset.CIFAR10(
root=dataroot,
download=True,
transform=transforms.Compose([
transforms.Resize((image_size, image_size)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
shuffle=True, num_workers=workers)
# Decide which device we want to run on
device = torch.device("cuda:0" if (torch.cuda.is_available() and ngpu > 0) else "cpu")
# Plot some training images
real_batch = next(iter(dataloader))
plt.figure(figsize=(8,8))
plt.axis("off")
plt.title("Training Images")
plt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64], padding=2, normalize=True).cpu(),(1,2,0)))
plt.show()
```
## The Generator
The generator, G, is designed to map the latent space vector (z) to data-space. Since our data are images, converting z to data-space means ultimately creating an RGB image with the same size as the training images (i.e. 3x32x32). In practice, this is accomplished through a series of strided two-dimensional convolutional transpose layers, each paired with a 2D batch norm layer and a ReLU activation. The output of the generator is fed through a tanh function to return it to the input data range of [−1,1]. It is worth noting the existence of the batch norm functions after the conv-transpose layers, as this is a critical contribution of the DCGAN paper. These layers help with the flow of gradients during training. A diagram of the generator architecture can be found in the DCGAN paper.
```
# Generator Code
class Generator(nn.Module):
def __init__(self, ngpu):
super(Generator, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
# state size. (ngf) x 32 x 32
nn.ConvTranspose2d(ngf, nc, kernel_size=1, stride=1, padding=0, bias=False),
nn.Tanh()
)
def forward(self, input):
return self.main(input)
# Create the generator
netG = Generator(ngpu).to(device)
# Print the model
print(netG)
# The input to the DCGAN generator is a variable of shape (1, 100, 1, 1).
# There is nothing special about the latent dimension; you can change it to other values
# (e.g. 128, 200) by modifying the `nz` variable.
# Let's check that the generator produces an image with the correct shape (1, 3, 32, 32)
input_variable = torch.randn((1, 100, 1, 1, )).to(device)
netG(input_variable).shape
```
## The Discriminator
As mentioned, the discriminator, D, is a binary classification network that takes an image as input and outputs a scalar probability that the input image is real (as opposed to fake). Here, D takes a 3x32x32 input image, processes it through a series of Conv2d, BatchNorm2d, and LeakyReLU layers, and outputs the final probability through a Sigmoid activation function. This architecture can be extended with more layers if necessary for the problem, but there is significance to the use of the strided convolution, BatchNorm, and LeakyReLU layers. The DCGAN paper mentions it is good practice to use strided convolution rather than pooling to downsample because it lets the network learn its own pooling function. Batch norm and leaky ReLU also promote healthy gradient flow, which is critical for the learning of both G and D.
```
class Discriminator(nn.Module):
def __init__(self, ngpu):
super(Discriminator, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
            # input is (nc) x 32 x 32
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 16 x 16
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 8 x 8
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 4 x 4
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 2 x 2
nn.Conv2d(ndf * 8, 1, 2, 2, 0, bias=False),
nn.Sigmoid()
)
def forward(self, input):
return self.main(input)
# Create the Discriminator
netD = Discriminator(ngpu).to(device)
# Print the model
print(netD)
# Discriminator is the model that should predict single number from input image.
# This number is the probability of input being fake.
# Lets check that Discriminator will return single number from input of size (1, 3, 32, 32)
input_variable = torch.randn((1, 3, 32, 32, )).to(device)
netD(input_variable)
# Initialize BCELoss function
# This is the lost function used in DCGAN
criterion = nn.BCELoss()
# Create batch of latent vectors that we will use to visualize
# the progression of the generator
fixed_noise = torch.randn(64, nz, 1, 1, device=device)
# Establish convention for real and fake labels during training
real_label = 1
fake_label = 0
# Setup Adam optimizers for both G and D
optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))
# Training Loop
# Lists to keep track of progress
img_list = []
G_losses = []
D_losses = []
iters = 0
print("Starting Training Loop...")
# For each epoch
for epoch in range(num_epochs):
# For each batch in the dataloader
for i, data in enumerate(dataloader, 0):
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
## Train with all-real batch
netD.zero_grad()
# Format batch
real_cpu = data[0].to(device)
b_size = real_cpu.size(0)
        label = torch.full((b_size,), real_label, dtype=torch.float, device=device)  # float labels required by BCELoss
# Forward pass real batch through D
output = netD(real_cpu).view(-1)
# Calculate loss on all-real batch
errD_real = criterion(output, label)
# Calculate gradients for D in backward pass
errD_real.backward()
D_x = output.mean().item()
## Train with all-fake batch
# Generate batch of latent vectors
noise = torch.randn(b_size, nz, 1, 1, device=device)
# Generate fake image batch with G
fake = netG(noise)
label.fill_(fake_label)
# Classify all fake batch with D
output = netD(fake.detach()).view(-1)
# Calculate D's loss on the all-fake batch
errD_fake = criterion(output, label)
# Calculate the gradients for this batch
errD_fake.backward()
D_G_z1 = output.mean().item()
# Add the gradients from the all-real and all-fake batches
errD = errD_real + errD_fake
# Update D
optimizerD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
netG.zero_grad()
label.fill_(real_label) # fake labels are real for generator cost
# Since we just updated D, perform another forward pass of all-fake batch through D
output = netD(fake).view(-1)
# Calculate G's loss based on this output
errG = criterion(output, label)
# Calculate gradients for G
errG.backward()
D_G_z2 = output.mean().item()
# Update G
optimizerG.step()
# Output training stats
if i % 50 == 0:
print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
% (epoch+1, num_epochs, i, len(dataloader),
errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
# Save Losses for plotting later
G_losses.append(errG.item())
D_losses.append(errD.item())
# Check how the generator is doing by saving G's output on fixed_noise
if (iters % 500 == 0) or ((epoch == num_epochs-1) and (i == len(dataloader)-1)):
with torch.no_grad():
fake = netG(fixed_noise).detach().cpu()
img_list.append(vutils.make_grid(fake, padding=2, normalize=True))
iters += 1
plt.figure(figsize=(10,5))
plt.title("Generator and Discriminator Loss During Training")
plt.plot(G_losses,label="G")
plt.plot(D_losses,label="D")
plt.xlabel("iterations")
plt.ylabel("Loss")
plt.legend()
plt.show()
#%%capture
fig = plt.figure(figsize=(8,8))
plt.axis("off")
ims = [[plt.imshow(np.transpose(i,(1,2,0)), animated=True)] for i in img_list]
ani = animation.ArtistAnimation(fig, ims, interval=1000, repeat_delay=1000, blit=True)
HTML(ani.to_jshtml())
# Grab a batch of real images from the dataloader
real_batch = next(iter(dataloader))
# Plot the real images
plt.figure(figsize=(15,15))
plt.subplot(1,2,1)
plt.axis("off")
plt.title("Real Images")
plt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64], padding=5, normalize=True).cpu(),(1,2,0)))
# Plot the fake images from the last epoch
plt.subplot(1,2,2)
plt.axis("off")
plt.title("Fake Images")
plt.imshow(np.transpose(img_list[-1],(1,2,0)))
plt.show()
```
# Task
1) Train for longer to see how good the results get
2) Modify this model to take torchvision.datasets.SVHN as input
3) Modify this model to take torchvision.datasets.MNIST as input
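For tasks 2 and 3, here is a minimal sketch of the dataset swap, assuming the same `dataroot`, `image_size`, and transform pipeline used above; note that MNIST images have a single channel, so `nc` must be set to 1 and the Generator/Discriminator rebuilt before training:
```
import torchvision.datasets as dset
import torchvision.transforms as transforms

# SVHN: 3-channel 32x32 images, so nc stays 3 and the models above work unchanged
svhn_dataset = dset.SVHN(
    root=dataroot, split='train', download=True,
    transform=transforms.Compose([
        transforms.Resize((image_size, image_size)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ]))

# MNIST: 1-channel images, so set nc = 1 and use single-channel normalization
nc = 1
mnist_dataset = dset.MNIST(
    root=dataroot, train=True, download=True,
    transform=transforms.Compose([
        transforms.Resize((image_size, image_size)),
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,)),
    ]))
```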
```
%cd ../
```
## Stochastic Block Model Experiment
Before getting into the experiment details, let's review Algorithm 1 and the primal and dual updates.
### Algorithm 1

```
# %load algorithm/main.py
%time
from sklearn.metrics import mean_squared_error
from penalty import *
def algorithm_1(K, D, weight_vec, datapoints, true_labels, samplingset, lambda_lasso, penalty_func_name='norm1', calculate_score=False):
'''
:param K: the number of iterations
:param D: the block incidence matrix
:param weight_vec: a list containing the edges's weights of the graph
:param datapoints: a dictionary containing the data of each node in the graph needed for the algorithm 1
:param true_labels: a list containing the true labels of the nodes
:param samplingset: the sampling set
:param lambda_lasso: the parameter lambda
:param penalty_func_name: the name of the penalty function used in the algorithm
:return iteration_scores: the mean squared error of the predicted weight vectors in each iteration
:return new_w: the predicted weigh vectors for each node
'''
Sigma = np.diag(np.full(weight_vec.shape, 0.9 / 2))
'''
Sigma: the block diagonal matrix Sigma
'''
T_matrix = np.diag(np.array((1.0 / (np.sum(abs(D), 0)))).ravel())
'''
T_matrix: the block diagonal matrix T
'''
if np.linalg.norm(np.dot(Sigma ** 0.5, D).dot(T_matrix ** 0.5), 2) > 1:
print ('product norm', np.linalg.norm(np.dot(Sigma ** 0.5, D).dot(T_matrix ** 0.5), 2))
E, N = D.shape
m, n = datapoints[0]['features'].shape
# define the penalty function
if penalty_func_name == 'norm1':
penalty_func = Norm1Pelanty(lambda_lasso, weight_vec, Sigma, n)
elif penalty_func_name == 'norm2':
penalty_func = Norm2Pelanty(lambda_lasso, weight_vec, Sigma, n)
elif penalty_func_name == 'mocha':
penalty_func = MOCHAPelanty(lambda_lasso, weight_vec, Sigma, n)
else:
raise Exception('Invalid penalty name')
# starting algorithm 1
new_w = np.array([np.zeros(n) for i in range(N)])
'''
new_w: the primal variable of the algorithm 1
'''
new_u = np.array([np.zeros(n) for i in range(E)])
'''
new_u: the dual variable of the algorithm 1
'''
iteration_scores = []
for iterk in range(K):
# if iterk % 100 == 0:
# print ('iter:', iterk)
prev_w = np.copy(new_w)
# algorithm 1, line 2
hat_w = new_w - np.dot(T_matrix, np.dot(D.T, new_u))
for i in range(N):
if i in samplingset: # algorithm 1, line 6
optimizer = datapoints[i]['optimizer']
new_w[i] = optimizer.optimize(datapoints[i]['features'], datapoints[i]['label'], hat_w[i], datapoints[i]['degree'])
else:
new_w[i] = hat_w[i]
# algorithm 1, line 9
tilde_w = 2 * new_w - prev_w
new_u = new_u + np.dot(Sigma, np.dot(D, tilde_w))
# algorithm 1, line 10
new_u = penalty_func.update(new_u)
# calculate the MSE of the predicted weight vectors
if calculate_score:
Y_pred = []
for i in range(N):
Y_pred.append(np.dot(datapoints[i]['features'], new_w[i]))
iteration_scores.append(mean_squared_error(true_labels.reshape(N, m), Y_pred))
# print (np.max(abs(new_w - prev_w)))
return iteration_scores, new_w
```
### Primal Update
As you can see in the algorithm picture, the primal update requires an optimizer for the nodes in the sampling set (line 6). We have implemented the optimizers discussed in the paper: PyTorch implementations of both the logistic loss and the squared error loss optimizers are available, and we have also implemented the squared error loss optimizer using the fixed-point equation from the `Networked Linear Regression` section of the paper.
```
# %load algorithm/optimizer.py
import torch
import abc
import numpy as np
from abc import ABC
# The linear model which is implemented by pytorch
class TorchLinearModel(torch.nn.Module):
def __init__(self, n):
super(TorchLinearModel, self).__init__()
self.linear = torch.nn.Linear(n, 1, bias=False)
def forward(self, x):
y_pred = self.linear(x)
return y_pred
# The abstract optimizer model which should have model, optimizer, and criterion as the input
class Optimizer(ABC):
def __init__(self, model, optimizer, criterion):
self.model = model
self.optimizer = optimizer
self.criterion = criterion
@abc.abstractmethod
def optimize(self, x_data, y_data, old_weight, regularizer_term):
torch_old_weight = torch.from_numpy(np.array(old_weight, dtype=np.float32))
self.model.linear.weight.data = torch_old_weight
for iterinner in range(40):
self.optimizer.zero_grad()
y_pred = self.model(x_data)
loss1 = self.criterion(y_pred, y_data)
loss2 = 1 / (2 * regularizer_term) * torch.mean((self.model.linear.weight - torch_old_weight) ** 2) # + 10000*torch.mean((model.linear.bias+0.5)**2)#model.linear.weight.norm(2)
loss = loss1 + loss2
loss.backward()
self.optimizer.step()
return self.model.linear.weight.data.numpy()
# The linear model in Networked Linear Regression section of the paper
class LinearModel:
def __init__(self, degree, features, label):
mtx1 = 2 * degree * np.dot(features.T, features).astype('float64')
mtx1 += 1 * np.eye(mtx1.shape[0])
mtx1_inv = np.linalg.inv(mtx1)
mtx2 = 2 * degree * np.dot(features.T, label).T
self.mtx1_inv = mtx1_inv
self.mtx2 = mtx2
def forward(self, x):
mtx2 = x + self.mtx2
mtx_inv = self.mtx1_inv
return np.dot(mtx_inv, mtx2)
# The Linear optimizer in Networked Linear Regression section of the paper
class LinearOptimizer(Optimizer):
def __init__(self, model):
super(LinearOptimizer, self).__init__(model, None, None)
def optimize(self, x_data, y_data, old_weight, regularizer_term):
return self.model.forward(old_weight)
# The Linear optimizer model which is implemented by pytorch
class TorchLinearOptimizer(Optimizer):
def __init__(self, model):
criterion = torch.nn.MSELoss(reduction='mean')
optimizer = torch.optim.RMSprop(model.parameters())
super(TorchLinearOptimizer, self).__init__(model, optimizer, criterion)
def optimize(self, x_data, y_data, old_weight, regularizer_term):
return super(TorchLinearOptimizer, self).optimize(x_data, y_data, old_weight, regularizer_term)
# The Logistic optimizer model which is implemented by pytorch
class TorchLogisticOptimizer(Optimizer):
def __init__(self, model):
criterion = torch.nn.BCELoss(reduction='mean')
optimizer = torch.optim.RMSprop(model.parameters())
super(TorchLogisticOptimizer, self).__init__(model, optimizer, criterion)
def optimize(self, x_data, y_data, old_weight, regularizer_term):
return super(TorchLogisticOptimizer, self).optimize(x_data, y_data, old_weight, regularizer_term)
```
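A minimal usage sketch (with assumed toy shapes) of how a single node's fixed-point squared error optimizer could be constructed and called, as done inside the primal update (line 6 of Algorithm 1):
```
import numpy as np

m, n = 5, 2                                  # assumed toy shapes for one node's local dataset
features = np.random.normal(size=(m, n))
label = np.dot(features, np.ones(n))
node_degree = 0.5                            # assumed value of 1/N_i for this node

model = LinearModel(node_degree, features, label)
optimizer = LinearOptimizer(model)

# LinearOptimizer ignores x_data/y_data; the update only needs hat_w and the regularizer term
new_weight = optimizer.optimize(features, label, old_weight=np.zeros(n), regularizer_term=node_degree)
print(new_weight)
```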
### Dual Update
As mentioned in the paper, the dual update applies a penalty function (line 10), which is either norm1, norm2, or MOCHA.
```
# %load algorithm/penalty.py
import abc
import numpy as np
from abc import ABC
# The abstract penalty function which has a function update
class Penalty(ABC):
def __init__(self, lambda_lasso, weight_vec, Sigma, n):
self.lambda_lasso = lambda_lasso
self.weight_vec = weight_vec
self.Sigma = Sigma
@abc.abstractmethod
def update(self, new_u):
pass
# The norm2 penalty function
class Norm2Pelanty(Penalty):
def __init__(self, lambda_lasso, weight_vec, Sigma, n):
super(Norm2Pelanty, self).__init__(lambda_lasso, weight_vec, Sigma, n)
self.limit = np.array(lambda_lasso * weight_vec)
def update(self, new_u):
normalized_u = np.where(np.linalg.norm(new_u, axis=1) >= self.limit)
new_u[normalized_u] = (new_u[normalized_u].T * self.limit[normalized_u] / np.linalg.norm(new_u[normalized_u], axis=1)).T
return new_u
# The MOCHA penalty function
class MOCHAPelanty(Penalty):
def __init__(self, lambda_lasso, weight_vec, Sigma, n):
super(MOCHAPelanty, self).__init__(lambda_lasso, weight_vec, Sigma, n)
self.normalize_factor = 1 + np.dot(2 * self.Sigma, 1/(self.lambda_lasso * self.weight_vec))
def update(self, new_u):
for i in range(new_u.shape[1]):
new_u[:, i] /= self.normalize_factor
return new_u
# The norm1 penalty function
class Norm1Pelanty(Penalty):
def __init__(self, lambda_lasso, weight_vec, Sigma, n):
super(Norm1Pelanty, self).__init__(lambda_lasso, weight_vec, Sigma, n)
self.limit = np.array([np.zeros(n) for i in range(len(weight_vec))])
for i in range(n):
self.limit[:, i] = lambda_lasso * weight_vec
def update(self, new_u):
normalized_u = np.where(abs(new_u) >= self.limit)
new_u[normalized_u] = self.limit[normalized_u] * new_u[normalized_u] / abs(new_u[normalized_u])
return new_u
```
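A minimal sketch (with assumed sizes) of applying the norm1 penalty to the dual variable, as in line 10 of Algorithm 1:
```
import numpy as np

E, n = 4, 2                                  # assumed number of edges / feature dimension
lambda_lasso = 0.1
weight_vec = np.ones(E)
Sigma = np.diag(np.full(E, 0.9 / 2))

penalty = Norm1Pelanty(lambda_lasso, weight_vec, Sigma, n)
new_u = np.random.normal(size=(E, n))
new_u = penalty.update(new_u)                # entries are clipped so that |u_e| <= lambda * w_e
print(new_u)
```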
## Create SBM Graph
The stochastic block model is a generative model for random graphs with a cluster structure. Two nodes within the same cluster of the empirical graph are connected by an edge with probability $p_{in}$, and two nodes from different clusters are connected by an edge with probability $p_{out}$. Each node $i \in V$ represents a local dataset consisting of $m$ feature vectors $x^{(i,1)}, ..., x^{(i,m)} \in R^n$. The feature vectors are i.i.d. realizations of a standard Gaussian random vector $x \sim N(0,I)$. The labels $y_1^{(i)}, ..., y_m^{(i)} \in R$ of node $i \in V$ are generated according to the linear model $y_r^{(i)} = (x^{(i, r)})^T w^{(i)} + \epsilon$, with $\epsilon \sim N(0,\sigma)$. To learn the weight vectors $w^{(i)}$, we apply Algorithm 1 to a training set M obtained by randomly selecting 40% of the nodes.
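A minimal sketch (with assumed cluster sizes and edge probabilities) of generating such an SBM graph with networkx and selecting the 40% sampling set; the notebook itself can instead generate `G` with `graspy.simulations.sbm`:
```
import random
import networkx as nx

cluster_sizes = [50, 50]                     # assumed: two clusters of 50 nodes each
pin, pout = 0.5, 0.01                        # assumed intra-/inter-cluster edge probabilities
probs = [[pin, pout], [pout, pin]]
G = nx.stochastic_block_model(cluster_sizes, probs)

N = len(G)
samplingset = random.sample(range(N), k=int(0.4 * N))   # the training set M (40% of the nodes)
```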
```
from optimizer import *
from torch.autograd import Variable
#from graspy.simulations import sbm
def get_sbm_data(cluster_sizes, G, W, m=5, n=2, noise_sd=0, is_torch_model=True):
'''
:param cluster_sizes: a list containing the size of each cluster
:param G: generated SBM graph with defined clusters using graspy.simulations
:param W: a list containing the weight vectors for each cluster
:param m, n: shape of features vector for each node
:param pin: the probability of edges inside each cluster
:param pout: the probability of edges between the clusters
:param noise_sd: the standard deviation of the noise for calculating the labels
:return B: adjacency matrix of the graph
:return weight_vec: a list containing the edges's weights of the graph
:return true_labels: a list containing the true labels of the nodes
:return datapoints: a dictionary containing the data of each node in the graph needed for the algorithm 1
'''
N = len(G)
E = int(G.number_of_edges())#int(len(np.argwhere(G > 0))/2)
'''
N: total number of nodes
E: total number of edges
'''
# create B(adjacency matrix) and edges's weights vector(weight_vec) based on the graph G
B = np.zeros((E, N))
'''
    B: incidence matrix of the graph, with shape E*N
'''
weight_vec = np.zeros(E)
'''
    weight_vec: a list containing the edges' weights of the graph, with shape E
'''
cnt = 0
for i, j in G.edges:
if i > j:
continue
B[cnt, i] = 1
B[cnt, j] = -1
weight_vec[cnt] = 1
cnt += 1
# create the data of each node needed for the algorithm 1
node_degrees = np.array((1.0 / (np.sum(abs(B), 0)))).ravel()
'''
    node_degrees: a list containing the inverse node degrees (1/N_i) needed for Algorithm 1
'''
datapoints = {}
'''
datapoints: a dictionary containing the data of each node in the graph needed for the algorithm 1,
which are features, label, degree, and also the optimizer model for each node
'''
true_labels = []
'''
true_labels: the true labels for the nodes of the graph
'''
cnt = 0
for i, cluster_size in enumerate(cluster_sizes):
for j in range(cluster_size):
features = np.random.normal(loc=0.0, scale=1.0, size=(m, n))
'''
features: the feature vector of node i which are i.i.d. realizations of a standard Gaussian random vector x~N(0,I)
'''
label = np.dot(features, W[i]) + np.random.normal(0,noise_sd)
'''
label: the label of the node i that is generated according to the linear model y = x^T w + e
'''
true_labels.append(label)
if is_torch_model:
model = TorchLinearModel(n)
optimizer = TorchLinearOptimizer(model)
features = Variable(torch.from_numpy(features)).to(torch.float32)
label = Variable(torch.from_numpy(label)).to(torch.float32)
else:
                model = LinearModel(node_degrees[cnt], features, label)
optimizer = LinearOptimizer(model)
'''
model : the linear model for the node i
optimizer : the optimizer model for the node i
'''
datapoints[cnt] = {
'features': features,
                'degree': node_degrees[cnt],
'label': label,
'optimizer': optimizer
}
cnt += 1
return B, weight_vec, np.array(true_labels), datapoints
```
### Compare Results
To evaluate the results, we compare the MSE of Algorithm 1 with plain linear regression
and decision tree regression.
```
# %load results/compare_results.py
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error
def get_algorithm1_MSE(datapoints, predicted_w, samplingset):
'''
:param datapoints: a dictionary containing the data of each node in the graph needed for the algorithm 1
    :param predicted_w: the predicted weight vectors for each node
:param samplingset: the sampling set for the algorithm 1
:return alg1_MSE: the MSE of the algorithm 1 for all the nodes, the samplingset and other nodes (test set)
'''
not_samplingset = [i for i in range(len(datapoints)) if i not in samplingset]
true_labels = []
pred_labels = []
for i in range(len(datapoints)):
features = np.array(datapoints[i]['features'])
label = np.array(datapoints[i]['label'])
true_labels.append(label)
pred_labels.append(np.dot(features, predicted_w[i]))
pred_labels = np.array(pred_labels)
true_labels = np.array(true_labels)
alg1_MSE = {'total': mean_squared_error(true_labels, pred_labels),
'train': mean_squared_error(true_labels[samplingset], pred_labels[samplingset]),
'test': mean_squared_error(true_labels[not_samplingset], pred_labels[not_samplingset])}
return alg1_MSE
def get_linear_regression_MSE(x, y, samplingset, not_samplingset):
'''
:param x: a list containing the features of the nodes
:param y: a list containing the labels of the nodes
:param samplingset: the training dataset
:param not_samplingset: the test dataset
:return linear_regression_MSE : the MSE of linear regression for all the nodes, the samplingset and other nodes (test set)
'''
model = LinearRegression().fit(x[samplingset], y[samplingset])
pred_y = model.predict(x)
linear_regression_MSE = {'total': mean_squared_error(y, pred_y),
'train': mean_squared_error(y[samplingset],
pred_y[samplingset]),
'test': mean_squared_error(y[not_samplingset],
pred_y[not_samplingset])}
return linear_regression_MSE
def get_decision_tree_MSE(x, y, samplingset, not_samplingset):
'''
:param x: a list containing the features of the nodes
:param y: a list containing the labels of the nodes
:param samplingset: the training dataset
:param not_samplingset: the test dataset
:return decision_tree_MSE : the MSE of decision tree for all the nodes, the samplingset and other nodes (test set)
'''
max_depth = 2
regressor = DecisionTreeRegressor(max_depth=max_depth)
regressor.fit(x[samplingset], y[samplingset])
pred_y = regressor.predict(x)
decision_tree_MSE = {'total': mean_squared_error(y, pred_y),
'train': mean_squared_error(y[samplingset],
pred_y[samplingset]),
'test': mean_squared_error(y[not_samplingset],
pred_y[not_samplingset])}
return decision_tree_MSE
def get_scores(datapoints, predicted_w, samplingset):
N = len(datapoints)
'''
N : the total number of nodes
'''
# calculate algorithm1 MSE
alg_1_score = get_algorithm1_MSE(datapoints, predicted_w, samplingset)
# prepare the data for calculating the linear regression and decision tree regression MSEs
X = []
'''
X: an array containing the features of all the nodes
'''
true_labels = []
'''
true_labels: an array containing the labels of all the nodes
'''
for i in range(len(datapoints)):
X.append(np.array(datapoints[i]['features']))
true_labels.append(np.array(datapoints[i]['label']))
X = np.array(X)
true_labels = np.array(true_labels)
m, n = X[0].shape
x = X.reshape(-1, n)
y = true_labels.reshape(-1, 1)
reformated_samplingset = []
for item in samplingset:
for i in range(m):
reformated_samplingset.append(m * item + i)
reformated_not_samplingset = [i for i in range(m * N) if i not in reformated_samplingset]
# calculate linear regression MSE
linear_regression_score = get_linear_regression_MSE(x, y, reformated_samplingset, reformated_not_samplingset)
# calculate decision tree MSE
decision_tree_score = get_decision_tree_MSE(x, y, reformated_samplingset, reformated_not_samplingset)
return alg_1_score, linear_regression_score, decision_tree_score
```
### SBM with Two Clusters
This SBM has two clusters $|C_1| = |C_2| = 100$.
Two nodes within the same cluster are connected by an edge with probability `pin=0.5`,
and two nodes from different clusters are connected by an edge with probability `pout=0.01`.
Each node $i \in V$ represents a local dataset consisting of feature vectors $x^{(i,1)}, ... , x^{(i,5)} \in R^2$.
The feature vectors are i.i.d. realizations of a standard Gaussian random vector $x \sim N(0,I)$.
The labels $y_1^{(i)}, . . . , y_5^{(i)} \in R$ for each node $i \in V$
are generated according to the linear model $y_r^{(i)} = (x^{(i, r)})^T w^{(i)} + \epsilon$, with $\epsilon = 0$.
The tuning parameter $\lambda$ in Algorithm 1
is chosen manually, guided by the resulting MSE: $\lambda=0.01$ for the norm1 and norm2 penalties and $\lambda=0.05$ for the MOCHA penalty.
To learn the weights $w^{(i)}$, we apply Algorithm 1 to a training set M obtained by randomly selecting 40% of the nodes and use the rest as the test set. We then compare the mean MSE of Algorithm 1 with plain linear regression and decision tree regression across the different random sampling sets.
```
#from graspy.simulations import sbm
import networkx as nx
def get_sbm_2blocks_data(m=5, n=2, pin=0.5, pout=0.01, noise_sd=0, is_torch_model=True):
'''
:param m, n: shape of features vector for each node
:param pin: the probability of edges inside each cluster
:param pout: the probability of edges between the clusters
:param noise_sd: the standard deviation of the noise for calculating the labels
    :return B: incidence matrix of the graph (one row per edge, with +1/-1 at the edge's endpoints)
    :return weight_vec: a list containing the edges' weights of the graph
:return true_labels: a list containing the true labels of the nodes
:return datapoints: a dictionary containing the data of each node in the graph needed for the algorithm 1
'''
cluster_sizes = [100, 100]
    # generate graph G, which is an SBM with 2 clusters
#G = sbm(n=cluster_sizes, p=[[pin, pout],[pout, pin]])
probs = [[pin, pout], [pout, pin]]
G = nx.stochastic_block_model(cluster_sizes, probs)
'''
G: generated SBM graph with 2 clusters
'''
# define weight vectors for each cluster of the graph
W1 = np.array([2, 2])
    '''
    W1: the weight vector for the first cluster
    '''
    W2 = np.array([-2, 2])
    '''
    W2: the weight vector for the second cluster
    '''
W = [W1, W2]
return get_sbm_data(cluster_sizes, G, W, m, n, noise_sd, is_torch_model)
a = nx.stochastic_block_model([100, 100], [[0.1,0.01], [0.01,0.1]])
nx.draw(a,with_labels=True)
```
Plot the MSE across the different random sampling sets for each penalty function; the plots are on a log scale.
```
%time
import random
import matplotlib.pyplot as plt
from collections import defaultdict
PENALTY_FUNCS = ['norm1', 'norm2', 'mocha']
LAMBDA_LASSO = {'norm1': 0.01, 'norm2': 0.01, 'mocha': 0.05}
K = 1000
B, weight_vec, true_labels, datapoints = get_sbm_2blocks_data(pin=0.5, pout=0.01, is_torch_model=False)
E, N = B.shape
alg1_scores = defaultdict(list)
linear_regression_scores = defaultdict(list)
decision_tree_scores = defaultdict(list)
##samplingset = random.sample([i for i in range(N)], k=int(0.4* N))
##lambda_lasso = LAMBDA_LASSO['mocha']
##algorithm_1(K, B, weight_vec, datapoints, true_labels, samplingset, lambda_lasso, PENALTY_FUNCS[0])
num_tries = 5
for i in range(num_tries):
samplingset = random.sample([i for i in range(N)], k=int(0.4* N))
for penalty_func in PENALTY_FUNCS:
lambda_lasso = LAMBDA_LASSO[penalty_func]
_, predicted_w = algorithm_1(K, B, weight_vec, datapoints, true_labels, samplingset, lambda_lasso, penalty_func)
alg1_score, linear_regression_score, decision_tree_score = get_scores(datapoints, predicted_w, samplingset)
alg1_scores[penalty_func].append(alg1_score)
linear_regression_scores[penalty_func].append(linear_regression_score)
decision_tree_scores[penalty_func].append(decision_tree_score)
%time
labels = ['alg1,norm1', 'alg1,norm2', 'alg1,mocha', 'linear reg', 'decision tree']
x_pos = np.arange(len(labels))
print('algorithm 1, norm1:',
'\n mean train MSE:', np.mean([item['train'] for item in alg1_scores['norm1']]),
'\n mean test MSE:', np.mean([item['test'] for item in alg1_scores['norm1']]))
print('algorithm 1, norm2:',
'\n mean train MSE:', np.mean([item['train'] for item in alg1_scores['norm2']]),
'\n mean test MSE:', np.mean([item['test'] for item in alg1_scores['norm2']]))
print('algorithm 1, mocha:',
'\n mean train MSE:', np.mean([item['train'] for item in alg1_scores['mocha']]),
'\n mean test MSE:', np.mean([item['test'] for item in alg1_scores['mocha']]))
print('linear regression:',
'\n mean train MSE:', np.mean([item['train'] for item in linear_regression_scores['norm1']]),
'\n mean test MSE:', np.mean([item['test'] for item in linear_regression_scores['norm1']]))
print('decision tree:',
'\n mean train MSE:', np.mean([item['train'] for item in decision_tree_scores['norm1']]),
'\n mean test MSE:', np.mean([item['test'] for item in decision_tree_scores['norm1']]))
alg1_norm1_score = [item['total'] for item in alg1_scores['norm1']]
alg1_norm2_score = [item['total'] for item in alg1_scores['norm2']]
alg1_mocha_score = [item['total'] for item in alg1_scores['mocha']]
linear_regression_score = [item['total'] for item in linear_regression_scores['norm1']]
decision_tree_score = [item['total'] for item in decision_tree_scores['norm1']]
mean_MSEs = [
np.mean(alg1_norm1_score),
np.mean(alg1_norm2_score),
np.mean(alg1_mocha_score),
np.mean(linear_regression_score),
np.mean(decision_tree_score)
]
std_MSEs = [
np.std(alg1_norm1_score),
np.std(alg1_norm2_score),
np.std(alg1_mocha_score),
np.std(linear_regression_score),
np.std(decision_tree_score)]
fig, ax = plt.subplots()
ax.bar(x_pos, mean_MSEs,
yerr=std_MSEs,
align='center',
alpha=0.5,
ecolor='black',
capsize=20)
ax.set_ylabel('MSE')
ax.set_xticks(x_pos)
ax.set_xticklabels(labels)
ax.set_yscale('log')
ax.set_title('error bars plot')
plt.show()
plt.close()
```
Plot the MSE as a function of `p_out` for different noise standard deviations (0.01, 0.1, 1.0) and for each penalty function; as you can see, Algorithm 1 is fairly robust to the noise.
```
%time
import random
import matplotlib.pyplot as plt
PENALTY_FUNCS = ['norm1', 'norm2', 'mocha']
lambda_lasso = 0.01
K = 20
sampling_ratio = 0.6
pouts = [0.01, 0.1, 0.2, 0.4, 0.6]
colors = ['steelblue', 'darkorange', 'green']
for penalty_func in PENALTY_FUNCS:
print('penalty_func:', penalty_func)
for i, noise in enumerate([0.01, 0.1, 1.0]):
MSEs_mean = {}
MSEs_std = {}
for pout in pouts:
num_tries = 5
pout_mses = []
for j in range(num_tries):
B, weight_vec, true_labels, datapoints = get_sbm_2blocks_data(pin=0.5, pout=pout, noise_sd=noise, is_torch_model=False)
E, N = B.shape
samplingset = random.sample([i for i in range(N)], k=int(sampling_ratio * N))
_, predicted_w = algorithm_1(K, B, weight_vec, datapoints, true_labels, samplingset, lambda_lasso, penalty_func)
alg1_score, _, _ = get_scores(datapoints, predicted_w, samplingset)
pout_mses.append(alg1_score['total'])
MSEs_mean[pout] = np.mean(pout_mses)
MSEs_std[pout] = np.std(pout_mses)
plt.errorbar(list(MSEs_mean.keys()), list(MSEs_mean.values()), yerr=list(MSEs_std.values()),
ecolor=colors[i], capsize=3,
label='noise=' + str(noise), c=colors[i])
print('noise', noise)
print(' MSEs:', MSEs_mean)
plt.xlabel('p_out')
plt.ylabel('MSE')
plt.legend(loc='best')
plt.title('Penalty function : %s' % penalty_func)
plt.show()
plt.close()
```
Plot the MSE as a function of `p_out` for different sampling ratios (0.2, 0.4, 0.6) and for each penalty function.
```
import random
import matplotlib.pyplot as plt
PENALTY_FUNCS = ['norm1', 'norm2', 'mocha']
lambda_lasso = 0.01
K = 30
sampling_ratio = 0.6
pouts = [0.01, 0.1, 0.2, 0.4, 0.6]
colors = ['steelblue', 'darkorange', 'green']
for penalty_func in PENALTY_FUNCS:
print('penalty_func:', penalty_func)
for i, sampling_ratio in enumerate([0.2, 0.4, 0.6]):
MSEs_mean = {}
MSEs_std = {}
for pout in pouts:
num_tries = 5
pout_mses = []
for j in range(num_tries):
B, weight_vec, true_labels, datapoints = get_sbm_2blocks_data(pin=0.5, pout=pout, is_torch_model=False)
E, N = B.shape
samplingset = random.sample([i for i in range(N)], k=int(sampling_ratio * N))
_, predicted_w = algorithm_1(K, B, weight_vec, datapoints, true_labels, samplingset, lambda_lasso, penalty_func)
alg1_score, _, _ = get_scores(datapoints, predicted_w, samplingset)
pout_mses.append(alg1_score['total'])
MSEs_mean[pout] = np.mean(pout_mses)
MSEs_std[pout] = np.std(pout_mses)
plt.errorbar(list(MSEs_mean.keys()), list(MSEs_mean.values()), yerr=list(MSEs_std.values()),
ecolor=colors[i], capsize=3,
label='M=' + str(sampling_ratio), c=colors[i])
print('M:', sampling_ratio)
print('MSE:', MSEs_mean)
plt.xlabel('p_out')
plt.ylabel('MSE')
plt.legend(loc='best')
plt.title('Penalty function : %s' % penalty_func)
plt.show()
plt.close()
```
### SBM with Five Clusters
The cluster sizes are {70, 10, 50, 100, 150},
with random weight vectors $\in R^2$ selected uniformly from $[0,1)$.
We run Algorithm 1 with a fixed `pin = 0.5` and `pout = 0.001`,
and a fixed number of 1000 iterations. Each node $i \in V$ represents a local dataset consisting of feature vectors $x^{(i,1)}, ... , x^{(i,5)} \in R^2$.
The feature vectors are i.i.d. realizations of a standard Gaussian random vector $x \sim N(0,I)$.
The labels $y_1^{(i)}, . . . , y_5^{(i)} \in R$ for each node $i \in V$
are generated according to the linear model $y_r^{(i)} = (x^{(i, r)})^T w^{(i)} + \epsilon$, with $\epsilon = 0$. The tuning parameter $\lambda$ in Algorithm 1
is chosen manually, guided by the resulting MSE: $\lambda=0.01$ for the norm1 and norm2 penalties and $\lambda=0.05$ for the MOCHA penalty.
We assume that labels $y^{(i)}$ are available for 20% of the graph nodes. We randomly choose the training set M
and use the rest as the test set.
We then compare the mean MSE of Algorithm 1 with plain linear regression
and decision tree regression across the different random sampling sets.
```
from graspy.simulations import sbm
def get_sbm_5blocks_data(m=5, n=2, pin=0.5, pout=0.01, noise_sd=0, is_torch_model=True):
'''
:param m, n: shape of features vector for each node
:param pin: the probability of edges inside each cluster
:param pout: the probability of edges between the clusters
:param noise_sd: the standard deviation of the noise for calculating the labels
    :return B: incidence matrix of the graph (one row per edge, with +1/-1 at the edge's endpoints)
    :return weight_vec: a list containing the edges' weights of the graph
:return true_labels: a list containing the true labels of the nodes
:return datapoints: a dictionary containing the data of each node in the graph needed for the algorithm 1
'''
cluster_sizes = [70, 10, 50, 100, 150]
p = [[pin if i==j else pout for i in range(len(cluster_sizes))] for j in range(len(cluster_sizes))]
    # generate graph G, which is an SBM with 5 clusters
G = sbm(n=cluster_sizes, p=p)
'''
    G: generated SBM graph with 5 clusters
'''
# define weight vectors for each cluster of the graph
W = []
for i in range(len(cluster_sizes)):
        # the weight vector for the i-th cluster
W.append(np.random.random(n))
return get_sbm_data(cluster_sizes, G, W, m, n, noise_sd, is_torch_model)
import random
PENALTY_FUNCS = ['norm1', 'norm2', 'mocha']
LAMBDA_LASSO = {'norm1': 0.01, 'norm2': 0.01, 'mocha': 0.05}
K = 1000
B, weight_vec, true_labels, datapoints = get_sbm_5blocks_data(pin=0.5, pout=0.001, is_torch_model=False)
E, N = B.shape
alg1_scores = defaultdict(list)
linear_regression_scores = defaultdict(list)
decision_tree_scores = defaultdict(list)
num_tries = 5
for i in range(num_tries):
samplingset = random.sample([i for i in range(N)], k=int(0.2* N))
for penalty_func in PENALTY_FUNCS:
lambda_lasso = LAMBDA_LASSO[penalty_func]
_, predicted_w = algorithm_1(K, B, weight_vec, datapoints, true_labels, samplingset, lambda_lasso, penalty_func)
alg1_score, linear_regression_score, decision_tree_score = get_scores(datapoints, predicted_w, samplingset)
alg1_scores[penalty_func].append(alg1_score)
linear_regression_scores[penalty_func].append(linear_regression_score)
decision_tree_scores[penalty_func].append(decision_tree_score)
labels = ['alg1,norm1', 'alg1,norm2', 'alg1,mocha', 'linear reg', 'decision tree']
x_pos = np.arange(len(labels))
print('algorithm 1, norm1:',
'\n mean train MSE:', np.mean([item['train'] for item in alg1_scores['norm1']]),
'\n mean test MSE:', np.mean([item['test'] for item in alg1_scores['norm1']]))
print('algorithm 1, norm2:',
'\n mean train MSE:', np.mean([item['train'] for item in alg1_scores['norm2']]),
'\n mean test MSE:', np.mean([item['test'] for item in alg1_scores['norm2']]))
print('algorithm 1, mocha:',
'\n mean train MSE:', np.mean([item['train'] for item in alg1_scores['mocha']]),
'\n mean test MSE:', np.mean([item['test'] for item in alg1_scores['mocha']]))
print('linear regression:',
'\n mean train MSE:', np.mean([item['train'] for item in linear_regression_scores['norm1']]),
'\n mean test MSE:', np.mean([item['test'] for item in linear_regression_scores['norm1']]))
print('decision tree:',
'\n mean train MSE:', np.mean([item['train'] for item in decision_tree_scores['norm1']]),
'\n mean test MSE:', np.mean([item['test'] for item in decision_tree_scores['norm1']]))
alg1_norm1_score = [item['total'] for item in alg1_scores['norm1']]
alg1_norm2_score = [item['total'] for item in alg1_scores['norm2']]
alg1_mocha_score = [item['total'] for item in alg1_scores['mocha']]
linear_regression_score = [item['total'] for item in linear_regression_scores['norm1']]
decision_tree_score = [item['total'] for item in decision_tree_scores['norm1']]
mean_MSEs = [
np.mean(alg1_norm1_score),
np.mean(alg1_norm2_score),
np.mean(alg1_mocha_score),
np.mean(linear_regression_score),
np.mean(decision_tree_score)
]
std_MSEs = [
np.std(alg1_norm1_score),
np.std(alg1_norm2_score),
np.std(alg1_mocha_score),
np.std(linear_regression_score),
np.std(decision_tree_score)]
fig, ax = plt.subplots()
ax.bar(x_pos, mean_MSEs,
yerr=std_MSEs,
align='center',
alpha=0.5,
ecolor='black',
capsize=20)
ax.set_ylabel('MSE')
ax.set_xticks(x_pos)
ax.set_xticklabels(labels)
ax.set_yscale('log')
ax.set_title('error bars plot')
plt.show()
plt.close()
import scipy
version = scipy.version.version
print(version)
```
```
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# E2E ML on GCP: MLOps stage 3 : formalization: get started with custom training pipeline components
<table align="left">
<td>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/ml_ops_stage3/get_started_with_custom_training_pipeline_components.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
<td>
<a href="https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/ml_ops_stage3/get_started_with_custom_training_pipeline_components.ipynb">
Open in Google Cloud Notebooks
</a>
</td>
</table>
<br/><br/><br/>
## Overview
This tutorial demonstrates how to use Vertex AI for E2E MLOps on Google Cloud in production. It covers stage 3 (formalization): getting started with custom training pipeline components.
### Dataset
The dataset used for this tutorial is the [Flowers dataset](https://www.tensorflow.org/datasets/catalog/tf_flowers) from [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overview). The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. The trained model predicts the type of flower an image is from a class of five flowers: daisy, dandelion, rose, sunflower, or tulip.
## Installations
Install the packages for executing the MLOps notebooks (*one time* only).
```
ONCE_ONLY = False
if ONCE_ONLY:
! pip3 install -U tensorflow==2.5 $USER_FLAG
! pip3 install -U tensorflow-data-validation==1.2 $USER_FLAG
! pip3 install -U tensorflow-transform==1.2 $USER_FLAG
! pip3 install -U tensorflow-io==0.18 $USER_FLAG
! pip3 install --upgrade google-cloud-aiplatform[tensorboard] $USER_FLAG
! pip3 install --upgrade google-cloud-pipeline-components $USER_FLAG
! pip3 install --upgrade google-cloud-bigquery $USER_FLAG
! pip3 install --upgrade google-cloud-logging $USER_FLAG
! pip3 install --upgrade apache-beam[gcp] $USER_FLAG
! pip3 install --upgrade pyarrow $USER_FLAG
! pip3 install --upgrade cloudml-hypertune $USER_FLAG
! pip3 install --upgrade kfp $USER_FLAG
```
### Restart the kernel
Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
```
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
```
#### Set your project ID
**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
```
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
```
#### Region
You can also change the `REGION` variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.
- Americas: `us-central1`
- Europe: `europe-west4`
- Asia Pacific: `asia-east1`
You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.
Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations).
```
REGION = "us-central1" # @param {type: "string"}
```
#### Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
```
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
```
### Create a Cloud Storage bucket
**The following steps are required, regardless of your notebook environment.**
When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
```
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
```
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
```
! gsutil mb -l $REGION $BUCKET_NAME
```
Finally, validate access to your Cloud Storage bucket by examining its contents:
```
! gsutil ls -al $BUCKET_NAME
```
#### Service Account
**If you don't know your service account**, try to get it with the `gcloud` command by executing the cell below.
```
SERVICE_ACCOUNT = "[your-service-account]" # @param {type:"string"}
if (
SERVICE_ACCOUNT == ""
or SERVICE_ACCOUNT is None
or SERVICE_ACCOUNT == "[your-service-account]"
):
# Get your GCP project id from gcloud
shell_output = !gcloud auth list 2>/dev/null
SERVICE_ACCOUNT = shell_output[2].strip()
print("Service Account:", SERVICE_ACCOUNT)
```
#### Set service account access for Vertex AI Pipelines
Run the following commands to grant your service account access to read and write pipeline artifacts in the bucket that you created in the previous step -- you only need to run these once per service account.
```
! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectCreator $BUCKET_NAME
! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectViewer $BUCKET_NAME
```
### Set up variables
Next, set up some variables used throughout the tutorial.
### Import libraries and define constants
```
import google.cloud.aiplatform as aip
import json
from kfp import dsl
from kfp.v2 import compiler
from kfp.v2.dsl import component
```
### Initialize Vertex AI SDK for Python
Initialize the Vertex AI SDK for Python for your project and corresponding bucket.
```
aip.init(project=PROJECT_ID, location=REGION, staging_bucket=BUCKET_NAME)
```
#### Set hardware accelerators
You can set hardware accelerators for training and prediction.
Set the variables `TRAIN_GPU/TRAIN_NGPU` and `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Tesla K80 GPUs allocated to each VM, you would specify:
(aip.AcceleratorType.NVIDIA_TESLA_K80, 4)
Otherwise specify `(None, None)` to use a container image to run on a CPU.
Learn more about [hardware accelerator support for your region](https://cloud.google.com/vertex-ai/docs/general/locations#accelerators).
*Note*: TF releases before 2.3 for GPU support will fail to load the custom model in this tutorial. It is a known issue and fixed in TF 2.3. This is caused by static graph ops that are generated in the serving function. If you encounter this issue on your own custom models, use a container image for TF 2.3 with GPU support.
```
if os.getenv("IS_TESTING_TRAIN_GPU"):
TRAIN_GPU, TRAIN_NGPU = (
aip.gapic.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_TRAIN_GPU")),
)
else:
TRAIN_GPU, TRAIN_NGPU = (aip.gapic.AcceleratorType.NVIDIA_TESLA_K80, 1)
if os.getenv("IS_TESTING_DEPLOY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.gapic.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPLOY_GPU")),
)
else:
DEPLOY_GPU, DEPLOY_NGPU = (None, None)
```
#### Set pre-built containers
Set the pre-built Docker container image for training and prediction.
For the latest list, see [Pre-built containers for training](https://cloud.google.com/ai-platform-unified/docs/training/pre-built-containers).
For the latest list, see [Pre-built containers for prediction](https://cloud.google.com/ai-platform-unified/docs/predictions/pre-built-containers).
```
if os.getenv("IS_TESTING_TF"):
TF = os.getenv("IS_TESTING_TF")
else:
TF = "2.5".replace(".", "-")
if TF[0] == "2":
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf2-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf2-cpu.{}".format(TF)
else:
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf-cpu.{}".format(TF)
TRAIN_IMAGE = "{}-docker.pkg.dev/vertex-ai/training/{}:latest".format(
REGION.split("-")[0], TRAIN_VERSION
)
DEPLOY_IMAGE = "{}-docker.pkg.dev/vertex-ai/prediction/{}:latest".format(
REGION.split("-")[0], DEPLOY_VERSION
)
print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU)
```
#### Set machine type
Next, set the machine type to use for training and prediction.
- Set the variables `TRAIN_COMPUTE` and `DEPLOY_COMPUTE` to configure the compute resources for the VMs you will use for training and prediction.
- `machine type`
- `n1-standard`: 3.75GB of memory per vCPU.
- `n1-highmem`: 6.5GB of memory per vCPU
- `n1-highcpu`: 0.9 GB of memory per vCPU
- `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \]
*Note: The following is not supported for training:*
- `standard`: 2 vCPUs
- `highcpu`: 2, 4 and 8 vCPUs
*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*.
```
if os.getenv("IS_TESTING_TRAIN_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
```
#### Location of Cloud Storage training data.
Now set the variable `IMPORT_FILE` to the location of the CSV index file in Cloud Storage.
```
IMPORT_FILE = (
"gs://cloud-samples-data/vision/automl_classification/flowers/all_data_v2.csv"
)
```
### Examine the training package
#### Package layout
Before you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.
- PKG-INFO
- README.md
- setup.cfg
- setup.py
- trainer
- \_\_init\_\_.py
- task.py
The files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image.
The file `trainer/task.py` is the Python script for executing the custom training job. *Note*: when it is referred to in the worker pool specification, the directory slash is replaced with a dot (`trainer.task`) and the file suffix (`.py`) is dropped, as sketched below.
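For illustration only, here is a hedged sketch (not part of this notebook's pipeline; the package URI below is only uploaded later in the tutorial) of how the package layout maps onto the module name Vertex AI expects:
```
# Hypothetical sketch: trainer/task.py inside the package is referenced as the
# Python module "trainer.task" when defining a custom Python package training job.
job = aip.CustomPythonPackageTrainingJob(
    display_name="flowers-custom-training",
    python_package_gcs_uri=f"{BUCKET_NAME}/trainer_flowers.tar.gz",  # uploaded later in this notebook
    python_module_name="trainer.task",  # trainer/task.py -> trainer.task (no .py suffix)
    container_uri=TRAIN_IMAGE,
)
```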
#### Package Assembly
In the following cells, you will assemble the training package.
```
# Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow==2.5.0',\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: Flowers image classification\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: [email protected]\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
```
### Create the task script for the Python training package
Next, you create the `task.py` script for driving the training package. Some notable steps include:
- Command-line arguments:
- `data-format` The format of the data. In this example, the data is exported from an `ImageDataSet` and will be in a JSONL format.
- `train-data-dir`, `val-data-dir`, `test-data-dir`: The Cloud Storage locations of the train, validation and test data. When using Vertex AI custom training, these locations will be specified in the corresponding environment variables: `AIP_TRAINING_DATA_URI`, `AIP_VALIDATION_DATA_URI`, and `AIP_TEST_DATA_URI`.
 - `model-dir`: The location to save the trained model. When using Vertex AI custom training, the location will be specified in the environment variable `AIP_MODEL_DIR`.
 - `distribute`: single, mirrored or multi-worker distributed training strategy.
- Data preprocessing (`get_data()`):
 - Reads the one or more JSONL data files for a dataset and constructs a `tf.data.Dataset` generator for data preprocessing and model feeding.
- Model architecture (`get_model()`):
- Builds the corresponding model architecture.
- Training (`train_model()`):
- Trains the model
- Model artifact saving
- Saves the model artifacts where the Cloud Storage location is determined based on the type of distribution training strategy.
```
%%writefile custom/trainer/task.py
import tensorflow as tf
from tensorflow.python.client import device_lib
import argparse
import os
import sys
import json
import logging
import tqdm
def parse_args():
parser = argparse.ArgumentParser(description="TF.Keras Image Classification")
# data source
parser.add_argument("--data-format", default=os.getenv('AIP_DATA_FORMAT'), dest="data_format", type=str, help="data format")
parser.add_argument("--train-data-dir", default=os.getenv('AIP_TRAINING_DATA_URI'), dest="train_data_dir", type=str, help="train data directory")
parser.add_argument("--val-data-dir", default=os.getenv('AIP_VALIDATION_DATA_URI'), dest="val_data_dir", type=str, help="validation data directory")
parser.add_argument("--test-data-dir", default=os.getenv('AIP_TEST_DATA_URI'), dest="test_data_dir", type=str, help="test data directory")
# data preprocessing
parser.add_argument("--image-width", dest="image_width", default=32, type=int, help="image width")
parser.add_argument("--image-height", dest="image_height", default=32, type=int, help="image height")
# model artifact location
parser.add_argument(
"--model-dir",
default=os.getenv("AIP_MODEL_DIR"),
type=str,
help="model directory",
)
# training hyperparameters
parser.add_argument(
"--lr", dest="lr", default=0.01, type=float, help="Learning rate."
)
parser.add_argument("--batch-size", default=16, type=int, help="mini-batch size")
parser.add_argument(
"--epochs", default=10, type=int, help="number of training epochs"
)
parser.add_argument(
"--steps",
dest="steps",
default=200,
type=int,
help="Number of steps per epoch.",
)
parser.add_argument(
"--distribute",
dest="distribute",
type=str,
default="single",
help="distributed training strategy",
)
args = parser.parse_args()
return args
args = parse_args()
logging.getLogger().setLevel(logging.DEBUG)
logging.info('DEVICES' + str(device_lib.list_local_devices()))
# Single Machine, single compute device
if args.distribute == 'single':
if tf.test.is_gpu_available():
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
logging.info("Single device training")
# Single Machine, multiple compute device
elif args.distribute == 'mirrored':
strategy = tf.distribute.MirroredStrategy()
logging.info("Mirrored Strategy distributed training")
# Multi Machine, multiple compute device
elif args.distribute == 'multiworker':
strategy = tf.distribute.MultiWorkerMirroredStrategy()
logging.info("Multi-worker Strategy distributed training")
logging.info('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
logging.info('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
NUM_WORKERS = strategy.num_replicas_in_sync
GLOBAL_BATCH_SIZE = args.batch_size * NUM_WORKERS
def _is_chief(task_type, task_id):
''' Check for primary if multiworker training
'''
return (task_type == 'chief') or (task_type == 'worker' and task_id == 0) or task_type is None
def get_data():
logging.info('DATA_FORMAT ' + args.data_format)
logging.info('TRAINING_DATA_URI ' + args.train_data_dir)
logging.info('VALIDATION_DATA_URI ' + args.val_data_dir)
logging.info('TEST_DATA_URI ' + args.test_data_dir)
class_names = ["daisy", "dandelion", "roses", "sunflowers", "tulips"]
class_indices = dict(zip(class_names, range(len(class_names))))
num_classes = len(class_names)
GLOBAL_BATCH_SIZE = args.batch_size * NUM_WORKERS
def parse_image(filename):
image = tf.io.read_file(filename)
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, [args.image_width, args.image_height])
return image
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255.0
return image, label
def extract(data_dir, batch_size=GLOBAL_BATCH_SIZE, repeat=True):
data = []
labels = []
for data_uri in tqdm.tqdm(tf.io.gfile.glob(pattern=data_dir)):
with tf.io.gfile.GFile(name=data_uri, mode="r") as gfile:
for line in gfile.readlines():
instance = json.loads(line)
data.append(instance["imageGcsUri"])
classification_annotation = instance["classificationAnnotations"][0]
label = classification_annotation["displayName"]
labels.append(class_indices[label])
data_dataset = tf.data.Dataset.from_tensor_slices(data)
data_dataset = data_dataset.map(
parse_image, num_parallel_calls=tf.data.experimental.AUTOTUNE
)
label_dataset = tf.data.Dataset.from_tensor_slices(labels)
label_dataset = label_dataset.map(lambda x: tf.one_hot(x, num_classes))
dataset = tf.data.Dataset.zip((data_dataset, label_dataset)).map(scale).cache().shuffle(batch_size * 32)
if repeat:
dataset = dataset.repeat()
dataset = dataset.batch(batch_size)
# Add property to retain the class names
dataset.class_names = class_names
return dataset
logging.info('Prepare training data')
train_dataset = extract(args.train_data_dir)
logging.info('Prepare validation data')
val_dataset = extract(args.val_data_dir, batch_size=1, repeat=False)
return num_classes, train_dataset, val_dataset
def get_model(num_classes):
logging.info("Get model architecture")
model = tf.keras.Sequential(
[
tf.keras.layers.Conv2D(
32, 3, activation="relu", input_shape=(args.image_width, args.image_height, 3)
),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Conv2D(32, 3, activation="relu"),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(num_classes, activation="softmax"),
]
)
model.compile(
loss=tf.keras.losses.categorical_crossentropy,
optimizer=tf.keras.optimizers.SGD(learning_rate=args.lr),
metrics=["accuracy"],
)
return model
def train_model(model, train_dataset, val_dataset):
logging.info("Start model training")
history = model.fit(
x=train_dataset, epochs=args.epochs, validation_data=val_dataset, steps_per_epoch=args.steps
)
return history
num_classes, train_dataset, val_dataset = get_data()
with strategy.scope():
model = get_model(num_classes=num_classes)
history = train_model(model, train_dataset, val_dataset)
logging.info("Save the model to: " + args.model_dir)
if args.distribute == 'multiworker':
task_type, task_id = (strategy.cluster_resolver.task_type,
strategy.cluster_resolver.task_id)
else:
task_type, task_id = None, None
# single, mirrored or primary for multiworker
if _is_chief(task_type, task_id):
model.save(args.model_dir)
# non-primary workers for multi-workers
else:
# each worker saves their model instance to a unique temp location
worker_dir = args.model_dir + '/workertemp_' + str(task_id)
tf.io.gfile.makedirs(worker_dir)
model.save(worker_dir)
```
#### Store training script on your Cloud Storage bucket
Next, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket.
```
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_flowers.tar.gz
```
## Construct custom training pipeline
In the example below, you construct a pipeline for training a custom model using pre-built Google Cloud Pipeline Components for Vertex AI Training, as follows:
1. Pipeline arguments, specify the locations of:
- `import_file`: The CSV index file for the dataset.
- `python_package`: The custom training Python package.
- `python_module`: The entry module in the package to execute.
2. Use the prebuilt component `ImageDatasetCreateOp` to create a Vertex AI Dataset resource, where:
- The display name for the dataset is passed into the pipeline.
- The import file for the dataset is passed into the pipeline.
- The component returns the dataset resource as `outputs["dataset"]`
3. Use the prebuilt component `CustomPythonPackageTrainingJobRunOp` to train a custom model and upload the custom model as a Vertex AI Model resource, where:
- The display name for the dataset is passed into the pipeline.
- The dataset is the output from the `ImageDatasetCreateOp`.
- The python package, command line argument are passed into the pipeline.
- The training and serving containers are specified in the pipeline definition.
- The component returns the model resource as `outputs["model"]`.
4. Use the prebuilt component `EndpointCreateOp` to create a Vertex AI Endpoint to deploy the trained model to, where:
- Since the component has no dependencies on other components, by default it would be executed in parallel with the model training.
 - The `after(training_op)` is added to serialize its execution, so it's only executed if the training operation completes successfully.
- The component returns the endpoint resource as `outputs["endpoint"]`.
5. Use the prebuilt component `ModelDeployOp` to deploy the trained Vertex AI model to, where:
- The display name for the dataset is passed into the pipeline.
- The model is the output from the `CustomPythonPackageTrainingJobRunOp`.
- The endpoint is the output from the `EndpointCreateOp`
*Note:* Since each component is executed as a graph node in its own execution context, you pass the parameter `project` for each component op, in contrast to doing an `aip.init(project=project)` if this were a Python script calling the SDK methods directly within the same execution context.
```
from google_cloud_pipeline_components import aiplatform as gcc_aip
PIPELINE_ROOT = "{}/pipeline_root/custom_icn_training".format(BUCKET_NAME)
@dsl.pipeline(
name="custom-icn-training", description="Custom image classification training"
)
def pipeline(
import_file: str,
display_name: str,
python_package: str,
python_module: str,
project: str = PROJECT_ID,
region: str = REGION,
):
dataset_op = gcc_aip.ImageDatasetCreateOp(
project=project,
display_name=display_name,
gcs_source=import_file,
import_schema_uri=aip.schema.dataset.ioformat.image.single_label_classification,
)
training_op = gcc_aip.CustomPythonPackageTrainingJobRunOp(
project=project,
display_name=display_name,
dataset=dataset_op.outputs["dataset"],
# Training
python_package_gcs_uri=python_package,
python_module_name=python_module,
container_uri=TRAIN_IMAGE,
staging_bucket=PIPELINE_ROOT,
annotation_schema_uri=aip.schema.dataset.annotation.image.classification,
args=["--epochs", "50", "--image-width", "32", "--image-height", "32"],
replica_count=1,
machine_type=TRAIN_COMPUTE,
accelerator_type=TRAIN_GPU.name,
accelerator_count=TRAIN_NGPU,
# Serving - As part of this operation, the model is registered to Vertex AI
model_serving_container_image_uri=DEPLOY_IMAGE,
model_display_name=display_name,
)
endpoint_op = gcc_aip.EndpointCreateOp(
project=project,
location=region,
display_name=display_name,
).after(training_op)
deploy_op = gcc_aip.ModelDeployOp(
model=training_op.outputs["model"],
endpoint=endpoint_op.outputs["endpoint"],
dedicated_resources_min_replica_count=1,
dedicated_resources_max_replica_count=1,
dedicated_resources_machine_type="n1-standard-4",
)
```
### Compile and execute the pipeline
Next, you compile the pipeline and then execute it. The pipeline takes the following parameters, which are passed as the dictionary `parameter_values`:
- `import_file`: The Cloud Storage path to the dataset index file.
- `display_name`: The display name for the generated Vertex AI resources.
- `python_package`: The Python package for the custom training job.
- `python_module`: The Python module in the package to execute.
- `project`: The project ID.
- `region`: The region.
```
compiler.Compiler().compile(
pipeline_func=pipeline, package_path="custom_icn_training.json"
)
pipeline = aip.PipelineJob(
display_name="custom_icn_training",
template_path="custom_icn_training.json",
pipeline_root=PIPELINE_ROOT,
parameter_values={
"import_file": IMPORT_FILE,
"display_name": "flowers" + TIMESTAMP,
"python_package": f"{BUCKET_NAME}/trainer_flowers.tar.gz",
"python_module": "trainer.task",
"project": PROJECT_ID,
"region": REGION,
},
)
pipeline.run()
! rm -f custom_icn_training.json
```
### Delete a pipeline job
After a pipeline job is completed, you can delete the pipeline job with the method `delete()`. Prior to completion, a pipeline job can be canceled with the method `cancel()`.
```
pipeline.delete()
```
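For completeness, a minimal sketch (not run in this tutorial) of stopping a pipeline run that is still in progress; `cancel()` is called on the same `PipelineJob` object before cleaning it up:
```
# Hedged sketch: cancel an in-flight pipeline run, then delete it once it has
# reached a terminal state (succeeded, failed, or cancelled).
pipeline.cancel()
pipeline.delete()
```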
# Cleaning up
To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud
project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
- Dataset
- Pipeline
- Model
- Endpoint
- AutoML Training Job
- Batch Job
- Custom Job
- Hyperparameter Tuning Job
- Cloud Storage Bucket
```
delete_all = True
if delete_all:
# Delete the dataset using the Vertex dataset object
try:
if "dataset" in globals():
dataset.delete()
except Exception as e:
print(e)
# Delete the model using the Vertex model object
try:
if "model" in globals():
model.delete()
except Exception as e:
print(e)
# Delete the endpoint using the Vertex endpoint object
try:
if "endpoint" in globals():
endpoint.delete()
except Exception as e:
print(e)
# Delete the AutoML or Pipeline training job
try:
if "dag" in globals():
dag.delete()
except Exception as e:
print(e)
# Delete the custom training job
try:
if "job" in globals():
job.delete()
except Exception as e:
print(e)
# Delete the batch prediction job using the Vertex batch prediction object
try:
if "batch_predict_job" in globals():
batch_predict_job.delete()
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object
try:
if "hpt_job" in globals():
hpt_job.delete()
except Exception as e:
print(e)
if "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
```
# SAMUR Emergency Frequencies
This notebook explores how the frequency of different types of emergency changes with time in relation to different periods (hours of the day, days of the week, months of the year...) and locations in Madrid. This will be useful for constructing a realistic emergency generator in the city simulation.
Let's start with some imports and setup, and then read the table.
```
import pandas as pd
import datetime
import matplotlib.pyplot as plt
import yaml
%matplotlib inline
df = pd.read_csv("../data/emergency_data.csv")
df.head()
```
The column for the time of the call is a string, so let's change that into a timestamp.
```
df["time_call"] = pd.to_datetime(df["Solicitud"])
```
We will also need to assign a numerical code to each district of the city in order to properly vectorize the distributions and make it easier to work with other parts of the project.
```
district_codes = {
'Centro': 1,
'Arganzuela': 2,
'Retiro': 3,
'Salamanca': 4,
'Chamartín': 5,
'Tetuán': 6,
'Chamberí': 7,
'Fuencarral - El Pardo': 8,
'Moncloa - Aravaca': 9,
'Latina': 10,
'Carabanchel': 11,
'Usera': 12,
'Puente de Vallecas': 13,
'Moratalaz': 14,
'Ciudad Lineal': 15,
'Hortaleza': 16,
'Villaverde': 17,
'Villa de Vallecas': 18,
'Vicálvaro': 19,
'San Blas - Canillejas': 20,
'Barajas': 21,
}
df["district_code"] = df.Distrito.apply(lambda x: district_codes[x])
```
Each emergency has already been assigned a severity level, depending on the nature of the reported emergency.
```
df["severity"] = df["Gravedad"]
```
We also need the hour, weekday and month of each event in order to place it in the various distributions.
```
df["hour"] = df["time_call"].apply(lambda x: x.hour) # From 0 to 23
df["weekday"] = df["time_call"].apply(lambda x: x.weekday()+1) # From 1 (Mon) to 7 (Sun)
df["month"] = df["time_call"].apply(lambda x: x.month)
```
Let's also strip down the dataset to just the columns we need right now.
```
df = df[["district_code", "severity", "time_call", "hour", "weekday", "month"]]
df.head()
```
We are going to group the distributions by severity.
```
emergencies_per_grav = df.severity.value_counts().sort_index().rename("total_emergencies")
emergencies_per_grav
```
We will also need the global frequency of the emergencies:
```
total_seconds = (df.time_call.max()-df.time_call.min()).total_seconds()
frequencies_per_grav = (emergencies_per_grav / total_seconds).rename("emergency_frequencies")
frequencies_per_grav
```
Each emergency will need to be assigned a district. Assuming the distribution of emergencies is independent across district and time, each emergency will be assigned to a district according to a global probability derived from this dataset, as follows.
```
prob_per_district = (df.district_code.value_counts().sort_index()/df.district_code.value_counts().sum()).rename("distric_weight")
prob_per_district
```
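As a quick illustration (a sketch, not part of the analysis above), an emergency generator could draw a district for each new event directly from these empirical weights:
```
import numpy as np

# Hedged sketch: sample one district code using the empirical district weights.
rng = np.random.default_rng(seed=0)
district = rng.choice(prob_per_district.index.to_numpy(), p=prob_per_district.to_numpy())
print(district)
```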
To simplify the generation of emergencies, we are going to assume that the distributions of emergencies per hour, per weekday and per month are independent. This is obviously not fully true, but it is a good approximation for the chosen time frames.
```
hourly_dist = (df.hour.value_counts()/df.hour.value_counts().mean()).sort_index().rename("hourly_distribution")
daily_dist = (df.weekday.value_counts()/df.weekday.value_counts().mean()).sort_index().rename("daily_distribution")
monthly_dist = (df.month.value_counts()/df.month.value_counts().mean()).sort_index().rename("monthly_distribution")
```
We will actually make one of these per severity level.
This will allow us to modify the base emergency density of a given severity as follows:
```
def emergency_density(gravity, hour, weekday, month):
base_density = frequencies_per_grav[gravity]
density = base_density * hourly_dist[hour] * daily_dist[weekday] * monthly_dist[month]
return density
emergency_density(3, 12, 4, 5) # Emergency frequency for severity level 3, at 12 hours of a thursday in May
```
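For context, a minimal sketch of how the simulation's generator could use this density (this is an assumption about downstream use, not code from this project): treating the density as a Poisson rate, the waiting time until the next emergency is exponentially distributed.
```
import numpy as np

# Hedged sketch: sample the waiting time (in seconds) until the next
# severity-3 emergency at 12h on a Thursday in May, using the rate above.
rate = emergency_density(3, 12, 4, 5)   # expected emergencies per second
wait = np.random.default_rng(seed=0).exponential(scale=1.0 / rate)
print(round(wait / 60, 1), "minutes until the next severity-3 emergency")
```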
In order for the model to read these distributions, we will need to store them in a dict-like format, in this case YAML, which is easily readable by both humans and machines.
```
dists = {}
for severity in range(1, 6):
sub_df = df[df["severity"] == severity]
frequency = float(frequencies_per_grav.round(8)[severity])
hourly_dist = (sub_df.hour. value_counts()/sub_df.hour. value_counts().mean()).sort_index().round(5).to_dict()
daily_dist = (sub_df.weekday.value_counts()/sub_df.weekday.value_counts().mean()).sort_index().round(5).to_dict()
monthly_dist = (sub_df.month. value_counts()/sub_df.month. value_counts().mean()).sort_index().round(5).to_dict()
district_prob = (sub_df.district_code.value_counts()/sub_df.district_code.value_counts().sum()).sort_index().round(5).to_dict()
dists[severity] = {"frequency": frequency,
"hourly_dist": hourly_dist,
"daily_dist": daily_dist,
"monthly_dist": monthly_dist,
"district_prob": district_prob}
f = open("../data/distributions.yaml", "w+")
yaml.dump(dists, f, allow_unicode=True)
```
We can now check that the dictionary stored in the YAML file is the same one we have created.
```
with open("../data/distributions.yaml") as dist_file:
yaml_dict = yaml.safe_load(dist_file)
yaml_dict == dists
```
```
import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
import re
import time
import collections
import os
def build_dataset(words, n_words, atleast=1):
count = [['PAD', 0], ['GO', 1], ['EOS', 2], ['UNK', 3]]
counter = collections.Counter(words).most_common(n_words)
counter = [i for i in counter if i[1] >= atleast]
count.extend(counter)
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0:
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
lines = open('movie_lines.txt', encoding='utf-8', errors='ignore').read().split('\n')
conv_lines = open('movie_conversations.txt', encoding='utf-8', errors='ignore').read().split('\n')
id2line = {}
for line in lines:
_line = line.split(' +++$+++ ')
if len(_line) == 5:
id2line[_line[0]] = _line[4]
convs = [ ]
for line in conv_lines[:-1]:
_line = line.split(' +++$+++ ')[-1][1:-1].replace("'","").replace(" ","")
convs.append(_line.split(','))
questions = []
answers = []
for conv in convs:
for i in range(len(conv)-1):
questions.append(id2line[conv[i]])
answers.append(id2line[conv[i+1]])
def clean_text(text):
text = text.lower()
text = re.sub(r"i'm", "i am", text)
text = re.sub(r"he's", "he is", text)
text = re.sub(r"she's", "she is", text)
text = re.sub(r"it's", "it is", text)
text = re.sub(r"that's", "that is", text)
text = re.sub(r"what's", "that is", text)
text = re.sub(r"where's", "where is", text)
text = re.sub(r"how's", "how is", text)
text = re.sub(r"\'ll", " will", text)
text = re.sub(r"\'ve", " have", text)
text = re.sub(r"\'re", " are", text)
text = re.sub(r"\'d", " would", text)
text = re.sub(r"\'re", " are", text)
text = re.sub(r"won't", "will not", text)
text = re.sub(r"can't", "cannot", text)
text = re.sub(r"n't", " not", text)
text = re.sub(r"n'", "ng", text)
text = re.sub(r"'bout", "about", text)
text = re.sub(r"'til", "until", text)
text = re.sub(r"[-()\"#/@;:<>{}`+=~|.!?,]", "", text)
return ' '.join([i.strip() for i in filter(None, text.split())])
clean_questions = []
for question in questions:
clean_questions.append(clean_text(question))
clean_answers = []
for answer in answers:
clean_answers.append(clean_text(answer))
min_line_length = 2
max_line_length = 5
short_questions_temp = []
short_answers_temp = []
i = 0
for question in clean_questions:
if len(question.split()) >= min_line_length and len(question.split()) <= max_line_length:
short_questions_temp.append(question)
short_answers_temp.append(clean_answers[i])
i += 1
short_questions = []
short_answers = []
i = 0
for answer in short_answers_temp:
if len(answer.split()) >= min_line_length and len(answer.split()) <= max_line_length:
short_answers.append(answer)
short_questions.append(short_questions_temp[i])
i += 1
question_test = short_questions[500:550]
answer_test = short_answers[500:550]
short_questions = short_questions[:500]
short_answers = short_answers[:500]
concat_from = ' '.join(short_questions+question_test).split()
vocabulary_size_from = len(list(set(concat_from)))
data_from, count_from, dictionary_from, rev_dictionary_from = build_dataset(concat_from, vocabulary_size_from)
print('vocab from size: %d'%(vocabulary_size_from))
print('Most common words', count_from[4:10])
print('Sample data', data_from[:10], [rev_dictionary_from[i] for i in data_from[:10]])
print('filtered vocab size:',len(dictionary_from))
print("% of vocab used: {}%".format(round(len(dictionary_from)/vocabulary_size_from,4)*100))
concat_to = ' '.join(short_answers+answer_test).split()
vocabulary_size_to = len(list(set(concat_to)))
data_to, count_to, dictionary_to, rev_dictionary_to = build_dataset(concat_to, vocabulary_size_to)
print('vocab from size: %d'%(vocabulary_size_to))
print('Most common words', count_to[4:10])
print('Sample data', data_to[:10], [rev_dictionary_to[i] for i in data_to[:10]])
print('filtered vocab size:',len(dictionary_to))
print("% of vocab used: {}%".format(round(len(dictionary_to)/vocabulary_size_to,4)*100))
GO = dictionary_from['GO']
PAD = dictionary_from['PAD']
EOS = dictionary_from['EOS']
UNK = dictionary_from['UNK']
for i in range(len(short_answers)):
short_answers[i] += ' EOS'
class Chatbot:
    def __init__(self, size_layer, num_layers, embedded_size,
                 from_dict_size, to_dict_size, batch_size, learning_rate,
                 grad_clip=5.0, beam_width=5, force_teaching_ratio=0.5):
def cells(size, reuse=False):
return tf.nn.rnn_cell.GRUCell(size, reuse=reuse)
self.X = tf.placeholder(tf.int32, [None, None])
self.Y = tf.placeholder(tf.int32, [None, None])
self.X_seq_len = tf.count_nonzero(self.X, 1, dtype=tf.int32)
self.Y_seq_len = tf.count_nonzero(self.Y, 1, dtype=tf.int32)
batch_size = tf.shape(self.X)[0]
encoder_embeddings = tf.Variable(tf.random_uniform([from_dict_size, embedded_size], -1, 1))
decoder_embeddings = tf.Variable(tf.random_uniform([to_dict_size, embedded_size], -1, 1))
self.encoder_out = tf.nn.embedding_lookup(encoder_embeddings, self.X)
def bahdanau(size):
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(num_units = size,
memory = self.encoder_out)
return tf.contrib.seq2seq.AttentionWrapper(cell = cells(size),
attention_mechanism = attention_mechanism,
attention_layer_size = size)
def luong(size):
attention_mechanism = tf.contrib.seq2seq.LuongAttention(num_units = size,
memory = self.encoder_out)
return tf.contrib.seq2seq.AttentionWrapper(cell = cells(size),
attention_mechanism = attention_mechanism,
attention_layer_size = size)
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw = bahdanau(size_layer//2),
cell_bw = luong(size_layer//2),
inputs = self.encoder_out,
sequence_length = self.X_seq_len,
dtype = tf.float32,
scope = 'bidirectional_rnn_%d'%(n))
            self.encoder_out = tf.concat((out_fw, out_bw), 2)  # feed the bidirectional outputs into the next layer
bi_state = tf.concat((state_fw[0],state_bw[0]), -1)
encoder_state = tuple([bi_state] * num_layers)
dense = tf.layers.Dense(to_dict_size)
with tf.variable_scope('decode'):
attention_mechanism = tf.contrib.seq2seq.LuongAttention(
num_units = size_layer,
memory = self.encoder_out,
memory_sequence_length = self.X_seq_len)
luong_cells = tf.contrib.seq2seq.AttentionWrapper(
cell = tf.nn.rnn_cell.MultiRNNCell([cells(size_layer) for _ in range(num_layers)]),
attention_mechanism = attention_mechanism,
attention_layer_size = size_layer)
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
num_units = size_layer,
memory = self.encoder_out,
memory_sequence_length = self.X_seq_len)
bahdanau_cells = tf.contrib.seq2seq.AttentionWrapper(
cell = tf.nn.rnn_cell.MultiRNNCell([cells(size_layer) for _ in range(num_layers)]),
attention_mechanism = attention_mechanism,
attention_layer_size = size_layer)
decoder_cells = tf.nn.rnn_cell.MultiRNNCell([luong_cells, bahdanau_cells])
main = tf.strided_slice(self.Y, [0, 0], [batch_size, -1], [1, 1])
decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1)
training_helper = tf.contrib.seq2seq.ScheduledEmbeddingTrainingHelper(
inputs = tf.nn.embedding_lookup(decoder_embeddings, decoder_input),
sequence_length = self.Y_seq_len,
embedding = decoder_embeddings,
sampling_probability = 1 - force_teaching_ratio,
time_major = False)
training_decoder = tf.contrib.seq2seq.BasicDecoder(
cell = decoder_cells,
helper = training_helper,
initial_state = decoder_cells.zero_state(batch_size, tf.float32),
output_layer = tf.layers.Dense(to_dict_size))
training_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
decoder = training_decoder,
impute_finished = True,
maximum_iterations = tf.reduce_max(self.Y_seq_len))
self.training_logits = training_decoder_output.rnn_output
with tf.variable_scope('decode', reuse=True):
encoder_out_tiled = tf.contrib.seq2seq.tile_batch(self.encoder_out, beam_width)
encoder_state_tiled = tf.contrib.seq2seq.tile_batch(encoder_state, beam_width)
X_seq_len_tiled = tf.contrib.seq2seq.tile_batch(self.X_seq_len, beam_width)
attention_mechanism = tf.contrib.seq2seq.LuongAttention(
num_units = size_layer,
memory = encoder_out_tiled,
memory_sequence_length = X_seq_len_tiled)
luong_cells = tf.contrib.seq2seq.AttentionWrapper(
cell = tf.nn.rnn_cell.MultiRNNCell([cells(size_layer,reuse=True) for _ in range(num_layers)]),
attention_mechanism = attention_mechanism,
attention_layer_size = size_layer)
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
num_units = size_layer,
memory = encoder_out_tiled,
memory_sequence_length = X_seq_len_tiled)
bahdanau_cells = tf.contrib.seq2seq.AttentionWrapper(
cell = tf.nn.rnn_cell.MultiRNNCell([cells(size_layer,reuse=True) for _ in range(num_layers)]),
attention_mechanism = attention_mechanism,
attention_layer_size = size_layer)
decoder_cells = tf.nn.rnn_cell.MultiRNNCell([luong_cells, bahdanau_cells])
predicting_decoder = tf.contrib.seq2seq.BeamSearchDecoder(
cell = decoder_cells,
embedding = decoder_embeddings,
start_tokens = tf.tile(tf.constant([GO], dtype=tf.int32), [batch_size]),
end_token = EOS,
initial_state = decoder_cells.zero_state(batch_size * beam_width, tf.float32),
beam_width = beam_width,
output_layer = tf.layers.Dense(to_dict_size, _reuse=True),
length_penalty_weight = 0.0)
predicting_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
decoder = predicting_decoder,
impute_finished = False,
maximum_iterations = 2 * tf.reduce_max(self.X_seq_len))
self.predicting_ids = predicting_decoder_output.predicted_ids[:, :, 0]
masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)
self.cost = tf.contrib.seq2seq.sequence_loss(logits = self.training_logits,
targets = self.Y,
weights = masks)
self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)
y_t = tf.argmax(self.training_logits,axis=2)
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(y_t, masks)
mask_label = tf.boolean_mask(self.Y, masks)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
size_layer = 256
num_layers = 2
embedded_size = 128
learning_rate = 0.001
batch_size = 16
epoch = 20
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Chatbot(size_layer, num_layers, embedded_size, len(dictionary_from),
len(dictionary_to), batch_size,learning_rate)
sess.run(tf.global_variables_initializer())
def str_idx(corpus, dic):
X = []
for i in corpus:
ints = []
for k in i.split():
ints.append(dic.get(k,UNK))
X.append(ints)
return X
X = str_idx(short_questions, dictionary_from)
Y = str_idx(short_answers, dictionary_to)
X_test = str_idx(question_test, dictionary_from)
Y_test = str_idx(answer_test, dictionary_to)
def pad_sentence_batch(sentence_batch, pad_int):
padded_seqs = []
seq_lens = []
max_sentence_len = max([len(sentence) for sentence in sentence_batch])
for sentence in sentence_batch:
padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence)))
seq_lens.append(len(sentence))
return padded_seqs, seq_lens
for i in range(epoch):
total_loss, total_accuracy = 0, 0
for k in range(0, len(short_questions), batch_size):
index = min(k+batch_size, len(short_questions))
batch_x, seq_x = pad_sentence_batch(X[k: index], PAD)
batch_y, seq_y = pad_sentence_batch(Y[k: index], PAD)
predicted, accuracy,loss, _ = sess.run([model.predicting_ids,
model.accuracy, model.cost, model.optimizer],
feed_dict={model.X:batch_x,
model.Y:batch_y})
total_loss += loss
total_accuracy += accuracy
total_loss /= (len(short_questions) / batch_size)
total_accuracy /= (len(short_questions) / batch_size)
print('epoch: %d, avg loss: %f, avg accuracy: %f'%(i+1, total_loss, total_accuracy))
for i in range(len(batch_x)):
print('row %d'%(i+1))
print('QUESTION:',' '.join([rev_dictionary_from[n] for n in batch_x[i] if n not in [0,1,2,3]]))
print('REAL ANSWER:',' '.join([rev_dictionary_to[n] for n in batch_y[i] if n not in[0,1,2,3]]))
print('PREDICTED ANSWER:',' '.join([rev_dictionary_to[n] for n in predicted[i] if n not in[0,1,2,3]]),'\n')
batch_x, seq_x = pad_sentence_batch(X_test[:batch_size], PAD)
batch_y, seq_y = pad_sentence_batch(Y_test[:batch_size], PAD)
predicted = sess.run(model.predicting_ids, feed_dict={model.X:batch_x})
for i in range(len(batch_x)):
print('row %d'%(i+1))
print('QUESTION:',' '.join([rev_dictionary_from[n] for n in batch_x[i] if n not in [0,1,2,3]]))
print('REAL ANSWER:',' '.join([rev_dictionary_to[n] for n in batch_y[i] if n not in[0,1,2,3]]))
print('PREDICTED ANSWER:',' '.join([rev_dictionary_to[n] for n in predicted[i] if n not in[0,1,2,3]]),'\n')
```
# S3Fs Notebook Example
S3Fs is a Pythonic file interface to S3. It builds on top of botocore.
The top-level class S3FileSystem holds connection information and allows typical file-system style operations like cp, mv, ls, du, glob, etc., as well as put/get of local files to/from S3.
The connection can be anonymous - in which case only publicly-available, read-only buckets are accessible - or via credentials explicitly supplied or in configuration files.
API Version 2021.06.0
https://buildmedia.readthedocs.org/media/pdf/s3fs/latest/s3fs.pdf
Note: If you get errors like `ModuleNotFoundError: No module named 's3fs'`, try `pip install s3fs` in a terminal and then restart your notebook:
```
import json
import os
import s3fs
```
Load the credentials file .json to make a connection to `S3FileSystem`
```
tenant="standard"
with open(f'/vault/secrets/minio-{tenant}-tenant-1.json') as f:
creds = json.load(f)
```
Calling open() on a S3FileSystem (typically using a context manager) provides an S3File for read or write access to a particular key. The object emulates the standard File protocol (read, write, tell, seek), such that functions expecting a file can access S3.
```
HOST = creds['MINIO_URL']
SECURE = HOST.startswith('https')
fs = s3fs.S3FileSystem(
anon=False,
use_ssl=SECURE,
client_kwargs=
{
"region_name": "us-east-1",
"endpoint_url": creds['MINIO_URL'],
"aws_access_key_id": creds['AWS_ACCESS_KEY_ID'],
"aws_secret_access_key": creds['AWS_SECRET_ACCESS_KEY']
}
)
```
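As mentioned above, calling `open()` on the `S3FileSystem` (typically in a context manager) returns a file-like `S3File` object that supports the usual `read`/`write`/`seek` calls. Here is a minimal sketch using the `fs` object created above; the key is only a hypothetical example path under your namespace, so point it at any prefix you can write to.
```
# Write a small text object through the file-like interface, then read it back.
demo_key = os.environ['NB_NAMESPACE'] + '/s3fs-examples/open-demo.txt'
with fs.open(demo_key, 'w') as f:
    f.write('hello from fs.open()\n')
with fs.open(demo_key, 'r') as f:
    print(f.read())
```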
## Upload a file
Now that your personal bucket exists you can upload your files! We can use
`example.txt` from the same folder as this notebook.
**Note:** Bucket storage doesn't actually have real directories, so you won't
find any functions for creating them. But some software will show you a
directory structure by looking at the slashes (`/`) in the file names. We'll use
this to put `example.txt` under an `/s3fs-examples` faux directory.
```
# Desired location in the bucket
#NB_NAMESPACE: namespace of user e.g. rohan-katkar
LOCAL_FILE='example.txt'
REMOTE_FILE= os.environ['NB_NAMESPACE']+'/s3fs-examples/Happy-DAaaS-Bird.txt'
fs.put(LOCAL_FILE,REMOTE_FILE)
```
## Check path exists in bucket
```
fs.exists(os.environ['NB_NAMESPACE']+'/s3fs-examples')
```
## List objects in bucket
```
fs.ls(os.environ['NB_NAMESPACE'])
```
## List objects in path
```
x = []
x= fs.ls(os.environ['NB_NAMESPACE'] +'/s3fs-examples')
for obj in x:
print(f'Name: {obj}')
```
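## Check disk usage and glob for files
The introduction also mentions `du` and `glob`; they are not used elsewhere in this notebook, but a quick sketch against the same faux directory looks like this (the paths assume the example file uploaded above).
```
# Total bytes stored under the faux directory
print(fs.du(os.environ['NB_NAMESPACE'] + '/s3fs-examples'))
# Glob for text files under the same prefix
print(fs.glob(os.environ['NB_NAMESPACE'] + '/s3fs-examples/*.txt'))
```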
## Download a file
S3Fs also offers `download(rpath, lpath[, recursive])`, but that method has known issues; `get` is equivalent, so we use it here.
```
from shutil import copyfileobj
DL_FILE='downloaded_s3fsexample.txt'
fs.get(os.environ['NB_NAMESPACE']+'/s3fs-examples/Happy-DAaaS-Bird.txt', DL_FILE)
with open(DL_FILE, 'r') as file:
print(file.read())
```
# That's it!
You've seen how to upload, list, and download files. You can do more things! For
more advanced usage, check out the full API documentation for the
[S3Fs Python SDK](https://s3fs.readthedocs.io/en/latest/api.html).
And don't forget that you can also do this all on the commandline with `mc`.
```
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
```
# Imports and Functions
```
import numpy as np
from scipy.stats import special_ortho_group
from scipy.spatial.transform import Rotation
from scipy.linalg import svd
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
FIGURE_SCALE = 1.0
FONT_SIZE = 20
plt.rcParams.update({
'figure.figsize': np.array((8, 6)) * FIGURE_SCALE,
'axes.labelsize': FONT_SIZE,
'axes.titlesize': FONT_SIZE,
'xtick.labelsize': FONT_SIZE,
'ytick.labelsize': FONT_SIZE,
'legend.fontsize': FONT_SIZE,
'lines.linewidth': 3,
'lines.markersize': 10,
})
def SO3_via_svd(A):
"""Map 3x3 matrix onto SO(3) via SVD."""
u, s, vt = np.linalg.svd(A)
s_SO3 = [1, 1, np.sign(np.linalg.det(np.matmul(u, vt)))]
return np.matmul(np.matmul(u, np.diag(s_SO3)), vt)
def SO3_via_gramschmidt(A):
"""Map 3x3 matrix on SO(3) via GS, ignores last column."""
x_normalized = A[:, 0] / np.linalg.norm(A[:, 0])
z = np.cross(x_normalized, A[:, 1])
z_normalized = z / np.linalg.norm(z)
y_normalized = np.cross(z_normalized, x_normalized)
return np.stack([x_normalized, y_normalized, z_normalized], axis=1)
def rotate_from_z(v):
"""Construct a rotation matrix R such that R * [0,0,||v||]^T = v.
Input v is shape (3,), output shape is 3x3 """
vn = v / np.linalg.norm(v)
theta = np.arccos(vn[2])
phi = np.arctan2(vn[1], vn[0])
r = Rotation.from_euler('zyz', [0, theta, phi])
R = np.squeeze(r.as_dcm()) # Maps Z to vn
return R
def perturb_rotation_matrix(R, kappa):
"""Perturb a random rotation matrix with noise.
Noise is random small rotation applied to each of the three
column vectors of R. Angle of rotation is sampled from the
von-Mises distribution on the circle (with uniform random azimuth).
  The von-Mises distribution is analogous to the Gaussian distribution on the circle.
Note, the concentration parameter kappa is inversely related to variance,
so higher kappa means less variance, less noise applied. Good ranges for
kappa are 64 (high noise) up to 512 (low noise).
"""
R_perturb = []
theta = np.random.vonmises(mu=0.0, kappa=kappa, size=(3,))
phi = np.random.uniform(low=0.0, high=np.pi*2.0, size=(3,))
for i in range(3):
v = R[:, i]
R_z_to_v = rotate_from_z(v)
r_noise_z = np.squeeze(Rotation.from_euler('zyz', [0, theta[i], phi[i]]).as_dcm())
v_perturb = np.matmul(R_z_to_v, np.matmul(r_noise_z, np.array([0,0,1])))
R_perturb.append(v_perturb)
R_perturb = np.stack(R_perturb, axis=-1)
return R_perturb
def sigma_to_kappa(sigma):
return ((0.5 - sigma) * 1024) + 64
# We create a ground truth special orthogonal matrix and perturb it with
# additive noise. We then see which orthogonalization process (SVD or GS) is
# better at recovering the ground truth matrix.
def run_expt(sigmas, num_trials, noise_type='gaussian'):
# Always use identity as ground truth, or pick random matrix.
# Nothing should change if we pick random (can verify by setting to True) since
# SVD and Gram-Schmidt are both Equivariant to rotations.
pick_random_ground_truth=False
all_errs_svd = []
all_errs_gs = []
all_geo_errs_svd = []
all_geo_errs_gs = []
all_noise_norms = []
all_noise_sq_norms = []
for sig in sigmas:
svd_errors = np.zeros(num_trials)
gs_errors = np.zeros(num_trials)
svd_geo_errors = np.zeros(num_trials)
gs_geo_errors = np.zeros(num_trials)
noise_norms = np.zeros(num_trials)
noise_sq_norms = np.zeros(num_trials)
for t in range(num_trials):
if pick_random_ground_truth:
A = special_ortho_group.rvs(3) # Pick a random ground truth matrix
else:
A = np.eye(3) # Our ground truth matrix in SO(3)
N = None
if noise_type == 'gaussian':
N = np.random.standard_normal(size=(3,3)) * sig
if noise_type == 'uniform':
N = np.random.uniform(-1, 1, (3, 3)) * sig
if noise_type == 'rademacher':
N = np.sign(np.random.uniform(-1, 1, (3, 3))) * sig
if noise_type == 'rotation':
A_perturb = perturb_rotation_matrix(A, kappa=sigma_to_kappa(sig))
N = A_perturb - A
if N is None:
        print('Error: unknown noise_type: %s' % noise_type)
return
AplusN = A + N # Ground-truth plus noise
noise_norm = np.linalg.norm(N)
noise_norm_sq = noise_norm**2
# Compute SVD result and error.
res_svd = SO3_via_svd(AplusN)
error_svd = np.linalg.norm(res_svd - A, ord='fro')**2
error_geodesic_svd = np.arccos(
(np.trace(np.matmul(np.transpose(res_svd), A))-1.0)/2.0);
# Compute GS result and error.
res_gs = SO3_via_gramschmidt(AplusN)
error_gs = np.linalg.norm(res_gs - A, ord='fro')**2
error_geodesic_gs = np.arccos(
(np.trace(np.matmul(np.transpose(res_gs), A))-1.0)/2.0);
svd_errors[t] = error_svd
gs_errors[t] = error_gs
svd_geo_errors[t] = error_geodesic_svd
gs_geo_errors[t] = error_geodesic_gs
noise_norms[t] = noise_norm
noise_sq_norms[t] = noise_norm_sq
all_errs_svd.append(svd_errors)
all_errs_gs.append(gs_errors)
all_geo_errs_svd.append(svd_geo_errors)
all_geo_errs_gs.append(gs_geo_errors)
all_noise_norms.append(noise_norms)
all_noise_sq_norms.append(noise_sq_norms)
print('finished sigma = %f / kappa = %f' % (sig, sigma_to_kappa(sig)))
return [np.array(x) for x in (
all_errs_svd, all_errs_gs,
all_geo_errs_svd, all_geo_errs_gs,
all_noise_norms, all_noise_sq_norms)]
boxprops = dict(linewidth=2)
medianprops = dict(linewidth=2)
whiskerprops = dict(linewidth=2)
capprops = dict(linewidth=2)
def make_diff_plot(svd_errs, gs_errs, xvalues, title='', ytitle='', xtitle=''):
plt.figure(figsize=(8,6))
plt.title(title, fontsize=16)
diff = gs_errs - svd_errs
step_size = np.abs(xvalues[1] - xvalues[0])
plt.boxplot(diff.T, positions=xvalues, widths=step_size/2, whis=[5, 95],
boxprops=boxprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops,
showmeans=False, meanline=True, showfliers=False)
plt.plot(xvalues, np.max(diff, axis=1), 'kx', markeredgewidth=2)
plt.plot(xvalues, np.min(diff, axis=1), 'kx', markeredgewidth=2)
xlim = [np.min(xvalues) - (step_size / 3), np.max(xvalues) + (step_size / 3)]
plt.xlim(xlim)
plt.plot(xlim, [0, 0], 'k--', linewidth=1)
plt.xlabel(xtitle, fontsize=16)
plt.ylabel(ytitle, fontsize=16)
plt.tight_layout()
```
# Global Params
```
num_trials = 100000 # Num trials at each sigma
sigmas = np.linspace(0.125, 0.5, 4)
```
# Gaussian Noise
Here we generate a noise matrix with iid Gaussian entries drawn from
$\sigma N(0,1)$.
The "Frobenius Error Diff" shows the distributions of the error differences
$\|A - \textrm{GS}(\tilde A)\|_F^2 - \|A - \textrm{SVD}(\tilde A)\|_F^2$ for
different values of $\sigma$. The "Geodesic Error Diff" plot shows the
analogous data, but in terms of the geodesic error.
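For reference, the geodesic error reported below is the rotation angle between the estimate $\hat R$ and the ground truth $R$, matching the `arccos` expression used in `run_expt` above:
$$d_{\mathrm{geo}}(R, \hat R) = \arccos\left(\frac{\mathrm{tr}(R^\top \hat R) - 1}{2}\right)$$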
```
(all_errs_svd, all_errs_gs,
all_geo_errs_svd, all_geo_errs_gs,
all_noise_norms, all_noise_sq_norms
) = run_expt(sigmas, num_trials, noise_type='gaussian')
plt.plot(sigmas,
3*sigmas**2,
'--b',
label='3 $\\sigma^2$')
plt.errorbar(sigmas,
all_errs_svd.mean(axis=1),
color='b',
label='E[$\\|\\|\\mathrm{SVD}^+(M) - R\\|\\|_F^2]$')
plt.plot(sigmas, 6*sigmas**2,
'--r',
label='6 $\\sigma^2$')
plt.errorbar(sigmas,
all_errs_gs.mean(axis=1),
color='r',
label='E[$\\|\\|\\mathrm{GS}^+(M) - R\\|\\|_F^2$]')
plt.xlabel('$\\sigma$')
plt.legend(loc='upper left')
make_diff_plot(all_errs_svd, all_errs_gs, sigmas, title='Gaussian Noise', ytitle='Frobenius Error Diff', xtitle='$\\sigma$')
make_diff_plot(all_geo_errs_svd, all_geo_errs_gs, sigmas, title='Gaussian Noise', ytitle='Geodesic Error Diff', xtitle='$\\sigma$')
```
# Uniform Noise
Here, the noise matrix is constructed with iid entries drawn from $\sigma \textrm{Unif}(-1, 1)$.
```
(all_errs_svd, all_errs_gs,
all_geo_errs_svd, all_geo_errs_gs,
all_noise_norms, all_noise_sq_norms
) = run_expt(sigmas, num_trials, noise_type='uniform')
make_diff_plot(all_errs_svd, all_errs_gs, sigmas, title='Uniform Noise', ytitle='Frobenius Error Diff', xtitle='$\\sigma$')
make_diff_plot(all_geo_errs_svd, all_geo_errs_gs, sigmas, title='Uniform Noise', ytitle='Geodesic Error Diff', xtitle='$\\sigma$')
```
# Rotation Noise
Here the noise is a small random rotation applied to each column of the ground-truth matrix, with angles drawn from a von Mises distribution; the concentration parameter $\kappa$ is inversely related to the noise level, so higher $\kappa$ means less noise.
```
(all_errs_svd, all_errs_gs,
all_geo_errs_svd, all_geo_errs_gs,
all_noise_norms, all_noise_sq_norms
) = run_expt(sigmas, num_trials, noise_type='rotation')
make_diff_plot(all_errs_svd, all_errs_gs, sigma_to_kappa(sigmas), title='Rotation Noise', ytitle='Frobenius Error Diff', xtitle='$\\kappa$')
make_diff_plot(all_geo_errs_svd, all_geo_errs_gs, sigma_to_kappa(sigmas), title='Rotation Noise', ytitle='Geodesic Error Diff', xtitle='$\\kappa$')
```
```
import warnings

import iris
import matplotlib.pyplot as plt
import numpy as np

from wildfires.analysis import *
from wildfires.data.datasets import *
new = NewERA5_DryDayPeriod()
old = ERA5_DryDayPeriod()
old.cubes = iris.cube.CubeList([old.cube[:20]])
iris.cube.CubeList([new.cube, old.cube]).realise_data()
diff = new.cube.data - old.cube.data
rel_abs_diff = np.mean(np.abs(diff) / old.cube.data, axis=0)
rel_diff = np.mean(diff / old.cube.data, axis=0)
cube_plotting(new.cube, fig=plt.figure(figsize=(15, 7)), log=True)
cube_plotting(old.cube, fig=plt.figure(figsize=(15, 7)), log=True)
cube_plotting(rel_abs_diff, fig=plt.figure(figsize=(15, 7)))
cube_plotting(rel_diff, cmap_midpoint=0, fig=plt.figure(figsize=(15, 7)))
np.where(rel_diff == np.min(rel_diff))
new.cube.coord("latitude").points[449], new.cube.coord("longitude").points[837]
plt.hist(diff.flatten(), bins=1000)
plt.yscale("log")
import glob
import os
from tqdm import tqdm
tpdir = os.path.join(DATA_DIR, "ERA5", "tp_daily")
# Sort so that time is increasing.
filenames = sorted(
glob.glob(os.path.join(tpdir, "**", "*_daily_mean.nc"), recursive=True)
)
precip_cubes = iris.cube.CubeList()
prev_dry_day_period = None
prev_end = None
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=(
"Collapsing a non-contiguous coordinate. Metadata may not "
"be fully descriptive for 'time'."
),
)
for filename in tqdm(filenames[:20]):
raw_cube = iris.load_cube(filename)
precip_cubes.append(raw_cube)
precip_cubes = homogenise_cube_attributes(precip_cubes)
all_combined = precip_cubes.concatenate_cube()
iris.cube.CubeList([all_combined]).realise_data()
combined = all_combined.intersection(latitude=(22.25, 22.26), longitude=(29.25, 29.26))
N = 400
plt.figure(figsize=(20, 8))
plt.plot(combined.data.flatten()[:N], marker="o", linestyle="")
plt.hlines(y=M_PER_HR_THRES, xmin=0, xmax=N)
plt.figure(figsize=(20, 8))
plt.plot(
old.cube.intersection(
latitude=(22.25, 22.26), longitude=(29.25, 29.26)
).data.flatten()[: N // 30],
marker="o",
linestyle="",
)
plt.figure(figsize=(20, 8))
plt.plot(
new.cube.intersection(
latitude=(22.25, 22.26), longitude=(29.25, 29.26)
).data.flatten()[: N // 30],
marker="o",
linestyle="",
)
np.where(rel_diff == np.max(rel_diff))
all_combined.shape, old.cube.shape, new.cube.shape
old.cube.coord("latitude").points[403]
old.cube.coord("longitude").points[660]
plt.figure(figsize=(20, 8))
data = all_combined.intersection(latitude=(10.75, 10.76), longitude=(-15, -14.9)).data
max_d = np.max(data)
below = data < M_PER_HR_THRES
plt.scatter(
list(range(len(data))), data, marker="o", c=["r" if b else "b" for b in below]
)
plt.hlines(y=M_PER_HR_THRES, xmin=0, xmax=all_combined.shape[0])
x = 0
for cube in precip_cubes:
d = cube.shape[0]
plt.vlines(x=[x, x + d], ymin=0, ymax=max_d, colors="g")
x += d
plt.figure(figsize=(20, 8))
plt.plot(old.cube.data[:, 403, 660], marker="o", linestyle="")
plt.figure(figsize=(20, 8))
plt.plot(new.cube.data[:, 403, 660], marker="o", linestyle="")
import scipy.ndimage
# Find contiguous blocks in the time dimension where dry_days is True.
structure = np.zeros((3,), dtype=np.int64)
structure[:] = 1
labelled = scipy.ndimage.label(below, structure=structure)
slices = scipy.ndimage.find_objects(labelled[0])
labelled
slices
```
# Character-Level LSTM in PyTorch
In this notebook, I'll construct a character-level LSTM with PyTorch. The network will train character by character on some text, then generate new text character by character. As an example, I will train on Anna Karenina. **This model will be able to generate new text based on the text from the book!**
This network is based off of Andrej Karpathy's [post on RNNs](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) and [implementation in Torch](https://github.com/karpathy/char-rnn). Below is the general architecture of the character-wise RNN.
<img src="assets/charseq.jpeg" width="500">
First let's load in our required resources for data loading and model creation.
```
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
```
## Load in Data
Then, we'll load the Anna Karenina text file and convert it into integers for our network to use.
```
# open text file and read in data as `text`
with open('data/anna.txt', 'r') as f:
text = f.read()
```
Let's check out the first 100 characters, make sure everything is peachy. According to the [American Book Review](http://americanbookreview.org/100bestlines.asp), this is the 6th best first line of a book ever.
```
text[:100]
```
### Tokenization
In the cells, below, I'm creating a couple **dictionaries** to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
```
# encode the text and map each character to an integer and vice versa
# we create two dictionaries:
# 1. int2char, which maps integers to characters
# 2. char2int, which maps characters to unique integers
chars = tuple(set(text))
int2char = dict(enumerate(chars))
char2int = {ch: ii for ii, ch in int2char.items()}
# encode the text
encoded = np.array([char2int[ch] for ch in text])
```
And we can see those same characters from above, encoded as integers.
```
encoded[:100]
```
## Pre-processing the data
As you can see in our char-RNN image above, our LSTM expects an input that is **one-hot encoded** meaning that each character is converted into an integer (via our created dictionary) and *then* converted into a column vector where only its corresponding integer index will have the value of 1 and the rest of the vector will be filled with 0's. Since we're one-hot encoding the data, let's make a function to do that!
```
def one_hot_encode(arr, n_labels):
    # Initialize the encoded array
one_hot = np.zeros((arr.size, n_labels), dtype=np.float32)
# Fill the appropriate elements with ones
one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1.
# Finally reshape it to get back to the original array
one_hot = one_hot.reshape((*arr.shape, n_labels))
return one_hot
# check that the function works as expected
test_seq = np.array([[3, 5, 1]])
one_hot = one_hot_encode(test_seq, 8)
print(one_hot)
```
## Making training mini-batches
To train on this data, we also want to create mini-batches for training. Remember that we want our batches to be multiple sequences of some desired number of sequence steps. Considering a simple example, our batches would look like this:
<img src="assets/[email protected]" width=500px>
<br>
In this example, we'll take the encoded characters (passed in as the `arr` parameter) and split them into multiple sequences, given by `batch_size`. Each of our sequences will be `seq_length` long.
### Creating Batches
**1. The first thing we need to do is discard some of the text so we only have completely full mini-batches.**
Each batch contains $N \times M$ characters, where $N$ is the batch size (the number of sequences in a batch) and $M$ is the seq_length or number of time steps in a sequence. Then, to get the total number of batches, $K$, that we can make from the array `arr`, you divide the length of `arr` by the number of characters per batch. Once you know the number of batches, you can get the total number of characters to keep from `arr`, $N * M * K$.
**2. After that, we need to split `arr` into $N$ batches.**
You can do this using `arr.reshape(size)` where `size` is a tuple containing the dimensions sizes of the reshaped array. We know we want $N$ sequences in a batch, so let's make that the size of the first dimension. For the second dimension, you can use `-1` as a placeholder in the size, it'll fill up the array with the appropriate data for you. After this, you should have an array that is $N \times (M * K)$.
**3. Now that we have this array, we can iterate through it to get our mini-batches.**
The idea is each batch is a $N \times M$ window on the $N \times (M * K)$ array. For each subsequent batch, the window moves over by `seq_length`. We also want to create both the input and target arrays. Remember that the targets are just the inputs shifted over by one character. The way I like to do this window is use `range` to take steps of size `seq_length` from $0$ to `arr.shape[1]`, the total number of tokens in each sequence. That way, the integers you get from `range` always point to the start of a batch, and each window is `seq_length` wide.
> **TODO:** Write the code for creating batches in the function below. The exercises in this notebook _will not be easy_. I've provided a notebook with solutions alongside this notebook. If you get stuck, checkout the solutions. The most important thing is that you don't copy and paste the code into here, **type out the solution code yourself.**
```
def get_batches(arr, batch_size, seq_length):
'''Create a generator that returns batches of size
batch_size x seq_length from arr.
Arguments
---------
arr: Array you want to make batches from
batch_size: Batch size, the number of sequences per batch
seq_length: Number of encoded chars in a sequence
'''
batch_size_total = batch_size * seq_length
# total number of batches we can make
n_batches = len(arr)//batch_size_total
# Keep only enough characters to make full batches
arr = arr[:n_batches * batch_size_total]
# Reshape into batch_size rows
arr = arr.reshape((batch_size, -1))
# iterate through the array, one sequence at a time
for n in range(0, arr.shape[1], seq_length):
# The features
x = arr[:, n:n+seq_length]
# The targets, shifted by one
y = np.zeros_like(x)
try:
y[:, :-1], y[:, -1] = x[:, 1:], arr[:, n+seq_length]
except IndexError:
y[:, :-1], y[:, -1] = x[:, 1:], arr[:, 0]
yield x, y
```
### Test Your Implementation
Now I'll make some data sets and we can check out what's going on as we batch data. Here, as an example, I'm going to use a batch size of 8 and 50 sequence steps.
```
batches = get_batches(encoded, 8, 50)
x, y = next(batches)
# printing out the first 10 items in a sequence
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
```
If you implemented `get_batches` correctly, the above output should look something like
```
x
[[25 8 60 11 45 27 28 73 1 2]
[17 7 20 73 45 8 60 45 73 60]
[27 20 80 73 7 28 73 60 73 65]
[17 73 45 8 27 73 66 8 46 27]
[73 17 60 12 73 8 27 28 73 45]
[66 64 17 17 46 7 20 73 60 20]
[73 76 20 20 60 73 8 60 80 73]
[47 35 43 7 20 17 24 50 37 73]]
y
[[ 8 60 11 45 27 28 73 1 2 2]
[ 7 20 73 45 8 60 45 73 60 45]
[20 80 73 7 28 73 60 73 65 7]
[73 45 8 27 73 66 8 46 27 65]
[17 60 12 73 8 27 28 73 45 27]
[64 17 17 46 7 20 73 60 20 80]
[76 20 20 60 73 8 60 80 73 17]
[35 43 7 20 17 24 50 37 73 36]]
```
although the exact numbers may be different. Check to make sure the data is shifted over one step for `y`.
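If you'd rather check this programmatically than by eye, a small sketch using the `x`, `y` batch from the cell above is:
```
# Every target column except the last should equal the inputs shifted one step left
assert np.array_equal(x[:, 1:], y[:, :-1]), "targets are not the inputs shifted by one"
print("Targets are correctly shifted by one character.")
```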
---
## Defining the network with PyTorch
Below is where you'll define the network.
<img src="assets/charRNN.png" width=500px>
Next, you'll use PyTorch to define the architecture of the network. We start by defining the layers and operations we want. Then, define a method for the forward pass. You've also been given a method for predicting characters.
### Model Structure
In `__init__` the suggested structure is as follows:
* Create and store the necessary dictionaries (this has been done for you)
* Define an LSTM layer that takes as params: an input size (the number of characters), a hidden layer size `n_hidden`, a number of layers `n_layers`, a dropout probability `drop_prob`, and a batch_first boolean (True, since we are batching)
* Define a dropout layer with `drop_prob`
* Define a fully-connected layer with params: input size `n_hidden` and output size (the number of characters)
* Finally, initialize the weights (again, this has been given)
Note that some parameters have been named and given in the `__init__` function, and we use them and store them by doing something like `self.drop_prob = drop_prob`.
---
### LSTM Inputs/Outputs
You can create a basic [LSTM layer](https://pytorch.org/docs/stable/nn.html#lstm) as follows
```python
self.lstm = nn.LSTM(input_size, n_hidden, n_layers,
dropout=drop_prob, batch_first=True)
```
where `input_size` is the number of characters this cell expects to see as sequential input, and `n_hidden` is the number of units in the hidden layers in the cell. We can add dropout by passing a dropout probability; this automatically adds dropout between the stacked LSTM layers. The stacking itself is handled by `n_layers`, which feeds the output of one LSTM layer into the next. Finally, in the `forward` function, we flatten the LSTM outputs with `.view` before passing them to the fully-connected layer.
We also need to create an initial hidden state of all zeros. This is done like so
```python
self.init_hidden()
```
```
# check if GPU is available
train_on_gpu = torch.cuda.is_available()
if(train_on_gpu):
print('Training on GPU!')
else:
print('No GPU available, training on CPU; consider making n_epochs very small.')
class CharRNN(nn.Module):
def __init__(self, tokens, n_hidden=256, n_layers=2,
drop_prob=0.5, lr=0.001):
super().__init__()
self.drop_prob = drop_prob
self.n_layers = n_layers
self.n_hidden = n_hidden
self.lr = lr
# creating character dictionaries
self.chars = tokens
self.int2char = dict(enumerate(self.chars))
self.char2int = {ch: ii for ii, ch in self.int2char.items()}
## TODO: define the LSTM
self.lstm = nn.LSTM(len(self.chars), n_hidden, n_layers,
dropout=drop_prob, batch_first=True)
## TODO: define a dropout layer
self.dropout = nn.Dropout(drop_prob)
## TODO: define the final, fully-connected output layer
self.fc = nn.Linear(n_hidden, len(self.chars))
def forward(self, x, hidden):
''' Forward pass through the network.
These inputs are x, and the hidden/cell state `hidden`. '''
## TODO: Get the outputs and the new hidden state from the lstm
r_output, hidden = self.lstm(x, hidden)
## TODO: pass through a dropout layer
out = self.dropout(r_output)
# Stack up LSTM outputs using view
# you may need to use contiguous to reshape the output
out = out.contiguous().view(-1, self.n_hidden)
## TODO: put x through the fully-connected layer
out = self.fc(out)
# return the final output and the hidden state
return out, hidden
def init_hidden(self, batch_size):
''' Initializes hidden state '''
# Create two new tensors with sizes n_layers x batch_size x n_hidden,
# initialized to zero, for hidden state and cell state of LSTM
weight = next(self.parameters()).data
if (train_on_gpu):
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())
else:
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_())
return hidden
```
## Time to train
The train function gives us the ability to set the number of epochs, the learning rate, and other parameters.
Below we're using an Adam optimizer and cross entropy loss since we are looking at character class scores as output. We calculate the loss and perform backpropagation, as usual!
A couple of details about training:
>* Within the batch loop, we detach the hidden state from its history; this time setting it equal to a new *tuple* variable because an LSTM has a hidden state that is a tuple of the hidden and cell states.
* We use [`clip_grad_norm_`](https://pytorch.org/docs/stable/_modules/torch/nn/utils/clip_grad.html) to help prevent exploding gradients.
```
def train(net, data, epochs=10, batch_size=10, seq_length=50, lr=0.001, clip=5, val_frac=0.1, print_every=10):
''' Training a network
Arguments
---------
net: CharRNN network
data: text data to train the network
epochs: Number of epochs to train
batch_size: Number of mini-sequences per mini-batch, aka batch size
seq_length: Number of character steps per mini-batch
lr: learning rate
clip: gradient clipping
val_frac: Fraction of data to hold out for validation
print_every: Number of steps for printing training and validation loss
'''
net.train()
opt = torch.optim.Adam(net.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss()
# create training and validation data
val_idx = int(len(data)*(1-val_frac))
data, val_data = data[:val_idx], data[val_idx:]
if(train_on_gpu):
net.cuda()
counter = 0
n_chars = len(net.chars)
for e in range(epochs):
# initialize hidden state
h = net.init_hidden(batch_size)
for x, y in get_batches(data, batch_size, seq_length):
counter += 1
# One-hot encode our data and make them Torch tensors
x = one_hot_encode(x, n_chars)
inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
if(train_on_gpu):
inputs, targets = inputs.cuda(), targets.cuda()
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
h = tuple([each.data for each in h])
# zero accumulated gradients
net.zero_grad()
# get the output from the model
output, h = net(inputs, h)
# calculate the loss and perform backprop
loss = criterion(output, targets.view(batch_size*seq_length).long())
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
nn.utils.clip_grad_norm_(net.parameters(), clip)
opt.step()
# loss stats
if counter % print_every == 0:
# Get validation loss
val_h = net.init_hidden(batch_size)
val_losses = []
net.eval()
for x, y in get_batches(val_data, batch_size, seq_length):
# One-hot encode our data and make them Torch tensors
x = one_hot_encode(x, n_chars)
x, y = torch.from_numpy(x), torch.from_numpy(y)
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
val_h = tuple([each.data for each in val_h])
inputs, targets = x, y
if(train_on_gpu):
inputs, targets = inputs.cuda(), targets.cuda()
output, val_h = net(inputs, val_h)
val_loss = criterion(output, targets.view(batch_size*seq_length).long())
val_losses.append(val_loss.item())
                net.train() # reset to train mode after iterating through validation data
print("Epoch: {}/{}...".format(e+1, epochs),
"Step: {}...".format(counter),
"Loss: {:.4f}...".format(loss.item()),
"Val Loss: {:.4f}".format(np.mean(val_losses)))
```
## Instantiating the model
Now we can actually train the network. First we'll create the network itself, with some given hyperparameters. Then, define the mini-batches sizes, and start training!
```
# define and print the net
n_hidden=512
n_layers=2
net = CharRNN(chars, n_hidden, n_layers)
print(net)
batch_size = 128
seq_length = 100
n_epochs = 20 # start smaller if you are just testing initial behavior
# train the model
train(net, encoded, epochs=n_epochs, batch_size=batch_size, seq_length=seq_length, lr=0.001, print_every=10)
```
## Getting the best model
To set your hyperparameters to get the best performance, you'll want to watch the training and validation losses. If your training loss is much lower than the validation loss, you're overfitting. Increase regularization (more dropout) or use a smaller network. If the training and validation losses are close, you're underfitting so you can increase the size of the network.
## Hyperparameters
Here are the hyperparameters for the network.
In defining the model:
* `n_hidden` - The number of units in the hidden layers.
* `n_layers` - Number of hidden LSTM layers to use.
We assume that dropout probability and learning rate will be kept at the default, in this example.
And in training:
* `batch_size` - Number of sequences running through the network in one pass.
* `seq_length` - Number of characters in the sequence the network is trained on. Larger is better typically, the network will learn more long range dependencies. But it takes longer to train. 100 is typically a good number here.
* `lr` - Learning rate for training
Here's some good advice from Andrej Karpathy on training the network. I'm going to copy it in here for your benefit, but also link to [where it originally came from](https://github.com/karpathy/char-rnn#tips-and-tricks).
> ## Tips and Tricks
>### Monitoring Validation Loss vs. Training Loss
>If you're somewhat new to Machine Learning or Neural Networks it can take a bit of expertise to get good models. The most important quantity to keep track of is the difference between your training loss (printed during training) and the validation loss (printed once in a while when the RNN is run on the validation data (by default every 1000 iterations)). In particular:
> - If your training loss is much lower than validation loss then this means the network might be **overfitting**. Solutions to this are to decrease your network size, or to increase dropout. For example you could try dropout of 0.5 and so on.
> - If your training/validation loss are about equal then your model is **underfitting**. Increase the size of your model (either number of layers or the raw number of neurons per layer)
> ### Approximate number of parameters
> The two most important parameters that control the model are `n_hidden` and `n_layers`. I would advise that you always use `n_layers` of either 2/3. The `n_hidden` can be adjusted based on how much data you have. The two important quantities to keep track of here are:
> - The number of parameters in your model. This is printed when you start training.
> - The size of your dataset. 1MB file is approximately 1 million characters.
>These two should be about the same order of magnitude. It's a little tricky to tell. Here are some examples:
> - I have a 100MB dataset and I'm using the default parameter settings (which currently print 150K parameters). My data size is significantly larger (100 mil >> 0.15 mil), so I expect to heavily underfit. I am thinking I can comfortably afford to make `n_hidden` larger.
> - I have a 10MB dataset and running a 10 million parameter model. I'm slightly nervous and I'm carefully monitoring my validation loss. If it's larger than my training loss then I may want to try to increase dropout a bit and see if that helps the validation loss.
> ### Best models strategy
>The winning strategy to obtaining very good models (if you have the compute time) is to always err on making the network larger (as large as you're willing to wait for it to compute) and then try different dropout values (between 0,1). Whatever model has the best validation performance (the loss, written in the checkpoint filename, low is good) is the one you should use in the end.
>It is very common in deep learning to run many different models with many different hyperparameter settings, and in the end take whatever checkpoint gave the best validation performance.
>By the way, the size of your training and validation splits are also parameters. Make sure you have a decent amount of data in your validation set or otherwise the validation performance will be noisy and not very informative.
## Checkpoint
After training, we'll save the model so we can load it again later if we need to. Here I'm saving the parameters needed to create the same architecture, the hidden layer hyperparameters and the text characters.
```
# change the name, for saving multiple files
model_name = 'rnn_20_epoch.net'
checkpoint = {'n_hidden': net.n_hidden,
'n_layers': net.n_layers,
'state_dict': net.state_dict(),
'tokens': net.chars}
with open(model_name, 'wb') as f:
torch.save(checkpoint, f)
```
---
## Making Predictions
Now that the model is trained, we'll want to sample from it and make predictions about next characters! To sample, we pass in a character and have the network predict the next character. Then we take that character, pass it back in, and get another predicted character. Just keep doing this and you'll generate a bunch of text!
### A note on the `predict` function
The output of our RNN is from a fully-connected layer and it outputs a **distribution of next-character scores**.
> To actually get the next character, we apply a softmax function, which gives us a *probability* distribution that we can then sample to predict the next character.
### Top K sampling
Our predictions come from a categorical probability distribution over all the possible characters. We can make the sampled text more reasonable to handle (and less noisy) by only considering the $K$ most probable characters. This prevents the network from giving us completely absurd characters while still allowing it to introduce some randomness into the sampled text. Read more about [topk, here](https://pytorch.org/docs/stable/torch.html#torch.topk).
```
def predict(net, char, h=None, top_k=None):
''' Given a character, predict the next character.
Returns the predicted character and the hidden state.
'''
# tensor inputs
x = np.array([[net.char2int[char]]])
x = one_hot_encode(x, len(net.chars))
inputs = torch.from_numpy(x)
if(train_on_gpu):
inputs = inputs.cuda()
# detach hidden state from history
h = tuple([each.data for each in h])
# get the output of the model
out, h = net(inputs, h)
# get the character probabilities
p = F.softmax(out, dim=1).data
if(train_on_gpu):
p = p.cpu() # move to cpu
# get top characters
if top_k is None:
top_ch = np.arange(len(net.chars))
else:
p, top_ch = p.topk(top_k)
top_ch = top_ch.numpy().squeeze()
# select the likely next character with some element of randomness
p = p.numpy().squeeze()
char = np.random.choice(top_ch, p=p/p.sum())
# return the encoded value of the predicted char and the hidden state
return net.int2char[char], h
```
### Priming and generating text
Typically you'll want to prime the network so you can build up a hidden state. Otherwise the network will start out generating characters at random. In general the first bunch of characters will be a little rough since it hasn't built up a long history of characters to predict from.
```
def sample(net, size, prime='The', top_k=None):
if(train_on_gpu):
net.cuda()
else:
net.cpu()
net.eval() # eval mode
# First off, run through the prime characters
chars = [ch for ch in prime]
h = net.init_hidden(1)
for ch in prime:
char, h = predict(net, ch, h, top_k=top_k)
chars.append(char)
# Now pass in the previous character and get a new one
for ii in range(size):
char, h = predict(net, chars[-1], h, top_k=top_k)
chars.append(char)
return ''.join(chars)
print(sample(net, 1000, prime='Anna', top_k=5))
```
## Loading a checkpoint
```
# Here we have loaded in a model that trained over 20 epochs `rnn_20_epoch.net`
with open('rnn_20_epoch.net', 'rb') as f:
checkpoint = torch.load(f)
loaded = CharRNN(checkpoint['tokens'], n_hidden=checkpoint['n_hidden'], n_layers=checkpoint['n_layers'])
loaded.load_state_dict(checkpoint['state_dict'])
# Sample using a loaded model
print(sample(loaded, 2000, top_k=5, prime="And Levin said"))
```
# 1 - Sequence to Sequence Learning with Neural Networks
In this series we'll be building a machine learning model to go from one sequence to another, using PyTorch and torchtext. This will be done on German to English translations, but the models can be applied to any problem that involves going from one sequence to another, such as summarization, i.e. going from a sequence to a shorter sequence in the same language.
In this first notebook, we'll start simple to understand the general concepts by implementing the model from the [Sequence to Sequence Learning with Neural Networks](https://arxiv.org/abs/1409.3215) paper.
## Introduction
The most common sequence-to-sequence (seq2seq) models are *encoder-decoder* models, which commonly use a *recurrent neural network* (RNN) to *encode* the source (input) sentence into a single vector. In this notebook, we'll refer to this single vector as a *context vector*. We can think of the context vector as being an abstract representation of the entire input sentence. This vector is then *decoded* by a second RNN which learns to output the target (output) sentence by generating it one word at a time.

The above image shows an example translation. The input/source sentence, "guten morgen", is passed through the embedding layer (yellow) and then input into the encoder (green). We also append a *start of sequence* (`<sos>`) and *end of sequence* (`<eos>`) token to the start and end of sentence, respectively. At each time-step, the input to the encoder RNN is both the embedding, $e$, of the current word, $e(x_t)$, as well as the hidden state from the previous time-step, $h_{t-1}$, and the encoder RNN outputs a new hidden state $h_t$. We can think of the hidden state as a vector representation of the sentence so far. The RNN can be represented as a function of both of $e(x_t)$ and $h_{t-1}$:
$$h_t = \text{EncoderRNN}(e(x_t), h_{t-1})$$
We're using the term RNN generally here, it could be any recurrent architecture, such as an *LSTM* (Long Short-Term Memory) or a *GRU* (Gated Recurrent Unit).
Here, we have $X = \{x_1, x_2, ..., x_T\}$, where $x_1 = \text{<sos>}, x_2 = \text{guten}$, etc. The initial hidden state, $h_0$, is usually either initialized to zeros or a learned parameter.
Once the final word, $x_T$, has been passed into the RNN via the embedding layer, we use the final hidden state, $h_T$, as the context vector, i.e. $h_T = z$. This is a vector representation of the entire source sentence.
Now we have our context vector, $z$, we can start decoding it to get the output/target sentence, "good morning". Again, we append start and end of sequence tokens to the target sentence. At each time-step, the input to the decoder RNN (blue) is the embedding, $d$, of current word, $d(y_t)$, as well as the hidden state from the previous time-step, $s_{t-1}$, where the initial decoder hidden state, $s_0$, is the context vector, $s_0 = z = h_T$, i.e. the initial decoder hidden state is the final encoder hidden state. Thus, similar to the encoder, we can represent the decoder as:
$$s_t = \text{DecoderRNN}(d(y_t), s_{t-1})$$
Although the input/source embedding layer, $e$, and the output/target embedding layer, $d$, are both shown in yellow in the diagram they are two different embedding layers with their own parameters.
In the decoder, we need to go from the hidden state to an actual word, therefore at each time-step we use $s_t$ to predict (by passing it through a `Linear` layer, shown in purple) what we think is the next word in the sequence, $\hat{y}_t$.
$$\hat{y}_t = f(s_t)$$
The words in the decoder are always generated one after another, with one per time-step. We always use `<sos>` for the first input to the decoder, $y_1$, but for subsequent inputs, $y_{t>1}$, we will sometimes use the actual, ground truth next word in the sequence, $y_t$ and sometimes use the word predicted by our decoder, $\hat{y}_{t-1}$. This is called *teacher forcing*, see a bit more info about it [here](https://machinelearningmastery.com/teacher-forcing-for-recurrent-neural-networks/).
When training/testing our model, we always know how many words are in our target sentence, so we stop generating words once we hit that many. During inference it is common to keep generating words until the model outputs an `<eos>` token or after a certain number of words have been generated.
Once we have our predicted target sentence, $\hat{Y} = \{ \hat{y}_1, \hat{y}_2, ..., \hat{y}_T \}$, we compare it against our actual target sentence, $Y = \{ y_1, y_2, ..., y_T \}$, to calculate our loss. We then use this loss to update all of the parameters in our model.
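Before preparing any data, here is a minimal, self-contained sketch of the teacher-forcing decode loop described above. It uses toy sizes and a plain GRU standing in for the decoder we build later in this notebook, so treat it as an illustration of the idea rather than the tutorial's implementation.
```python
import random
import torch
import torch.nn as nn

# Toy sizes: 8-token vocabulary, hidden size 16, batch of 2, target length 5
vocab_size, hid_dim, batch_size, trg_len = 8, 16, 2, 5
embedding = nn.Embedding(vocab_size, hid_dim)
rnn = nn.GRU(hid_dim, hid_dim)
fc_out = nn.Linear(hid_dim, vocab_size)

trg = torch.randint(0, vocab_size, (trg_len, batch_size))  # ground-truth target tokens
hidden = torch.zeros(1, batch_size, hid_dim)               # stands in for the context vector z
teacher_forcing_ratio = 0.5

input_tok = trg[0]                                         # the first decoder input is always <sos>
outputs = []
for t in range(1, trg_len):
    emb = embedding(input_tok).unsqueeze(0)                # [1, batch, hid_dim]
    output, hidden = rnn(emb, hidden)                      # one decoder step
    prediction = fc_out(output.squeeze(0))                 # [batch, vocab_size]
    outputs.append(prediction)
    # Teacher forcing: sometimes feed the ground-truth token, sometimes the model's own guess
    teacher_force = random.random() < teacher_forcing_ratio
    input_tok = trg[t] if teacher_force else prediction.argmax(1)

print(torch.stack(outputs).shape)                          # [trg_len - 1, batch, vocab_size]
```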
## Preparing Data
We'll be coding up the models in PyTorch and using torchtext to help us do all of the pre-processing required. We'll also be using spaCy to assist in the tokenization of the data.
```
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.legacy.datasets import Multi30k
from torchtext.legacy.data import Field, BucketIterator
import spacy
import numpy as np
import random
import math
import time
```
We'll set the random seeds for deterministic results.
```
SEED = 1234
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
```
Next, we'll create the tokenizers. A tokenizer is used to turn a string containing a sentence into a list of individual tokens that make up that string, e.g. "good morning!" becomes ["good", "morning", "!"]. We'll start talking about the sentences being a sequence of tokens from now on, instead of saying they're a sequence of words. What's the difference? Well, "good" and "morning" are both words and tokens, but "!" is a token, not a word.
spaCy has a model for each language ("de_core_news_sm" for German and "en_core_web_sm" for English) which needs to be loaded so we can access the tokenizer of each model.
**Note**: the models must first be downloaded using the following on the command line:
```
python -m spacy download en_core_web_sm
python -m spacy download de_core_news_sm
```
We load the models as such:
```
spacy_de = spacy.load('de_core_news_sm')
spacy_en = spacy.load('en_core_web_sm')
```
Next, we create the tokenizer functions. These can be passed to torchtext and will take in the sentence as a string and return the sentence as a list of tokens.
In the paper we are implementing, they find it beneficial to reverse the order of the input which they believe "introduces many short term dependencies in the data that make the optimization problem much easier". We copy this by reversing the German sentence after it has been transformed into a list of tokens.
```
def tokenize_de(text):
"""
Tokenizes German text from a string into a list of strings (tokens) and reverses it
"""
return [tok.text for tok in spacy_de.tokenizer(text)][::-1]
def tokenize_en(text):
"""
Tokenizes English text from a string into a list of strings (tokens)
"""
return [tok.text for tok in spacy_en.tokenizer(text)]
```
torchtext's `Field`s handle how data should be processed. All of the possible arguments are detailed [here](https://github.com/pytorch/text/blob/master/torchtext/data/field.py#L61).
We set the `tokenize` argument to the correct tokenization function for each, with German being the `SRC` (source) field and English being the `TRG` (target) field. The field also appends the "start of sequence" and "end of sequence" tokens via the `init_token` and `eos_token` arguments, and converts all words to lowercase.
```
SRC = Field(tokenize = tokenize_de,
init_token = '<sos>',
eos_token = '<eos>',
lower = True)
TRG = Field(tokenize = tokenize_en,
init_token = '<sos>',
eos_token = '<eos>',
lower = True)
```
Next, we download and load the train, validation and test data.
The dataset we'll be using is the [Multi30k dataset](https://github.com/multi30k/dataset). This is a dataset with ~30,000 parallel English, German and French sentences, each with ~12 words per sentence.
`exts` specifies which languages to use as the source and target (source goes first) and `fields` specifies which field to use for the source and target.
```
train_data, valid_data, test_data = Multi30k.splits(exts = ('.de', '.en'),
fields = (SRC, TRG))
```
We can double check that we've loaded the right number of examples:
```
print(f"Number of training examples: {len(train_data.examples)}")
print(f"Number of validation examples: {len(valid_data.examples)}")
print(f"Number of testing examples: {len(test_data.examples)}")
```
We can also print out an example, making sure the source sentence is reversed:
```
print(vars(train_data.examples[0]))
```
The period is at the beginning of the German (src) sentence, so it looks like the sentence has been correctly reversed.
Next, we'll build the *vocabulary* for the source and target languages. The vocabulary is used to associate each unique token with an index (an integer). The vocabularies of the source and target languages are distinct.
Using the `min_freq` argument, we only allow tokens that appear at least 2 times to appear in our vocabulary. Tokens that appear only once are converted into an `<unk>` (unknown) token.
It is important to note that our vocabulary should only be built from the training set and not the validation/test set. This prevents "information leakage" into our model, which would give us artificially inflated validation/test scores.
```
SRC.build_vocab(train_data, min_freq = 2)
TRG.build_vocab(train_data, min_freq = 2)
print(f"Unique tokens in source (de) vocabulary: {len(SRC.vocab)}")
print(f"Unique tokens in target (en) vocabulary: {len(TRG.vocab)}")
```
The final step of preparing the data is to create the iterators. These can be iterated on to return a batch of data which will have a `src` attribute (the PyTorch tensors containing a batch of numericalized source sentences) and a `trg` attribute (the PyTorch tensors containing a batch of numericalized target sentences). Numericalized is just a fancy way of saying they have been converted from a sequence of readable tokens to a sequence of corresponding indexes, using the vocabulary.
We also need to define a `torch.device`. This is used to tell torchtext whether to put the tensors on the GPU or not. We use the `torch.cuda.is_available()` function, which will return `True` if a GPU is detected on our computer. We pass this `device` to the iterator.
When we get a batch of examples using an iterator we need to make sure that all of the source sentences are padded to the same length, and the same for the target sentences. Luckily, torchtext iterators handle this for us!
We use a `BucketIterator` instead of the standard `Iterator` as it creates batches in such a way that it minimizes the amount of padding in both the source and target sentences.
```
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
BATCH_SIZE = 128
train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size = BATCH_SIZE,
device = device)
```
## Building the Seq2Seq Model
We'll be building our model in three parts. The encoder, the decoder and a seq2seq model that encapsulates the encoder and decoder and will provide a way to interface with each.
### Encoder
First, the encoder, a 2-layer LSTM. The paper we are implementing uses a 4-layer LSTM, but in the interest of training time we cut this down to 2 layers. The concept of multi-layer RNNs extends straightforwardly from 2 to 4 layers.
For a multi-layer RNN, the input sentence, $X$, after being embedded goes into the first (bottom) layer of the RNN and hidden states, $H=\{h_1, h_2, ..., h_T\}$, output by this layer are used as inputs to the RNN in the layer above. Thus, representing each layer with a superscript, the hidden states in the first layer are given by:
$$h_t^1 = \text{EncoderRNN}^1(e(x_t), h_{t-1}^1)$$
The hidden states in the second layer are given by:
$$h_t^2 = \text{EncoderRNN}^2(h_t^1, h_{t-1}^2)$$
Using a multi-layer RNN means we'll also need an initial hidden state per layer, $h_0^l$, as input, and we will output a context vector per layer, $z^l$.
Without going into too much detail about LSTMs (see [this](https://colah.github.io/posts/2015-08-Understanding-LSTMs/) blog post to learn more about them), all we need to know is that they're a type of RNN which instead of just taking in a hidden state and returning a new hidden state per time-step, also take in and return a *cell state*, $c_t$, per time-step.
$$\begin{align*}
h_t &= \text{RNN}(e(x_t), h_{t-1})\\
(h_t, c_t) &= \text{LSTM}(e(x_t), h_{t-1}, c_{t-1})
\end{align*}$$
We can just think of $c_t$ as another type of hidden state. Similar to $h_0^l$, $c_0^l$ will be initialized to a tensor of all zeros. Also, our context vector will now be both the final hidden state and the final cell state, i.e. $z^l = (h_T^l, c_T^l)$.
Extending our multi-layer equations to LSTMs, we get:
$$\begin{align*}
(h_t^1, c_t^1) &= \text{EncoderLSTM}^1(e(x_t), (h_{t-1}^1, c_{t-1}^1))\\
(h_t^2, c_t^2) &= \text{EncoderLSTM}^2(h_t^1, (h_{t-1}^2, c_{t-1}^2))
\end{align*}$$
Note how only our hidden state from the first layer is passed as input to the second layer, and not the cell state.
So our encoder looks something like this:

We create this in code by making an `Encoder` module, which requires we inherit from `torch.nn.Module` and use the `super().__init__()` as some boilerplate code. The encoder takes the following arguments:
- `input_dim` is the size/dimensionality of the one-hot vectors that will be input to the encoder. This is equal to the input (source) vocabulary size.
- `emb_dim` is the dimensionality of the embedding layer. This layer converts the one-hot vectors into dense vectors with `emb_dim` dimensions.
- `hid_dim` is the dimensionality of the hidden and cell states.
- `n_layers` is the number of layers in the RNN.
- `dropout` is the amount of dropout to use. This is a regularization parameter to prevent overfitting. Check out [this](https://www.coursera.org/lecture/deep-neural-network/understanding-dropout-YaGbR) for more details about dropout.
We aren't going to discuss the embedding layer in detail during these tutorials. All we need to know is that there is a step before the words - technically, the indexes of the words - are passed into the RNN, where the words are transformed into vectors. To read more about word embeddings, check these articles: [1](https://monkeylearn.com/blog/word-embeddings-transform-text-numbers/), [2](http://p.migdal.pl/2017/01/06/king-man-woman-queen-why.html), [3](http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/), [4](http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/).
The embedding layer is created using `nn.Embedding`, the LSTM with `nn.LSTM` and a dropout layer with `nn.Dropout`. Check the PyTorch [documentation](https://pytorch.org/docs/stable/nn.html) for more about these.
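As a tiny, self-contained sketch (with made-up sizes) of what `nn.Embedding` does, it simply maps integer token indexes to dense vectors:
```
import torch
import torch.nn as nn
#toy sketch with hypothetical sizes: a vocabulary of 10 tokens, 4-dimensional embeddings
toy_embedding = nn.Embedding(num_embeddings=10, embedding_dim=4)
toy_tokens = torch.tensor([1, 5, 5, 2])    #a short "sentence" of token indexes
print(toy_embedding(toy_tokens).shape)     #torch.Size([4, 4])
```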
One thing to note is that the `dropout` argument to the LSTM is how much dropout to apply between the layers of a multi-layer RNN, i.e. between the hidden states output from layer $l$ and those same hidden states being used for the input of layer $l+1$.
In the `forward` method, we pass in the source sentence, $X$, which is converted into dense vectors using the `embedding` layer, and then dropout is applied. These embeddings are then passed into the RNN. As we pass a whole sequence to the RNN, it will automatically do the recurrent calculation of the hidden states over the whole sequence for us! Notice that we do not pass an initial hidden or cell state to the RNN. This is because, as noted in the [documentation](https://pytorch.org/docs/stable/nn.html#torch.nn.LSTM), if no hidden/cell state is passed to the RNN, it will automatically create an initial hidden/cell state as a tensor of all zeros.
The RNN returns: `outputs` (the top-layer hidden state for each time-step), `hidden` (the final hidden state for each layer, $h_T$, stacked on top of each other) and `cell` (the final cell state for each layer, $c_T$, stacked on top of each other).
As we only need the final hidden and cell states (to make our context vector), `forward` only returns `hidden` and `cell`.
The sizes of each of the tensors are left as comments in the code. In this implementation `n_directions` will always be 1; however, note that bidirectional RNNs (covered in tutorial 3) will have `n_directions` as 2.
```
class Encoder(nn.Module):
def __init__(self, input_dim, emb_dim, hid_dim, n_layers, dropout):
super().__init__()
self.hid_dim = hid_dim
self.n_layers = n_layers
self.embedding = nn.Embedding(input_dim, emb_dim)
self.rnn = nn.LSTM(emb_dim, hid_dim, n_layers, dropout = dropout)
self.dropout = nn.Dropout(dropout)
def forward(self, src):
#src = [src len, batch size]
embedded = self.dropout(self.embedding(src))
#embedded = [src len, batch size, emb dim]
outputs, (hidden, cell) = self.rnn(embedded)
#outputs = [src len, batch size, hid dim * n directions]
#hidden = [n layers * n directions, batch size, hid dim]
#cell = [n layers * n directions, batch size, hid dim]
#outputs are always from the top hidden layer
return hidden, cell
```
### Decoder
Next, we'll build our decoder, which will also be a 2-layer (4 in the paper) LSTM.

The `Decoder` class does a single step of decoding, i.e. it outputs a single token per time-step. The first layer will receive a hidden and cell state from the previous time-step, $(s_{t-1}^1, c_{t-1}^1)$, and feed them through the LSTM with the current embedded token, $y_t$, to produce a new hidden and cell state, $(s_t^1, c_t^1)$. The subsequent layers will use the hidden state from the layer below, $s_t^{l-1}$, and the previous hidden and cell states from their layer, $(s_{t-1}^l, c_{t-1}^l)$. This gives equations very similar to those in the encoder.
$$\begin{align*}
(s_t^1, c_t^1) &= \text{DecoderLSTM}^1(d(y_t), (s_{t-1}^1, c_{t-1}^1))\\
(s_t^2, c_t^2) &= \text{DecoderLSTM}^2(s_t^1, (s_{t-1}^2, c_{t-1}^2))
\end{align*}$$
Remember that the initial hidden and cell states to our decoder are our context vectors, which are the final hidden and cell states of our encoder from the same layer, i.e. $(s_0^l,c_0^l)=z^l=(h_T^l,c_T^l)$.
We then pass the hidden state from the top layer of the RNN, $s_t^L$, through a linear layer, $f$, to make a prediction of what the next token in the target (output) sequence should be, $\hat{y}_{t+1}$.
$$\hat{y}_{t+1} = f(s_t^L)$$
The arguments and initialization are similar to the `Encoder` class, except we now have an `output_dim` which is the size of the vocabulary for the output/target. There is also the addition of the `Linear` layer, used to make the predictions from the top layer hidden state.
Within the `forward` method, we accept a batch of input tokens, previous hidden states and previous cell states. As we are only decoding one token at a time, the input tokens will always have a sequence length of 1. We `unsqueeze` the input tokens to add a sentence length dimension of 1. Then, similar to the encoder, we pass through an embedding layer and apply dropout. This batch of embedded tokens is then passed into the RNN with the previous hidden and cell states. This produces an `output` (hidden state from the top layer of the RNN), a new `hidden` state (one for each layer, stacked on top of each other) and a new `cell` state (also one per layer, stacked on top of each other). We then pass the `output` (after getting rid of the sentence length dimension) through the linear layer to receive our `prediction`. We then return the `prediction`, the new `hidden` state and the new `cell` state.
**Note**: as we always have a sequence length of 1, we could use `nn.LSTMCell`, instead of `nn.LSTM`, as it is designed to handle a batch of inputs that aren't necessarily in a sequence. `nn.LSTMCell` is just a single cell and `nn.LSTM` is a wrapper around potentially multiple cells. Using the `nn.LSTMCell` in this case would mean we don't have to `unsqueeze` to add a fake sequence length dimension, but we would need one `nn.LSTMCell` per layer in the decoder and to ensure each `nn.LSTMCell` receives the correct initial hidden state from the encoder. All of this makes the code less concise - hence the decision to stick with the regular `nn.LSTM`.
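For illustration only, here is a rough sketch of what an `nn.LSTMCell`-based decoder step for our 2-layer case might look like; this is not the implementation used in the rest of this tutorial, and the class name and details are hypothetical:
```
import torch
import torch.nn as nn

class LSTMCellDecoder(nn.Module):
    #hypothetical single-step decoder built from nn.LSTMCell, for comparison only
    def __init__(self, output_dim, emb_dim, hid_dim, dropout):
        super().__init__()
        self.embedding = nn.Embedding(output_dim, emb_dim)
        self.cell_1 = nn.LSTMCell(emb_dim, hid_dim)    #bottom layer
        self.cell_2 = nn.LSTMCell(hid_dim, hid_dim)    #top layer
        self.fc_out = nn.Linear(hid_dim, output_dim)
        self.dropout = nn.Dropout(dropout)
    def forward(self, input, hidden, cell):
        #input = [batch size], hidden = cell = [2, batch size, hid dim]
        embedded = self.dropout(self.embedding(input))                #[batch size, emb dim]
        h1, c1 = self.cell_1(embedded, (hidden[0], cell[0]))          #bottom layer step
        h2, c2 = self.cell_2(self.dropout(h1), (hidden[1], cell[1]))  #top layer step, dropout between layers
        prediction = self.fc_out(h2)                                  #[batch size, output dim]
        return prediction, torch.stack([h1, h2]), torch.stack([c1, c2])
```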
```
class Decoder(nn.Module):
def __init__(self, output_dim, emb_dim, hid_dim, n_layers, dropout):
super().__init__()
self.output_dim = output_dim
self.hid_dim = hid_dim
self.n_layers = n_layers
self.embedding = nn.Embedding(output_dim, emb_dim)
self.rnn = nn.LSTM(emb_dim, hid_dim, n_layers, dropout = dropout)
self.fc_out = nn.Linear(hid_dim, output_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, input, hidden, cell):
#input = [batch size]
#hidden = [n layers * n directions, batch size, hid dim]
#cell = [n layers * n directions, batch size, hid dim]
#n directions in the decoder will both always be 1, therefore:
#hidden = [n layers, batch size, hid dim]
#context = [n layers, batch size, hid dim]
input = input.unsqueeze(0)
#input = [1, batch size]
embedded = self.dropout(self.embedding(input))
#embedded = [1, batch size, emb dim]
output, (hidden, cell) = self.rnn(embedded, (hidden, cell))
#output = [seq len, batch size, hid dim * n directions]
#hidden = [n layers * n directions, batch size, hid dim]
#cell = [n layers * n directions, batch size, hid dim]
#seq len and n directions will always be 1 in the decoder, therefore:
#output = [1, batch size, hid dim]
#hidden = [n layers, batch size, hid dim]
#cell = [n layers, batch size, hid dim]
prediction = self.fc_out(output.squeeze(0))
#prediction = [batch size, output dim]
return prediction, hidden, cell
```
### Seq2Seq
For the final part of the implementation, we'll implement the seq2seq model. This will handle:
- receiving the input/source sentence
- using the encoder to produce the context vectors
- using the decoder to produce the predicted output/target sentence
Our full model will look like this:

The `Seq2Seq` model takes in an `Encoder`, `Decoder`, and a `device` (used to place tensors on the GPU, if it exists).
For this implementation, we have to ensure that the number of layers and the hidden (and cell) dimensions are equal in the `Encoder` and `Decoder`. This is not always the case; we do not necessarily need the same number of layers or the same hidden dimension sizes in a sequence-to-sequence model. However, if we did something like having a different number of layers then we would need to make decisions about how this is handled. For example, if our encoder has 2 layers and our decoder only has 1, how is this handled? Do we average the two context vectors output by the encoder? Do we pass both through a linear layer? Do we only use the context vector from the highest layer? Etc. A toy sketch of some of these options is shown below.
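As a hedged illustration of two of these options (with made-up tensor sizes; again, this is not something we do in this tutorial):
```
import torch

#hypothetical mismatch: a 2-layer encoder feeding a 1-layer decoder
enc_hidden = torch.randn(2, 128, 512)                  #[enc n layers, batch size, hid dim]
#option 1: keep only the top layer's context vector
dec_hidden_top = enc_hidden[-1].unsqueeze(0)           #[1, batch size, hid dim]
#option 2: average the per-layer context vectors
dec_hidden_avg = enc_hidden.mean(dim=0, keepdim=True)  #[1, batch size, hid dim]
```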
Our `forward` method takes the source sentence, target sentence and a teacher-forcing ratio. The teacher forcing ratio is used when training our model. When decoding, at each time-step we will predict what the next token in the target sequence will be from the previous tokens decoded, $\hat{y}_{t+1}=f(s_t^L)$. With probability equal to the teacher forcing ratio (`teacher_forcing_ratio`) we will use the actual ground-truth next token in the sequence as the input to the decoder during the next time-step. However, with probability `1 - teacher_forcing_ratio`, we will use the token that the model predicted as the next input to the model, even if it doesn't match the actual next token in the sequence.
The first thing we do in the `forward` method is to create an `outputs` tensor that will store all of our predictions, $\hat{Y}$.
We then feed the input/source sentence, `src`, into the encoder and receive our final hidden and cell states.
The first input to the decoder is the start of sequence (`<sos>`) token. As our `trg` tensor already has the `<sos>` token appended (all the way back when we defined the `init_token` in our `TRG` field) we get our $y_1$ by slicing into it. We know how long our target sentences should be (`trg_len`), so we loop that many times. The last token input into the decoder is the one **before** the `<eos>` token - the `<eos>` token is never input into the decoder.
During each iteration of the loop, we:
- pass the input, previous hidden and previous cell states ($y_t, s_{t-1}, c_{t-1}$) into the decoder
- receive a prediction, next hidden state and next cell state ($\hat{y}_{t+1}, s_{t}, c_{t}$) from the decoder
- place our prediction, $\hat{y}_{t+1}$/`output` in our tensor of predictions, $\hat{Y}$/`outputs`
- decide if we are going to "teacher force" or not
- if we do, the next `input` is the ground-truth next token in the sequence, $y_{t+1}$/`trg[t]`
- if we don't, the next `input` is the predicted next token in the sequence, $\hat{y}_{t+1}$/`top1`, which we get by doing an `argmax` over the output tensor
Once we've made all of our predictions, we return our tensor full of predictions, $\hat{Y}$/`outputs`.
**Note**: our decoder loop starts at 1, not 0. This means the 0th element of our `outputs` tensor remains all zeros. So our `trg` and `outputs` look something like:
$$\begin{align*}
\text{trg} = [<sos>, &y_1, y_2, y_3, <eos>]\\
\text{outputs} = [0, &\hat{y}_1, \hat{y}_2, \hat{y}_3, <eos>]
\end{align*}$$
Later on when we calculate the loss, we cut off the first element of each tensor to get:
$$\begin{align*}
\text{trg} = [&y_1, y_2, y_3, <eos>]\\
\text{outputs} = [&\hat{y}_1, \hat{y}_2, \hat{y}_3, <eos>]
\end{align*}$$
```
class Seq2Seq(nn.Module):
def __init__(self, encoder, decoder, device):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.device = device
assert encoder.hid_dim == decoder.hid_dim, \
"Hidden dimensions of encoder and decoder must be equal!"
assert encoder.n_layers == decoder.n_layers, \
"Encoder and decoder must have equal number of layers!"
def forward(self, src, trg, teacher_forcing_ratio = 0.5):
#src = [src len, batch size]
#trg = [trg len, batch size]
#teacher_forcing_ratio is probability to use teacher forcing
#e.g. if teacher_forcing_ratio is 0.75 we use ground-truth inputs 75% of the time
batch_size = trg.shape[1]
trg_len = trg.shape[0]
trg_vocab_size = self.decoder.output_dim
#tensor to store decoder outputs
outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(self.device)
#last hidden state of the encoder is used as the initial hidden state of the decoder
hidden, cell = self.encoder(src)
#first input to the decoder is the <sos> tokens
input = trg[0,:]
for t in range(1, trg_len):
#insert input token embedding, previous hidden and previous cell states
#receive output tensor (predictions) and new hidden and cell states
output, hidden, cell = self.decoder(input, hidden, cell)
#place predictions in a tensor holding predictions for each token
outputs[t] = output
#decide if we are going to use teacher forcing or not
teacher_force = random.random() < teacher_forcing_ratio
#get the highest predicted token from our predictions
top1 = output.argmax(1)
#if teacher forcing, use actual next token as next input
#if not, use predicted token
input = trg[t] if teacher_force else top1
return outputs
```
# Training the Seq2Seq Model
Now we have our model implemented, we can begin training it.
First, we'll initialize our model. As mentioned before, the input and output dimensions are defined by the size of the vocabulary. The embedding dimensions and dropout for the encoder and decoder can be different, but the number of layers and the size of the hidden/cell states must be the same.
We then define the encoder, decoder and then our Seq2Seq model, which we place on the `device`.
```
INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
ENC_EMB_DIM = 256
DEC_EMB_DIM = 256
HID_DIM = 512
N_LAYERS = 2
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
enc = Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)
model = Seq2Seq(enc, dec, device).to(device)
```
Next up is initializing the weights of our model. In the paper they state they initialize all weights from a uniform distribution between -0.08 and +0.08, i.e. $\mathcal{U}(-0.08, 0.08)$.
We initialize weights in PyTorch by creating a function which we `apply` to our model. When using `apply`, the `init_weights` function will be called on every module and sub-module within our model. For each module we loop through all of the parameters and sample them from a uniform distribution with `nn.init.uniform_`.
```
def init_weights(m):
for name, param in m.named_parameters():
nn.init.uniform_(param.data, -0.08, 0.08)
model.apply(init_weights)
```
We also define a function that will calculate the number of trainable parameters in the model.
```
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'The model has {count_parameters(model):,} trainable parameters')
```
We define our optimizer, which we use to update our parameters in the training loop. Check out [this](http://ruder.io/optimizing-gradient-descent/) post for information about different optimizers. Here, we'll use Adam.
```
optimizer = optim.Adam(model.parameters())
```
Next, we define our loss function. The `CrossEntropyLoss` function calculates both the log softmax as well as the negative log-likelihood of our predictions.
Our loss function calculates the average loss per token, however by passing the index of the `<pad>` token as the `ignore_index` argument we ignore the loss whenever the target token is a padding token.
```
TRG_PAD_IDX = TRG.vocab.stoi[TRG.pad_token]
criterion = nn.CrossEntropyLoss(ignore_index = TRG_PAD_IDX)
```
Next, we'll define our training loop.
First, we'll set the model into "training mode" with `model.train()`. This will turn on dropout (and batch normalization, which we aren't using). We then iterate through our data iterator.
As stated before, our decoder loop starts at 1, not 0. This means the 0th element of our `outputs` tensor remains all zeros. So our `trg` and `outputs` look something like:
$$\begin{align*}
\text{trg} = [<sos>, &y_1, y_2, y_3, <eos>]\\
\text{outputs} = [0, &\hat{y}_1, \hat{y}_2, \hat{y}_3, <eos>]
\end{align*}$$
Here, when we calculate the loss, we cut off the first element of each tensor to get:
$$\begin{align*}
\text{trg} = [&y_1, y_2, y_3, <eos>]\\
\text{outputs} = [&\hat{y}_1, \hat{y}_2, \hat{y}_3, <eos>]
\end{align*}$$
At each iteration:
- get the source and target sentences from the batch, $X$ and $Y$
- zero the gradients calculated from the last batch
- feed the source and target into the model to get the output, $\hat{Y}$
- as the loss function only works on 2d inputs with 1d targets we need to flatten each of them with `.view`
- we slice off the first element of the output and target tensors, as mentioned above
- calculate the gradients with `loss.backward()`
- clip the gradients to prevent them from exploding (a common issue in RNNs)
- update the parameters of our model by doing an optimizer step
- sum the loss value to a running total
Finally, we return the loss that is averaged over all batches.
```
def train(model, iterator, optimizer, criterion, clip):
model.train()
epoch_loss = 0
for i, batch in enumerate(iterator):
src = batch.src
trg = batch.trg
optimizer.zero_grad()
output = model(src, trg)
#trg = [trg len, batch size]
#output = [trg len, batch size, output dim]
output_dim = output.shape[-1]
output = output[1:].view(-1, output_dim)
trg = trg[1:].view(-1)
#trg = [(trg len - 1) * batch size]
#output = [(trg len - 1) * batch size, output dim]
loss = criterion(output, trg)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
epoch_loss += loss.item()
return epoch_loss / len(iterator)
```
Our evaluation loop is similar to our training loop, however as we aren't updating any parameters we don't need to pass an optimizer or a clip value.
We must remember to set the model to evaluation mode with `model.eval()`. This will turn off dropout (and batch normalization, if used).
We use the `with torch.no_grad()` block to ensure no gradients are calculated within the block. This reduces memory consumption and speeds things up.
The iteration loop is similar (without the parameter updates), however we must ensure we turn teacher forcing off for evaluation. This will cause the model to only use its own predictions to make further predictions within a sentence, which mirrors how it would be used in deployment.
```
def evaluate(model, iterator, criterion):
model.eval()
epoch_loss = 0
with torch.no_grad():
for i, batch in enumerate(iterator):
src = batch.src
trg = batch.trg
output = model(src, trg, 0) #turn off teacher forcing
#trg = [trg len, batch size]
#output = [trg len, batch size, output dim]
output_dim = output.shape[-1]
output = output[1:].view(-1, output_dim)
trg = trg[1:].view(-1)
#trg = [(trg len - 1) * batch size]
#output = [(trg len - 1) * batch size, output dim]
loss = criterion(output, trg)
epoch_loss += loss.item()
return epoch_loss / len(iterator)
```
Next, we'll create a function that we'll use to tell us how long an epoch takes.
```
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
```
We can finally start training our model!
At each epoch, we'll be checking if our model has achieved the best validation loss so far. If it has, we'll update our best validation loss and save the parameters of our model (called `state_dict` in PyTorch). Then, when we come to test our model, we'll use the saved parameters used to achieve the best validation loss.
We'll be printing out both the loss and the perplexity at each epoch. It is easier to see a change in perplexity than a change in loss as the numbers are much bigger.
```
N_EPOCHS = 10
CLIP = 1
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
start_time = time.time()
train_loss = train(model, train_iterator, optimizer, criterion, CLIP)
valid_loss = evaluate(model, valid_iterator, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), 'tut1-model.pt')
print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')
print(f'\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}')
```
We'll load the parameters (`state_dict`) that gave our model the best validation loss and run the model on the test set.
```
model.load_state_dict(torch.load('tut1-model.pt'))
test_loss = evaluate(model, test_iterator, criterion)
print(f'| Test Loss: {test_loss:.3f} | Test PPL: {math.exp(test_loss):7.3f} |')
```
In the following notebook we'll implement a model that achieves improved test perplexity, but only uses a single layer in the encoder and the decoder.
## IBM Quantum Challenge Fall 2021
# Challenge 3: Classify images with quantum machine learning
<div class="alert alert-block alert-info">
We recommend that you switch to **light** workspace theme under the Account menu in the upper right corner for optimal experience.
</div>
## Introduction
Machine learning is a technology that has attracted a great deal of attention due to its high performance and versatility. In fact, it has been put to practical use in many industries with the recent development of algorithms and the increase of computational resources. A typical example is computer vision, where machine learning is now able to classify images with the same or better accuracy than humans. For example, the ability to automatically classify clothing images has made online shopping for clothes more convenient.
The application of quantum computation to machine learning has recently been shown to have the potential for even greater capabilities. Various algorithms have been proposed for quantum machine learning, such as the quantum support vector machine (QSVM) and quantum generative adversarial networks (QGANs). In this challenge, you will use QSVM to tackle the clothing image classification task.
QSVM is a quantum version of the support vector machine (SVM), a classical machine learning algorithm. There are various approaches to QSVM, some aim to accelerate computation assuming fault-tolerant quantum computers, while others aim to achieve higher expressive power assuming noisy, near-term devices. In this challenge, we will focus on the latter, and the details will be explained later.
For this implementation of QSVM, you will be able to make choices on how you want to compose your quantum model, in particular focusing on the quantum feature map. This is motivated by the tradeoff that a more complex feature map would have greater representation power but be more susceptible to noise, which could be especially critical when using noisy, near-term devices.
Many of the concepts that appear in this challenge are explained in the 2021 Qiskit Global Summer School (QGSS). The materials and lecture videos are available, and it is recommended that you study them as well. Refer to the links in each part for the corresponding lectures.
<center><img src="./resources/ecommerce.jpg" width="640" /></center>
## Challenge
<div class="alert alert-block alert-success">
**Goal**
Implement a QSVM model for multiclass classification and predict labels accurately.
**Plan**
First, you will learn how to construct QSVM for binary classification of a simple dataset. Then apply what you have learned to a more complex problem, 3-class classification of a different dataset.
**1. Tutorial - QSVM for binary classification of MNIST:** familiarize yourself with a typical workflow for QSVM and find the best combination of dimensions/feature maps.
**2. Challenge - QSVM for 3-class classification of Fashion-MNIST:** implement a 3-class classifier using binary QSVM classifiers. Perform a similar investigation as in the first part to find the best combination of dimensions/feature maps. Achieve better accuracy with smaller feature map circuits.
</div>
<div class="alert alert-block alert-info">
Before you begin, we recommend watching the [**Qiskit Machine Learning Demo Session with Anton Dekusar**](https://youtu.be/claoY57eVIc?t=1814) and check out the corresponding [**demo notebook**](https://github.com/qiskit-community/qiskit-application-modules-demo-sessions/tree/main/qiskit-machine-learning) to learn how to do classifications using QSVM.
</div>
```
# General imports
import os
import gzip
import numpy as np
import matplotlib.pyplot as plt
from pylab import cm
import warnings
warnings.filterwarnings("ignore")
# scikit-learn imports
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
# Qiskit imports
from qiskit import Aer, execute
from qiskit.circuit import QuantumCircuit, Parameter, ParameterVector
from qiskit.circuit.library import PauliFeatureMap, ZFeatureMap, ZZFeatureMap
from qiskit.circuit.library import TwoLocal, NLocal, RealAmplitudes, EfficientSU2
from qiskit.circuit.library import HGate, RXGate, RYGate, RZGate, CXGate, CRXGate, CRZGate
from qiskit_machine_learning.kernels import QuantumKernel
```
## Part 1: Tutorial - QSVM for binary classification of MNIST
In this part, you will apply QSVM to the binary classification of handwritten numbers 4 and 9. Through this tutorial, you will learn the workflow of applying QSVM to binary classification. Find better combinations and achieve higher accuracy.
Related QGSS material:
- [**Lab 3**](https://www.youtube.com/watch?v=GVhCOTzAkCM&list=PLOFEBzvs-VvqJwybFxkTiDzhf5E11p8BI&index=17)
### 1. Data preparation
The data we are going to work with at the beginning is a small subset of the well known handwritten digits dataset, which is available publicly. We will be aiming to differentiate between '4' and '9'.
There are a total of 100 samples in the dataset. Of these, eighty are labeled training samples, and the remaining twenty are unlabeled test samples. Each sample is a 28x28 image of a digit, flattened into an array, where each element is an integer between 0 (white) and 255 (black). To use the dataset for quantum classification, we need to scale the range to between -1 and 1, and reduce the dimensionality to the number of qubits we want to use (here N_DIM=5).
```
# Load MNIST dataset
DATA_PATH = './resources/ch3_part1.npz'
data = np.load(DATA_PATH)
sample_train = data['sample_train']
labels_train = data['labels_train']
sample_test = data['sample_test']
# Split train data
sample_train, sample_val, labels_train, labels_val = train_test_split(
sample_train, labels_train, test_size=0.2, random_state=42)
# Visualize samples
fig = plt.figure()
LABELS = [4, 9]
num_labels = len(LABELS)
for i in range(num_labels):
ax = fig.add_subplot(1, num_labels, i+1)
img = sample_train[labels_train==LABELS[i]][0].reshape((28, 28))
ax.imshow(img, cmap="Greys")
# Standardize
ss = StandardScaler()
sample_train = ss.fit_transform(sample_train)
sample_val = ss.transform(sample_val)
sample_test = ss.transform(sample_test)
# Reduce dimensions
N_DIM = 5
pca = PCA(n_components=N_DIM)
sample_train = pca.fit_transform(sample_train)
sample_val = pca.transform(sample_val)
sample_test = pca.transform(sample_test)
# Normalize
mms = MinMaxScaler((-1, 1))
sample_train = mms.fit_transform(sample_train)
sample_val = mms.transform(sample_val)
sample_test = mms.transform(sample_test)
```
### 2. Data Encoding
We will take the classical data and encode it to the quantum state space using a quantum feature map. The choice of which feature map to use is important and may depend on the given dataset we want to classify. Here we'll look at the feature maps available in Qiskit, before selecting and customising one to encode our data.
### 2.1 Quantum Feature Maps
As the name suggests, a quantum feature map $\phi(\mathbf{x})$ is a map from the classical feature vector $\mathbf{x}$ to the quantum state $|\Phi(\mathbf{x})\rangle\langle\Phi(\mathbf{x})|$. This is facilitated by applying the unitary operation $\mathcal{U}_{\Phi(\mathbf{x})}$ on the initial state $|0\rangle^{n}$ where _n_ is the number of qubits being used for encoding.
The following feature maps currently available in Qiskit are those introduced in [**_Havlicek et al_. Nature **567**, 209-212 (2019)**](https://www.nature.com/articles/s41586-019-0980-2), in particular the `ZZFeatureMap` is conjectured to be hard to simulate classically and can be implemented as short-depth circuits on near-term quantum devices.
- [**`PauliFeatureMap`**](https://qiskit.org/documentation/stubs/qiskit.circuit.library.PauliFeatureMap.html)
- [**`ZZFeatureMap`**](https://qiskit.org/documentation/stubs/qiskit.circuit.library.ZZFeatureMap.html)
- [**`ZFeatureMap`**](https://qiskit.org/documentation/stubs/qiskit.circuit.library.ZFeatureMap.html)
The `PauliFeatureMap` is defined as:
```python
PauliFeatureMap(feature_dimension=None, reps=2,
entanglement='full', paulis=None,
data_map_func=None, parameter_prefix='x',
insert_barriers=False)
```
and describes the unitary operator of depth $d$:
$$ \mathcal{U}_{\Phi(\mathbf{x})}=\prod_d U_{\Phi(\mathbf{x})}H^{\otimes n},\ U_{\Phi(\mathbf{x})}=\exp\left(i\sum_{S\subseteq[n]}\phi_S(\mathbf{x})\prod_{i\in S} P_i\right), $$
which contains layers of Hadamard gates interleaved with entangling blocks, $U_{\Phi(\mathbf{x})}$, encoding the classical data as shown in circuit diagram below for $d=2$.
<center><img src="./resources/featuremap.png" width="1000" /></center>
Within the entangling blocks, $U_{\Phi(\mathbf{x})}$: $P_i \in \{ I, X, Y, Z \}$ denotes the Pauli matrices, the index $S$ describes connectivities between different qubits or datapoints: $S \in \{\binom{n}{k}\ \text{combinations},\ k = 1, \dots, n \}$, and by default the data mapping function $\phi_S(\mathbf{x})$ is
$$\phi_S:\mathbf{x}\mapsto \Bigg\{\begin{array}{ll}
x_i & \mbox{if}\ S=\{i\} \\
(\pi-x_i)(\pi-x_j) & \mbox{if}\ S=\{i,j\}
\end{array}$$
When $k = 1$ and $P_0 = Z$, this is the `ZFeatureMap`:
$$\mathcal{U}_{\Phi(\mathbf{x})} = \left( \exp\left(i\sum_j \phi_{\{j\}}(\mathbf{x}) \, Z_j\right) \, H^{\otimes n} \right)^d.$$
which is defined as:
```python
ZFeatureMap(feature_dimension, reps=2,
data_map_func=None, insert_barriers=False)
```
```
# 3 features, depth 2
map_z = ZFeatureMap(feature_dimension=3, reps=2)
map_z.decompose().draw('mpl')
```
Note the lack of entanglement in this feature map; this means it is simple to simulate classically and will not provide a quantum advantage.
When $k = 2$, $P_0 = Z$ and $P_1 = ZZ$, this is the `ZZFeatureMap`:
$$\mathcal{U}_{\Phi(\mathbf{x})} = \left( \exp\left(i\sum_{jk} \phi_{\{j,k\}}(\mathbf{x}) \, Z_j \otimes Z_k\right) \, \exp\left(i\sum_j \phi_{\{j\}}(\mathbf{x}) \, Z_j\right) \, H^{\otimes n} \right)^d.$$
which is defined as:
```python
ZZFeatureMap(feature_dimension, reps=2,
entanglement='full', data_map_func=None,
insert_barriers=False)
```
```
# 3 features, depth 1, linear entanglement
map_zz = ZZFeatureMap(feature_dimension=3, reps=1, entanglement='linear')
map_zz.decompose().draw('mpl')
```
Note that there is entanglement in this feature map; we can also specify the entanglement map:
```
# 3 features, depth 1, circular entanglement
map_zz = ZZFeatureMap(feature_dimension=3, reps=1, entanglement='circular')
map_zz.decompose().draw('mpl')
```
We can customise the Pauli gates in the feature map, for example, $P_0 = X, P_1 = Y, P_2 = ZZ$:
$$\mathcal{U}_{\Phi(\mathbf{x})} = \left( \exp\left(i\sum_{jk} \phi_{\{j,k\}}(\mathbf{x}) \, Z_j \otimes Z_k\right) \, \exp\left(i\sum_{j} \phi_{\{j\}}(\mathbf{x}) \, Y_j\right) \, \exp\left(i\sum_j \phi_{\{j\}}(\mathbf{x}) \, X_j\right) \, H^{\otimes n} \right)^d.$$
```
# 3 features, depth 1
map_pauli = PauliFeatureMap(feature_dimension=3, reps=1, paulis = ['X', 'Y', 'ZZ'])
map_pauli.decompose().draw('mpl')
```
The [`NLocal`](https://qiskit.org/documentation/stubs/qiskit.circuit.library.NLocal.html) and [`TwoLocal`](https://qiskit.org/documentation/stubs/qiskit.circuit.library.TwoLocal.html) functions in Qiskit's circuit library can also be used to create parameterised quantum circuits as feature maps.
```python
TwoLocal(num_qubits=None, reps=3, rotation_blocks=None,
entanglement_blocks=None, entanglement='full',
skip_unentangled_qubits=False,
skip_final_rotation_layer=False,
parameter_prefix='θ', insert_barriers=False,
initial_state=None)
```
```python
NLocal(num_qubits=None, reps=1, rotation_blocks=None,
entanglement_blocks=None, entanglement=None,
skip_unentangled_qubits=False,
skip_final_rotation_layer=False,
overwrite_block_parameters=True,
parameter_prefix='θ', insert_barriers=False,
initial_state=None, name='nlocal')
```
Both functions create parameterised circuits of alternating rotation and entanglement layers. In both layers, parameterised circuit-blocks act on the circuit in a defined way. In the rotation layer, the blocks are applied stacked on top of each other, while in the entanglement layer they are applied according to the entanglement strategy. Each layer is repeated a number of times, and by default a final rotation layer is appended.
In `NLocal`, the circuit blocks can have arbitrary sizes (smaller than or equal to the number of qubits in the circuit), while in `TwoLocal`, the rotation layers are single qubit gates applied on all qubits and the entanglement layer uses two-qubit gates.
For example, here is a `TwoLocal` circuit, with $R_y$ and $R_z$ gates in the rotation layer and $CX$ gates in the entangling layer with circular entanglement:
```
twolocal = TwoLocal(num_qubits=3, reps=2, rotation_blocks=['ry','rz'],
entanglement_blocks='cx', entanglement='circular', insert_barriers=True)
twolocal.decompose().draw('mpl')
```
and the equivalent `NLocal` circuit:
```
twolocaln = NLocal(num_qubits=3, reps=2,
rotation_blocks=[RYGate(Parameter('a')), RZGate(Parameter('a'))],
entanglement_blocks=CXGate(),
entanglement='circular', insert_barriers=True)
twolocaln.decompose().draw('mpl')
```
Let's encode the first training sample using the `PauliFeatureMap`:
```
print(f'First training data: {sample_train[0]}')
encode_map = PauliFeatureMap(feature_dimension=N_DIM, reps=1, paulis = ['X', 'Y', 'ZZ'])
encode_circuit = encode_map.bind_parameters(sample_train[0])
encode_circuit.decompose().draw(output='mpl')
```
<div class="alert alert-block alert-success">
**Challenge 3a**
Construct a feature map to encode a 5-dimensionally embedded data, using 'ZZFeatureMap' with 3 repetitions, 'circular' entanglement and the rest as default.
</div>
Submission format:
```python
ex3a_fmap = ZZFeatureMap(...)
```
```
##############################
# Provide your code here
ex3a_fmap = ZZFeatureMap(feature_dimension=N_DIM,
reps=3,
entanglement='circular',
data_map_func=None,
insert_barriers=False)
##############################
# Check your answer and submit using the following code
from qc_grader import grade_ex3a
grade_ex3a(ex3a_fmap)
```
### 2.2 Quantum Kernel Estimation
A quantum feature map, $\phi(\mathbf{x})$, naturally gives rise to a quantum kernel, $k(\mathbf{x}_i,\mathbf{x}_j)= \phi(\mathbf{x}_j)^\dagger\phi(\mathbf{x}_i)$, which can be seen as a measure of similarity: $k(\mathbf{x}_i,\mathbf{x}_j)$ is large when $\mathbf{x}_i$ and $\mathbf{x}_j$ are close.
When considering finite data, we can represent the quantum kernel as a matrix:
$K_{ij} = \left| \langle \phi^\dagger(\mathbf{x}_j)| \phi(\mathbf{x}_i) \rangle \right|^{2}$. We can calculate each element of this kernel matrix on a quantum computer by calculating the transition amplitude:
$$
\left| \langle \phi^\dagger(\mathbf{x}_j)| \phi(\mathbf{x}_i) \rangle \right|^{2} =
\left| \langle 0^{\otimes n} | \mathbf{U_\phi^\dagger}(\mathbf{x}_j) \mathbf{U_\phi}(\mathbf{x_i}) | 0^{\otimes n} \rangle \right|^{2}
$$
assuming the feature map is a parameterized quantum circuit, which can be described as a unitary transformation $\mathbf{U_\phi}(\mathbf{x})$ on $n$ qubits.
This provides us with an estimate of the quantum kernel matrix, which we can then use in a kernel machine learning algorithm, such as support vector classification.
As discussed in [***Havlicek et al*. Nature 567, 209-212 (2019)**](https://www.nature.com/articles/s41586-019-0980-2), quantum kernel machine algorithms only have the potential of quantum advantage over classical approaches if the corresponding quantum kernel is hard to estimate classically.
As we will see later, the hardness of estimating the kernel with classical resources is of course only a necessary and not always sufficient condition to obtain a quantum advantage.
However, it was proven recently in [***Liu et al.* arXiv:2010.02174 (2020)**](https://arxiv.org/abs/2010.02174) that learning problems exist for which learners with access to quantum kernel methods have a quantum advantage over all classical learners.
With our training and testing datasets ready, we set up the `QuantumKernel` class with the PauliFeatureMap, and use the `BasicAer` `statevector_simulator` to estimate the training and testing kernel matrices.
```
pauli_map = PauliFeatureMap(feature_dimension=N_DIM, reps=1, paulis = ['X', 'Y', 'ZZ'])
pauli_kernel = QuantumKernel(feature_map=pauli_map, quantum_instance=Aer.get_backend('statevector_simulator'))
```
Let's calculate the transition amplitude between the first and second training data samples, one of the entries in the training kernel matrix.
```
print(f'First training data : {sample_train[0]}')
print(f'Second training data: {sample_train[1]}')
```
First we create and draw the circuit:
```
pauli_circuit = pauli_kernel.construct_circuit(sample_train[0], sample_train[1])
pauli_circuit.decompose().decompose().draw(output='mpl')
```
The parameters in the gates are a little difficult to read, but notice how the circuit is symmetrical, with one half encoding one of the data samples, the other half encoding the other.
We then simulate the circuit. We will use the `qasm_simulator` since the circuit contains measurements, but increase the number of shots to reduce the effect of sampling noise.
```
backend = Aer.get_backend('qasm_simulator')
job = execute(pauli_circuit, backend, shots=8192,
seed_simulator=1024, seed_transpiler=1024)
counts = job.result().get_counts(pauli_circuit)
counts['0'*N_DIM]
counts
```
The transition amplitude is the proportion of counts in the zero state:
```
print(f"Transition amplitude: {counts['0'*N_DIM]/sum(counts.values())}")
```
This process is then repeated for each pair of training data samples to fill in the training kernel matrix, and between each training and testing data sample to fill in the testing kernel matrix. Note that each matrix is symmetric, so to reduce computation time, only half the entries are calculated explicitly.
Here we compute and plot the training and testing kernel matrices:
```
matrix_train = pauli_kernel.evaluate(x_vec=sample_train)
matrix_val = pauli_kernel.evaluate(x_vec=sample_val, y_vec=sample_train)
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
axs[0].imshow(np.asmatrix(matrix_train),
interpolation='nearest', origin='upper', cmap='Blues')
axs[0].set_title("training kernel matrix")
axs[1].imshow(np.asmatrix(matrix_val),
interpolation='nearest', origin='upper', cmap='Reds')
axs[1].set_title("validation kernel matrix")
plt.show()
```
<div class="alert alert-block alert-success">
**Challenge 3b**
Calculate the transition amplitude between $x = (-0.5, -0.4, 0.3, 0, -0.9)$ and $y = (0, -0.7, -0.3, 0, -0.4)$ using the 'ZZFeatureMap' with 3 repetitions, 'circular' entanglement and the rest as default. Use the 'qasm_simulator' with 'shots=8192', 'seed_simulator=1024' and 'seed_transpiler=1024'.
</div>
```
sample_train[0]
np.array([-0.5,-0.4,0.3,0,-0.9])
x = [-0.5, -0.4, 0.3, 0, -0.9]
y = [0, -0.7, -0.3, 0, -0.4]
##############################
# Provide your code here
pauli_map = ZZFeatureMap(feature_dimension=N_DIM,
reps=3,
entanglement='circular',
data_map_func=None,
insert_barriers=False)
pauli_kernel = QuantumKernel(feature_map=pauli_map, quantum_instance=Aer.get_backend('statevector_simulator'))
pauli_circuit = pauli_kernel.construct_circuit(x, y)
backend = Aer.get_backend('qasm_simulator')
job = execute(pauli_circuit, backend, shots=8192,
seed_simulator=1024, seed_transpiler=1024)
counts = job.result().get_counts(pauli_circuit)
ex3b_amp = counts['0'*N_DIM]/sum(counts.values())
##############################
# Check your answer and submit using the following code
from qc_grader import grade_ex3b
grade_ex3b(ex3b_amp)
```
Related QGSS materials:
- [**Kernel Trick (Lecture 6.1)**](https://www.youtube.com/watch?v=m6EzmYsEOiI&list=PLOFEBzvs-VvqJwybFxkTiDzhf5E11p8BI&index=14)
- [**Kernel Trick (Lecture 6.2)**](https://www.youtube.com/watch?v=zw3JYUrS-v8&list=PLOFEBzvs-VvqJwybFxkTiDzhf5E11p8BI&index=15)
### 2.3 Quantum Support Vector Machine (QSVM)
Introduced in [***Havlicek et al*. Nature 567, 209-212 (2019)**](https://www.nature.com/articles/s41586-019-0980-2), the quantum kernel support vector classification algorithm consists of these steps:
<center><img src="./resources/qsvc.png" width="1000"></center>
1. Build the train and test quantum kernel matrices.
    1. For each pair of datapoints in the training dataset $\mathbf{x}_{i},\mathbf{x}_j$, apply the feature map and measure the transition probability: $ K_{ij} = \left| \langle 0 | \mathbf{U}^\dagger_{\Phi(\mathbf{x_j})} \mathbf{U}_{\Phi(\mathbf{x_i})} | 0 \rangle \right|^2 $.
    2. For each training datapoint $\mathbf{x_i}$ and testing point $\mathbf{y_j}$, apply the feature map and measure the transition probability: $ K_{ij} = \left| \langle 0 | \mathbf{U}^\dagger_{\Phi(\mathbf{y_j})} \mathbf{U}_{\Phi(\mathbf{x_i})} | 0 \rangle \right|^2 $.
2. Use the train and test quantum kernel matrices in a classical support vector machine classification algorithm.
The `scikit-learn` `svc` algorithm allows us to [**define a custom kernel**](https://scikit-learn.org/stable/modules/svm.html#custom-kernels) in two ways: by providing the kernel as a callable function or by precomputing the kernel matrix. We can do either of these using the `QuantumKernel` class in Qiskit.
The following code takes the training and testing kernel matrices we calculated earlier and provides them to the `scikit-learn` `svc` algorithm:
```
pauli_svc = SVC(kernel='precomputed')
pauli_svc.fit(matrix_train, labels_train)
pauli_score = pauli_svc.score(matrix_val, labels_val)
print(f'Precomputed kernel classification test score: {pauli_score*100}%')
```
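As an aside, `QuantumKernel.evaluate` can also be handed to `SVC` as a callable kernel, letting scikit-learn compute the kernel matrices internally; a minimal sketch, which should be equivalent in spirit to the precomputed approach above:
```
# Hedged alternative: pass the quantum kernel as a callable instead of precomputing matrices
callable_svc = SVC(kernel=pauli_kernel.evaluate)
callable_svc.fit(sample_train, labels_train)
callable_score = callable_svc.score(sample_val, labels_val)
print(f'Callable kernel classification test score: {callable_score*100}%')
```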
Related QGSS materials:
- [**Classical SVM (Lecture 4.2)**](https://www.youtube.com/watch?v=lpPij21jnZ4&list=PLOFEBzvs-VvqJwybFxkTiDzhf5E11p8BI&index=9)
- [**Quantum Classifier (Lecture 5.1)**](https://www.youtube.com/watch?v=-sxlXNz7ZxU&list=PLOFEBzvs-VvqJwybFxkTiDzhf5E11p8BI&index=11)
## Part 2: Challenge - QSVM for 3-class classification of Fashion-MNIST
In this part, you will use what you have learned so far to implement 3-class classification of clothing images and work on improving its accuracy.
<div class="alert alert-block alert-success">
**Challenge 3c**
**Goal**: Implement a 3-class classifier using QSVM and achieve 70% accuracy on the clothing image dataset with smaller feature map circuits.
**Dataset**: Fashion-MNIST clothing image dataset. There are the following three datasets in this challenge.
- Train: Both images and labels are given.
- Public test: Images are given and labels are hidden.
- Private test: Both images and labels are hidden.
Grading will be performed on both public test and private test data. The purpose of this is to make sure that quantum methods are used, so that cheating is not possible.
</div>
### How to implement a multi-class classifier using binary classifiers
So far, you have learned how to implement binary classification with QSVM. Now, how can you scale it up to multi-class classification? There are two approaches to do so. One is the One-vs-Rest approach, and the other is the One-vs-One approach.
1. One-vs-Rest: In this approach, multi-class classification is achieved by combining classifiers for each class that classifies the class as positive and the others as negative. Since one classifier is required for each class, the total number of classifiers required for N-class classification is N. The advantage is that fewer classifiers are needed, and the disadvantage is that the labels are likely to be imbalanced in each classification.
2. One-vs-One: In this approach, multi-class classification is achieved by combining classifiers for each pair of two classes, where one is positive and the other is negative. Since one classifier is required for each label pair, the total number of classifiers required for N-class classification is N(N-1)/2. The advantage is that labels are less likely to be imbalanced in each classification, and the disadvantage is that the number of classifiers required is larger.
Both approaches can be used to solve this problem, but here you will be given hints based on the One-vs-Rest approach. Please follow the hints to solve it.
<center><img src="./resources/onevsrest.png" width="800"></center>
Figure via [cc.gatech.edu](https://www.cc.gatech.edu/classes/AY2016/cs4476_fall/results/proj4/html/jnanda3/index.html)
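To make the One-vs-Rest combination step concrete, here is a tiny sketch (with made-up probabilities) of how the per-class "positive" probabilities from the binary classifiers can be combined into a single multiclass prediction:
```
import numpy as np

# Toy sketch of the One-vs-Rest combination step (hypothetical probabilities)
probs = np.array([[0.1, 0.7, 0.2],    # one row per sample,
                  [0.6, 0.1, 0.3]])   # one column per class
classes = np.array([0, 2, 3])         # the class each column corresponds to
print(classes[np.argmax(probs, axis=1)])   # [2 0]
```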
### 1. Data preparation
The data we are working with here is a small subset of the clothing image dataset called Fashion-MNIST, which is a variant of the MNIST dataset. We aim to classify the following labels.
- label 0: T-shirt/top
- label 2: pullover
- label 3: dress
First, let's load the dataset and display one image for each class.
```
# Load Fashion-MNIST dataset
DATA_PATH = './resources/ch3_part2.npz'
data = np.load(DATA_PATH)
sample_train = data['sample_train']
labels_train = data['labels_train']
sample_test = data['sample_test']
# Split train data
sample_train, sample_val, labels_train, labels_val = train_test_split(
sample_train, labels_train, test_size=0.2, random_state=42)
# Visualize samples
fig = plt.figure()
LABELS = [0, 2, 3]
num_labels = len(LABELS)
for i in range(num_labels):
ax = fig.add_subplot(1, num_labels, i+1)
img = sample_train[labels_train==LABELS[i]][0].reshape((28, 28))
ax.imshow(img, cmap="Greys")
```
Then, preprocess the dataset in the same way as before.
- Standardization
- PCA
- Normalization
Note that you can change the number of features here by changing N_DIM.
```
# Standardize
standard_scaler = StandardScaler()
sample_train = standard_scaler.fit_transform(sample_train)
sample_val = standard_scaler.transform(sample_val)
sample_test = standard_scaler.transform(sample_test)
# Reduce dimensions
N_DIM = 5
pca = PCA(n_components=N_DIM)
sample_train = pca.fit_transform(sample_train)
sample_val = pca.transform(sample_val)
sample_test = pca.transform(sample_test)
# Normalize
min_max_scaler = MinMaxScaler((-1, 1))
sample_train = min_max_scaler.fit_transform(sample_train)
sample_val = min_max_scaler.transform(sample_val)
sample_test = min_max_scaler.transform(sample_test)
```
### 2. Modeling
Based on the One-vs-Rest approach, you need to create the following three QSVM binary classifiers
- the label 0 and the rest
- the label 2 and the rest
- the label 3 and the rest
Here is the first one as a hint.
### 2.1: Label 0 vs Rest
Create new labels with label 0 as positive (1) and the rest as negative (0) as follows.
```
labels_train_0 = np.where(labels_train==0, 1, 0)
labels_val_0 = np.where(labels_val==0, 1, 0)
print(f'Original validation labels: {labels_val}')
print(f'Validation labels for 0 vs Rest: {labels_val_0}')
```
See that only the places where the original label was 0 are set to 1.
Next, construct a binary classifier using QSVM as before.
Note that PauliFeatureMap is used in this hint but you can use a different feature map.
```
pauli_map_0 = PauliFeatureMap(feature_dimension=N_DIM, reps=2, paulis = ['X', 'Y', 'ZZ'])
pauli_kernel_0 = QuantumKernel(feature_map=pauli_map_0, quantum_instance=Aer.get_backend('statevector_simulator'))
pauli_svc_0 = SVC(kernel='precomputed', probability=True)
matrix_train_0 = pauli_kernel_0.evaluate(x_vec=sample_train)
pauli_svc_0.fit(matrix_train_0, labels_train_0)
matrix_val_0 = pauli_kernel_0.evaluate(x_vec=sample_val, y_vec=sample_train)
pauli_score_0 = pauli_svc_0.score(matrix_val_0, labels_val_0)
print(f'Accuracy of discriminating between label 0 and others: {pauli_score_0*100}%')
# Var 1
map_0 = ZZFeatureMap(feature_dimension=N_DIM, reps=1, entanglement='linear')
kernel_0 = QuantumKernel(feature_map=map_0, quantum_instance=Aer.get_backend('statevector_simulator'))
svc_0 = SVC(kernel='precomputed', probability=True)
matrix_train_0 = kernel_0.evaluate(x_vec=sample_train)
svc_0.fit(matrix_train_0, labels_train_0)
matrix_val_0 = kernel_0.evaluate(x_vec=sample_val, y_vec=sample_train)
score_0 = svc_0.score(matrix_val_0, labels_val_0)
print(f'Accuracy of discriminating between label 0 and others: {score_0*100}%')
```
You can see that the QSVM binary classifier is able to distinguish between label 0 and the rest with a reasonable probability.
Finally, for each of the test samples, calculate the probability that it has label 0. It can be obtained with the ```predict_proba``` method.
```
matrix_test_0 = pauli_kernel_0.evaluate(x_vec=sample_test, y_vec=sample_train)
pred_0 = pauli_svc_0.predict_proba(matrix_test_0)[:, 1]
print(f'Probability of label 0: {np.round(pred_0, 2)}')
```
These probabilities are important clues for multiclass classification.
Obtain the probabilities for the remaining two labels in the same way.
### 2.2: Label 2 vs Rest
Build a binary classifier using QSVM and get the probability of label 2 for test dataset.
```
labels_train_2 = np.where(labels_train==2, 1, 0)
labels_val_2 = np.where(labels_val==2, 1, 0)
print(f'Original validation labels: {labels_val}')
print(f'Validation labels for 2 vs Rest: {labels_val_2}')
pauli_map_2 = PauliFeatureMap(feature_dimension=N_DIM, reps=2, paulis = ['X', 'Y', 'ZZ'])
pauli_kernel_2 = QuantumKernel(feature_map=pauli_map_2, quantum_instance=Aer.get_backend('statevector_simulator'))
pauli_svc_2 = SVC(kernel='precomputed', probability=True)
matrix_train_2 = pauli_kernel_2.evaluate(x_vec=sample_train)
pauli_svc_2.fit(matrix_train_2, labels_train_2)
matrix_val_2 = pauli_kernel_2.evaluate(x_vec=sample_val, y_vec=sample_train)
pauli_score_2 = pauli_svc_2.score(matrix_val_2, labels_val_2)
print(f'Accuracy of discriminating between label 2 and others: {pauli_score_2*100}%')
# Var 2
map_2 = ZZFeatureMap(feature_dimension=N_DIM, reps=1, entanglement='linear')
kernel_2 = QuantumKernel(feature_map=map_2, quantum_instance=Aer.get_backend('statevector_simulator'))
svc_2 = SVC(kernel='precomputed', probability=True)
matrix_train_2 = kernel_2.evaluate(x_vec=sample_train)
svc_2.fit(matrix_train_2, labels_train_2)
matrix_val_2 = kernel_2.evaluate(x_vec=sample_val, y_vec=sample_train)
score_2 = svc_2.score(matrix_val_2, labels_val_2)
print(f'Accuracy of discriminating between label 2 and others: {score_2*100}%')
##############################
# Provide your code here
matrix_test_2 = pauli_kernel_2.evaluate(x_vec=sample_test, y_vec=sample_train)
pred_2 = pauli_svc_2.predict_proba(matrix_test_2)[:, 1]
##############################
```
### 2.3 Label 3 vs Rest
Build a binary classifier using QSVM and get the probability of label 3 for test dataset.
```
labels_train_3 = np.where(labels_train==3, 1, 0)
labels_val_3 = np.where(labels_val==3, 1, 0)
print(f'Original validation labels: {labels_val}')
print(f'Validation labels for 3 vs Rest: {labels_val_3}')
pauli_map_3 = PauliFeatureMap(feature_dimension=N_DIM, reps=2, paulis = ['X', 'Y', 'ZZ'])
pauli_kernel_3 = QuantumKernel(feature_map=pauli_map_3, quantum_instance=Aer.get_backend('statevector_simulator'))
pauli_svc_3 = SVC(kernel='precomputed', probability=True)
matrix_train_3 = pauli_kernel_3.evaluate(x_vec=sample_train)
pauli_svc_3.fit(matrix_train_3, labels_train_3)
matrix_val_3 = pauli_kernel_3.evaluate(x_vec=sample_val, y_vec=sample_train)
pauli_score_3 = pauli_svc_3.score(matrix_val_3, labels_val_3)
print(f'Accuracy of discriminating between label 3 and others: {pauli_score_3*100}%')
# Var 3
map_3 = ZZFeatureMap(feature_dimension=N_DIM, reps=1, entanglement='linear')
kernel_3 = QuantumKernel(feature_map=map_3, quantum_instance=Aer.get_backend('statevector_simulator'))
svc_3 = SVC(kernel='precomputed', probability=True)
matrix_train_3 = kernel_3.evaluate(x_vec=sample_train)
svc_3.fit(matrix_train_3, labels_train_3)
matrix_val_3 = kernel_3.evaluate(x_vec=sample_val, y_vec=sample_train)
score_3 = svc_3.score(matrix_val_3, labels_val_3)
print(f'Accuracy of discriminating between label 3 and others: {score_3*100}%')
##############################
# Provide your code here
matrix_test_3 = pauli_kernel_3.evaluate(x_vec=sample_test, y_vec=sample_train)
pred_3 = pauli_svc_3.predict_proba(matrix_test_3)[:, 1]
##############################
print(f'Probability of label 0: {np.round(pred_0, 2)}')
print(f'Probability of label 2: {np.round(pred_2, 2)}')
print(f'Probability of label 3: {np.round(pred_3, 2)}')
```
### 3. Prediction
Lastly, make a final prediction based on the probability of each label.
The prediction you submit should be in the following format.
```
sample_pred = np.load('./resources/ch3_part2_sub.npy')
print(f'Sample prediction: {sample_pred}')
```
In order to understand the method to make predictions for multiclass classification, let's begin with the case of making predictions for just two labels, label 2 and label 3.
If the probabilities for a given data point are as follows, label 2 should be considered the most plausible.
- probability of label 2: 0.7
- probability of label 3: 0.2
You can implement this with the `np.where` function. (Of course, you can use other methods as well.)
```
pred_2_ex = np.array([0.7])
pred_3_ex = np.array([0.2])
pred_test_ex = np.where((pred_2_ex > pred_3_ex), 2, 3)
print(f'Prediction: {pred_test_ex}')
```
You can apply this method as is to multiple data points.
If the second data point has the following probabilities for each label, it should be classified as label 3.
- probability of label 2: 0.1
- probability of label 3: 0.6
```
pred_2_ex = np.array([0.7, 0.1])
pred_3_ex = np.array([0.2, 0.6])
pred_test_ex = np.where((pred_2_ex > pred_3_ex), 2, 3)
print(f'Prediction: {pred_test_ex}')
```
This method can be extended to make predictions for 3-class classification.
Implement such an extended method and make the final 3-class predictions.
```
##############################
# Provide your code here
pred_test = np.array([0 if ((pred_0[i] > pred_2[i]) & (pred_0[i] > pred_3[i]))
else 2 if ((pred_2[i] > pred_0[i]) & (pred_2[i] > pred_3[i]))
else 3 if ((pred_3[i] > pred_0[i]) & (pred_3[i] > pred_2[i]))
else -1 for i in range(len(pred_0))])
##############################
print(f'Original validation labels: {labels_val}')
print(f'Prediction: {pred_test}')
```
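The same decision can be written more compactly with `np.argmax`, which also avoids the `-1` fallback on ties (a sketch using the `pred_0`, `pred_2`, and `pred_3` arrays computed above):
```
# Stack the one-vs-rest probabilities and pick the label with the largest value
probs = np.vstack([pred_0, pred_2, pred_3])       # shape (3, n_test)
labels = np.array([0, 2, 3])
pred_test_alt = labels[np.argmax(probs, axis=0)]  # shape (n_test,)
print(f'Prediction: {pred_test_alt}')
```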
### 4. Submission
<div class="alert alert-block alert-success">
**Challenge 3c**
**Submission**: Submit the following 11 items.
- **pred_test**: prediction for the public test dataset
- **sample_train**: train data used to obtain kernels
- **standard_scaler**: the one used to standardize data
- **pca**: the one used to reduce dimension
- **min_max_scaler**: the one used to normalize data
- **kernel_0**: the kernel for the "label 0 vs rest" classifier
- **kernel_2**: the kernel for the "label 2 vs rest" classifier
- **kernel_3**: the kernel for the "label 3 vs rest" classifier
- **svc_0**: the SVC trained to classify "label 0 vs rest"
- **svc_2**: the SVC trained to classify "label 2 vs rest"
- **svc_3**: the SVC trained to classify "label 3 vs rest"
**Criteria**: Accuracy of 70% or better on both public and private test data.
**Score**: Solutions that pass the criteria will be scored as follows. The smaller this final score is, the better.
1. Each feature map gets transpiled with:
- basis_gates=['u1', 'u2', 'u3', 'cx']
- optimization_level=0
2. Calculate the cost for each transpiled circuit:
cost = 10 * #cx + (#u1 + #u2 + #u3)
3. The sum of the costs will be the final score.
</div>
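As a rough sanity check (this is not the official grader), you could estimate this cost yourself by transpiling one of your feature maps with the settings above and counting gates. The helper below is a sketch; it assumes `pauli_map_2` from earlier:
```
from qiskit import transpile

def feature_map_cost(feature_map):
    # Transpile with the challenge settings, then count the resulting gates
    qc = transpile(feature_map,
                   basis_gates=['u1', 'u2', 'u3', 'cx'],
                   optimization_level=0)
    ops = qc.count_ops()
    return 10 * ops.get('cx', 0) + ops.get('u1', 0) + ops.get('u2', 0) + ops.get('u3', 0)

print(f'Estimated cost of pauli_map_2: {feature_map_cost(pauli_map_2)}')
```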
Again, the prediction you submit should be in the following format.
- prediction for the public test data (**sample_test**)
- type: numpy.ndarray
- shape: (20,)
```
print(f'Sample prediction: {sample_pred}')
# Check your answer and submit using the following code
from qc_grader import grade_ex3c
grade_ex3c(pred_test, sample_train,
standard_scaler, pca, min_max_scaler,
kernel_0, kernel_2, kernel_3,
svc_0, svc_2, svc_3)
```
## Additional information
**Created by:** Shota Nakasuji, Anna Phan
**Version:** 1.0.0
| github_jupyter |
# Session 2 - Training a Network w/ Tensorflow
<p class="lead">
Assignment: Teach a Deep Neural Network to Paint
</p>
<p class="lead">
Parag K. Mital<br />
<a href="https://www.kadenze.com/courses/creative-applications-of-deep-learning-with-tensorflow/info">Creative Applications of Deep Learning w/ Tensorflow</a><br />
<a href="https://www.kadenze.com/partners/kadenze-academy">Kadenze Academy</a><br />
<a href="https://twitter.com/hashtag/CADL">#CADL</a>
</p>
This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/4.0/">Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License</a>.
# Learning Goals
* Learn how to create a Neural Network
* Learn to use a neural network to paint an image
* Apply creative thinking to the inputs, outputs, and definition of a network
# Outline
<!-- MarkdownTOC autolink=true autoanchor=true bracket=round -->
- [Assignment Synopsis](#assignment-synopsis)
- [Part One - Fully Connected Network](#part-one---fully-connected-network)
- [Instructions](#instructions)
- [Code](#code)
- [Variable Scopes](#variable-scopes)
- [Part Two - Image Painting Network](#part-two---image-painting-network)
- [Instructions](#instructions-1)
- [Preparing the Data](#preparing-the-data)
- [Cost Function](#cost-function)
- [Explore](#explore)
- [A Note on Crossvalidation](#a-note-on-crossvalidation)
- [Part Three - Learning More than One Image](#part-three---learning-more-than-one-image)
- [Instructions](#instructions-2)
- [Code](#code-1)
- [Part Four - Open Exploration \(Extra Credit\)](#part-four---open-exploration-extra-credit)
- [Assignment Submission](#assignment-submission)
<!-- /MarkdownTOC -->
This next section will just make sure you have the right version of python and the libraries that we'll be using. Don't change the code here but make sure you "run" it (use "shift+enter")!
```
# First check the Python version
import sys
if sys.version_info < (3,4):
print('You are running an older version of Python!\n\n' \
'You should consider updating to Python 3.4.0 or ' \
'higher as the libraries built for this course ' \
'have only been tested in Python 3.4 and higher.\n')
print('Try installing the Python 3.5 version of anaconda '
'and then restart `jupyter notebook`:\n' \
'https://www.continuum.io/downloads\n\n')
# Now get necessary libraries
try:
import os
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage import data
from scipy.misc import imresize
except ImportError:
print('You are missing some packages! ' \
'We will try installing them before continuing!')
!pip install "numpy>=1.11.0" "matplotlib>=1.5.1" "scikit-image>=0.11.3" "scikit-learn>=0.17" "scipy>=0.17.0"
import os
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage import data
from scipy.misc import imresize
print('Done!')
# Import Tensorflow
try:
import tensorflow as tf
except ImportError:
print("You do not have tensorflow installed!")
print("Follow the instructions on the following link")
print("to install tensorflow before continuing:")
print("")
print("https://github.com/pkmital/CADL#installation-preliminaries")
# This cell includes the provided libraries from the zip file
# and a library for displaying images from ipython, which
# we will use to display the gif
try:
from libs import utils, gif
import IPython.display as ipyd
except ImportError:
print("Make sure you have started notebook in the same directory" +
" as the provided zip file which includes the 'libs' folder" +
" and the file 'utils.py' inside of it. You will NOT be able"
" to complete this assignment unless you restart jupyter"
" notebook inside the directory created by extracting"
" the zip file or cloning the github repo.")
# We'll tell matplotlib to inline any drawn figures like so:
%matplotlib inline
plt.style.use('ggplot')
# Bit of formatting because I don't like the default inline code style:
from IPython.core.display import HTML
HTML("""<style> .rendered_html code {
padding: 2px 4px;
color: #c7254e;
background-color: #f9f2f4;
border-radius: 4px;
} </style>""")
```
<a name="assignment-synopsis"></a>
# Assignment Synopsis
In this assignment, we're going to create our first neural network capable of taking any two continuous values as inputs. Those two values will go through a series of multiplications, additions, and nonlinearities, coming out of the network as 3 outputs. Remember from the last homework, we used convolution to filter an image so that the representations in the image were accentuated. We're not going to be using convolution w/ Neural Networks until the next session, but we're effectively doing the same thing here: using multiplications to accentuate the representations in our data, in order to minimize whatever our cost function is. To find out what those multiplications need to be, we're going to use Gradient Descent and Backpropagation, which will take our cost, and find the appropriate updates to all the parameters in our network to best optimize the cost. In the next session, we'll explore much bigger networks and convolution. This "toy" network is really to help us get up and running with neural networks, and aid our exploration of the different components that make up a neural network. You will be expected to explore manipulations of the neural networks in this notebook as much as possible to aid your understanding of how they affect the final result.
We're going to build our first neural network to understand what color "to paint" given a location in an image, or the row, col of the image. So in goes a row/col, and out goes a R/G/B. In the next lesson, we'll learn what this network is really doing is performing regression. For now, we'll focus on the creative applications of such a network to help us get a better understanding of the different components that make up the neural network. You'll be asked to explore many of the different components of a neural network, including changing the inputs/outputs (i.e. the dataset), the number of layers, their activation functions, the cost functions, learning rate, and batch size. You'll also explore a modification to this same network which takes a 3rd input: an index for an image. This will let us try to learn multiple images at once, though with limited success.
We'll now dive right into creating deep neural networks, and I'm going to show you the math along the way. Don't worry if a lot of it doesn't make sense, and it really takes a bit of practice before it starts to come together.
<a name="part-one---fully-connected-network"></a>
# Part One - Fully Connected Network
<a name="instructions"></a>
## Instructions
Create the operations necessary for connecting an input to a network, defined by a `tf.Placeholder`, to a series of fully connected, or linear, layers, using the formula:
$$\textbf{H} = \phi(\textbf{X}\textbf{W} + \textbf{b})$$
where $\textbf{H}$ is an output layer representing the "hidden" activations of a network, $\phi$ represents some nonlinearity, $\textbf{X}$ represents an input to that layer, $\textbf{W}$ is that layer's weight matrix, and $\textbf{b}$ is that layer's bias.
If you're thinking, what is going on? Where did all that math come from? Don't be afraid of it. Once you learn how to "speak" the symbolic representation of the equation, it starts to get easier. And once we put it into practice with some code, it should start to feel like there is some association with what is written in the equation, and what we've written in code. Practice trying to say the equation in a meaningful way: "The output of a hidden layer is equal to some input multiplied by another matrix, adding some bias, and applying a non-linearity". Or perhaps: "The hidden layer is equal to a nonlinearity applied to an input multiplied by a matrix and adding some bias". Explore your own interpretations of the equation, or ways of describing it, and it starts to become much, much easier to apply the equation.
The first thing that happens in this equation is the input matrix $\textbf{X}$ is multiplied by another matrix, $\textbf{W}$. This is the most complicated part of the equation. It's performing matrix multiplication, as we've seen from last session, and is effectively scaling and rotating our input. The bias $\textbf{b}$ allows for a global shift in the resulting values. Finally, the nonlinearity of $\phi$ allows the input space to be nonlinearly warped, allowing it to express a lot more interesting distributions of data. Have a look below at some common nonlinearities. If you're unfamiliar with looking at graphs like this, it is common to read the horizontal axis as X, as the input, and the vertical axis as Y, as the output.
```
xs = np.linspace(-6, 6, 100)
plt.plot(xs, np.maximum(xs, 0), label='relu')
plt.plot(xs, 1 / (1 + np.exp(-xs)), label='sigmoid')
plt.plot(xs, np.tanh(xs), label='tanh')
plt.xlabel('Input')
plt.xlim([-6, 6])
plt.ylabel('Output')
plt.ylim([-1.5, 1.5])
plt.title('Common Activation Functions/Nonlinearities')
plt.legend(loc='lower right')
```
Remember, having series of linear followed by nonlinear operations is what makes neural networks expressive. By stacking a lot of "linear" + "nonlinear" operations in a series, we can create a deep neural network! Have a look at the output ranges of the above nonlinearity when considering which nonlinearity seems most appropriate. For instance, the `relu` is always above 0, but does not saturate at any value above 0, meaning it can be anything above 0. That's unlike the `sigmoid` which does saturate at both 0 and 1, meaning its values for a single output neuron will always be between 0 and 1. Similarly, the `tanh` saturates at -1 and 1.
Choosing between these is often a matter of trial and error. Though you can make some insights depending on your normalization scheme. For instance, if your output is expected to be in the range of 0 to 1, you may not want to use a `tanh` function, which ranges from -1 to 1, but likely would want to use a `sigmoid`. Keep the ranges of these activation functions in mind when designing your network, especially the final output layer of your network.
<a name="code"></a>
## Code
In this section, we're going to work out how to represent a fully connected neural network with code. First, create a 2D `tf.placeholder` called $\textbf{X}$ with `None` for the batch size and 2 features. Make its `dtype` `tf.float32`. Recall that we use the dimension of `None` for the batch size dimension to say that this dimension can be any number. Here is the docstring for the `tf.placeholder` function, have a look at what args it takes:
Help on function placeholder in module `tensorflow.python.ops.array_ops`:
```python
placeholder(dtype, shape=None, name=None)
```
Inserts a placeholder for a tensor that will be always fed.
**Important**: This tensor will produce an error if evaluated. Its value must
be fed using the `feed_dict` optional argument to `Session.run()`,
`Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.placeholder(tf.float32, shape=(1024, 1024))
y = tf.matmul(x, x)
with tf.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
rand_array = np.random.rand(1024, 1024)
print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.
```
Args:
dtype: The type of elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a tensor of any shape.
name: A name for the operation (optional).
Returns:
A `Tensor` that may be used as a handle for feeding a value, but not
evaluated directly.
<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
```
# Create a placeholder with None x 2 dimensions of dtype tf.float32, and name it "X":
X = ...
```
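If you get stuck, one possible way to fill this in (a sketch, not necessarily the only answer) is:
```python
# A None x 2 placeholder of dtype tf.float32, named "X"
X = tf.placeholder(tf.float32, shape=[None, 2], name='X')
```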
Now multiply the tensor using a new variable, $\textbf{W}$, which has 2 rows and 20 columns, so that when it is left multiplied by $\textbf{X}$, the output of the multiplication is None x 20, giving you 20 output neurons. Recall that the `tf.matmul` function takes two arguments, the left hand ($\textbf{X}$) and right hand side ($\textbf{W}$) of a matrix multiplication.
To create $\textbf{W}$, you will use `tf.get_variable` to create a matrix which is `2 x 20` in dimension. Look up the docstrings of functions `tf.get_variable` and `tf.random_normal_initializer` to get familiar with these functions. There are many options we will ignore for now. Just be sure to set the `name`, `shape` (this is the one that has to be [2, 20]), `dtype` (i.e. tf.float32), and `initializer` (the `tf.random_normal_initializer` you should create) when creating your $\textbf{W}$ variable with `tf.get_variable(...)`.
For the random normal initializer, often the mean is set to 0, and the standard deviation is set based on the number of neurons. But that really depends on the input and outputs of your network, how you've "normalized" your dataset, what your nonlinearity/activation function is, and what your expected range of inputs/outputs are. Don't worry about the values for the initializer for now, as this part will take a bit more experimentation to understand better!
This part is to encourage you to learn how to look up the documentation on Tensorflow, ideally using `tf.get_variable?` in the notebook. If you are really stuck, just scroll down a bit and I've shown you how to use it.
<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
```
W = tf.get_variable(...
h = tf.matmul(...
```
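One possible completion (a sketch; the exact initializer parameters are up to you) is:
```python
# 2 x 20 weight matrix initialized from a random normal, then the matrix multiply
W = tf.get_variable(
    name='W',
    shape=[2, 20],
    dtype=tf.float32,
    initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1))
h = tf.matmul(X, W)
```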
And add to this result another new variable, $\textbf{b}$, which has [20] dimensions. These values will be added to every output neuron after the multiplication above. Instead of the `tf.random_normal_initializer` that you used for creating $\textbf{W}$, now use the `tf.constant_initializer`. Often for bias, you'll set the constant bias initialization to 0 or 1.
<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
```
b = tf.get_variable(...
h = tf.nn.bias_add(...
```
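A possible completion (sketch):
```python
# A 20-dimensional bias initialized to a constant, added to every output neuron
b = tf.get_variable(
    name='b',
    shape=[20],
    dtype=tf.float32,
    initializer=tf.constant_initializer(0.0))
h = tf.nn.bias_add(h, b)
```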
So far we have done:
$$\textbf{X}\textbf{W} + \textbf{b}$$
Finally, apply a nonlinear activation to this output, such as `tf.nn.relu`, to complete the equation:
$$\textbf{H} = \phi(\textbf{X}\textbf{W} + \textbf{b})$$
<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
```
h = ...
```
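For example (a sketch), using the relu nonlinearity:
```python
# Apply the nonlinearity phi to complete H = phi(XW + b)
h = tf.nn.relu(h)
```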
Now that we've done all of this work, let's stick it inside a function. I've already done this for you and placed it inside the `utils` module under the function name `linear`. We've already imported the `utils` module so we can call it like so, `utils.linear(...)`. The docstring is copied below, and the code itself. Note that this function is slightly different to the one in the lecture. It does not require you to specify `n_input`, and the input `scope` is called `name`. It also has a few more extras in there including automatically converting a 4-d input tensor to a 2-d tensor so that you can fully connect the layer with a matrix multiply (don't worry about what this means if it doesn't make sense!).
```python
utils.linear??
```
```python
def linear(x, n_output, name=None, activation=None, reuse=None):
"""Fully connected layer
Parameters
----------
x : tf.Tensor
Input tensor to connect
n_output : int
Number of output neurons
name : None, optional
Scope to apply
Returns
-------
op : tf.Tensor
Output of fully connected layer.
"""
if len(x.get_shape()) != 2:
x = flatten(x, reuse=reuse)
n_input = x.get_shape().as_list()[1]
with tf.variable_scope(name or "fc", reuse=reuse):
W = tf.get_variable(
name='W',
shape=[n_input, n_output],
dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
b = tf.get_variable(
name='b',
shape=[n_output],
dtype=tf.float32,
initializer=tf.constant_initializer(0.0))
h = tf.nn.bias_add(
name='h',
value=tf.matmul(x, W),
bias=b)
if activation:
h = activation(h)
return h, W
```
<a name="variable-scopes"></a>
## Variable Scopes
Note that since we are using `variable_scope` and explicitly telling the scope which name we would like, if there is *already* a variable created with the same name, then Tensorflow will raise an exception! If this happens, you should consider one of three possible solutions:
1. If this happens while you are interactively editing a graph, you may need to reset the current graph:
```python
tf.reset_default_graph()
```
You should really only have to use this if you are in an interactive console! If you are creating Python scripts to run via command line, you should really be using solution 3 listed below, and be explicit with your graph contexts!
2. If this happens and you were not expecting any name conflicts, then perhaps you had a typo and created another layer with the same name! That's a good reason to keep useful names for everything in your graph!
3. More likely, you should be using context managers when creating your graphs and running sessions. This works like so:
```python
g = tf.Graph()
with tf.Session(graph=g) as sess:
Y_pred, W = linear(X, 2, 3, activation=tf.nn.relu)
```
or:
```python
g = tf.Graph()
with tf.Session(graph=g) as sess, g.as_default():
Y_pred, W = linear(X, 2, 3, activation=tf.nn.relu)
```
You can now write the same process as the above steps by simply calling:
```
h, W = utils.linear(
x=X, n_output=20, name='linear', activation=tf.nn.relu)
```
<a name="part-two---image-painting-network"></a>
# Part Two - Image Painting Network
<a name="instructions-1"></a>
## Instructions
Follow along the steps below, first setting up input and output data of the network, $\textbf{X}$ and $\textbf{Y}$. Then work through building the neural network which will try to compress the information in $\textbf{X}$ through a series of linear and non-linear functions so that, whatever it is given as input, it minimizes the error between its prediction, $\hat{\textbf{Y}}$, and the true output $\textbf{Y}$ through its training process. You'll also create an animated GIF of the training which you'll need to submit for the homework!
Through this, we'll explore our first creative application: painting an image. This network is just meant to demonstrate how easily networks can be scaled to more complicated tasks without much modification. It is also meant to get you thinking about neural networks as building blocks that can be reconfigured, replaced, reorganized, and get you thinking about how the inputs and outputs can be anything you can imagine.
<a name="preparing-the-data"></a>
## Preparing the Data
We'll follow an example that Andrej Karpathy has done in his online demonstration of "image inpainting". What we're going to do is teach the network to go from the location on an image frame to a particular color. So given any position in an image, the network will need to learn what color to paint. Let's first get an image that we'll try to teach a neural network to paint.
<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
```
# First load an image
img = ...
# Be careful with the size of your image.
# Try a fairly small image to begin with,
# then come back here and try larger sizes.
img = imresize(img, (100, 100))
plt.figure(figsize=(5, 5))
plt.imshow(img)
# Make sure you save this image as "reference.png"
# and include it in your zipped submission file
# so we can tell what image you are trying to paint!
plt.imsave(fname='reference.png', arr=img)
```
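If you don't have an image of your own handy, one option (a sketch) is a stock image from `skimage`, which was imported above as `data`:
```python
# Use a built-in test image so the cell runs end-to-end
img = data.astronaut()
```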
In the lecture, I showed how to aggregate the pixel locations and their colors using a loop over every pixel position. I put that code into a function `split_image` below. Feel free to experiment with other features for `xs` or `ys`.
```
def split_image(img):
# We'll first collect all the positions in the image in our list, xs
xs = []
# And the corresponding colors for each of these positions
ys = []
# Now loop over the image
for row_i in range(img.shape[0]):
for col_i in range(img.shape[1]):
# And store the inputs
xs.append([row_i, col_i])
# And outputs that the network needs to learn to predict
ys.append(img[row_i, col_i])
# we'll convert our lists to arrays
xs = np.array(xs)
ys = np.array(ys)
return xs, ys
```
Let's use this function to create the inputs (xs) and outputs (ys) to our network as the pixel locations (xs) and their colors (ys):
```
xs, ys = split_image(img)
# and print the shapes
xs.shape, ys.shape
```
Also remember, we should normalize our input values!
<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
```
# Normalize the input (xs) using its mean and standard deviation
xs = ...
# Just to make sure you have normalized it correctly:
print(np.min(xs), np.max(xs))
assert(np.min(xs) > -3.0 and np.max(xs) < 3.0)
```
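One possible completion (a sketch), using the same mean/standard-deviation normalization that the `train` function later in this notebook uses:
```python
# Zero-mean, unit-variance normalization of the (row, col) inputs
xs = (xs - np.mean(xs, axis=0)) / np.std(xs, axis=0)
```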
Similarly for the output:
```
print(np.min(ys), np.max(ys))
```
We'll normalize the output using a simpler normalization method, since we know the values range from 0-255:
```
ys = ys / 255.0
print(np.min(ys), np.max(ys))
```
Scaling the image values like this has the advantage that it is still interpretable as an image, unlike if we have negative values.
What we're going to do is use regression to predict the value of a pixel given its (row, col) position. So the input to our network is `X = (row, col)` value. And the output of the network is `Y = (r, g, b)`.
We can get our original image back by reshaping the colors back into the original image shape. This works because the `ys` are still in order:
```
plt.imshow(ys.reshape(img.shape))
```
But when we give inputs of (row, col) to our network, it won't know what order they are, because we will randomize them. So it will have to *learn* what color value should be output for any given (row, col).
Create 2 placeholders of `dtype` `tf.float32`: one for the input of the network, a `None x 2` dimension placeholder called $\textbf{X}$, and another for the true output of the network, a `None x 3` dimension placeholder called $\textbf{Y}$.
<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
```
# Let's reset the graph:
tf.reset_default_graph()
# Create a placeholder of None x 2 dimensions and dtype tf.float32
# This will be the input to the network which takes the row/col
X = tf.placeholder(...
# Create the placeholder, Y, with 3 output dimensions instead of 2.
# This will be the output of the network, the R, G, B values.
Y = tf.placeholder(...
```
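A possible completion (sketch):
```python
# Input: (row, col) pairs; output: (r, g, b) colors
X = tf.placeholder(tf.float32, shape=[None, 2], name='X')
Y = tf.placeholder(tf.float32, shape=[None, 3], name='Y')
```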
Now create a deep neural network that takes your network input $\textbf{X}$ of 2 neurons, multiplies it by a linear and non-linear transformation which makes its shape [None, 20], meaning it will have 20 output neurons. Then repeat the same process again to give you 20 neurons again, and then again and again until you've done 6 layers of 20 neurons. Then finally one last layer which will output 3 neurons, your predicted output, which I've been denoting mathematically as $\hat{\textbf{Y}}$, for a total of 6 hidden layers, or 8 layers total including the input and output layers. Mathematically, we'll be creating a deep neural network that looks just like the previous fully connected layer we've created, but with a few more connections. So recall the first layer's connection is:
\begin{align}
\textbf{H}_1=\phi(\textbf{X}\textbf{W}_1 + \textbf{b}_1) \\
\end{align}
So the next layer will take that output, and connect it up again:
\begin{align}
\textbf{H}_2=\phi(\textbf{H}_1\textbf{W}_2 + \textbf{b}_2) \\
\end{align}
And same for every other layer:
\begin{align}
\textbf{H}_3=\phi(\textbf{H}_2\textbf{W}_3 + \textbf{b}_3) \\
\textbf{H}_4=\phi(\textbf{H}_3\textbf{W}_4 + \textbf{b}_4) \\
\textbf{H}_5=\phi(\textbf{H}_4\textbf{W}_5 + \textbf{b}_5) \\
\textbf{H}_6=\phi(\textbf{H}_5\textbf{W}_6 + \textbf{b}_6) \\
\end{align}
Including the very last layer, which will be the prediction of the network:
\begin{align}
\hat{\textbf{Y}}=\phi(\textbf{H}_6\textbf{W}_7 + \textbf{b}_7)
\end{align}
Remember if you run into issues with variable scopes/names, that you cannot recreate a variable with the same name! Revisit the section on <a href='#Variable-Scopes'>Variable Scopes</a> if you get stuck with name issues.
<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
```
# We'll create 6 hidden layers. Let's create a variable
# to say how many neurons we want for each of the layers
# (try 20 to begin with, then explore other values)
n_neurons = ...
# Create the first linear + nonlinear layer which will
# take the 2 input neurons and fully connects it to 20 neurons.
# Use the `utils.linear` function to do this just like before,
# but also remember to give names for each layer, such as
# "1", "2", ... "5", or "layer1", "layer2", ... "layer6".
h1, W1 = ...
# Create another one:
h2, W2 = ...
# and four more (or replace all of this with a loop if you can!):
h3, W3 = ...
h4, W4 = ...
h5, W5 = ...
h6, W6 = ...
# Now, make one last layer to make sure your network has 3 outputs:
Y_pred, W7 = utils.linear(h6, 3, activation=None, name='pred')
assert(X.get_shape().as_list() == [None, 2])
assert(Y_pred.get_shape().as_list() == [None, 3])
assert(Y.get_shape().as_list() == [None, 3])
```
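If you'd rather not write out six nearly identical lines, the hidden layers can be built in a loop (a sketch that replaces the `h1` ... `h6` lines above; the final `pred` layer stays the same):
```python
n_neurons = 20
h = X
for layer_i in range(1, 7):
    # Each iteration adds one fully connected layer of n_neurons with a relu
    h, _ = utils.linear(
        x=h, n_output=n_neurons,
        name='layer{}'.format(layer_i),
        activation=tf.nn.relu)
h6 = h  # feed this into the final 3-neuron 'pred' layer shown above
```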
<a name="cost-function"></a>
## Cost Function
Now we're going to work on creating a `cost` function. The cost should represent how much `error` there is in the network, and provide the optimizer this value to help it train the network's parameters using gradient descent and backpropagation.
Let's say our error is `E`, then the cost will be:
$$cost(\textbf{Y}, \hat{\textbf{Y}}) = \frac{1}{\text{B}} \displaystyle\sum\limits_{b=0}^{\text{B}} \textbf{E}_b
$$
where the error is measured as, e.g.:
$$\textbf{E} = \displaystyle\sum\limits_{c=0}^{\text{C}} (\textbf{Y}_{c} - \hat{\textbf{Y}}_{c})^2$$
Don't worry if this scares you. This is mathematically expressing the same concept as: "the cost of an actual $\textbf{Y}$, and a predicted $\hat{\textbf{Y}}$ is equal to the mean across batches, of which there are $\text{B}$ total batches, of the sum of distances across $\text{C}$ color channels of every predicted output and true output". Basically, we're trying to see on average, or at least within a single minibatch's average, how wrong was our prediction? We create a measure of error for every output feature by squaring the difference between the predicted output and the actual output it should have, i.e. the actual color value it should have output for a given input pixel position. By squaring it, we penalize large distances, but not so much small distances.
Consider how the square function (i.e., $f(x) = x^2$) changes for a given error. If our color values range between 0-255, then a typical amount of error would be between $0$ and $128^2$. For example if my prediction was (120, 50, 167), and the color should have been (0, 100, 120), then the error for the Red channel is (120 - 0) or 120. And the Green channel is (50 - 100) or -50, and for the Blue channel, (167 - 120) = 47. When I square this result, I get: (120)^2, (-50)^2, and (47)^2. I then add all of these and that is my error, $\textbf{E}$, for this one observation. But I will have a few observations per minibatch. So I add all the error in my batch together, then divide by the number of observations in the batch, essentially finding the mean error of my batch.
Let's try to see what the square in our measure of error is doing graphically.
```
error = np.linspace(0.0, 128.0**2, 100)
loss = error**2.0
plt.plot(error, loss)
plt.xlabel('error')
plt.ylabel('loss')
```
This is known as the $l_2$ (pronounced el-two) loss. It doesn't penalize small errors as much as it does large errors. This is easier to see when we compare it with another common loss, the $l_1$ (el-one) loss. It is linear in error, by taking the absolute value of the error. We'll compare the $l_1$ loss with normalized values from $0$ to $1$. So instead of having $0$ to $255$ for our RGB values, we'd have $0$ to $1$, simply by dividing our color values by $255.0$.
```
error = np.linspace(0.0, 1.0, 100)
plt.plot(error, error**2, label='l_2 loss')
plt.plot(error, np.abs(error), label='l_1 loss')
plt.xlabel('error')
plt.ylabel('loss')
plt.legend(loc='lower right')
```
So unlike the $l_2$ loss, the $l_1$ loss is really quickly upset if there is *any* error at all: as soon as error moves away from $0.0$, to $0.1$, the $l_1$ loss is $0.1$. But the $l_2$ loss is $0.1^2 = 0.01$. Having a stronger penalty on smaller errors often leads to what the literature calls "sparse" solutions, since it favors activations that try to explain as much of the data as possible, rather than a lot of activations that do a sort of good job, but when put together, do a great job of explaining the data. Don't worry about what this means if you are more unfamiliar with Machine Learning. There is a lot of literature surrounding each of these loss functions that we won't have time to get into, but look them up if they interest you.
During the lecture, we've seen how to create a cost function using Tensorflow. To create a $l_2$ loss function, you can for instance use tensorflow's `tf.squared_difference` or for an $l_1$ loss function, `tf.abs`. You'll need to refer to the `Y` and `Y_pred` variables only, and your resulting cost should be a single value. Try creating the $l_1$ loss to begin with, and come back here after you have trained your network, to compare the performance with a $l_2$ loss.
The equation for computing cost I mentioned above is more succinctly written as, for the $l_2$ norm:
$$cost(\textbf{Y}, \hat{\textbf{Y}}) = \frac{1}{\text{B}} \displaystyle\sum\limits_{b=0}^{\text{B}} \displaystyle\sum\limits_{c=0}^{\text{C}} (\textbf{Y}_{c} - \hat{\textbf{Y}}_{c})^2$$
For $l_1$ norm, we'd have:
$$cost(\textbf{Y}, \hat{\textbf{Y}}) = \frac{1}{\text{B}} \displaystyle\sum\limits_{b=0}^{\text{B}} \displaystyle\sum\limits_{c=0}^{\text{C}} \text{abs}(\textbf{Y}_{c} - \hat{\textbf{Y}}_{c})$$
Remember, to understand this equation, try to say it out loud: the $cost$ given two variables, $\textbf{Y}$, the actual output we want the network to have, and $\hat{\textbf{Y}}$ the predicted output from the network, is equal to the mean across $\text{B}$ batches, of the sum of $\textbf{C}$ color channels distance between the actual and predicted outputs. If you're still unsure, refer to the lecture where I've computed this, or scroll down a bit to where I've included the answer.
<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
```
# first compute the error, the inner part of the summation.
# This should be the l1-norm or l2-norm of the distance
# between each color channel.
error = ...
assert(error.get_shape().as_list() == [None, 3])
```
<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
```
# Now sum the error for each feature in Y.
# If Y is [Batch, Features], the sum should be [Batch]:
sum_error = ...
assert(sum_error.get_shape().as_list() == [None])
```
<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
```
# Finally, compute the cost, as the mean error of the batch.
# This should be a single value.
cost = ...
assert(cost.get_shape().as_list() == [])
```
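A possible completion (a sketch), starting with the $l_1$ loss as suggested above; swap in `tf.squared_difference(Y, Y_pred)` for the $l_2$ version:
```python
error = tf.abs(Y - Y_pred)           # per-channel absolute distance, shape [None, 3]
sum_error = tf.reduce_sum(error, 1)  # sum over the 3 color channels, shape [None]
cost = tf.reduce_mean(sum_error)     # mean over the batch, a single scalar
```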
We now need an `optimizer` which will take our `cost` and a `learning_rate`, which says how far along the gradient to move. This optimizer calculates all the gradients in our network with respect to the `cost` variable and updates all of the weights in our network using backpropagation. We'll then create mini-batches of our training data and run the `optimizer` using a `session`.
<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
```
# Refer to the help for the function
optimizer = tf.train....minimize(cost)
# Create parameters for the number of iterations to run for (< 100)
n_iterations = ...
# And how much data is in each minibatch (< 500)
batch_size = ...
# Then create a session
sess = tf.Session()
```
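A possible completion of the blanks above (a sketch; the values are just reasonable starting points):
```python
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
n_iterations = 50   # keep this < 100 to start with
batch_size = 200    # keep this < 500 to start with
```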
We'll now train our network! The code below should do this for you if you've setup everything else properly. Please read through this and make sure you understand each step! Note that this can take a VERY LONG time depending on the size of your image (make it < 100 x 100 pixels), the number of neurons per layer (e.g. < 30), the number of layers (e.g. < 8), and number of iterations (< 1000). Welcome to Deep Learning :)
```
# Initialize all your variables and run the operation with your session
sess.run(tf.initialize_all_variables())
# Optimize over a few iterations, each time following the gradient
# a little at a time
imgs = []
costs = []
gif_step = n_iterations // 10
step_i = 0
for it_i in range(n_iterations):
# Get a random sampling of the dataset
idxs = np.random.permutation(range(len(xs)))
# The number of batches we have to iterate over
n_batches = len(idxs) // batch_size
# Now iterate over our stochastic minibatches:
for batch_i in range(n_batches):
# Get just minibatch amount of data
idxs_i = idxs[batch_i * batch_size: (batch_i + 1) * batch_size]
# And optimize, also returning the cost so we can monitor
# how our optimization is doing.
training_cost = sess.run(
[cost, optimizer],
feed_dict={X: xs[idxs_i], Y: ys[idxs_i]})[0]
# Also, every 20 iterations, we'll draw the prediction of our
# input xs, which should try to recreate our image!
if (it_i + 1) % gif_step == 0:
costs.append(training_cost / n_batches)
ys_pred = Y_pred.eval(feed_dict={X: xs}, session=sess)
img = np.clip(ys_pred.reshape(img.shape), 0, 1)
imgs.append(img)
# Plot the cost over time
fig, ax = plt.subplots(1, 2)
ax[0].plot(costs)
ax[0].set_xlabel('Iteration')
ax[0].set_ylabel('Cost')
ax[1].imshow(img)
fig.suptitle('Iteration {}'.format(it_i))
plt.show()
# Save the images as a GIF
_ = gif.build_gif(imgs, saveto='single.gif', show_gif=False)
```
Let's now display the GIF we've just created:
```
ipyd.Image(url='single.gif?{}'.format(np.random.rand()),
height=500, width=500)
```
<a name="explore"></a>
## Explore
Go back over the previous cells and explore changing different parameters of the network. I would suggest first trying to change the `learning_rate` parameter to different values and seeing how the cost curve changes. What do you notice? Try exponents of $10$, e.g. $10^1$, $10^2$, $10^3$... and so on. Also try changing the `batch_size`: $50, 100, 200, 500, ...$ How does it affect how the cost changes over time?
Be sure to explore other manipulations of the network, such as changing the loss function to $l_2$ or $l_1$. How does it change the resulting learning? Also try changing the activation functions, the number of layers/neurons, different optimizers, and anything else that you may think of, and try to get a basic understanding, on this toy problem, of how it affects the network's training. Also try comparing a fairly shallow/wide net (e.g. 1-2 layers with many neurons, e.g. > 100) versus a deep/narrow net (e.g. 6-20 layers with fewer neurons, e.g. < 20). What do you notice?
<a name="a-note-on-crossvalidation"></a>
## A Note on Crossvalidation
The cost curve plotted above is only showing the cost for our "training" dataset. Ideally, we should split our dataset into what are called "train", "validation", and "test" sets. This is done by taking random subsets of the entire dataset. For instance, we partition our dataset by saying we'll only use 80% of it for training, 10% for validation, and the last 10% for testing. Then when training as above, you would only use the 80% of the data you had partitioned, and then monitor accuracy on both the data you have used to train, but also that new 10% of unseen validation data. This gives you a sense of how "general" your network is. If it is performing just as well on that 10% of data, then you know it is doing a good job. Finally, once you are done training, you would test one last time on your "test" dataset. Ideally, you'd do this a number of times, so that every part of the dataset had a chance to be the test set. This would also give you a measure of the variance of the accuracy on the final test. If it changes a lot, you know something is wrong. If it remains fairly stable, then you know that it is a good representation of the model's accuracy on unseen data.
We didn't get a chance to cover this in class, as it is less useful for exploring creative applications, though it is very useful to know and to use in practice, as it avoids overfitting/overgeneralizing your network to all of the data. Feel free to explore how to do this on the application above!
<a name="part-three---learning-more-than-one-image"></a>
# Part Three - Learning More than One Image
<a name="instructions-2"></a>
## Instructions
We're now going to make use of our Dataset from Session 1 and apply what we've just learned to try and paint every single image in our dataset. How would you guess is the best way to approach this? We could for instance feed in every possible image by having multiple row, col -> r, g, b values. So for any given row, col, we'd have 100 possible r, g, b values. This likely won't work very well as there are many possible values a pixel could take, not just one. What if we also tell the network *which* image's row and column we wanted painted? We're going to try and see how that does.
You can execute all of the cells below unchanged to see how this works with the first 100 images of the celeb dataset. But you should replace the images with your own dataset, and vary the parameters of the network to get the best results!
I've placed the same code for running the previous algorithm into two functions, `build_model` and `train`. You can directly call the function `train` with a 4-d array of images shaped as N x H x W x C, and it will collect all of the points of every image and try to predict the output colors of those pixels, just like before. The only difference now is that you are able to try this with a few images at a time. There are a few ways we could have tried to handle multiple images. The way I've shown in the `train` function is to include an additional input neuron for *which* image it is. So as well as receiving the row and column, the network will also receive as input which image it is, as a number. This should help the network to better distinguish the patterns it uses, as it has knowledge that helps it separate its process based on which image is fed as input.
```
def build_model(xs, ys, n_neurons, n_layers, activation_fn,
final_activation_fn, cost_type):
xs = np.asarray(xs)
ys = np.asarray(ys)
if xs.ndim != 2:
raise ValueError(
'xs should be a n_observates x n_features, ' +
'or a 2-dimensional array.')
if ys.ndim != 2:
raise ValueError(
'ys should be a n_observates x n_features, ' +
'or a 2-dimensional array.')
n_xs = xs.shape[1]
n_ys = ys.shape[1]
X = tf.placeholder(name='X', shape=[None, n_xs],
dtype=tf.float32)
Y = tf.placeholder(name='Y', shape=[None, n_ys],
dtype=tf.float32)
current_input = X
for layer_i in range(n_layers):
current_input = utils.linear(
current_input, n_neurons,
activation=activation_fn,
name='layer{}'.format(layer_i))[0]
Y_pred = utils.linear(
current_input, n_ys,
activation=final_activation_fn,
name='pred')[0]
if cost_type == 'l1_norm':
cost = tf.reduce_mean(tf.reduce_sum(
tf.abs(Y - Y_pred), 1))
elif cost_type == 'l2_norm':
cost = tf.reduce_mean(tf.reduce_sum(
tf.squared_difference(Y, Y_pred), 1))
else:
raise ValueError(
'Unknown cost_type: {}. '.format(
cost_type) + 'Use only "l1_norm" or "l2_norm"')
return {'X': X, 'Y': Y, 'Y_pred': Y_pred, 'cost': cost}
def train(imgs,
learning_rate=0.0001,
batch_size=200,
n_iterations=10,
gif_step=2,
n_neurons=30,
n_layers=10,
activation_fn=tf.nn.relu,
final_activation_fn=tf.nn.tanh,
cost_type='l2_norm'):
N, H, W, C = imgs.shape
all_xs, all_ys = [], []
for img_i, img in enumerate(imgs):
xs, ys = split_image(img)
all_xs.append(np.c_[xs, np.repeat(img_i, [xs.shape[0]])])
all_ys.append(ys)
xs = np.array(all_xs).reshape(-1, 3)
xs = (xs - np.mean(xs, 0)) / np.std(xs, 0)
ys = np.array(all_ys).reshape(-1, 3)
ys = ys / 127.5 - 1
g = tf.Graph()
with tf.Session(graph=g) as sess:
model = build_model(xs, ys, n_neurons, n_layers,
activation_fn, final_activation_fn,
cost_type)
optimizer = tf.train.AdamOptimizer(
learning_rate=learning_rate).minimize(model['cost'])
sess.run(tf.initialize_all_variables())
gifs = []
costs = []
step_i = 0
for it_i in range(n_iterations):
# Get a random sampling of the dataset
idxs = np.random.permutation(range(len(xs)))
# The number of batches we have to iterate over
n_batches = len(idxs) // batch_size
training_cost = 0
# Now iterate over our stochastic minibatches:
for batch_i in range(n_batches):
# Get just minibatch amount of data
idxs_i = idxs[batch_i * batch_size:
(batch_i + 1) * batch_size]
# And optimize, also returning the cost so we can monitor
# how our optimization is doing.
cost = sess.run(
[model['cost'], optimizer],
feed_dict={model['X']: xs[idxs_i],
model['Y']: ys[idxs_i]})[0]
training_cost += cost
print('iteration {}/{}: cost {}'.format(
it_i + 1, n_iterations, training_cost / n_batches))
# Also, every 20 iterations, we'll draw the prediction of our
# input xs, which should try to recreate our image!
if (it_i + 1) % gif_step == 0:
costs.append(training_cost / n_batches)
ys_pred = model['Y_pred'].eval(
feed_dict={model['X']: xs}, session=sess)
img = ys_pred.reshape(imgs.shape)
gifs.append(img)
return gifs
```
<a name="code-1"></a>
## Code
Below, I've shown code for loading the first 100 celeb files. Run through the next few cells to see how this works with the celeb dataset, and then come back here and replace the `imgs` variable with your own set of images. For instance, you can try your entire sorted dataset from Session 1 as an N x H x W x C array. Explore!
<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
```
celeb_imgs = utils.get_celeb_imgs()
plt.figure(figsize=(10, 10))
plt.imshow(utils.montage(celeb_imgs).astype(np.uint8))
# It doesn't have to be 100 images, explore!
imgs = np.array(celeb_imgs).copy()
```
Explore changing the parameters of the `train` function and your own dataset of images. Note, you do not have to use the dataset from the last assignment! Explore different numbers of images, whatever you prefer.
<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
```
# Change the parameters of the train function and
# explore changing the dataset
gifs = train(imgs=imgs)
```
Now we'll create a gif out of the training process. Be sure to call this 'multiple.gif' for your homework submission:
```
montage_gifs = [np.clip(utils.montage(
(m * 127.5) + 127.5), 0, 255).astype(np.uint8)
for m in gifs]
_ = gif.build_gif(montage_gifs, saveto='multiple.gif')
```
And show it in the notebook
```
ipyd.Image(url='multiple.gif?{}'.format(np.random.rand()),
height=500, width=500)
```
What we're seeing is the training process over time. We feed in our `xs`, which consist of the pixel locations (and image index) of each of our 100 images; these go through the neural network, and out come predicted color values for every input. We visualize it above as a gif by seeing how at each iteration the network has predicted the entire space of the inputs. We can visualize just the last iteration as a "latent" space, going from the first image (the top left image in the montage), to the last image (the bottom right image).
```
final = gifs[-1]
final_gif = [np.clip(((m * 127.5) + 127.5), 0, 255).astype(np.uint8) for m in final]
gif.build_gif(final_gif, saveto='final.gif')
ipyd.Image(url='final.gif?{}'.format(np.random.rand()),
height=200, width=200)
```
<a name="part-four---open-exploration-extra-credit"></a>
# Part Four - Open Exploration (Extra Credit)
I now want you to explore what other possible manipulations of the network and/or dataset you could imagine. Perhaps a process that does the reverse, and tries to guess where a given color should be painted? What if it was only taught a certain palette, and had to reason about other colors, how would it interpret those colors? Or what if you fed it pixel locations that weren't part of the training set, or outside the frame of what it was trained on? Or what happens with different activation functions, a different number of layers, or more or fewer neurons? I leave any of these as an open exploration for you.
Try exploring this process with your own ideas, materials, and networks, and submit something you've created as a gif! To aid exploration, be sure to scale the image down quite a bit or it will require a much larger machine, and much more time to train. Then whenever you think you may be happy with the process you've created, try scaling up the resolution and leave the training to happen over a few hours/overnight to produce something truly stunning!
Make sure to name the result of your gif: "explore.gif", and be sure to include it in your zip file.
<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
```
# Train a network to produce something, storing every few
# iterations in the variable gifs, then export the training
# over time as a gif.
...
gif.build_gif(montage_gifs, saveto='explore.gif')
ipyd.Image(url='explore.gif?{}'.format(np.random.rand()),
height=500, width=500)
```
<a name="assignment-submission"></a>
# Assignment Submission
After you've completed the notebook, create a zip file of the current directory using the code below. This code will make sure you have included this completed ipython notebook and the following files named exactly as:
<pre>
session-2/
session-2.ipynb
single.gif
multiple.gif
final.gif
explore.gif*
libs/
utils.py
* = optional/extra-credit
</pre>
You'll then submit this zip file for your second assignment on Kadenze for "Assignment 2: Teach a Deep Neural Network to Paint"! If you have any questions, remember to reach out on the forums and connect with your peers or with me.
To get assessed, you'll need to be a premium student! This will allow you to build an online portfolio of all of your work and receive grades. If you aren't already enrolled as a student, register now at http://www.kadenze.com/ and join the [#CADL](https://twitter.com/hashtag/CADL) community to see what your peers are doing! https://www.kadenze.com/courses/creative-applications-of-deep-learning-with-tensorflow/info
Also, if you share any of the GIFs on Facebook/Twitter/Instagram/etc..., be sure to use the #CADL hashtag so that other students can find your work!
```
utils.build_submission('session-2.zip',
('reference.png',
'single.gif',
'multiple.gif',
'final.gif',
'session-2.ipynb'),
('explore.gif'))
```
| github_jupyter |
```
#!pip install pandas_profiling
#!pip install matplotlib
import sys
sys.version
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
import scipy.stats as stats
import pandas_profiling
%matplotlib inline
plt.rcParams['figure.figsize'] = 10, 7.5
plt.rcParams['axes.grid'] = True
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.cluster import KMeans
# center and scale the data
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import sklearn.metrics as metrics
# reading data into dataframe
Cust= pd.read_csv("CC_GENERAL.csv")
Cust.head()
### Exporting pandas profiling output to html file
output = pandas_profiling.ProfileReport(Cust)
output.to_file(output_file='pandas_profiling.html')
```
### Cols to drop
```
# CUST_ID,ONEOFF_PURCHASES
Cust.info()
Cust.drop(["CUST_ID","ONEOFF_PURCHASES"], axis=1, inplace=True)
Cust.info()
Cust.TENURE.unique()
#Handling Outliers - Method2
def outlier_capping(x):
x = x.clip(upper=x.quantile(0.99), lower=x.quantile(0.01))
return x
Cust=Cust.apply(lambda x: outlier_capping(x))
#Handling missings - Method2
def Missing_imputation(x):
x = x.fillna(x.median())
return x
Cust=Cust.apply(lambda x: Missing_imputation(x))
Cust.corr()
# visualize correlation matrix in Seaborn using a heatmap
sns.heatmap(Cust.corr())
```
### Standardizing data
- To put data on the same scale
```
sc=StandardScaler()
Cust_scaled=sc.fit_transform(Cust)
pd.DataFrame(Cust_scaled).shape
```
### Applying PCA
```
pc = PCA(n_components=16)
pc.fit(Cust_scaled)
pc.explained_variance_
#Eigen values
sum(pc.explained_variance_)
#The amount of variance that each PC explains
var= pc.explained_variance_ratio_
var
#Cumulative Variance explains
var1=np.cumsum(np.round(pc.explained_variance_ratio_, decimals=4)*100)
var1
```
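A quick plot of the cumulative explained variance (a sketch using the `var1` array computed above) makes the cutoff easier to see:
```
# Scree-style plot: cumulative explained variance vs. number of components
plt.plot(range(1, len(var1) + 1), var1, marker='o')
plt.axhline(75, color='r', linestyle='--', label='75% threshold')
plt.xlabel('Number of components')
plt.ylabel('Cumulative explained variance (%)')
plt.legend()
plt.show()
```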
The number of components is chosen as 6, since the cumulative explained variance exceeds 75% and each individual component explains more than 0.8 variance.
```
pc_final=PCA(n_components=6).fit(Cust_scaled)
pc_final.explained_variance_
reduced_cr=pc_final.transform(Cust_scaled)
dimensions = pd.DataFrame(reduced_cr)
dimensions
dimensions.columns = ["C1", "C2", "C3", "C4", "C5", "C6"]
dimensions.head()
```
#### Factor Loading Matrix
Loadings = Eigenvectors * sqrt(Eigenvalues)
Loadings are the covariances/correlations between the original variables and the unit-scaled components.
```
Loadings = pd.DataFrame((pc_final.components_.T * np.sqrt(pc_final.explained_variance_)).T,columns=Cust.columns).T
Loadings.to_csv("Loadings.csv")
```
### Clustering
```
#selected the list variables from PCA based on factor loading matrics
list_var = ['PURCHASES_TRX','INSTALLMENTS_PURCHASES','PURCHASES_INSTALLMENTS_FREQUENCY','MINIMUM_PAYMENTS','BALANCE','CREDIT_LIMIT','CASH_ADVANCE','PRC_FULL_PAYMENT','ONEOFF_PURCHASES_FREQUENCY']
Cust_scaled1=pd.DataFrame(Cust_scaled, columns=Cust.columns)
Cust_scaled1.head(5)
Cust_scaled2=Cust_scaled1[list_var]
Cust_scaled2.head(5)
```
## Segmentation
```
km_3=KMeans(n_clusters=3,random_state=123)
km_3.fit(Cust_scaled2)
print(km_3.labels_)
km_3.cluster_centers_
km_4=KMeans(n_clusters=4,random_state=123).fit(Cust_scaled2)
#km_5.labels_a
km_5=KMeans(n_clusters=5,random_state=123).fit(Cust_scaled2)
#km_5.labels_
km_6=KMeans(n_clusters=6,random_state=123).fit(Cust_scaled2)
#km_6.labels_
km_7=KMeans(n_clusters=7,random_state=123).fit(Cust_scaled2)
#km_7.labels_
km_8=KMeans(n_clusters=8,random_state=123).fit(Cust_scaled2)
#km_5.labels_
metrics.silhouette_score(Cust_scaled2, km_3.labels_)
# 5 clusters are better
# Conactenating labels found through Kmeans with data
# save the cluster labels and sort by cluster
Cust['cluster_3'] = km_3.labels_
Cust['cluster_4'] = km_4.labels_
Cust['cluster_5'] = km_5.labels_
Cust['cluster_6'] = km_6.labels_
Cust['cluster_7'] = km_7.labels_
Cust['cluster_8'] = km_8.labels_
Cust.head()
```
### Choosing the number of clusters using the Silhouette Coefficient
```
# calculate SC for K=3
from sklearn import metrics
metrics.silhouette_score(Cust_scaled2, km_3.labels_)
# calculate SC for K=3 through K=12
k_range = range(3, 13)
scores = []
for k in k_range:
km = KMeans(n_clusters=k, random_state=123)
km.fit(Cust_scaled2)
scores.append(metrics.silhouette_score(Cust_scaled2, km.labels_))
scores
# plot the results
plt.plot(k_range, scores)
plt.xlabel('Number of clusters')
plt.ylabel('Silhouette Coefficient')
plt.grid(True)
```
### Segment Distribution
```
Cust.cluster_3.value_counts()*100/sum(Cust.cluster_3.value_counts())
pd.Series.sort_index(Cust.cluster_3.value_counts())
```
### Profiling
```
size=pd.concat([pd.Series(Cust.cluster_3.size), pd.Series.sort_index(Cust.cluster_3.value_counts()), pd.Series.sort_index(Cust.cluster_4.value_counts()),
pd.Series.sort_index(Cust.cluster_5.value_counts()), pd.Series.sort_index(Cust.cluster_6.value_counts()),
pd.Series.sort_index(Cust.cluster_7.value_counts()), pd.Series.sort_index(Cust.cluster_8.value_counts())])
size
Seg_size=pd.DataFrame(size, columns=['Seg_size'])
Seg_Pct = pd.DataFrame(size/Cust.cluster_3.size, columns=['Seg_Pct'])
Seg_size.T
Seg_Pct.T
pd.concat([Seg_size.T, Seg_Pct.T], axis=0)
Cust.head()
# Mean value gives a good indication of the distribution of data. So we are finding mean value for each variable for each cluster
Profling_output = pd.concat([Cust.apply(lambda x: x.mean()).T, Cust.groupby('cluster_3').apply(lambda x: x.mean()).T, Cust.groupby('cluster_4').apply(lambda x: x.mean()).T,
Cust.groupby('cluster_5').apply(lambda x: x.mean()).T, Cust.groupby('cluster_6').apply(lambda x: x.mean()).T,
Cust.groupby('cluster_7').apply(lambda x: x.mean()).T, Cust.groupby('cluster_8').apply(lambda x: x.mean()).T], axis=1)
Profling_output
Profling_output_final=pd.concat([Seg_size.T, Seg_Pct.T, Profling_output], axis=0)
Profling_output_final
#Profling_output_final.columns = ['Seg_' + str(i) for i in Profling_output_final.columns]
Profling_output_final.columns = ['Overall', 'KM3_1', 'KM3_2', 'KM3_3',
'KM4_1', 'KM4_2', 'KM4_3', 'KM4_4',
'KM5_1', 'KM5_2', 'KM5_3', 'KM5_4', 'KM5_5',
'KM6_1', 'KM6_2', 'KM6_3', 'KM6_4', 'KM6_5','KM6_6',
'KM7_1', 'KM7_2', 'KM7_3', 'KM7_4', 'KM7_5','KM7_6','KM7_7',
'KM8_1', 'KM8_2', 'KM8_3', 'KM8_4', 'KM8_5','KM8_6','KM8_7','KM8_8']
Profling_output_final
Profling_output_final.to_csv('Profiling_output.csv')
```
### Check profiling Output for more details.
Submitted By, Pranjal Saxena <a>https://www.linkedin.com/in/pranjalai/ </a> <br>
[email protected]
| github_jupyter |
##### Copyright 2020 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Writing a training loop from scratch
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/guide/keras/writing_a_training_loop_from_scratch"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/keras-team/keras-io/blob/master/tf/writing_a_training_loop_from_scratch.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/keras-team/keras-io/blob/master/guides/writing_a_training_loop_from_scratch.py"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/keras-io/tf/writing_a_training_loop_from_scratch.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
## Setup
```
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
```
## Introduction
Keras provides default training and evaluation loops, `fit()` and `evaluate()`.
Their usage is covered in the guide
[Training & evaluation with the built-in methods](https://www.tensorflow.org/guide/keras/train_and_evaluate/).
If you want to customize the learning algorithm of your model while still leveraging
the convenience of `fit()`
(for instance, to train a GAN using `fit()`), you can subclass the `Model` class and
implement your own `train_step()` method, which
is called repeatedly during `fit()`. This is covered in the guide
[Customizing what happens in `fit()`](https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit/).
Now, if you want very low-level control over training & evaluation, you should write
your own training & evaluation loops from scratch. This is what this guide is about.
## Using the `GradientTape`: a first end-to-end example
Calling a model inside a `GradientTape` scope enables you to retrieve the gradients of
the trainable weights of the layer with respect to a loss value. Using an optimizer
instance, you can use these gradients to update these variables (which you can
retrieve using `model.trainable_weights`).
Let's consider a simple MNIST model:
```
inputs = keras.Input(shape=(784,), name="digits")
x1 = layers.Dense(64, activation="relu")(inputs)
x2 = layers.Dense(64, activation="relu")(x1)
outputs = layers.Dense(10, name="predictions")(x2)
model = keras.Model(inputs=inputs, outputs=outputs)
```
Let's train it using mini-batch gradient with a custom training loop.
First, we're going to need an optimizer, a loss function, and a dataset:
```
# Instantiate an optimizer.
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# Prepare the training dataset.
batch_size = 64
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (-1, 784))
x_test = np.reshape(x_test, (-1, 784))
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)
```
Here's our training loop:
- We open a `for` loop that iterates over epochs
- For each epoch, we open a `for` loop that iterates over the dataset, in batches
- For each batch, we open a `GradientTape()` scope
- Inside this scope, we call the model (forward pass) and compute the loss
- Outside the scope, we retrieve the gradients of the weights
of the model with regard to the loss
- Finally, we use the optimizer to update the weights of the model based on the
gradients
```
epochs = 2
for epoch in range(epochs):
print("\nStart of epoch %d" % (epoch,))
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
# Open a GradientTape to record the operations run
# during the forward pass, which enables auto-differentiation.
with tf.GradientTape() as tape:
# Run the forward pass of the layer.
# The operations that the layer applies
# to its inputs are going to be recorded
# on the GradientTape.
logits = model(x_batch_train, training=True) # Logits for this minibatch
# Compute the loss value for this minibatch.
loss_value = loss_fn(y_batch_train, logits)
# Use the gradient tape to automatically retrieve
# the gradients of the trainable variables with respect to the loss.
grads = tape.gradient(loss_value, model.trainable_weights)
# Run one step of gradient descent by updating
# the value of the variables to minimize the loss.
optimizer.apply_gradients(zip(grads, model.trainable_weights))
# Log every 200 batches.
if step % 200 == 0:
print(
"Training loss (for one batch) at step %d: %.4f"
% (step, float(loss_value))
)
print("Seen so far: %s samples" % ((step + 1) * 64))
```
## Low-level handling of metrics
Let's add metrics monitoring to this basic loop.
You can readily reuse the built-in metrics (or custom ones you wrote) in such training
loops written from scratch. Here's the flow:
- Instantiate the metric at the start of the loop
- Call `metric.update_state()` after each batch
- Call `metric.result()` when you need to display the current value of the metric
- Call `metric.reset_states()` when you need to clear the state of the metric
(typically at the end of an epoch)
Let's use this knowledge to compute `SparseCategoricalAccuracy` on validation data at
the end of each epoch:
```
# Get model
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# Prepare the metrics.
train_acc_metric = keras.metrics.SparseCategoricalAccuracy()
val_acc_metric = keras.metrics.SparseCategoricalAccuracy()
# Prepare the training dataset.
batch_size = 64
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)
# Prepare the validation dataset.
# Reserve 10,000 samples for validation.
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)
```
Here's our training & evaluation loop:
```
import time
epochs = 2
for epoch in range(epochs):
print("\nStart of epoch %d" % (epoch,))
start_time = time.time()
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
with tf.GradientTape() as tape:
logits = model(x_batch_train, training=True)
loss_value = loss_fn(y_batch_train, logits)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
# Update training metric.
train_acc_metric.update_state(y_batch_train, logits)
# Log every 200 batches.
if step % 200 == 0:
print(
"Training loss (for one batch) at step %d: %.4f"
% (step, float(loss_value))
)
print("Seen so far: %d samples" % ((step + 1) * 64))
# Display metrics at the end of each epoch.
train_acc = train_acc_metric.result()
print("Training acc over epoch: %.4f" % (float(train_acc),))
# Reset training metrics at the end of each epoch
train_acc_metric.reset_states()
# Run a validation loop at the end of each epoch.
for x_batch_val, y_batch_val in val_dataset:
val_logits = model(x_batch_val, training=False)
# Update val metrics
val_acc_metric.update_state(y_batch_val, val_logits)
val_acc = val_acc_metric.result()
val_acc_metric.reset_states()
print("Validation acc: %.4f" % (float(val_acc),))
print("Time taken: %.2fs" % (time.time() - start_time))
```
## Speeding-up your training step with `tf.function`
The default runtime in TensorFlow 2.0 is
[eager execution](https://www.tensorflow.org/guide/eager). As such, our training loop
above executes eagerly.
This is great for debugging, but graph compilation has a definite performance
advantage. Describing your computation as a static graph enables the framework
to apply global performance optimizations. This is impossible when
the framework is constrained to greedily execute one operation after another,
with no knowledge of what comes next.
You can compile into a static graph any function that takes tensors as input.
Just add a `@tf.function` decorator on it, like this:
```
@tf.function
def train_step(x, y):
with tf.GradientTape() as tape:
logits = model(x, training=True)
loss_value = loss_fn(y, logits)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
train_acc_metric.update_state(y, logits)
return loss_value
```
Let's do the same with the evaluation step:
```
@tf.function
def test_step(x, y):
val_logits = model(x, training=False)
val_acc_metric.update_state(y, val_logits)
```
Now, let's re-run our training loop with this compiled training step:
```
import time
epochs = 2
for epoch in range(epochs):
print("\nStart of epoch %d" % (epoch,))
start_time = time.time()
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
loss_value = train_step(x_batch_train, y_batch_train)
# Log every 200 batches.
if step % 200 == 0:
print(
"Training loss (for one batch) at step %d: %.4f"
% (step, float(loss_value))
)
print("Seen so far: %d samples" % ((step + 1) * 64))
# Display metrics at the end of each epoch.
train_acc = train_acc_metric.result()
print("Training acc over epoch: %.4f" % (float(train_acc),))
# Reset training metrics at the end of each epoch
train_acc_metric.reset_states()
# Run a validation loop at the end of each epoch.
for x_batch_val, y_batch_val in val_dataset:
test_step(x_batch_val, y_batch_val)
val_acc = val_acc_metric.result()
val_acc_metric.reset_states()
print("Validation acc: %.4f" % (float(val_acc),))
print("Time taken: %.2fs" % (time.time() - start_time))
```
Much faster, isn't it?
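To get a rough sense of the speed-up, here is a small optional timing sketch: it runs the same batch through an undecorated eager copy of the step and through the compiled `train_step` defined above (the exact numbers depend on your hardware).
```
import time

# Eager (undecorated) copy of the same training step, for comparison only.
def eager_train_step(x, y):
    with tf.GradientTape() as tape:
        logits = model(x, training=True)
        loss_value = loss_fn(y, logits)
    grads = tape.gradient(loss_value, model.trainable_weights)
    optimizer.apply_gradients(zip(grads, model.trainable_weights))
    return loss_value

x_batch, y_batch = next(iter(train_dataset))

# Call the compiled step once first so tracing time is not counted.
train_step(x_batch, y_batch)

start = time.time()
for _ in range(100):
    eager_train_step(x_batch, y_batch)
print("Eager:    %.3fs for 100 steps" % (time.time() - start))

start = time.time()
for _ in range(100):
    train_step(x_batch, y_batch)
print("Compiled: %.3fs for 100 steps" % (time.time() - start))
```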
## Low-level handling of losses tracked by the model
Layers & models recursively track any losses created during the forward pass
by layers that call `self.add_loss(value)`. The resulting list of scalar loss
values is available via the property `model.losses`
at the end of the forward pass.
If you want to use these loss components, you should sum them
and add them to the main loss in your training step.
Consider this layer, which creates an activity regularization loss:
```
class ActivityRegularizationLayer(layers.Layer):
def call(self, inputs):
self.add_loss(1e-2 * tf.reduce_sum(inputs))
return inputs
```
Let's build a really simple model that uses it:
```
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu")(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
```
Here's what our training step should look like now:
```
@tf.function
def train_step(x, y):
with tf.GradientTape() as tape:
logits = model(x, training=True)
loss_value = loss_fn(y, logits)
# Add any extra losses created during the forward pass.
loss_value += sum(model.losses)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
train_acc_metric.update_state(y, logits)
return loss_value
```
## Summary
Now you know everything there is to know about using built-in training loops and
writing your own from scratch.
To conclude, here's a simple end-to-end example that ties together everything
you've learned in this guide: a DCGAN trained on MNIST digits.
## End-to-end example: a GAN training loop from scratch
You may be familiar with Generative Adversarial Networks (GANs). GANs can generate new
images that look almost real, by learning the latent distribution of a training
dataset of images (the "latent space" of the images).
A GAN is made of two parts: a "generator" model that maps points in the latent
space to points in image space, and a "discriminator" model, a classifier
that can tell the difference between real images (from the training dataset)
and fake images (the output of the generator network).
A GAN training loop looks like this:
1) Train the discriminator.
- Sample a batch of random points in the latent space.
- Turn the points into fake images via the "generator" model.
- Get a batch of real images and combine them with the generated images.
- Train the "discriminator" model to classify generated vs. real images.
2) Train the generator.
- Sample random points in the latent space.
- Turn the points into fake images via the "generator" network.
- Get a batch of real images and combine them with the generated images.
- Train the "generator" model to "fool" the discriminator and classify the fake images
as real.
For a much more detailed overview of how GANs work, see
[Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python).
Let's implement this training loop. First, create the discriminator meant to classify
fake vs real digits:
```
discriminator = keras.Sequential(
[
keras.Input(shape=(28, 28, 1)),
layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.GlobalMaxPooling2D(),
layers.Dense(1),
],
name="discriminator",
)
discriminator.summary()
```
Then let's create a generator network
that turns latent vectors into outputs of shape `(28, 28, 1)` (representing
MNIST digits):
```
latent_dim = 128
generator = keras.Sequential(
[
keras.Input(shape=(latent_dim,)),
# Generate 7 * 7 * 128 coefficients and reshape them into a 7x7x128 map
layers.Dense(7 * 7 * 128),
layers.LeakyReLU(alpha=0.2),
layers.Reshape((7, 7, 128)),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"),
],
name="generator",
)
```
Here's the key bit: the training loop. As you can see it is quite straightforward. The
training step function only takes 17 lines.
```
# Instantiate one optimizer for the discriminator and another for the generator.
d_optimizer = keras.optimizers.Adam(learning_rate=0.0003)
g_optimizer = keras.optimizers.Adam(learning_rate=0.0004)
# Instantiate a loss function.
loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
@tf.function
def train_step(real_images):
# Sample random points in the latent space
random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
# Decode them to fake images
generated_images = generator(random_latent_vectors)
# Combine them with real images
combined_images = tf.concat([generated_images, real_images], axis=0)
# Assemble labels discriminating real from fake images
labels = tf.concat(
[tf.ones((batch_size, 1)), tf.zeros((real_images.shape[0], 1))], axis=0
)
# Add random noise to the labels - important trick!
labels += 0.05 * tf.random.uniform(labels.shape)
# Train the discriminator
with tf.GradientTape() as tape:
predictions = discriminator(combined_images)
d_loss = loss_fn(labels, predictions)
grads = tape.gradient(d_loss, discriminator.trainable_weights)
d_optimizer.apply_gradients(zip(grads, discriminator.trainable_weights))
# Sample random points in the latent space
random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
# Assemble labels that say "all real images"
misleading_labels = tf.zeros((batch_size, 1))
# Train the generator (note that we should *not* update the weights
# of the discriminator)!
with tf.GradientTape() as tape:
predictions = discriminator(generator(random_latent_vectors))
g_loss = loss_fn(misleading_labels, predictions)
grads = tape.gradient(g_loss, generator.trainable_weights)
g_optimizer.apply_gradients(zip(grads, generator.trainable_weights))
return d_loss, g_loss, generated_images
```
Let's train our GAN, by repeatedly calling `train_step` on batches of images.
Since our discriminator and generator are convnets, you're going to want to
run this code on a GPU.
```
import os
# Prepare the dataset. We use both the training & test MNIST digits.
batch_size = 64
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
all_digits = np.concatenate([x_train, x_test])
all_digits = all_digits.astype("float32") / 255.0
all_digits = np.reshape(all_digits, (-1, 28, 28, 1))
dataset = tf.data.Dataset.from_tensor_slices(all_digits)
dataset = dataset.shuffle(buffer_size=1024).batch(batch_size)
epochs = 1 # In practice you need at least 20 epochs to generate nice digits.
save_dir = "./"
for epoch in range(epochs):
print("\nStart epoch", epoch)
for step, real_images in enumerate(dataset):
# Train the discriminator & generator on one batch of real images.
d_loss, g_loss, generated_images = train_step(real_images)
# Logging.
if step % 200 == 0:
# Print metrics
print("discriminator loss at step %d: %.2f" % (step, d_loss))
print("adversarial loss at step %d: %.2f" % (step, g_loss))
# Save one generated image
img = tf.keras.preprocessing.image.array_to_img(
generated_images[0] * 255.0, scale=False
)
img.save(os.path.join(save_dir, "generated_img" + str(step) + ".png"))
# To limit execution time we stop after 10 steps.
# Remove the lines below to actually train the model!
if step > 10:
break
```
That's it! You'll get nice-looking fake MNIST digits after just ~30s of training on the
Colab GPU.
| github_jupyter |
# Week 3: Improve MNIST with Convolutions
In the videos you looked at how you would improve Fashion MNIST using Convolutions. For this exercise see if you can improve MNIST to 99.5% accuracy or more by adding only a single convolutional layer and a single MaxPooling 2D layer to the model from the assignment of the previous week.
You should stop training once the accuracy goes above this amount. It should happen in less than 10 epochs, so it's ok to hard code the number of epochs for training, but your training must end once it hits the above metric. If it doesn't, then you'll need to redesign your callback.
When 99.5% accuracy has been hit, you should print out the string "Reached 99.5% accuracy so cancelling training!"
```
import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
```
Begin by loading the data. A couple of things to notice:
- The file `mnist.npz` is already included in the current workspace under the `data` directory. By default, `load_data` from Keras accepts a path relative to `~/.keras/datasets`, but in this case it is stored somewhere else; as a result, you need to specify the full path.
- `load_data` returns the train and test sets in the form of the tuples `(x_train, y_train), (x_test, y_test)` but in this exercise you will be needing only the train set so you can ignore the second tuple.
```
# Load the data
# Get current working directory
current_dir = os.getcwd()
# Append data/mnist.npz to the previous path to get the full path
data_path = os.path.join(current_dir, "data/mnist.npz")
# Get only training set
(training_images, training_labels), _ = tf.keras.datasets.mnist.load_data(path=data_path)
```
One important step when dealing with image data is to preprocess the data. During the preprocess step you can apply transformations to the dataset that will be fed into your convolutional neural network.
Here you will apply two transformations to the data:
- Reshape the data so that it has an extra dimension. The reason for this
is that commonly you will use 3-dimensional arrays (without counting the batch dimension) to represent image data. The third dimension represents the color using RGB values. Since this data is in black and white format, the third dimension doesn't really add any information for the classification process, but it is good practice regardless.
- Normalize the pixel values so that these are values between 0 and 1. You can achieve this by dividing every value in the array by the maximum.
Remember that these tensors are of type `numpy.ndarray` so you can use functions like [reshape](https://numpy.org/doc/stable/reference/generated/numpy.reshape.html) or [divide](https://numpy.org/doc/stable/reference/generated/numpy.divide.html) to complete the `reshape_and_normalize` function below:
```
# GRADED FUNCTION: reshape_and_normalize
def reshape_and_normalize(images):
### START CODE HERE
# Reshape the images to add an extra dimension
images = np.reshape(images, images.shape + (1,))
# Normalize pixel values
images = np.divide(images,255)
### END CODE HERE
return images
```
Test your function with the next cell:
```
# Reload the images in case you run this cell multiple times
(training_images, _), _ = tf.keras.datasets.mnist.load_data(path=data_path)
# Apply your function
training_images = reshape_and_normalize(training_images)
print(f"Maximum pixel value after normalization: {np.max(training_images)}\n")
print(f"Shape of training set after reshaping: {training_images.shape}\n")
print(f"Shape of one image after reshaping: {training_images[0].shape}")
```
**Expected Output:**
```
Maximum pixel value after normalization: 1.0
Shape of training set after reshaping: (60000, 28, 28, 1)
Shape of one image after reshaping: (28, 28, 1)
```
Now complete the callback that will ensure that training will stop after an accuracy of 99.5% is reached:
```
# GRADED CLASS: myCallback
### START CODE HERE
# Remember to inherit from the correct class
class myCallback(tf.keras.callbacks.Callback):
# Define the method that checks the accuracy at the end of each epoch
def on_epoch_end(self, epoch, logs={}):
# check accuracy
if logs.get('accuracy') >= 0.995:
print('\nReached 99.5% accuracy so cancelling training!')
self.model.stop_training = True
### END CODE HERE
```
Finally, complete the `convolutional_model` function below. This function should return your convolutional neural network:
```
# GRADED FUNCTION: convolutional_model
def convolutional_model():
### START CODE HERE
# Define the model, it should have 5 layers:
# - A Conv2D layer with 32 filters, a kernel_size of 3x3, ReLU activation function
# and an input shape that matches that of every image in the training set
# - A MaxPooling2D layer with a pool_size of 2x2
# - A Flatten layer with no arguments
# - A Dense layer with 128 units and ReLU activation function
# - A Dense layer with 10 units and softmax activation function
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, (3,3), activation = 'relu', input_shape = (28, 28, 1)),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation = 'relu'),
tf.keras.layers.Dense(10, activation = 'softmax')
])
### END CODE HERE
# Compile the model
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
# Save your untrained model
model = convolutional_model()
# Instantiate the callback class
callbacks = myCallback()
# Train your model (this can take up to 5 minutes)
history = model.fit(training_images, training_labels, epochs=10, callbacks=[callbacks])
```
If you see the message that you defined in your callback printed out after less than 10 epochs it means your callback worked as expected. You can also double check by running the following cell:
```
print(f"Your model was trained for {len(history.epoch)} epochs")
```
**Congratulations on finishing this week's assignment!**
You have successfully implemented a CNN to assist you in the image classification task. Nice job!
**Keep it up!**
| github_jupyter |
## Data and Training
The **augmented** cough audio dataset of the [Project Coswara](https://coswara.iisc.ac.in/about) was used to train the deep CNN model.
The preprocessing steps and CNN architecture are shown below. The training code is concealed on GitHub to protect the exact hyperparameters and maintain the performance integrity of the model.
<img src = "../assets/ml-pipeline.png" alt="../assets/ml-pipeline.png" width="800"/>
## Model Deployment on IBM Watson Machine Learning
Below are the contents of an IBM Watson Studio notebook for deploying our trained ML model on IBM Watson Machine Learning.
Outputs, Keys, Endpoints and URLs are removed (replaced with <>) to maintain privacy.
### Import model
```
import ibm_boto3
from ibm_botocore.client import Config
# @hidden_cell
# The following code contains the credentials for a file in your IBM Cloud Object Storage.
# You might want to remove those credentials before you share your notebook.
credentials_2 = {
'IAM_SERVICE_ID': <>,
'IBM_API_KEY_ID': <>,
'ENDPOINT': <>,
'IBM_AUTH_ENDPOINT': <>,
'BUCKET': <>,
'FILE': 'cough-it-model.tgz'
}
cos = ibm_boto3.client(service_name='s3',
ibm_api_key_id=credentials_2['IBM_API_KEY_ID'],
ibm_auth_endpoint=credentials_2['IBM_AUTH_ENDPOINT'],
ibm_service_instance_id=credentials_2['IAM_SERVICE_ID'],
config=Config(signature_version='oauth'),
endpoint_url=credentials_2['ENDPOINT'])
cos.download_file(Bucket=credentials_2['BUCKET'], Key='cough-it-model.h5.tgz', Filename='cough-it-model.h5.tgz')
model_path = 'cough-it-model.h5.tgz'
```
### Set up Watson Machine Learning Client and Deployment space
```
from ibm_watson_machine_learning import APIClient
wml_credentials = {
"apikey" : <>,
"url" : <>
}
client = APIClient( wml_credentials )
space_guid = <>
client.set.default_space(space_guid)
```
### Store the model
```
software_spec_uid = client.software_specifications.get_id_by_name("default_py3.8")
metadata = {
client.repository.ModelMetaNames.NAME: "cough-it model",
client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: software_spec_uid,
client.repository.ModelMetaNames.TYPE: "tensorflow_2.4"
}
published_model = client.repository.store_model( model= model_path, meta_props=metadata )
import json
published_model_uid = client.repository.get_model_uid(published_model)
model_details = client.repository.get_details(published_model_uid)
print(json.dumps(model_details, indent=2))
```
### Create a deployment
```
dep_metadata = {
client.deployments.ConfigurationMetaNames.NAME: "Deployment of external Keras model",
client.deployments.ConfigurationMetaNames.ONLINE: {}
}
created_deployment = client.deployments.create(published_model_uid, meta_props=dep_metadata)
deployment_uid = client.deployments.get_uid(created_deployment)
client.deployments.get_details(deployment_uid)
```
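### Score the deployment
Once the deployment is online, it can be scored through the same client. The sketch below is only illustrative: the input array is a placeholder and must be replaced with a cough recording preprocessed into the spectrogram shape the model was trained on (that shape is not shown here).
```
import numpy as np

# Placeholder input: substitute a preprocessed cough spectrogram with the
# shape used during training (the shape below is only an example).
sample_input = np.zeros((1, 128, 128, 1)).tolist()

scoring_payload = {
    client.deployments.ScoringMetaNames.INPUT_DATA: [{
        "values": sample_input
    }]
}
predictions = client.deployments.score(deployment_uid, scoring_payload)
print(predictions)
```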
| github_jupyter |
# Joy Ride - Part 3: Parallel Parking
In this section you will write a function that implements the correct sequence of steps required to parallel park a vehicle.
NOTE: for this segment the vehicle's maximum speed has been set to just over 4 mph. This should make parking a little easier.

If you have never heard of WASD keys, please check out this [link](https://en.wikipedia.org/wiki/Arrow_keys#WASD_keys).
## Instructions to get started
1. Run the `SETUP CELL` below this one by pressing `Ctrl + Enter`.
1. Click the button that says "Load Car Simulator". The simulator will appear below the button.
1. Run the cell below the simulator, marked `CODE CELL` (hit `Ctrl + Enter`).
1. Try to drive the car using WASD keys. You might notice a problem...
1. Press the **Reset** button in the simulator and then modify the code in the `CODE CELL` as per the instructions in TODO comments.
1. When you think you've fixed the problem, run the code cell again.
**NOTE** - Depending on your computer, it may take a few minutes for the simulator to load! Please be patient.
### Instructions to Reload the Simulator
Once the simulator is loaded, the `SETUP CELL` cannot be rerun, or it will prevent the simulator from appearing. If something happens to the simulator, you can do the following:
- Go to Jupyter’s menu: Kernel --> Restart and Clear Output
- Reload the page (Cmd-R)
- Run the first cell again
- Click the Green `Load Car Simulator` button again
```
# SETUP CELL
%%HTML
<link rel="stylesheet" type="text/css" href="buttonStyle.css">
<button id="launcher">Load Car Simulator </button>
<button id="restart">Restart Connection</button>
<script src="setupLauncher.js"></script><div id="simulator_frame"></sim>
<script src="kernelRestart.js"></script>
# CODE CELL
# Before/After running any code changes make sure to click the button "Restart Connection" above first.
# Also make sure to click Reset in the simulator to refresh the connection.
# You need to wait for the Kernel Ready message.
car_parameters = {"throttle": 0, "steer": 0, "brake": 0}
def control(pos_x, pos_y, time, velocity):
""" Controls the simulated car"""
global car_parameters
# The car will back up with a steering of 25 for 3 seconds
# then the car will back up with a steering of -25 until its y position is less than 32.5
# then the car will steer straight and brake
if time < 3:
car_parameters['throttle'] = -1
car_parameters['steer'] = 25
elif pos_y > 32.5:
car_parameters['throttle'] = -1
car_parameters['steer'] = -25
else:
car_parameters['steer'] = 0
car_parameters['brake'] = 1
return car_parameters
import src.simulate as sim
sim.run(control)
```
# Submitting this Project!
Your parallel park function is "correct" when:
1. Your car doesn't hit any other cars.
2. Your car stops completely inside of the right lane.
Once you've got it working, it's time to submit. Submit by pressing the `SUBMIT` button at the lower right corner of this page.
```
# CODE CELL
# Before/After running any code changes make sure to click the button "Restart Connection" above first.
# Also make sure to click Reset in the simulator to refresh the connection.
# You need to wait for the Kernel Ready message.
car_parameters = {"throttle": 0, "steer": 0, "brake": 0}
def control(pos_x, pos_y, time, velocity):
""" Controls the simulated car"""
global car_parameters
# The car will back up with a steering of 25 for 3 seconds
# then the car will back up with a steering of -25 until its y position is less than 32.5
# then the car will steer straight and brake
if time < 3:
car_parameters['throttle'] = -1
car_parameters['steer'] = 25
elif pos_y > 32.5:
car_parameters['throttle'] = -1
car_parameters['steer'] = -25
else:
car_parameters['steer'] = 0
car_parameters['brake'] = 1
return car_parameters
import src.simulate as sim
sim.run(control)
```
| github_jupyter |
# The Basics of NumPy Arrays
<!--NAVIGATION-->
### **Python/NumPy Practice Session S4: Save a copy to your local drive and work**
Data manipulation in Python is nearly synonymous with NumPy array manipulation: even newer tools like Pandas ([Chapter 3](03.00-Introduction-to-Pandas.ipynb)) are built around the NumPy array.
This section will present several examples of using NumPy array manipulation to access data and subarrays, and to split, reshape, and join the arrays.
While the types of operations shown here may seem a bit dry and pedantic, they comprise the building blocks of many other examples used throughout the book.
Get to know them well!
We'll cover a few categories of basic array manipulations here:
- *Attributes of arrays*: Determining the size, shape, memory consumption, and data types of arrays
- *Indexing of arrays*: Getting and setting the value of individual array elements
- *Slicing of arrays*: Getting and setting smaller subarrays within a larger array
- *Reshaping of arrays*: Changing the shape of a given array
- *Joining and splitting of arrays*: Combining multiple arrays into one, and splitting one array into many
## NumPy Array Attributes
First let's discuss some useful array attributes.
We'll start by defining three random arrays, a one-dimensional, two-dimensional, and three-dimensional array.
We'll use NumPy's random number generator, which we will *seed* with a set value in order to ensure that the same random arrays are generated each time this code is run:
```
import numpy as np
np.random.seed(0) # seed for reproducibility
x1 = np.random.randint(10, size=6) # One-dimensional array
x2 = np.random.randint(10, size=(3, 4)) # Two-dimensional array
x3 = np.random.randint(10, size=(3, 4, 5)) # Three-dimensional array
```
Each array has attributes ``ndim`` (the number of dimensions), ``shape`` (the size of each dimension), and ``size`` (the total size of the array):
```
print("x3 ndim: ", x3.ndim)
print("x3 shape:", x3.shape)
print("x3 size: ", x3.size)
```
Another useful attribute is the ``dtype``, the data type of the array (which we discussed previously in [Understanding Data Types in Python](02.01-Understanding-Data-Types.ipynb)):
```
print("dtype:", x3.dtype)
```
Other attributes include ``itemsize``, which lists the size (in bytes) of each array element, and ``nbytes``, which lists the total size (in bytes) of the array:
```
print("itemsize:", x3.itemsize, "bytes")
print("nbytes:", x3.nbytes, "bytes")
```
In general, we expect that ``nbytes`` is equal to ``itemsize`` times ``size``.
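We can verify this relationship directly for ``x3``:
```
print(x3.nbytes == x3.itemsize * x3.size)  # True
```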
## Array Indexing: Accessing Single Elements
If you are familiar with Python's standard list indexing, indexing in NumPy will feel quite familiar.
In a one-dimensional array, the $i^{th}$ value (counting from zero) can be accessed by specifying the desired index in square brackets, just as with Python lists:
```
x1
x1[0]
x1[4]
```
To index from the end of the array, you can use negative indices:
```
x1[-1]
x1[-2]
```
In a multi-dimensional array, items can be accessed using a comma-separated tuple of indices:
```
x2
x2[0, 0]
x2[2, 0]
x2[2, -1]
```
Values can also be modified using any of the above index notation:
```
x2[0, 0] = 12
x2
```
Keep in mind that, unlike Python lists, NumPy arrays have a fixed type.
This means, for example, that if you attempt to insert a floating-point value to an integer array, the value will be silently truncated. Don't be caught unaware by this behavior!
```
x1[0] = 3.14159 # this will be truncated!
x1
```
## Array Slicing: Accessing Subarrays
Just as we can use square brackets to access individual array elements, we can also use them to access subarrays with the *slice* notation, marked by the colon (``:``) character.
The NumPy slicing syntax follows that of the standard Python list; to access a slice of an array ``x``, use this:
``` python
x[start:stop:step]
```
If any of these are unspecified, they default to the values ``start=0``, ``stop=``*``size of dimension``*, ``step=1``.
We'll take a look at accessing sub-arrays in one dimension and in multiple dimensions.
### One-dimensional subarrays
```
x = np.arange(10)
x
x[:5] # first five elements
x[5:] # elements after index 5
x[4:7] # middle sub-array
x[::2] # every other element
x[1::2] # every other element, starting at index 1
```
A potentially confusing case is when the ``step`` value is negative.
In this case, the defaults for ``start`` and ``stop`` are swapped.
This becomes a convenient way to reverse an array:
```
x[::-1] # all elements, reversed
x[5::-2] # reversed every other from index 5
```
### Multi-dimensional subarrays
Multi-dimensional slices work in the same way, with multiple slices separated by commas.
For example:
```
x2
x2[:2, :3] # two rows, three columns
x2[:3, ::2] # all rows, every other column
```
Finally, subarray dimensions can even be reversed together:
```
x2[::-1, ::-1]
```
#### Accessing array rows and columns
One commonly needed routine is accessing of single rows or columns of an array.
This can be done by combining indexing and slicing, using an empty slice marked by a single colon (``:``):
```
print(x2[:, 0]) # first column of x2
print(x2[0, :]) # first row of x2
```
In the case of row access, the empty slice can be omitted for a more compact syntax:
```
print(x2[0]) # equivalent to x2[0, :]
```
### Subarrays as no-copy views
One important–and extremely useful–thing to know about array slices is that they return *views* rather than *copies* of the array data.
This is one area in which NumPy array slicing differs from Python list slicing: in lists, slices will be copies.
Consider our two-dimensional array from before:
```
print(x2)
```
Let's extract a $2 \times 2$ subarray from this:
```
x2_sub = x2[:2, :2]
print(x2_sub)
```
Now if we modify this subarray, we'll see that the original array is changed! Observe:
```
x2_sub[0, 0] = 99
print(x2_sub)
print(x2)
```
This default behavior is actually quite useful: it means that when we work with large datasets, we can access and process pieces of these datasets without the need to copy the underlying data buffer.
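If you ever need to check whether a slice is a view, ``np.shares_memory`` reports whether two arrays overlap in memory:
```
print(np.shares_memory(x2, x2_sub))  # True: the slice is a view into x2
```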
### Creating copies of arrays
Despite the nice features of array views, it is sometimes useful to instead explicitly copy the data within an array or a subarray. This can be most easily done with the ``copy()`` method:
```
x2_sub_copy = x2[:2, :2].copy()
print(x2_sub_copy)
```
If we now modify this subarray, the original array is not touched:
```
x2_sub_copy[0, 0] = 42
print(x2_sub_copy)
print(x2)
```
## Reshaping of Arrays
Another useful type of operation is reshaping of arrays.
The most flexible way of doing this is with the ``reshape`` method.
For example, if you want to put the numbers 1 through 9 in a $3 \times 3$ grid, you can do the following:
```
grid = np.arange(1, 10).reshape((3, 3))
print(grid)
```
Note that for this to work, the size of the initial array must match the size of the reshaped array.
Where possible, the ``reshape`` method will use a no-copy view of the initial array, but with non-contiguous memory buffers this is not always the case.
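Two quick checks of these points: a size mismatch raises an error, and a no-copy reshape keeps a reference to the original data in its ``base`` attribute:
```
# Reshaping 8 elements into a 3x3 grid fails because the total size must be preserved
try:
    np.arange(8).reshape((3, 3))
except ValueError as err:
    print(err)

# When reshape returns a view, the result's .base attribute points to the original array
print(grid.base is not None)
```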
Another common reshaping pattern is the conversion of a one-dimensional array into a two-dimensional row or column matrix.
This can be done with the ``reshape`` method, or more easily done by making use of the ``newaxis`` keyword within a slice operation:
```
x = np.array([1, 2, 3])
# row vector via reshape
x.reshape((1, 3))
# row vector via newaxis
x[np.newaxis, :]
# column vector via reshape
x.reshape((3, 1))
# column vector via newaxis
x[:, np.newaxis]
```
We will see this type of transformation often throughout the remainder of the book.
## Array Concatenation and Splitting
All of the preceding routines worked on single arrays. It's also possible to combine multiple arrays into one, and to conversely split a single array into multiple arrays. We'll take a look at those operations here.
### Concatenation of arrays
Concatenation, or joining of two arrays in NumPy, is primarily accomplished using the routines ``np.concatenate``, ``np.vstack``, and ``np.hstack``.
``np.concatenate`` takes a tuple or list of arrays as its first argument, as we can see here:
```
x = np.array([1, 2, 3])
y = np.array([3, 2, 1])
np.concatenate([x, y])
```
You can also concatenate more than two arrays at once:
```
z = [99, 99, 99]
print(np.concatenate([x, y, z]))
```
It can also be used for two-dimensional arrays:
```
grid = np.array([[1, 2, 3],
[4, 5, 6]])
# concatenate along the first axis
np.concatenate([grid, grid])
# concatenate along the second axis (zero-indexed)
np.concatenate([grid, grid], axis=1)
```
For working with arrays of mixed dimensions, it can be clearer to use the ``np.vstack`` (vertical stack) and ``np.hstack`` (horizontal stack) functions:
```
x = np.array([1, 2, 3])
grid = np.array([[9, 8, 7],
[6, 5, 4]])
# vertically stack the arrays
np.vstack([x, grid])
# horizontally stack the arrays
y = np.array([[99],
[99]])
np.hstack([grid, y])
```
Similarly, ``np.dstack`` will stack arrays along the third axis.
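For example, stacking two ``(2, 3)`` arrays with ``np.dstack`` produces a ``(2, 3, 2)`` result:
```
a = np.array([[1, 2, 3],
              [4, 5, 6]])
b = np.array([[7, 8, 9],
              [10, 11, 12]])
np.dstack([a, b]).shape
```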
### Splitting of arrays
The opposite of concatenation is splitting, which is implemented by the functions ``np.split``, ``np.hsplit``, and ``np.vsplit``. For each of these, we can pass a list of indices giving the split points:
```
x = [1, 2, 3, 99, 99, 3, 2, 1]
x1, x2, x3 = np.split(x, [3, 5])
print(x1, x2, x3)
```
Notice that *N* split-points lead to *N + 1* subarrays.
The related functions ``np.hsplit`` and ``np.vsplit`` are similar:
```
grid = np.arange(16).reshape((4, 4))
grid
upper, lower = np.vsplit(grid, [2])
print(upper)
print(lower)
left, right = np.hsplit(grid, [2])
print(left)
print(right)
```
Similarly, ``np.dsplit`` will split arrays along the third axis.
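For example, splitting a ``(2, 2, 4)`` array along its third axis:
```
arr3d = np.arange(16).reshape((2, 2, 4))
front, back = np.dsplit(arr3d, [2])
print(front.shape, back.shape)
```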
| github_jupyter |
# Scraping and Parsing: EAD XML Finding Aids from the Library of Congress
```
import os
from urllib.request import urlopen
from bs4 import BeautifulSoup
import subprocess
## Creating a directory called 'LOC_Metadata' and setting it as our current working directory
!mkdir /sharedfolder/LOC_Metadata
os.chdir('/sharedfolder/LOC_Metadata')
## To make this notebook self-contained, we'll download a list of XML finding aid files the 'right' way.
## (In practice I normally use the 'find-and-replace + grep + wget' approach we covered in class,
## because it takes some extra effort to remind myself how to parse the HTML page via BeautifulSoup.)
## We first load a page with links to finding aids in the 'recorded sound' collection.
finding_aid_list_url = 'http://findingaids.loc.gov/source/RS'
finding_aid_list_page = urlopen(finding_aid_list_url).read().decode('utf8') # Loading the page
print(finding_aid_list_page[:700]) # Printing the first 700 characters in the page we just loaded
## Now we'll parse the page's HTML using BeautifulSoup ...
soup = BeautifulSoup(finding_aid_list_page, 'lxml')
## ... and examine soup.find_all('a'), which returns a list of 'a' elements (i.e., HTML links).
print(len(soup.find_all('a'))) # Checking the number of links on the page
print() # Printing a blank line for readability
print(soup.find_all('a')[70]) # Printing element #70 in the list
## We can access the 'href' attribute of an element (i.e., the link URL) using 'href' in
## brackets, just like a dictionary.
soup.find_all('a')[70]['href']
## Now let's make a list of every link on the page.
all_links = []
for element in soup.find_all('a'): # Looping through all 'a' elements.
try: # Because some 'a' elements do not contain 'href' attributes,
all_links.append(element['href']) ## we can use a try/except statement to skip elements that
except: ## would otherwise raise an error.
pass
all_links[:15] # Outputting the first 15 links in the list
## We know that the URL for every XML file we're looking for ends in '.2', so we can
## use that fact to filter out irrelevant links.
xml_urls = []
for link in all_links:
if link[-2:] == '.2': # Checking whether the last two characters of a link are '.2'
xml_urls.append(link)
xml_urls # Outputting the full list of relevant XML URLs
## Downloading each XML file in our list of URLs
## We can use the subprocess module (which we imported above) to issue commands in the bash shell.
## In an interactive bash shell session we'd use spaces to separate arguments; instead, subprocess
## takes arguments in the form of a Python list.
## For each item in our list, the following issues a command with two arguments: 'wget' followed by the URL.
## It thus downloads each XML file to the current directory.
for url in xml_urls:
subprocess.call(['wget', url])
## Outputting a list of filenames in the current directory
## In Unix-like operating systems, './' always refers to the current directory.
os.listdir('./')
## Just in case there are other files in the current directory, we can use a
## list comprehension to create a list of filenames that end in '.2' and assign
## it to the variable 'xml_filenames'.
xml_filenames = [item for item in os.listdir('./') if item[-2:]=='.2']
xml_filenames
## Now let's choose an arbitrary XML file in our collection so we can figure out how to parse it.
xml_filename = xml_filenames[4] ## Selecting filename #4 in our list
xml_text = open(xml_filename).read() ## Reading the file and assigning its content to the variable 'xml_text'
print(xml_text[:700]) ## Printing the first 700 characters in the XML text we just loaded
## Parse the XML text from the previous cell using Beautiful Soup
soup = BeautifulSoup(xml_text, 'lxml')
## By looking at the XML text above, we can see that the 'ead' element is the root of our XML tree.
## Let's use a for loop to look at the names of elements one next level down in the tree.
for element in soup.ead:
print(element.name)
## In practice you'd usually just look through the XML file by eye, identify the elements
## you're looking for, and use soup.find_all('...') to extract them. For now, let's continue
## working down the XML tree with BeautifulSoup.
# You can find a glossary of EAD element names here:
# https://loc.gov/ead/EAD3taglib/index.html
## Since the 'eadheader' element is administrative metadata we don't care about, let's
## repeat the process for 'soup.ead.archdesc' ('archdesc' is 'archival description' in EAD parlance).
for element in soup.ead.archdesc:
if element.name != None: ## Filtering out 'None' elements, which in this case are irrelevant comments
print(element.name)
## By looking at the XML file in a text editor, I notice the 'did' element ('descriptive identification')
## contains the item-level information we're looking for. Let's run another for loop to look at the
## names of elements contained within each 'did' element.
for element in soup.ead.archdesc.did:
if element.name != None:
print(element.name)
## Note that 'soup.ead.archdesc.did' only refers to the first 'did' element in the XML document.
## OK, that's enough exploring. Let's use soup.find_all() to create a list of 'did' elements.
did_elements = soup.find_all('did')
print(len(did_elements)) ## Printing the number of 'did' elements in our list
print()
print(did_elements[4]) ## Printing item #4 in the the list
## Not every 'did' element contains the same fields; different objects are described differently.
## Try running this cell several times, plugging in other index numbers to compare the way
## different items' records are formatted.
print(did_elements[7])
## If you run the cell above several times with different index numbers, you'll notice that the
## first item in the list (index 0) refers to the entire box of records, while the others are
## individual folders or series of folders.
## To make things more complicated, some items are physically described using 'container' elements
## while others use 'extent' instead. Most appear to include 'unittitle' and 'unitdate'.
## Our goal is to create a CSV that contains a basic description of each 'unit', or 'did' element,
## in each XML finding aid. For the purposes of this exercise, let's include the following pieces
## of information for each unit, where available:
#### title of the source collection
#### unittitle
#### unitdate
#### container type
#### container number
#### extent
## Since each XML finding aid represents a single collection, we'll want to include a column that
## identifies which collection it comes from. By reading through the XML files, we see that each
## has a single element called 'titleproper' that describes the whole collection.
## Let's create a recipe to extract that text. Here's a first try:
collection_title = soup.find('titleproper').get_text()
collection_title
## That format is OK, but we should remove the tab and newline characters. Let's try again, using
## the replace() function to replace them with spaces.
collection_title = soup.find('titleproper').get_text().replace('\t', ' ').replace('\n', ' ')
collection_title
## We can add the strip() function to remove the space at the end of the string.
collection_title = soup.find('titleproper').get_text().replace('\t', ' ').replace('\n', ' ').strip()
collection_title
## We still have a series of spaces in a row in the middle of the string. We can use a 'while loop'
## to repeatedly replace any occurrence of ' ' (two spaces) with ' ' (one space).
collection_title = soup.find('titleproper').get_text().replace('\t', ' ').replace('\n', ' ').strip()
while ' ' in collection_title:
collection_title = collection_title.replace(' ', ' ')
collection_title
## Perfect. We'll extract the collection name whenever we open an XML finding aid and include it
## in each CSV row associated with that collection.
## Now on to 'unittitle'. Recall that we created a list of 'did' elements above, called 'did_elements'.
element = did_elements[4]
unittitle = element.find('unittitle').get_text()
unittitle
## Since those tabs and newlines are a recurring problem, we should define a function that
## removes them from any given text string.
def clean_text(text):
temp_text = text.replace('\t', ' ').replace('\n', ' ').strip()
while ' ' in temp_text:
temp_text = temp_text.replace(' ', ' ')
return temp_text
# Let's test our clean_text() function.
element = did_elements[4]
unittitle = element.find('unittitle').get_text()
unittitle = clean_text(unittitle)
unittitle
## Now let's try extracting the 'unittitle' field for each 'did' element in our list.
for element in did_elements:
unittitle = element.get_text().replace('\t', ' ').replace('\n', ' ').strip()
print(clean_text(unittitle))
print('-----------------') # Printing a divider between elements
## The first element in the list above contains more information than we need, but we can
## let that slide for this exercise.
## Next is 'unitdate'. We'll use our clean_text() function once again.
element = did_elements[4]
unitdate = element.find('unitdate').get_text()
unitdate = clean_text(unitdate)
unitdate
## Let's loop through the list of 'did' elements and see if our 'unitdate' recipe holds up.
for element in did_elements:
unitdate = element.find('unitdate').get_text()
print(clean_text(unitdate))
print('-----------------') # Printing a divider between elements
## Now on to container type and number. Let's examine a 'container' XML element.
element = did_elements[4]
element.find('container')
## Since the container type ('folder', in this case) is an attribute in the 'container' tag,
## we can extract it using bracket notation.
element = did_elements[4]
container_type = element.find('container')['type']
container_type
## The container number is specified between the opening and closing 'container' tags,
## so we can get it using get_text().
element = did_elements[4]
container_number = element.find('container').get_text()
container_number
## Next we'll try to get the container type and number for each 'did' element in our list ...
for element in did_elements:
container_type = element.find('container')['type']
print(container_type)
container_number = element.find('container').get_text()
print(container_number)
print('-----------------') # Printing a divider between elements
## ... and we get an error. The reason is that some 'did' elements don't include a 'container' field.
## Using try/except notation, whenever we get an error because a container element isn't found,
## we can revert to '' (an empty string) instead.
for element in did_elements:
try:
container_type = element.find('container')['type']
except:
container_type = ''
print(container_type)
try:
container_number = element.find('container').get_text()
except:
container_number = ''
print(container_number)
print('-----------------') # Printing a divider between elements
## The last field we'll extract is 'extent', which is only included in a handful of 'did' elements.
element = did_elements[3]
extent = element.find('extent').get_text()
extent
## Let's extract 'extent' from each element in our list of 'did' elements (for those that happen to include it).
for element in did_elements:
try:
extent = element.find('extent').get_text()
except:
extent = ''
print(extent)
print('-----------------') # Printing a divider between elements
## Let's put it all together and view our chosen fields for a single 'did' element.
## We will combine our fields in a list to create a 'row' for our future CSV file.
element = did_elements[6]
# unittitle
try: # Added try/except statements for 'unittitle' and 'unitdate' just to be safe
unittitle = clean_text(element.find('unittitle').get_text())
except:
unittitle = ''
# unitdate
try:
unitdate = clean_text(element.find('unitdate').get_text())
except:
unitdate = ''
# container type and number
try:
container_type = element.find('container')['type']
except:
container_type = ''
try:
container_number = element.find('container').get_text()
except:
container_number = ''
# extent
try:
extent = element.find('extent').get_text()
except:
extent = ''
row = [unittitle, unitdate, container_type, container_number, extent]
print(row)
## Let's take a step back and generalize, so that we can extract metadata for each
## 'did' element in a single XML file.
## We will also include the 'collection title' field ('titleproper' in EAD's vocabulary) as
## the first item in each row.
xml_filename = xml_filenames[3] # <-- Change the index number there to run the script on another XML file in the list.
xml_text = open(xml_filename).read()
soup = BeautifulSoup(xml_text, 'lxml')
list_of_lists = [] # Creating an empty list, which we will use to hold our rows (each row represented as a list)
try:
collection_title = clean_text(soup.find('titleproper').get_text())
except:
collection_title = xml_filename # If the 'titleproper' field is missing for some reason,
## we'll use the XML filename instead.
for element in soup.find_all('did'):
# unittitle
try:
unittitle = clean_text(element.find('unittitle').get_text())
except:
unittitle = ''
# unitdate
try:
unitdate = clean_text(element.find('unitdate').get_text())
except:
unitdate = ''
# container type and number
try:
container_type = element.find('container')['type']
except:
container_type = ''
try:
container_number = element.find('container').get_text()
except:
container_number = ''
# extent
try:
extent = element.find('extent').get_text()
except:
extent = ''
row = [collection_title, unittitle, unitdate, container_type, container_number, extent]
list_of_lists.append(row) ## Adding the row list we defined in the previous line to 'list_of_lists'
list_of_lists[:15] ## Outputting the first 15 rows in our list of lists
## Almost there! Next we'll run the script above on each XML file in our list, creating a
## master list of lists that we'll write to disk as a CSV in the next cell.
## Let's begin by re-loading our list of XML filenames:
os.chdir('/sharedfolder/LOC_Metadata')
xml_filenames = [item for item in os.listdir('./') if item[-2:]=='.2'] # Creating a list of XML filenames
list_of_lists = [] # Creating an empty list
## Now we'll extract metadata from the full batch of XML files. This may take a few moments to complete.
for xml_filename in xml_filenames:
xml_text = open(xml_filename).read()
soup = BeautifulSoup(xml_text, 'lxml')
try:
collection_title = clean_text(soup.find('titleproper').get_text())
except:
collection_title = xml_filename # If the 'titleproper' field is missing for some reason,
## we'll use the XML filename instead.
for element in soup.find_all('did'):
# unittitle
try:
unittitle = clean_text(element.find('unittitle').get_text())
except:
unittitle = ''
# unitdate
try:
unitdate = clean_text(element.find('unitdate').get_text())
except:
unitdate = ''
# container type and number
try:
container_type = element.find('container')['type']
except:
container_type = ''
try:
container_number = element.find('container').get_text()
except:
container_number = ''
# extent
try:
extent = element.find('extent').get_text()
except:
extent = ''
row = [collection_title, unittitle, unitdate, container_type, container_number, extent]
list_of_lists.append(row)
print(len(list_of_lists)) ## Printing the number of rows in our table
## Finally, we write the extracted metadata to disk as a CSV called 'LOC_RS_Reduced_Metadata.csv'
out_path = "./LOC_RS_Reduced_Metadata.csv" # The './' part is optional; it just means we're writing to
# the current working directory.
# Defining a list of column headers, which we will write as the first row in our CSV
column_headers = ['Collection Title', 'Unit Title', 'Unit Date', 'Container Type', 'Container Number', 'Extent']
import csv # Importing Python's built-in CSV input/output package
with open(out_path, 'w') as fo:  # Creating a temporary file stream object called 'fo' (my abbreviation for 'file out')
csv_writer = csv.writer(fo) # Initializing our CSV writer
csv_writer.writerow(column_headers) # Writing one row (our column headers)
csv_writer.writerows(list_of_lists) # Writing a list of lists as a sequence of rows
## Go to 'sharedfolder' on your desktop and use LibreOffice or Excel to open your new CSV.
## As you scroll through the CSV file, you will probably see more formatting oddities you can fix
## by tweaking the code above.
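## As a final optional sanity check (a small sketch, assuming pandas is available), we can
## reload the CSV we just wrote and inspect its shape and first few rows.
import pandas as pd
df = pd.read_csv(out_path)
print(df.shape)
print(df.head())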
```
| github_jupyter |
## Dependencies
```
import warnings, glob
from tensorflow.keras import Sequential, Model
from cassava_scripts import *
seed = 0
seed_everything(seed)
warnings.filterwarnings('ignore')
```
### Hardware configuration
```
# TPU or GPU detection
# Detect hardware, return appropriate distribution strategy
strategy, tpu = set_up_strategy()
AUTO = tf.data.experimental.AUTOTUNE
REPLICAS = strategy.num_replicas_in_sync
print(f'REPLICAS: {REPLICAS}')
```
# Model parameters
```
BATCH_SIZE = 8 * REPLICAS
HEIGHT = 380
WIDTH = 380
CHANNELS = 3
N_CLASSES = 5
TTA_STEPS = 0 # Do TTA if > 0
```
# Augmentation
```
def data_augment(image, label):
p_spatial = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
# Flips
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
if p_spatial > .75:
image = tf.image.transpose(image)
return image, label
```
## Auxiliary functions
```
# Datasets utility functions
def resize_image(image, label):
image = tf.image.resize(image, [HEIGHT, WIDTH])
image = tf.reshape(image, [HEIGHT, WIDTH, CHANNELS])
return image, label
def process_path(file_path):
name = get_name(file_path)
img = tf.io.read_file(file_path)
img = decode_image(img)
# img, _ = scale_image(img, None)
# img = center_crop(img, HEIGHT, WIDTH)
return img, name
def get_dataset(files_path, shuffled=False, tta=False, extension='jpg'):
dataset = tf.data.Dataset.list_files(f'{files_path}*{extension}', shuffle=shuffled)
dataset = dataset.map(process_path, num_parallel_calls=AUTO)
if tta:
dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
dataset = dataset.map(resize_image, num_parallel_calls=AUTO)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
```
# Load data
```
database_base_path = '/kaggle/input/cassava-leaf-disease-classification/'
submission = pd.read_csv(f'{database_base_path}sample_submission.csv')
display(submission.head())
TEST_FILENAMES = tf.io.gfile.glob(f'{database_base_path}test_tfrecords/ld_test*.tfrec')
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
print(f'GCS: test: {NUM_TEST_IMAGES}')
!ls /kaggle/input/
model_path_list = glob.glob('/kaggle/input/162-cassava-leaf-effnetb4-dcr-04-380x380/*.h5')
model_path_list.sort()
print('Models to predict:')
print(*model_path_list, sep='\n')
```
# Model
```
def model_fn(input_shape, N_CLASSES):
inputs = L.Input(shape=input_shape, name='input_image')
base_model = tf.keras.applications.EfficientNetB4(input_tensor=inputs,
include_top=False,
drop_connect_rate=.4,
weights=None)
x = L.GlobalAveragePooling2D()(base_model.output)
x = L.Dropout(.5)(x)
output = L.Dense(N_CLASSES, activation='softmax', name='output')(x)
model = Model(inputs=inputs, outputs=output)
return model
with strategy.scope():
model = model_fn((None, None, CHANNELS), N_CLASSES)
model.summary()
```
# Test set predictions
```
files_path = f'{database_base_path}test_images/'
test_size = len(os.listdir(files_path))
test_preds = np.zeros((test_size, N_CLASSES))
for model_path in model_path_list:
print(model_path)
K.clear_session()
model.load_weights(model_path)
if TTA_STEPS > 0:
test_ds = get_dataset(files_path, tta=True).repeat()
ct_steps = TTA_STEPS * ((test_size/BATCH_SIZE) + 1)
preds = model.predict(test_ds, steps=ct_steps, verbose=1)[:(test_size * TTA_STEPS)]
preds = np.mean(preds.reshape(test_size, TTA_STEPS, N_CLASSES, order='F'), axis=1)
test_preds += preds / len(model_path_list)
else:
test_ds = get_dataset(files_path, tta=False)
x_test = test_ds.map(lambda image, image_name: image)
test_preds += model.predict(x_test) / len(model_path_list)
test_preds = np.argmax(test_preds, axis=-1)
test_names_ds = get_dataset(files_path)
image_names = [img_name.numpy().decode('utf-8') for img, img_name in iter(test_names_ds.unbatch())]
submission = pd.DataFrame({'image_id': image_names, 'label': test_preds})
submission.to_csv('submission.csv', index=False)
display(submission.head())
```
| github_jupyter |
```
%matplotlib inline
```
This notebook deals with banks of cylinders in a cross flow. Cylinder banks are common heat exchangers where the cylinders may be heated by electricity or a fluid may be flowing within the cylinder to cool or heat the flow around the cylinders. The advantage of cylinder banks is the increased mixing in the fluid; thus the temperature downstream of the bank is likely to be quite homogeneous.
<img src='figures_Tube_Banks/fig_07_11.jpg' alt="my awesome sketch" width=50% >
The arrangement of cylinders may be aligned or staggered as shown in the following figures. The flow and geometrical parameters will be used in the derivation of temperature equations and Nusselt number correlation.
<img src='figures_Tube_Banks/fig_07_12.jpg' alt="my awesome sketch" width=50% >
This notebook should cover a wide variety of problems, providing that the assumption of isothermal boundary conditions on the tubes is (approximately) valid. The tube surface temperature is $T_s$.
The flow and geometrical parameters of importance to solve this problem are:
* Arithmetic mean of temperature between inlet $T_i$ and outlet $T_o$ of the bank.
$$
T_m = \frac{T_i+T_o}{2}
$$
* Reynolds number based on the max velocity within the bank $V_\text{max}$, the density and viscosity based on $T_m$:
$$
Re=\frac{\rho V_\text{max}D}{\mu}
$$
**Question: At what temperature should you estimate $\rho$ and $\mu$?** The energy of the flow comes from the inlet and the velocity $V_\mathrm{max}$ is calculated from the inlet velocity, so the density should be estimated at $T_i$. The viscous forces, however, occur throughout the domain, so $\mu$ should be estimated at $T_m$. In some cases $T_o$ is the quantity to be found; it is then acceptable to start with $\mu(T_i)$, but you must verify that the temperature difference $\Delta T=\vert T_i-T_o\vert$ is not too large. If it is, repeat the calculation iteratively, re-evaluating $\mu$ at the updated $T_m$, until $T_o$ converges.
* Prandtl number $Pr$ based on $T_m$
* Surface Prandtl number $Pr_s$ based on $T_s$
* Number of tubes in the transversal direction $N_T$, longitudinal direction $N_L$ and total $N=N_T\times N_L$
* The transversal $S_T$ and longitudinal $S_L$ separations between tubes in a row and between rows.
* The type of tube arrangement:
* Aligned
$$
V_\text{max}=\frac{S_T}{S_T-D}V_i
$$
* Staggered
$$
V_\text{max}=\frac{S_T}{2(S_D-D)}V_i\text{ with }S_D=\sqrt{S_L^2+\left(\frac{S_T}{2}\right)^2}
$$
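These definitions translate directly into a couple of helper functions. The sketch below is illustrative only: the geometry matches Problem 1 further down, while the air properties $\rho$ and $\mu$ are rough placeholder values (assumptions) that you would normally look up at $T_i$ and $T_m$ respectively.
```
import numpy as np

def V_max_bank(V_i, D, S_T, S_L, arrangement='aligned'):
    """Maximum velocity in the tube bank from the approach velocity V_i."""
    if arrangement == 'aligned':
        return S_T / (S_T - D) * V_i
    else:  # staggered
        S_D = np.sqrt(S_L**2 + (S_T / 2.)**2)
        return S_T / (2. * (S_D - D)) * V_i

def Re_max(rho, V_max, D, mu):
    """Reynolds number based on the maximum velocity in the bank."""
    return rho * V_max * D / mu

# Geometry of Problem 1 below; rho and mu are assumed placeholder values for air.
V_max = V_max_bank(V_i=5., D=10e-3, S_T=15e-3, S_L=15e-3, arrangement='aligned')
Re = Re_max(rho=1.17, V_max=V_max, D=10e-3, mu=2.0e-5)
```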
The Nusselt number correlation for a bank of tubes is a variation of the Zukauskas correlation:
$$
Nu = C_2C_1Re^mPr^{0.36}\left(\frac{Pr}{Pr_s}\right)^{1/4}
$$
where $C_2$ depends on $N_L$. In the library, the function for this correlation is
<FONT FACE="courier" style="color:red">Nu_tube_banks(Re,Pr,Pr_s,S_L,S_T,N_L,arrangement) </FONT>.
The heat rate per unit length across the tube bank is
$$
q'=N\overline{h}\pi D \Delta T_\text{lm}
$$
where the temperature drop is the log-mean temperature difference
$$
\Delta T_\text{lm}=\cfrac{(T_s-T_i)-(T_s-T_o)}{\ln\left(\cfrac{T_s-T_i}{T_s-T_o}\right)}
$$
which accounts for the exponential variation of temperature across the bank
$$
\cfrac{T_s-T_o}{T_s-T_i}=\exp\left(-\cfrac{\pi D N \overline{h}}{\rho V_i N_T S_T C_p}\right)
$$
where $\rho$, $C_p$ and $V_i$ are evaluated at the inlet if $T_o$ is unknown, or at the arithmetic mean temperature if it is available. Note that $N=N_L\times N_T$, thus
$$
\cfrac{T_s-T_o}{T_s-T_i}=\exp\left(-\cfrac{\pi D N_L \overline{h}}{\rho V_i S_T C_p}\right)
$$
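A compact way to use the last three relations is sketched below. The inlet density, specific heat and the average coefficient $\overline{h}$ are assumed inputs; as noted above, if $T_o$ was initially guessed you would re-evaluate the $T_m$-based properties with the new $T_o$ and iterate until it converges.
```
import numpy as np

def T_outlet(T_s, T_i, h_bar, D, N_L, rho, V_i, S_T, c_p):
    """Outlet temperature from the exponential relation across N_L rows."""
    ratio = np.exp(-np.pi * D * N_L * h_bar / (rho * V_i * S_T * c_p))
    return T_s - ratio * (T_s - T_i)

def delta_T_lm(T_s, T_i, T_o):
    """Log-mean temperature difference across the bank."""
    dT_i, dT_o = T_s - T_i, T_s - T_o
    return (dT_i - dT_o) / np.log(dT_i / dT_o)

def q_per_length(N, h_bar, D, T_s, T_i, T_o):
    """Heat rate per unit tube length, q' = N*h_bar*pi*D*dT_lm."""
    return N * h_bar * np.pi * D * delta_T_lm(T_s, T_i, T_o)

# Illustrative numbers only (h_bar, rho and c_p are assumed placeholders for air):
T_o = T_outlet(T_s=100., T_i=25., h_bar=220., D=10e-3, N_L=15,
               rho=1.17, V_i=5., S_T=15e-3, c_p=1007.)
qp = q_per_length(N=14 * 15, h_bar=220., D=10e-3, T_s=100., T_i=25., T_o=T_o)
```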
One may want to determine the number of tubes necessary to achieve a given $T_o$. The number of tubes in the transverse directions is typically dictated by the geometry of the system, so we are looking for $N_L$:
$$
N_L = \cfrac{\rho V_i S_T C_p}{\pi D \overline{h}} \log\left(\cfrac{T_s-T_i}{T_s-T_o}\right)
$$
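Inverting the relation gives the row count directly; a minimal sketch with the same assumed property values as above (round up to the next whole row in practice):
```
import numpy as np

def N_L_required(T_s, T_i, T_o, h_bar, D, rho, V_i, S_T, c_p):
    """Minimum number of rows N_L to reach a target outlet temperature T_o."""
    return (rho * V_i * S_T * c_p / (np.pi * D * h_bar)
            * np.log((T_s - T_i) / (T_s - T_o)))

N_L_min = N_L_required(T_s=100., T_i=25., T_o=75., h_bar=220., D=10e-3,
                       rho=1.17, V_i=5., S_T=15e-3, c_p=1007.)
```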
The pressure loss through the tube bank is a critical component of the heat exchanger design. The presence of obstacles in the flow requires an increase in the mechanical energy necessary to drive the flow at a given flow rate. The pressure loss, given all parameters above, is
$$
\Delta p = N_L\,\chi\, f\,\frac{\rho V_\text{max}^2}{2}
$$
where the friction factor $f$ and the parameter $\chi$ are given by the graphs below for the aligned (top) and staggered (bottom) arrangements. These graphs use two new quantities, the longitudinal and transverse pitches:
$$
P_L=\frac{S_L}{D}\text{ and } P_T=\frac{S_T}{D}
$$
<img src='figures_Tube_Banks/fig_07_14.jpg' alt="my awesome sketch" width=100% >
<img src='figures_Tube_Banks/fig_07_15.jpg' alt="my awesome sketch" width=100% >
## Problem1
A preheater involves the use of condensing steam on the inside of a bank of tubes to heat air that enters at $P_i=1 \text{ atm}$ and $T_i=25^\circ\text{C}$. The air moves at $V_i=5\text{ m/s}$ in cross flow over the tubes. Each tube is $L=1\text{ m}$ long and has an outside diameter of $D=10 \text{ mm}$. The bank consists of columns of 14 tubes in the transversal direction $N_T=14$ and $N_L$ rows in the direction of flow. The arrangement of tubes is aligned array for which $S_T=S_L=15\text{ mm}$. What is the minimum value of $N_L$ needed to achieve an outlet temperature of $T_o=75^\circ\text{C}$? What is the corresponding pressure drop across the tube bank?
```
import numpy as np
from Libraries import thermodynamics as thermo
from Libraries import HT_external_convection as extconv
T_i = 25 #C
T_o = 75 #C
T_s = 100 #C
V_i = 5 #m/s
L = 1 #m
D = 10e-3 # m (10 mm outer diameter)
N_L = 14
S_T = S_L = 15e-3 #m
# ?extconv.BankofTubes
bank = extconv.BankofTubes('aligned','air',T_i,T_s,T_o,"C",V_i,D,S_L,S_T,N_L)
print("The number of rows required to reach T_o=%.0f C is %.2f" %(bank.T_o,bank.N_L_for_given_To))
```
If the outlet temperature can be slightly below $75^\circ\mathrm{C}$, then the number of rows is 15.
If the outlet temperature has to be at least $75^\circ\mathrm{C}$, then the number of rows is 16.
```
N_L = 15
bank = extconv.BankofTubes('aligned','air',T_i,T_s,T_o,"C",V_i,D,S_L,S_T,N_L)
N_T = 14
bank.temperature_outlet_tube_banks(N_T,N_L)
print("With N_L=%.0f, T_o=%.2f" %(bank.N_L,bank.T_o))
print("Re=%.0f, P_L = %.2f" %(bank.Re,bank.S_T/bank.D))
bank.pressure_drop(N_L,3.2,1)
print("Pressure drop is %.2f Pa" %(bank.Delta_p))
```
## Problem 2
A preheater involves the use of condensing steam at $100^\circ\text{C}$ on the inside of a bank of tubes to heat air that enters at $1 \text{ atm}$ and $25^\circ\text{C}$. The air moves at $5\text{ m/s}$ in cross flow over the tubes. Each tube is $1\text{ m}$ long and has an outside diameter of $10 \text{ mm}$. The bank consists of 196 tubes in a square, aligned array for which $S_T=S_L=15\text{ mm}$. What is the total rate of heat transfer to the air? What is the pressure drop associated with the airflow?
```
N_L = N_T = 14
# T_o = 50.
# bank = extconv.BankofTubes('aligned','air',T_i,T_s,T_o,"C",V_i,D,S_L,S_T,N_L)
# bank.temperature_outlet_tube_banks(N_T,N_L)
# print(bank.T_o)
# print(bank.Re)
# print(bank.Nu)
T_o = 72.6
bank = extconv.BankofTubes('aligned','air',T_i,T_s,T_o,"C",V_i,D,S_L,S_T,N_L)
bank.temperature_outlet_tube_banks(N_T,N_L)
print(bank.T_o)
print(bank.Re)
print(bank.Nu)
bank.heat_rate(N_T,N_L,L)
print(bank.q)
```
## Problem 3
<img src='figures_Tube_Banks/probun_07_34.jpg' alt="my awesome sketch" width=100% >
An air duct heater consists of an aligned array of electrical heating elements in which the longitudinal and transverse pitches are $S_L=S_T= 24\text{ mm}$. There are 3 rows of elements in the flow direction ($N_L=3$) and 4 elements per row ($N_T=4$). Atmospheric air with an upstream velocity of $12\text{ m/s}$ and a temperature of $25^\circ\text{C}$ moves in cross flow over the elements, which have a diameter of $12\text{ mm}$, a length of $250\text{ mm}$, and are maintained at a surface temperature of $350^\circ\text{C}$.
<ol>
<li>
Determine the total heat transfer to the air and the temperature of the air leaving the duct heater.
</li>
<li>
Determine the pressure drop across the element bank and the fan power requirement.
</li>
<li>
Compare the average convection coefficient obtained in your analysis with the value for an isolated (single) element. Explain the difference between the results.
</li>
<li>
What effect would increasing the longitudinal and transverse pitches to 30 mm have on the exit temperature of the air, the total heat rate, and the pressure drop?
</li>
</ol>
```
```
| github_jupyter |
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
# Training Pipeline - Custom Script
_**Training many models using a custom script**_
----
This notebook demonstrates how to create a pipeline that trains and registers many models using a custom script. We utilize the [ParallelRunStep](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-use-parallel-run-step) to parallelize model training and make the process more efficient. For this solution accelerator, we are using the [OJ Sales Dataset](https://azure.microsoft.com/en-us/services/open-datasets/catalog/sample-oj-sales-simulated/) to train individual models that predict sales for each store and brand of orange juice.
The model we use here is a simple, regression-based forecaster built on scikit-learn and pandas utilities. See the [training script](scripts/train.py) to see how the forecaster is constructed. This forecaster is intended for demonstration purposes, so it does not handle the large variety of special cases that one encounters in time-series modeling. For instance, the model here assumes that all time-series are comprised of regularly sampled observations on a contiguous interval with no missing values. The model does not include any handling of categorical variables. For a more general-use forecaster that handles missing data, advanced featurization, and automatic model selection, see the [AutoML Forecasting task](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-forecast). Also, see the notebooks demonstrating [AutoML forecasting in a many models scenario](../Automated_ML).
### Prerequisites
At this point, you should have already:
1. Created your AML Workspace using the [00_Setup_AML_Workspace notebook](../00_Setup_AML_Workspace.ipynb)
2. Run [01_Data_Preparation.ipynb](../01_Data_Preparation.ipynb) to set up your compute and create the dataset
#### Please ensure you have the latest version of the Azure ML SDK and also install Pipeline Steps Package
```
#!pip install --upgrade azureml-sdk
# !pip install azureml-pipeline-steps
```
## 1.0 Connect to workspace and datastore
```
from azureml.core import Workspace
# set up workspace
ws = Workspace.from_config()
# set up datastores
dstore = ws.get_default_datastore()
print('Workspace Name: ' + ws.name,
'Azure Region: ' + ws.location,
'Subscription Id: ' + ws.subscription_id,
'Resource Group: ' + ws.resource_group,
sep = '\n')
```
## 2.0 Create an experiment
```
from azureml.core import Experiment
experiment = Experiment(ws, 'oj_training_pipeline')
print('Experiment name: ' + experiment.name)
```
## 3.0 Get the training Dataset
Next, we get the training Dataset using the [Dataset.get_by_name()](https://docs.microsoft.com/python/api/azureml-core/azureml.core.dataset.dataset#get-by-name-workspace--name--version--latest--) method.
This is the training dataset we created and registered in the [data preparation notebook](../01_Data_Preparation.ipynb). If you chose to use only a subset of the files, the training dataset name will be `oj_data_small_train`. Otherwise, the name you'll have to use is `oj_data_train`.
We recommend starting with the small dataset and making sure everything runs successfully, then scaling up to the full dataset.
```
dataset_name = 'oj_data_small_train'
from azureml.core.dataset import Dataset
dataset = Dataset.get_by_name(ws, name=dataset_name)
dataset_input = dataset.as_named_input(dataset_name)
```
## 4.0 Create the training pipeline
Now that the workspace, experiment, and dataset are set up, we can put together a pipeline for training.
### 4.1 Configure environment for ParallelRunStep
An [environment](https://docs.microsoft.com/en-us/azure/machine-learning/concept-environments) defines a collection of resources that we will need to run our pipelines. We configure a reproducible Python environment for our training script including the [scikit-learn](https://scikit-learn.org/stable/index.html) python library.
```
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies
train_env = Environment(name="many_models_environment")
train_conda_deps = CondaDependencies.create(pip_packages=['scikit-learn', 'pandas', 'joblib', 'azureml-defaults', 'azureml-core', 'azureml-dataprep[fuse]'])
train_env.python.conda_dependencies = train_conda_deps
```
### 4.2 Choose a compute target
Currently ParallelRunConfig only supports AMLCompute. This is the compute cluster you created in the [setup notebook](../00_Setup_AML_Workspace.ipynb#3.0-Create-compute-cluster).
```
cpu_cluster_name = "cpucluster"
from azureml.core.compute import AmlCompute
compute = AmlCompute(ws, cpu_cluster_name)
```
### 4.3 Set up ParallelRunConfig
[ParallelRunConfig](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.parallel_run_config.parallelrunconfig?view=azure-ml-py) provides the configuration for the ParallelRunStep we'll be creating next. Here we specify the environment and compute target we created above along with the entry script that will be run for each batch.
There's a number of important parameters to configure including:
- **mini_batch_size**: The number of files per batch. If you have 500 files and mini_batch_size is 10, 50 batches would be created containing 10 files each. Batches are split across the various nodes.
- **node_count**: The number of compute nodes to be used for running the user script. For the small sample of OJ datasets, we only need a single node, but you will likely need to increase this number for larger datasets composed of more files. If you increase the node count beyond five here, you may need to increase the max_nodes for the compute cluster as well.
- **process_count_per_node**: The number of processes per node. The compute cluster we are using has 8 cores so we set this parameter to 8.
- **run_invocation_timeout**: The run() method invocation timeout in seconds. The timeout should be set higher than the maximum training time of one model (in seconds); by default it's 60. Since the batches that take the longest to train need about 120 seconds, we set it to 180 to ensure the method has adequate time to run.
We also added tags to preserve the information about our training cluster's node count, process count per node, and dataset name. You can find the 'Tags' column in Azure Machine Learning Studio.
```
from azureml.pipeline.steps import ParallelRunConfig
processes_per_node = 8
node_count = 1
timeout = 180
parallel_run_config = ParallelRunConfig(
source_directory='./scripts',
entry_script='train.py',
mini_batch_size="1",
run_invocation_timeout=timeout,
error_threshold=-1,
output_action="append_row",
environment=train_env,
process_count_per_node=processes_per_node,
compute_target=compute,
node_count=node_count)
```
### 4.4 Set up ParallelRunStep
This [ParallelRunStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.parallel_run_step.parallelrunstep?view=azure-ml-py) is the main step in our training pipeline.
First, we set up the output directory and define the pipeline's output name. The datastore that stores the pipeline's output data is Workspace's default datastore.
```
from azureml.pipeline.core import PipelineData
output_dir = PipelineData(name="training_output", datastore=dstore)
```
We provide our ParallelRunStep with a name, the ParallelRunConfig created above and several other parameters:
- **inputs**: A list of input datasets. Here we'll use the dataset created in the previous notebook. The number of files in that path determines the number of models that will be trained in the ParallelRunStep.
- **output**: A PipelineData object that corresponds to the output directory. We'll use the output directory we just defined.
- **arguments**: A list of arguments required for the train.py entry script. Here, we provide the schema for the timeseries data - i.e. the names of target, timestamp, and id columns - as well as columns that should be dropped prior to modeling, a string identifying the model type, and the number of observations we want to leave aside for testing.
```
from azureml.pipeline.steps import ParallelRunStep
parallel_run_step = ParallelRunStep(
name="many-models-training",
parallel_run_config=parallel_run_config,
inputs=[dataset_input],
output=output_dir,
allow_reuse=False,
arguments=['--target_column', 'Quantity',
'--timestamp_column', 'WeekStarting',
'--timeseries_id_columns', 'Store', 'Brand',
'--drop_columns', 'Revenue', 'Store', 'Brand',
'--model_type', 'lr',
'--test_size', 20]
)
```
## 5.0 Run the pipeline
Next, we submit our pipeline to run. The run will train models for each dataset using a train set, compute accuracy metrics for the fits using a test set, and finally re-train models with all the data available. With 10 files, this should only take a few minutes but with the full dataset this can take over an hour.
```
from azureml.pipeline.core import Pipeline
pipeline = Pipeline(workspace=ws, steps=[parallel_run_step])
run = experiment.submit(pipeline)
#Wait for the run to complete
run.wait_for_completion(show_output=False, raise_on_error=True)
```
## 6.0 View results of training pipeline
The dataframe we return in the run method of train.py is outputted to *parallel_run_step.txt*. To see the results of our training pipeline, we'll download that file, read in the data to a DataFrame, and then visualize the results, including the in-sample metrics.
The run submitted to the Azure Machine Learning Training Compute Cluster may take a while. The output is not generated until the run is complete. You can monitor the status of the run in Azure Portal https://ml.azure.com
### 6.1 Download parallel_run_step.txt locally
```
import os
def download_results(run, target_dir=None, step_name='many-models-training', output_name='training_output'):
stitch_run = run.find_step_run(step_name)[0]
port_data = stitch_run.get_output_data(output_name)
port_data.download(target_dir, show_progress=True)
return os.path.join(target_dir, 'azureml', stitch_run.id, output_name)
file_path = download_results(run, 'output')
file_path
```
### 6.2 Convert the file to a dataframe
```
import pandas as pd
df = pd.read_csv(file_path + '/parallel_run_step.txt', sep=" ", header=None)
df.columns = ['Store', 'Brand', 'Model', 'File Name', 'ModelName', 'StartTime', 'EndTime', 'Duration',
'MSE', 'RMSE', 'MAE', 'MAPE', 'Index', 'Number of Models', 'Status']
df['StartTime'] = pd.to_datetime(df['StartTime'])
df['EndTime'] = pd.to_datetime(df['EndTime'])
df['Duration'] = df['EndTime'] - df['StartTime']
df.head()
```
### 6.3 Review Results
```
total = df['EndTime'].max() - df['StartTime'].min()
print('Number of Models: ' + str(len(df)))
print('Total Duration: ' + str(total)[6:])
print('Average MAPE: ' + str(round(df['MAPE'].mean(), 5)))
print('Average MSE: ' + str(round(df['MSE'].mean(), 5)))
print('Average RMSE: ' + str(round(df['RMSE'].mean(), 5)))
print('Average MAE: '+ str(round(df['MAE'].mean(), 5)))
print('Maximum Duration: '+ str(df['Duration'].max())[7:])
print('Minimum Duration: ' + str(df['Duration'].min())[7:])
print('Average Duration: ' + str(df['Duration'].mean())[7:])
```
### 6.4 Visualize Performance across models
Here, we produce some charts from the errors metrics calculated during the run using a subset put aside for testing.
First, we examine the distribution of mean absolute percentage error (MAPE) over all the models:
```
import seaborn as sns
import matplotlib.pyplot as plt
fig = sns.boxplot(y='MAPE', data=df)
fig.set_title('MAPE across all models')
```
Next, we can break that down by Brand or Store to see variations in error across our models
```
fig = sns.boxplot(x='Brand', y='MAPE', data=df)
fig.set_title('MAPE by Brand')
```
We can also look at how long models for different brands took to train
```
brand = df.groupby('Brand')
brand = brand['Duration'].sum()
brand = pd.DataFrame(brand)
brand['time_in_seconds'] = [time.total_seconds() for time in brand['Duration']]
brand.drop(columns=['Duration']).plot(kind='bar')
plt.xlabel('Brand')
plt.ylabel('Seconds')
plt.title('Total Training Time by Brand')
plt.show()
```
## 7.0 Publish and schedule the pipeline (Optional)
### 7.1 Publish the pipeline
Once you have a pipeline you're happy with, you can publish it so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipeline#publish-a-pipeline) for additional information on publishing and calling pipelines.
```
# published_pipeline = pipeline.publish(name = 'train_many_models',
# description = 'train many models',
# version = '1',
# continue_on_step_failure = False)
```
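If you do run the publish cell above, the pipeline can later be retrieved by its id and re-submitted without rebuilding the steps. A minimal sketch is shown below (the id string is a hypothetical placeholder); it is left commented out like the cell above.
```
# from azureml.pipeline.core import PublishedPipeline
# published_pipeline = PublishedPipeline.get(ws, id='<your-published-pipeline-id>')
# run = experiment.submit(published_pipeline)
```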
### 7.2 Schedule the pipeline
You can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain models every month or based on another trigger such as data drift.
```
# from azureml.pipeline.core import Schedule, ScheduleRecurrence
# training_pipeline_id = published_pipeline.id
# recurrence = ScheduleRecurrence(frequency="Month", interval=1, start_time="2020-01-01T09:00:00")
# recurring_schedule = Schedule.create(ws, name="training_pipeline_recurring_schedule",
# description="Schedule Training Pipeline to run on the first day of every month",
# pipeline_id=training_pipeline_id,
# experiment_name=experiment.name,
# recurrence=recurrence)
```
## Next Steps
Now that you've trained and scored the models, move on to [03_CustomScript_Forecasting_Pipeline.ipynb](03_CustomScript_Forecasting_Pipeline.ipynb) to make forecasts with your models.
| github_jupyter |
```
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from tqdm import tqdm
# Pg. 125: http://incompleteideas.net/book/bookdraft2018mar21.pdf
#0 is the left terminal state
# 6 is the right terminal state
# 1 ... 5 represents A ... E
VALUES = np.zeros(7)
VALUES[1:6] = 0.5
# For convenience, we assume all rewards are 0
# and the left terminal state has value 0, the right terminal state has value 1
# This trick has been used in the Gambler's Problem
VALUES[6] = 1
# set up true state values
TRUE_VALUE = np.zeros(7)
TRUE_VALUE[1:6] = np.arange(1, 6) / 6.0
TRUE_VALUE[6] = 1
ACTION_LEFT = 0
ACTION_RIGHT = 1
def temporal_difference(values, alpha = 0.6, batch = False):
state = 3
trajectory = [state]
rewards = [0]
while True:
prior_state = state
if np.random.binomial(1, 0.5) == ACTION_LEFT:
state -= 1
else:
state += 1
reward = 0
trajectory.append(state)
# TD Update
if not batch:
values[prior_state] += alpha * (reward + values[state] - values[prior_state])
if state == 6 or state == 0:
break
rewards.append(reward)
return trajectory, rewards
# @values: current states value, will be updated if @batch is False
# @alpha: step size
# @batch: whether to update @values
def monte_carlo(values, alpha=0.1, batch=False):
state = 3
trajectory = [3]
# if end up with left terminal state, all returns are 0
# if end up with right terminal state, all returns are 1
while True:
if np.random.binomial(1, 0.5) == ACTION_LEFT:
state -= 1
else:
state += 1
trajectory.append(state)
if state == 6:
returns = 1.0
break
elif state == 0:
returns = 0.0
break
if not batch:
        # update every state visited in this episode (the update order does not matter here)
for state_ in trajectory[:-1]:
# MC update
values[state_] += alpha * (returns - values[state_])
return trajectory, [returns] * (len(trajectory) - 1)
def compute_state_value():
episodes = [0, 1, 10, 100]
current_values = np.copy(VALUES)
plt.figure(1)
for i in range(episodes[-1] + 1):
if i in episodes:
plt.plot(current_values, label=str(i) + ' episodes')
temporal_difference(current_values)
plt.plot(TRUE_VALUE, label='true values')
plt.xlabel('State')
plt.ylabel('Estimated Value')
plt.legend()
# Example 6.2 right
def rms_error():
# Same alpha value can appear in both arrays
td_alphas = [0.15, 0.1, 0.05]
mc_alphas = [0.01, 0.02, 0.03, 0.04]
episodes = 100 + 1
runs = 100
for i, alpha in enumerate(td_alphas + mc_alphas):
total_errors = np.zeros(episodes)
if i < len(td_alphas):
method = 'TD'
linestyle = 'solid'
else:
method = 'MC'
linestyle = 'dashdot'
for r in tqdm(range(runs)):
errors = []
current_values = np.copy(VALUES)
for i in range(0, episodes):
errors.append(np.sqrt(np.sum(np.power(TRUE_VALUE - current_values, 2)) / 5.0))
if method == 'TD':
temporal_difference(current_values, alpha=alpha)
else:
monte_carlo(current_values, alpha=alpha)
total_errors += np.asarray(errors)
total_errors /= runs
plt.plot(total_errors, linestyle=linestyle, label=method + ', alpha = %.02f' % (alpha))
plt.xlabel('episodes')
plt.ylabel('RMS')
plt.legend()
# Figure 6.2
# @method: 'TD' or 'MC'
def batch_updating(method, episodes, alpha=0.001):
# perform 100 independent runs
runs = 100
total_errors = np.zeros(episodes)
for r in tqdm(range(0, runs)):
current_values = np.copy(VALUES)
errors = []
# track shown trajectories and reward/return sequences
trajectories = []
rewards = []
for ep in range(episodes):
if method == 'TD':
trajectory_, rewards_ = temporal_difference(current_values, batch=True)
else:
trajectory_, rewards_ = monte_carlo(current_values, batch=True)
trajectories.append(trajectory_)
rewards.append(rewards_)
while True:
# keep feeding our algorithm with trajectories seen so far until state value function converges
updates = np.zeros(7)
for trajectory_, rewards_ in zip(trajectories, rewards):
for i in range(0, len(trajectory_) - 1):
if method == 'TD':
updates[trajectory_[i]] += rewards_[i] + current_values[trajectory_[i + 1]] - current_values[trajectory_[i]]
else:
updates[trajectory_[i]] += rewards_[i] - current_values[trajectory_[i]]
updates *= alpha
if np.sum(np.abs(updates)) < 1e-3:
break
# perform batch updating
current_values += updates
# calculate rms error
errors.append(np.sqrt(np.sum(np.power(current_values - TRUE_VALUE, 2)) / 5.0))
total_errors += np.asarray(errors)
total_errors /= runs
return total_errors
def example_6_2():
plt.figure(figsize=(10, 20))
plt.subplot(2, 1, 1)
compute_state_value()
plt.subplot(2, 1, 2)
rms_error()
plt.tight_layout()
plt.savefig('./images/example_6_2.png')
plt.close()
def figure_6_2():
episodes = 100 + 1
    td_errors = batch_updating('TD', episodes)
    mc_errors = batch_updating('MC', episodes)
    plt.plot(td_errors, label='TD')
    plt.plot(mc_errors, label='MC')
plt.xlabel('episodes')
plt.ylabel('RMS error')
plt.legend()
plt.savefig('./images/figure_6_2.png')
plt.close()
example_6_2()
figure_6_2()
```
| github_jupyter |
```
import xarray as xr
from mpl_toolkits.axes_grid1 import make_axes_locatable
# file = '/Users/mikesmith/Downloads/5MHz_6km_realtime-agg_2f30_fcd6_a21e.nc'
file = '/Users/mikesmith/Downloads/5MHz_6km_realtime-agg_a667_a2f2_f11b.nc'
ds = xr.open_dataset(file).mean('time')
ds
tds = ds.coarsen(longitude=2, latitude=2, boundary='pad').mean()
tds
import cartopy.crs as ccrs
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import cartopy.feature as cfeature
projection = ccrs.Mercator()
lon = tds.longitude
lat = tds.latitude
extent = [
lon.min() - 1,
lon.max() + 1,
lat.min() - 1,
lat.max() + 1
]
LAND = cfeature.NaturalEarthFeature(
'physical', 'land', '10m',
edgecolor='face',
facecolor='tan'
)
state_lines = cfeature.NaturalEarthFeature(
category='cultural',
name='admin_1_states_provinces_lines',
scale='50m',
facecolor='none'
)
```
#### Let's turn the mapping features into a function
```
def get_ticks(bounds, dirs, otherbounds):
dirs = dirs.lower()
    l0 = float(bounds[0])
    l1 = float(bounds[1])
    r = np.max([l1 - l0, float(otherbounds[1]) - float(otherbounds[0])])
    if r <= 1.5:
        # <1.5 degrees: 15' major ticks, 5' minor ticks
        minor_int = 1.0 / 12.0
        major_int = 1.0 / 4.0
    elif r <= 3.0:
        # <3 degrees: 30' major ticks, 10' minor ticks
        minor_int = 1.0 / 6.0
        major_int = 0.5
    elif r <= 7.0:
        # <7 degrees: 1d major ticks, 15' minor ticks
        minor_int = 0.25
        major_int = 1.0
    elif r <= 15:
        # <15 degrees: 2d major ticks, 30' minor ticks
        minor_int = 0.5
        major_int = 2.0
    elif r <= 30:
        # <30 degrees: 3d major ticks, 1d minor ticks
        minor_int = 1.0
        major_int = 3.0
    else:
        # >=30 degrees: 5d major ticks, 1d minor ticks
        minor_int = 1.0
        major_int = 5.0
minor_ticks = np.arange(np.ceil(l0 / minor_int) * minor_int, np.ceil(l1 / minor_int) * minor_int + minor_int,
minor_int)
minor_ticks = minor_ticks[minor_ticks <= l1]
major_ticks = np.arange(np.ceil(l0 / major_int) * major_int, np.ceil(l1 / major_int) * major_int + major_int,
major_int)
major_ticks = major_ticks[major_ticks <= l1]
if major_int < 1:
d, m, s = dd2dms(np.array(major_ticks))
if dirs == 'we' or dirs == 'ew' or dirs == 'lon' or dirs == 'long' or dirs == 'longitude':
n = 'W' * sum(d < 0)
p = 'E' * sum(d >= 0)
dir = n + p
major_tick_labels = [str(np.abs(int(d[i]))) + u"\N{DEGREE SIGN}" + str(int(m[i])) + "'" + dir[i] for i in
range(len(d))]
elif dirs == 'sn' or dirs == 'ns' or dirs == 'lat' or dirs == 'latitude':
n = 'S' * sum(d < 0)
p = 'N' * sum(d >= 0)
dir = n + p
major_tick_labels = [str(np.abs(int(d[i]))) + u"\N{DEGREE SIGN}" + str(int(m[i])) + "'" + dir[i] for i in
range(len(d))]
else:
major_tick_labels = [str(int(d[i])) + u"\N{DEGREE SIGN}" + str(int(m[i])) + "'" for i in range(len(d))]
else:
d = major_ticks
if dirs == 'we' or dirs == 'ew' or dirs == 'lon' or dirs == 'long' or dirs == 'longitude':
n = 'W' * sum(d < 0)
p = 'E' * sum(d >= 0)
dir = n + p
major_tick_labels = [str(np.abs(int(d[i]))) + u"\N{DEGREE SIGN}" + dir[i] for i in range(len(d))]
elif dirs == 'sn' or dirs == 'ns' or dirs == 'lat' or dirs == 'latitude':
n = 'S' * sum(d < 0)
p = 'N' * sum(d >= 0)
dir = n + p
major_tick_labels = [str(np.abs(int(d[i]))) + u"\N{DEGREE SIGN}" + dir[i] for i in range(len(d))]
else:
major_tick_labels = [str(int(d[i])) + u"\N{DEGREE SIGN}" for i in range(len(d))]
return minor_ticks, major_ticks, major_tick_labels
def add_map_features(ax, extent):
# # Gridlines and grid labels
# gl = ax.gridlines(
# draw_labels=True,
# linewidth=.5,
# color='black',
# alpha=0.25,
# linestyle='--',
# )
# gl.xlabels_top = gl.ylabels_right = False
# gl.xlabel_style = {'size': 16, 'color': 'black'}
# gl.ylabel_style = {'size': 16, 'color': 'black'}
# gl.xformatter = LONGITUDE_FORMATTER
# gl.yformatter = LATITUDE_FORMATTER
xl = [extent[0], extent[1]]
yl = [extent[2], extent[3]]
tick0x, tick1, ticklab = get_ticks(xl, 'we', yl)
ax.set_xticks(tick0x, minor=True, crs=ccrs.PlateCarree())
ax.set_xticks(tick1, crs=ccrs.PlateCarree())
ax.set_xticklabels(ticklab, fontsize=14)
# get and add latitude ticks/labels
tick0y, tick1, ticklab = get_ticks(yl, 'sn', xl)
ax.set_yticks(tick0y, minor=True, crs=ccrs.PlateCarree())
ax.set_yticks(tick1, crs=ccrs.PlateCarree())
ax.set_yticklabels(ticklab, fontsize=14)
gl = ax.gridlines(draw_labels=False, linewidth=.5, color='gray', alpha=0.75, linestyle='--', crs=ccrs.PlateCarree())
gl.xlocator = mticker.FixedLocator(tick0x)
gl.ylocator = mticker.FixedLocator(tick0y)
ax.tick_params(which='major',
direction='out',
bottom=True, top=True,
labelbottom=True, labeltop=False,
left=True, right=True,
labelleft=True, labelright=False,
length=5, width=2)
ax.tick_params(which='minor',
direction='out',
bottom=True, top=True,
labelbottom=True, labeltop=False,
left=True, right=True,
labelleft=True, labelright=False,
width=1)
# Axes properties and features
ax.set_extent(extent)
ax.add_feature(LAND, zorder=0, edgecolor='black')
ax.add_feature(cfeature.LAKES)
ax.add_feature(cfeature.BORDERS)
ax.add_feature(state_lines, edgecolor='black')
return ax
```
### Let's change the arrows
```
# velocity_min = np.int32(np.nanmin(speed)) # Get the minimum speed from the data
# velocity_max =np.int32(np.nanmax(speed)) # Get the maximum speed from the data
# velocity_min = 0 # Get the minimum speed from the data
# velocity_max = 40 # Get the maximum speed from the data
# Setup a keyword argument, kwargs, dictionary to pass optional arguments to the quiver plot
kwargs = dict(
transform=ccrs.PlateCarree(),
scale=65, # Number of data units per arrow length unit, e.g., m/s per plot width; a smaller scale parameter makes the arrow longer. Default is None.
headwidth=2.75, # Head width as multiple of shaft width.
headlength=2.75, #Head length as multiple of shaft width.
headaxislength=2.5, # Head length at shaft intersection.
minshaft=1,
minlength=1
)
# Clip the colors
# color_clipped = np.clip(speed, velocity_min, velocity_max).squeeze(),
# Set the colorbar ticks to correspond to the velocity minimum and maximum of the data with a step of 20... Append the max velocity
# ticks = np.append(np.arange(velocity_min, velocity_max, 5), velocity_max)
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import griddata
lon, lat = np.meshgrid(tds.longitude, tds.latitude)
u = tds.u.data
v = tds.v.data
#
# resample onto a 50x50 grid
nx, ny = 50, 50
# (N, 2) arrays of input x,y coords and u,v values
pts = np.vstack((lon.ravel(), lat.ravel())).T
vals = np.vstack((u.ravel(), v.ravel())).T
# the new x and y coordinates for the grid, which will correspond to the
# columns and rows of u and v respectively
xi = np.linspace(lon.min(), lon.max(), nx)
yi = np.linspace(lat.min(), lat.max(), ny)
# an (nx * ny, 2) array of x,y coordinates to interpolate at
ipts = np.vstack([a.ravel() for a in np.meshgrid(yi, xi)[::-1]]).T
# an (nx * ny, 2) array of interpolated u, v values
ivals = griddata(pts, vals, ipts, method='linear') # Only works with nearest
# reshape interpolated u,v values into (ny, nx) arrays
ui, vi = ivals.T
ui.shape = vi.shape = (ny, nx)
np.nanmax(yi)
# Initialize blank plot with a mercator projection
fig, ax = plt.subplots(
figsize=(22, 16),
subplot_kw=dict(projection=ccrs.Mercator())
)
norm = np.sqrt(ui**2 + vi**2)
norm_flat = norm.flatten()
start_points = np.array([xi.flatten(), yi.flatten()]).T
scale = .2/np.nanmax(norm)
for i in range(start_points.shape[0]):
plt.streamplot(xi, yi, ui, vi,
color='k',
start_points=np.array([start_points[i,:]]),
minlength=.95*norm_flat[i]*scale,
maxlength=1.0*norm_flat[i]*scale,
integration_direction='backward',
density=10,
arrowsize=0.0,
transform=ccrs.PlateCarree()
)
# Add map features to the axes
add_map_features(ax, extent)
# plt.quiver(xi, yi, ui/norm, vi/norm, scale=30, transform=ccrs.PlateCarree())
import matplotlib.pyplot as plt
import numpy as np
w = 3
Y, X = np.mgrid[-w:w:8j, -w:w:8j]
U = -Y
V = X
norm = np.sqrt(U**2 + V**2)
norm_flat = norm.flatten()
start_points = np.array([X.flatten(),Y.flatten()]).T
plt.clf()
scale = .2/np.max(norm)
plt.subplot(121)
plt.title('scaling only the length')
for i in range(start_points.shape[0]):
plt.streamplot(X,Y,U,V, color='k', start_points=np.array([start_points[i,:]]),minlength=.95*norm_flat[i]*scale, maxlength=1.0*norm_flat[i]*scale,
integration_direction='backward', density=10, arrowsize=0.0)
plt.quiver(X,Y,U/norm, V/norm,scale=30)
plt.axis('square')
plt.subplot(122)
plt.title('scaling length, arrowhead and linewidth')
for i in range(start_points.shape[0]):
plt.streamplot(X,Y,U,V, color='k', start_points=np.array([start_points[i,:]]),minlength=.95*norm_flat[i]*scale, maxlength=1.0*norm_flat[i]*scale,
integration_direction='backward', density=10, arrowsize=0.0, linewidth=.5*norm_flat[i])
plt.quiver(X,Y,U/np.max(norm), V/np.max(norm),scale=30)
plt.axis('square')
"""
Streamline plotting for 2D vector fields.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
from scipy.interpolate import interp1d
import numpy as np
import matplotlib
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import matplotlib.collections as mcollections
import matplotlib.lines as mlines
import matplotlib.patches as patches
def velovect(axes, x, y, u, v, linewidth=None, color=None,
cmap=None, norm=None, arrowsize=1, arrowstyle='-|>',
transform=None, zorder=None, start_points=None,
scale=1.0, grains=15):
"""Draws streamlines of a vector flow.
*x*, *y* : 1d arrays
an *evenly spaced* grid.
*u*, *v* : 2d arrays
x and y-velocities. Number of rows should match length of y, and
the number of columns should match x.
    *grains* : int
        Number of seed points per axis. When *start_points* is not given,
        a *grains* x *grains* grid of starting points is generated to seed
        the streamlines.
*linewidth* : numeric or 2d array
vary linewidth when given a 2d array with the same shape as velocities.
*color* : matplotlib color code, or 2d array
Streamline color. When given an array with the same shape as
velocities, *color* values are converted to colors using *cmap*.
*cmap* : :class:`~matplotlib.colors.Colormap`
Colormap used to plot streamlines and arrows. Only necessary when using
an array input for *color*.
*norm* : :class:`~matplotlib.colors.Normalize`
Normalize object used to scale luminance data to 0, 1. If None, stretch
(min, max) to (0, 1). Only necessary when *color* is an array.
*arrowsize* : float
Factor scale arrow size.
*arrowstyle* : str
Arrow style specification.
See :class:`~matplotlib.patches.FancyArrowPatch`.
*minlength* : float
Minimum length of streamline in axes coordinates.
*start_points*: Nx2 array
Coordinates of starting points for the streamlines.
In data coordinates, the same as the ``x`` and ``y`` arrays.
*zorder* : int
any number
*scale* : float
Maximum length of streamline in axes coordinates.
Returns:
*stream_container* : StreamplotSet
Container object with attributes
- lines: `matplotlib.collections.LineCollection` of streamlines
- arrows: collection of `matplotlib.patches.FancyArrowPatch`
objects representing arrows half-way along stream
lines.
This container will probably change in the future to allow changes
to the colormap, alpha, etc. for both lines and arrows, but these
changes should be backward compatible.
"""
grid = Grid(x, y)
mask = StreamMask(10)
dmap = DomainMap(grid, mask)
if zorder is None:
zorder = mlines.Line2D.zorder
# default to data coordinates
if transform is None:
transform = axes.transData
if color is None:
color = axes._get_lines.get_next_color()
if linewidth is None:
linewidth = matplotlib.rcParams['lines.linewidth']
line_kw = {}
arrow_kw = dict(arrowstyle=arrowstyle, mutation_scale=10 * arrowsize)
use_multicolor_lines = isinstance(color, np.ndarray)
if use_multicolor_lines:
if color.shape != grid.shape:
raise ValueError(
"If 'color' is given, must have the shape of 'Grid(x,y)'")
line_colors = []
color = np.ma.masked_invalid(color)
else:
line_kw['color'] = color
arrow_kw['color'] = color
if isinstance(linewidth, np.ndarray):
if linewidth.shape != grid.shape:
raise ValueError(
"If 'linewidth' is given, must have the shape of 'Grid(x,y)'")
line_kw['linewidth'] = []
else:
line_kw['linewidth'] = linewidth
arrow_kw['linewidth'] = linewidth
line_kw['zorder'] = zorder
arrow_kw['zorder'] = zorder
## Sanity checks.
if u.shape != grid.shape or v.shape != grid.shape:
raise ValueError("'u' and 'v' must be of shape 'Grid(x,y)'")
u = np.ma.masked_invalid(u)
v = np.ma.masked_invalid(v)
magnitude = np.sqrt(u**2 + v**2)
magnitude/=np.max(magnitude)
resolution = scale/grains
minlength = .9*resolution
integrate = get_integrator(u, v, dmap, minlength, resolution, magnitude)
trajectories = []
edges = []
if start_points is None:
start_points=_gen_starting_points(x,y,grains)
sp2 = np.asanyarray(start_points, dtype=float).copy()
# Check if start_points are outside the data boundaries
for xs, ys in sp2:
if not (grid.x_origin <= xs <= grid.x_origin + grid.width
and grid.y_origin <= ys <= grid.y_origin + grid.height):
raise ValueError("Starting point ({}, {}) outside of data "
"boundaries".format(xs, ys))
# Convert start_points from data to array coords
# Shift the seed points from the bottom left of the data so that
# data2grid works properly.
sp2[:, 0] -= grid.x_origin
sp2[:, 1] -= grid.y_origin
for xs, ys in sp2:
xg, yg = dmap.data2grid(xs, ys)
t = integrate(xg, yg)
if t is not None:
trajectories.append(t[0])
edges.append(t[1])
if use_multicolor_lines:
if norm is None:
norm = mcolors.Normalize(color.min(), color.max())
if cmap is None:
cmap = cm.get_cmap(matplotlib.rcParams['image.cmap'])
else:
cmap = cm.get_cmap(cmap)
streamlines = []
arrows = []
for t, edge in zip(trajectories,edges):
tgx = np.array(t[0])
tgy = np.array(t[1])
# Rescale from grid-coordinates to data-coordinates.
tx, ty = dmap.grid2data(*np.array(t))
tx += grid.x_origin
ty += grid.y_origin
points = np.transpose([tx, ty]).reshape(-1, 1, 2)
streamlines.extend(np.hstack([points[:-1], points[1:]]))
        # Add an arrow at the end of each trajectory.
s = np.cumsum(np.sqrt(np.diff(tx) ** 2 + np.diff(ty) ** 2))
n = np.searchsorted(s, s[-1])
arrow_tail = (tx[n], ty[n])
arrow_head = (np.mean(tx[n:n + 2]), np.mean(ty[n:n + 2]))
if isinstance(linewidth, np.ndarray):
line_widths = interpgrid(linewidth, tgx, tgy)[:-1]
line_kw['linewidth'].extend(line_widths)
arrow_kw['linewidth'] = line_widths[n]
if use_multicolor_lines:
color_values = interpgrid(color, tgx, tgy)[:-1]
line_colors.append(color_values)
arrow_kw['color'] = cmap(norm(color_values[n]))
if not edge:
p = patches.FancyArrowPatch(
arrow_tail, arrow_head, transform=transform, **arrow_kw)
else:
continue
ds = np.sqrt((arrow_tail[0]-arrow_head[0])**2+(arrow_tail[1]-arrow_head[1])**2)
if ds<1e-15: continue #remove vanishingly short arrows that cause Patch to fail
axes.add_patch(p)
arrows.append(p)
lc = mcollections.LineCollection(
streamlines, transform=transform, **line_kw)
lc.sticky_edges.x[:] = [grid.x_origin, grid.x_origin + grid.width]
lc.sticky_edges.y[:] = [grid.y_origin, grid.y_origin + grid.height]
if use_multicolor_lines:
lc.set_array(np.ma.hstack(line_colors))
lc.set_cmap(cmap)
lc.set_norm(norm)
axes.add_collection(lc)
axes.autoscale_view()
ac = matplotlib.collections.PatchCollection(arrows)
stream_container = StreamplotSet(lc, ac)
return stream_container
class StreamplotSet(object):
def __init__(self, lines, arrows, **kwargs):
self.lines = lines
self.arrows = arrows
# Coordinate definitions
# ========================
class DomainMap(object):
"""Map representing different coordinate systems.
Coordinate definitions:
* axes-coordinates goes from 0 to 1 in the domain.
* data-coordinates are specified by the input x-y coordinates.
* grid-coordinates goes from 0 to N and 0 to M for an N x M grid,
where N and M match the shape of the input data.
* mask-coordinates goes from 0 to N and 0 to M for an N x M mask,
where N and M are user-specified to control the density of streamlines.
This class also has methods for adding trajectories to the StreamMask.
Before adding a trajectory, run `start_trajectory` to keep track of regions
crossed by a given trajectory. Later, if you decide the trajectory is bad
(e.g., if the trajectory is very short) just call `undo_trajectory`.
"""
def __init__(self, grid, mask):
self.grid = grid
self.mask = mask
# Constants for conversion between grid- and mask-coordinates
self.x_grid2mask = (mask.nx - 1) / grid.nx
self.y_grid2mask = (mask.ny - 1) / grid.ny
self.x_mask2grid = 1. / self.x_grid2mask
self.y_mask2grid = 1. / self.y_grid2mask
self.x_data2grid = 1. / grid.dx
self.y_data2grid = 1. / grid.dy
def grid2mask(self, xi, yi):
"""Return nearest space in mask-coords from given grid-coords."""
return (int((xi * self.x_grid2mask) + 0.5),
int((yi * self.y_grid2mask) + 0.5))
def mask2grid(self, xm, ym):
return xm * self.x_mask2grid, ym * self.y_mask2grid
def data2grid(self, xd, yd):
return xd * self.x_data2grid, yd * self.y_data2grid
def grid2data(self, xg, yg):
return xg / self.x_data2grid, yg / self.y_data2grid
def start_trajectory(self, xg, yg):
xm, ym = self.grid2mask(xg, yg)
self.mask._start_trajectory(xm, ym)
def reset_start_point(self, xg, yg):
xm, ym = self.grid2mask(xg, yg)
self.mask._current_xy = (xm, ym)
def update_trajectory(self, xg, yg):
xm, ym = self.grid2mask(xg, yg)
#self.mask._update_trajectory(xm, ym)
def undo_trajectory(self):
self.mask._undo_trajectory()
class Grid(object):
"""Grid of data."""
def __init__(self, x, y):
if x.ndim == 1:
pass
elif x.ndim == 2:
x_row = x[0, :]
if not np.allclose(x_row, x):
raise ValueError("The rows of 'x' must be equal")
x = x_row
else:
raise ValueError("'x' can have at maximum 2 dimensions")
if y.ndim == 1:
pass
elif y.ndim == 2:
y_col = y[:, 0]
if not np.allclose(y_col, y.T):
raise ValueError("The columns of 'y' must be equal")
y = y_col
else:
raise ValueError("'y' can have at maximum 2 dimensions")
self.nx = len(x)
self.ny = len(y)
self.dx = x[1] - x[0]
self.dy = y[1] - y[0]
self.x_origin = x[0]
self.y_origin = y[0]
self.width = x[-1] - x[0]
self.height = y[-1] - y[0]
@property
def shape(self):
return self.ny, self.nx
def within_grid(self, xi, yi):
"""Return True if point is a valid index of grid."""
# Note that xi/yi can be floats; so, for example, we can't simply check
# `xi < self.nx` since `xi` can be `self.nx - 1 < xi < self.nx`
return xi >= 0 and xi <= self.nx - 1 and yi >= 0 and yi <= self.ny - 1
class StreamMask(object):
"""Mask to keep track of discrete regions crossed by streamlines.
The resolution of this grid determines the approximate spacing between
trajectories. Streamlines are only allowed to pass through zeroed cells:
When a streamline enters a cell, that cell is set to 1, and no new
streamlines are allowed to enter.
"""
def __init__(self, density):
if np.isscalar(density):
if density <= 0:
raise ValueError("If a scalar, 'density' must be positive")
self.nx = self.ny = int(30 * density)
else:
if len(density) != 2:
raise ValueError("'density' can have at maximum 2 dimensions")
self.nx = int(30 * density[0])
self.ny = int(30 * density[1])
self._mask = np.zeros((self.ny, self.nx))
self.shape = self._mask.shape
self._current_xy = None
def __getitem__(self, *args):
return self._mask.__getitem__(*args)
def _start_trajectory(self, xm, ym):
"""Start recording streamline trajectory"""
self._traj = []
self._update_trajectory(xm, ym)
def _undo_trajectory(self):
"""Remove current trajectory from mask"""
for t in self._traj:
self._mask.__setitem__(t, 0)
def _update_trajectory(self, xm, ym):
"""Update current trajectory position in mask.
If the new position has already been filled, raise `InvalidIndexError`.
"""
#if self._current_xy != (xm, ym):
# if self[ym, xm] == 0:
self._traj.append((ym, xm))
self._mask[ym, xm] = 1
self._current_xy = (xm, ym)
# else:
# raise InvalidIndexError
# Integrator definitions
#========================
def get_integrator(u, v, dmap, minlength, resolution, magnitude):
# rescale velocity onto grid-coordinates for integrations.
u, v = dmap.data2grid(u, v)
# speed (path length) will be in axes-coordinates
u_ax = u / dmap.grid.nx
v_ax = v / dmap.grid.ny
speed = np.ma.sqrt(u_ax ** 2 + v_ax ** 2)
def forward_time(xi, yi):
ds_dt = interpgrid(speed, xi, yi)
if ds_dt == 0:
raise TerminateTrajectory()
dt_ds = 1. / ds_dt
ui = interpgrid(u, xi, yi)
vi = interpgrid(v, xi, yi)
return ui * dt_ds, vi * dt_ds
def integrate(x0, y0):
"""Return x, y grid-coordinates of trajectory based on starting point.
Integrate both forward and backward in time from starting point in
grid coordinates.
Integration is terminated when a trajectory reaches a domain boundary
or when it crosses into an already occupied cell in the StreamMask. The
resulting trajectory is None if it is shorter than `minlength`.
"""
stotal, x_traj, y_traj = 0., [], []
dmap.start_trajectory(x0, y0)
dmap.reset_start_point(x0, y0)
stotal, x_traj, y_traj, m_total, hit_edge = _integrate_rk12(x0, y0, dmap, forward_time, resolution, magnitude)
if len(x_traj)>1:
return (x_traj, y_traj), hit_edge
else: # reject short trajectories
dmap.undo_trajectory()
return None
return integrate
def _integrate_rk12(x0, y0, dmap, f, resolution, magnitude):
"""2nd-order Runge-Kutta algorithm with adaptive step size.
This method is also referred to as the improved Euler's method, or Heun's
method. This method is favored over higher-order methods because:
1. To get decent looking trajectories and to sample every mask cell
on the trajectory we need a small timestep, so a lower order
solver doesn't hurt us unless the data is *very* high resolution.
In fact, for cases where the user inputs
data smaller or of similar grid size to the mask grid, the higher
order corrections are negligible because of the very fast linear
interpolation used in `interpgrid`.
2. For high resolution input data (i.e. beyond the mask
resolution), we must reduce the timestep. Therefore, an adaptive
timestep is more suited to the problem as this would be very hard
to judge automatically otherwise.
This integrator is about 1.5 - 2x as fast as both the RK4 and RK45
solvers in most setups on my machine. I would recommend removing the
other two to keep things simple.
"""
# This error is below that needed to match the RK4 integrator. It
# is set for visual reasons -- too low and corners start
# appearing ugly and jagged. Can be tuned.
maxerror = 0.003
# This limit is important (for all integrators) to avoid the
# trajectory skipping some mask cells. We could relax this
# condition if we use the code which is commented out below to
# increment the location gradually. However, due to the efficient
# nature of the interpolation, this doesn't boost speed by much
# for quite a bit of complexity.
maxds = min(1. / dmap.mask.nx, 1. / dmap.mask.ny, 0.1)
ds = maxds
stotal = 0
xi = x0
yi = y0
xf_traj = []
yf_traj = []
m_total = []
hit_edge = False
while dmap.grid.within_grid(xi, yi):
xf_traj.append(xi)
yf_traj.append(yi)
m_total.append(interpgrid(magnitude, xi, yi))
try:
k1x, k1y = f(xi, yi)
k2x, k2y = f(xi + ds * k1x,
yi + ds * k1y)
except IndexError:
# Out of the domain on one of the intermediate integration steps.
# Take an Euler step to the boundary to improve neatness.
ds, xf_traj, yf_traj = _euler_step(xf_traj, yf_traj, dmap, f)
stotal += ds
hit_edge = True
break
except TerminateTrajectory:
break
dx1 = ds * k1x
dy1 = ds * k1y
dx2 = ds * 0.5 * (k1x + k2x)
dy2 = ds * 0.5 * (k1y + k2y)
nx, ny = dmap.grid.shape
# Error is normalized to the axes coordinates
error = np.sqrt(((dx2 - dx1) / nx) ** 2 + ((dy2 - dy1) / ny) ** 2)
# Only save step if within error tolerance
if error < maxerror:
xi += dx2
yi += dy2
dmap.update_trajectory(xi, yi)
if not dmap.grid.within_grid(xi, yi):
hit_edge=True
if (stotal + ds) > resolution*np.mean(m_total):
break
stotal += ds
# recalculate stepsize based on step error
if error == 0:
ds = maxds
else:
ds = min(maxds, 0.85 * ds * (maxerror / error) ** 0.5)
return stotal, xf_traj, yf_traj, m_total, hit_edge
def _euler_step(xf_traj, yf_traj, dmap, f):
"""Simple Euler integration step that extends streamline to boundary."""
ny, nx = dmap.grid.shape
xi = xf_traj[-1]
yi = yf_traj[-1]
cx, cy = f(xi, yi)
if cx == 0:
dsx = np.inf
elif cx < 0:
dsx = xi / -cx
else:
dsx = (nx - 1 - xi) / cx
if cy == 0:
dsy = np.inf
elif cy < 0:
dsy = yi / -cy
else:
dsy = (ny - 1 - yi) / cy
ds = min(dsx, dsy)
xf_traj.append(xi + cx * ds)
yf_traj.append(yi + cy * ds)
return ds, xf_traj, yf_traj
# Utility functions
# ========================
def interpgrid(a, xi, yi):
"""Fast 2D, linear interpolation on an integer grid"""
Ny, Nx = np.shape(a)
if isinstance(xi, np.ndarray):
x = xi.astype(int)
y = yi.astype(int)
# Check that xn, yn don't exceed max index
xn = np.clip(x + 1, 0, Nx - 1)
yn = np.clip(y + 1, 0, Ny - 1)
else:
x = int(xi)
y = int(yi)
# conditional is faster than clipping for integers
if x == (Nx - 2):
xn = x
else:
xn = x + 1
if y == (Ny - 2):
yn = y
else:
yn = y + 1
a00 = a[y, x]
a01 = a[y, xn]
a10 = a[yn, x]
a11 = a[yn, xn]
xt = xi - x
yt = yi - y
a0 = a00 * (1 - xt) + a01 * xt
a1 = a10 * (1 - xt) + a11 * xt
ai = a0 * (1 - yt) + a1 * yt
if not isinstance(xi, np.ndarray):
if np.ma.is_masked(ai):
raise TerminateTrajectory
return ai
def _gen_starting_points(x,y,grains):
eps = np.finfo(np.float32).eps
tmp_x = np.linspace(x.min()+eps, x.max()-eps, grains)
tmp_y = np.linspace(y.min()+eps, y.max()-eps, grains)
xs = np.tile(tmp_x, grains)
ys = np.repeat(tmp_y, grains)
seed_points = np.array([list(xs), list(ys)])
return seed_points.T
f, ax = plt.subplots(figsize=(15,4))
grains = 15
tmp = np.linspace(-3, 3, grains)
xs = np.tile(tmp, grains)
ys = np.repeat(tmp, grains)
seed_points = np.array([list(xs), list(ys)])
scale=2.
velovect(ax, xi, yi, ui, vi, arrowstyle='fancy', scale = 1.5, grains = 15, color='k')
# cs = ax.contourf(xi,yi, W, cmap=plt.cm.viridis, alpha=0.5, zorder=-1)
# ax1.set_title("Quiver")
# ax2.set_title("Streamplot")
# ax3.set_title("Curved quivers")
# plt.colorbar(cs, ax=[ax1,ax2,ax3])
plt.show()
```
| github_jupyter |
# Amazon SageMaker Object Detection for Bird Species
1. [Introduction](#Introduction)
2. [Setup](#Setup)
3. [Data Preparation](#Data-Preparation)
1. [Download and unpack the dataset](#Download-and-unpack-the-dataset)
2. [Understand the dataset](#Understand-the-dataset)
3. [Generate RecordIO files](#Generate-RecordIO-files)
4. [Train the model](#Train-the-model)
5. [Host the model](#Host-the-model)
6. [Test the model](#Test-the-model)
7. [Clean up](#Clean-up)
8. [Improve the model](#Improve-the-model)
9. [Final cleanup](#Final-cleanup)
## Introduction
Object detection is the process of identifying and localizing objects in an image. A typical object detection solution takes an image as input and provides a bounding box on the image where an object of interest is found. It also identifies what type of object the box encapsulates. To create such a solution, we need to acquire and process a training dataset, then create and configure a training job so that the algorithm can learn from the dataset. Finally, we can host the trained model at an endpoint, to which we can supply images.
This notebook is an end-to-end example showing how the Amazon SageMaker Object Detection algorithm can be used with a publicly available dataset of bird images. We demonstrate how to train and to host an object detection model based on the [Caltech Birds (CUB 200 2011)](http://www.vision.caltech.edu/visipedia/CUB-200-2011.html) dataset. Amazon SageMaker's object detection algorithm uses the Single Shot multibox Detector ([SSD](https://arxiv.org/abs/1512.02325)) algorithm, and this notebook uses a [ResNet](https://arxiv.org/pdf/1603.05027.pdf) base network with that algorithm.

We will also demonstrate how to construct a training dataset using the RecordIO format, as this is the format that the training job consumes. This notebook is similar to the [Object Detection using the RecordIO format](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/object_detection_pascalvoc_coco/object_detection_recordio_format.ipynb) notebook, with the following key differences:
- We provide an example of how to translate bounding box specifications when providing images to SageMaker's algorithm. You will see code for generating the train.lst and val.lst files used to create [recordIO](https://mxnet.incubator.apache.org/architecture/note_data_loading.html) files.
- We demonstrate how to improve an object detection model by adding training images that are flipped horizontally (mirror images).
- We give you a notebook for experimenting with object detection challenges with an order of magnitude more classes (200 bird species, as opposed to the 20 categories used by [Pascal VOC](http://host.robots.ox.ac.uk/pascal/VOC/)).
- We show how to chart the accuracy improvements that occur across the epochs of the training job.
Note that Amazon SageMaker Object Detection also allows training with the image and JSON format, which is illustrated in the [image and JSON Notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/object_detection_pascalvoc_coco/object_detection_image_json_format.ipynb).
## Setup
Before preparing the data, there are some initial steps required for setup.
This notebook requires two additional Python packages:
* **OpenCV** is required for gathering image sizes and flipping of images horizontally.
* The **MXNet** runtime is required for using the im2rec tool.
```
import sys
!{sys.executable} -m pip install opencv-python
!{sys.executable} -m pip install mxnet
```
We need to identify the S3 bucket that you want to use for providing training and validation datasets. It will also be used to store the trained model artifacts. In this notebook, we use a custom bucket. You could alternatively use a default bucket for the session. We use an object prefix to help organize the bucket content.
```
bucket = "<your_s3_bucket_name_here>" # custom bucket name.
prefix = "DEMO-ObjectDetection-birds"
```
To train the Object Detection algorithm on Amazon SageMaker, we need to setup and authenticate the use of AWS services. To begin with, we need an AWS account role with SageMaker access. Here we will use the execution role the current notebook instance was given when it was created. This role has necessary permissions, including access to your data in S3.
```
import sagemaker
from sagemaker import get_execution_role
role = get_execution_role()
print(role)
sess = sagemaker.Session()
```
# Data Preparation
The [Caltech Birds (CUB 200 2011)](http://www.vision.caltech.edu/visipedia/CUB-200-2011.html) dataset contains 11,788 images across 200 bird species (the original technical report can be found [here](http://www.vision.caltech.edu/visipedia/papers/CUB_200_2011.pdf)). Each species comes with around 60 images, with a typical size of about 350 pixels by 500 pixels. Bounding boxes are provided, as are annotations of bird parts. A recommended train/test split is given, but image size data is not.

The dataset can be downloaded [here](http://www.vision.caltech.edu/visipedia/CUB-200-2011.html).
## Download and unpack the dataset
Here we download the birds dataset from CalTech.
```
import os
import urllib.request
def download(url):
filename = url.split("/")[-1]
if not os.path.exists(filename):
urllib.request.urlretrieve(url, filename)
%%time
# download('http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz')
# CalTech's download is (at least temporarily) unavailable since August 2020.
# Can now use one made available by fast.ai .
download("https://s3.amazonaws.com/fast-ai-imageclas/CUB_200_2011.tgz")
```
Now we unpack the dataset into its own directory structure.
```
%%time
# Clean up prior version of the downloaded dataset if you are running this again
!rm -rf CUB_200_2011
# Unpack and then remove the downloaded compressed tar file
!gunzip -c ./CUB_200_2011.tgz | tar xopf -
!rm CUB_200_2011.tgz
```
## Understand the dataset
## Set some parameters for the rest of the notebook to use
Here we define a few parameters that help drive the rest of the notebook. For example, `SAMPLE_ONLY` defaults to `True`, which forces the notebook to train on only a handful of species. Setting it to `False` makes the notebook work with the entire dataset of 200 bird species, which is a more difficult challenge and requires many more epochs of training to complete.
The file parameters define names and locations of metadata files for the dataset.
```
import pandas as pd
import cv2
import boto3
import json
runtime = boto3.client(service_name="runtime.sagemaker")
import matplotlib.pyplot as plt
%matplotlib inline
RANDOM_SPLIT = False
SAMPLE_ONLY = True
FLIP = False
# To speed up training and experimenting, you can use a small handful of species.
# To see the full list of the classes available, look at the content of CLASSES_FILE.
CLASSES = [17, 36, 47, 68, 73]
# Otherwise, you can use the full set of species
if not SAMPLE_ONLY:
CLASSES = []
for c in range(200):
CLASSES += [c + 1]
RESIZE_SIZE = 256
BASE_DIR = "CUB_200_2011/"
IMAGES_DIR = BASE_DIR + "images/"
CLASSES_FILE = BASE_DIR + "classes.txt"
BBOX_FILE = BASE_DIR + "bounding_boxes.txt"
IMAGE_FILE = BASE_DIR + "images.txt"
LABEL_FILE = BASE_DIR + "image_class_labels.txt"
SIZE_FILE = BASE_DIR + "sizes.txt"
SPLIT_FILE = BASE_DIR + "train_test_split.txt"
TRAIN_LST_FILE = "birds_ssd_train.lst"
VAL_LST_FILE = "birds_ssd_val.lst"
if SAMPLE_ONLY:
TRAIN_LST_FILE = "birds_ssd_sample_train.lst"
VAL_LST_FILE = "birds_ssd_sample_val.lst"
TRAIN_RATIO = 0.8
CLASS_COLS = ["class_number", "class_id"]
IM2REC_SSD_COLS = [
"header_cols",
"label_width",
"zero_based_id",
"xmin",
"ymin",
"xmax",
"ymax",
"image_file_name",
]
```
## Explore the dataset images
For each species, there are dozens of images of various shapes and sizes. By dividing the entire dataset into individual named (numbered) folders, the images are in effect labelled for supervised learning using image classification and object detection algorithms.
The following function displays a grid of thumbnail images for all the image files for a given species.
```
def show_species(species_id):
_im_list = !ls $IMAGES_DIR/$species_id
NUM_COLS = 6
IM_COUNT = len(_im_list)
print('Species ' + species_id + ' has ' + str(IM_COUNT) + ' images.')
NUM_ROWS = int(IM_COUNT / NUM_COLS)
if ((IM_COUNT % NUM_COLS) > 0):
NUM_ROWS += 1
fig, axarr = plt.subplots(NUM_ROWS, NUM_COLS)
fig.set_size_inches(8.0, 16.0, forward=True)
curr_row = 0
for curr_img in range(IM_COUNT):
# fetch the url as a file type object, then read the image
f = IMAGES_DIR + species_id + '/' + _im_list[curr_img]
a = plt.imread(f)
# find the position within the current column by taking the index modulo the number of rows
col = curr_img % NUM_ROWS
# plot on relevant subplot
axarr[col, curr_row].imshow(a)
if col == (NUM_ROWS - 1):
# we have finished the current row, so increment row counter
curr_row += 1
fig.tight_layout()
plt.show()
# Clean up
plt.clf()
plt.cla()
plt.close()
```
Show the list of bird species or dataset classes.
```
classes_df = pd.read_csv(CLASSES_FILE, sep=" ", names=CLASS_COLS, header=None)
criteria = classes_df["class_number"].isin(CLASSES)
classes_df = classes_df[criteria]
print(classes_df.to_csv(columns=["class_id"], sep="\t", index=False, header=False))
```
Now for any given species, display thumbnail images of each of the images provided for training and testing.
```
show_species("017.Cardinal")
```
## Generate RecordIO files
## Step 1. Gather image sizes
For this particular dataset, bounding box annotations are specified in absolute terms. RecordIO format requires them to be defined in terms relative to the image size. The following code visits each image, extracts the height and width, and saves this information into a file for subsequent use. Some other publicly available datasets provide such a file for exactly this purpose.
```
%%time
SIZE_COLS = ["idx", "width", "height"]
def gen_image_size_file():
print("Generating a file containing image sizes...")
images_df = pd.read_csv(
IMAGE_FILE, sep=" ", names=["image_pretty_name", "image_file_name"], header=None
)
rows_list = []
idx = 0
for i in images_df["image_file_name"]:
# TODO: add progress bar
idx += 1
img = cv2.imread(IMAGES_DIR + i)
dimensions = img.shape
height = img.shape[0]
width = img.shape[1]
image_dict = {"idx": idx, "width": width, "height": height}
rows_list.append(image_dict)
sizes_df = pd.DataFrame(rows_list)
print("Image sizes:\n" + str(sizes_df.head()))
sizes_df[SIZE_COLS].to_csv(SIZE_FILE, sep=" ", index=False, header=None)
gen_image_size_file()
```
## Step 2. Generate list files for producing RecordIO files
[RecordIO](https://mxnet.incubator.apache.org/architecture/note_data_loading.html) files can be created using the [im2rec tool](https://mxnet.incubator.apache.org/faq/recordio.html) (images to RecordIO), which takes as input a pair of list files, one for training images and the other for validation images. Each list file has one row for each image. For object detection, each row must contain bounding box data and a class label.
For the CalTech birds dataset, we need to convert absolute bounding box dimensions to relative dimensions based on image size. We also need to adjust the class IDs to be zero-based (instead of 1 to 200, they need to be 0 to 199). This dataset comes with recommended train/test split information (the "is_training_image" flag). This notebook is built flexibly to either leverage this suggestion, or to create a random train/test split with a specific train/test ratio. The `RANDOM_SPLIT` variable defined earlier controls whether or not the split happens randomly.
```
def split_to_train_test(df, label_column, train_frac=0.8):
train_df, test_df = pd.DataFrame(), pd.DataFrame()
labels = df[label_column].unique()
for lbl in labels:
lbl_df = df[df[label_column] == lbl]
lbl_train_df = lbl_df.sample(frac=train_frac)
lbl_test_df = lbl_df.drop(lbl_train_df.index)
print(
"\n{}:\n---------\ntotal:{}\ntrain_df:{}\ntest_df:{}".format(
lbl, len(lbl_df), len(lbl_train_df), len(lbl_test_df)
)
)
train_df = train_df.append(lbl_train_df)
test_df = test_df.append(lbl_test_df)
return train_df, test_df
def gen_list_files():
# use generated sizes file
sizes_df = pd.read_csv(
SIZE_FILE, sep=" ", names=["image_pretty_name", "width", "height"], header=None
)
bboxes_df = pd.read_csv(
BBOX_FILE,
sep=" ",
names=["image_pretty_name", "x_abs", "y_abs", "bbox_width", "bbox_height"],
header=None,
)
split_df = pd.read_csv(
SPLIT_FILE, sep=" ", names=["image_pretty_name", "is_training_image"], header=None
)
print(IMAGE_FILE)
images_df = pd.read_csv(
IMAGE_FILE, sep=" ", names=["image_pretty_name", "image_file_name"], header=None
)
print("num images total: " + str(images_df.shape[0]))
image_class_labels_df = pd.read_csv(
LABEL_FILE, sep=" ", names=["image_pretty_name", "class_id"], header=None
)
# Merge the metadata into a single flat dataframe for easier processing
full_df = pd.DataFrame(images_df)
full_df.reset_index(inplace=True)
full_df = pd.merge(full_df, image_class_labels_df, on="image_pretty_name")
full_df = pd.merge(full_df, sizes_df, on="image_pretty_name")
full_df = pd.merge(full_df, bboxes_df, on="image_pretty_name")
full_df = pd.merge(full_df, split_df, on="image_pretty_name")
full_df.sort_values(by=["index"], inplace=True)
# Define the bounding boxes in the format required by SageMaker's built in Object Detection algorithm.
# the xmin/ymin/xmax/ymax parameters are specified as ratios to the total image pixel size
full_df["header_cols"] = 2 # one col for the number of header cols, one for the label width
full_df["label_width"] = 5 # number of cols for each label: class, xmin, ymin, xmax, ymax
full_df["xmin"] = full_df["x_abs"] / full_df["width"]
full_df["xmax"] = (full_df["x_abs"] + full_df["bbox_width"]) / full_df["width"]
full_df["ymin"] = full_df["y_abs"] / full_df["height"]
full_df["ymax"] = (full_df["y_abs"] + full_df["bbox_height"]) / full_df["height"]
# object detection class id's must be zero based. map from
# class_id's given by CUB to zero-based (1 is 0, and 200 is 199).
if SAMPLE_ONLY:
# grab a small subset of species for testing
criteria = full_df["class_id"].isin(CLASSES)
full_df = full_df[criteria]
unique_classes = full_df["class_id"].drop_duplicates()
sorted_unique_classes = sorted(unique_classes)
id_to_zero = {}
i = 0.0
for c in sorted_unique_classes:
id_to_zero[c] = i
i += 1.0
full_df["zero_based_id"] = full_df["class_id"].map(id_to_zero)
full_df.reset_index(inplace=True)
# use 4 decimal places, as it seems to be required by the Object Detection algorithm
pd.set_option("display.precision", 4)
train_df = []
val_df = []
if RANDOM_SPLIT:
# split into training and validation sets
train_df, val_df = split_to_train_test(full_df, "class_id", TRAIN_RATIO)
train_df[IM2REC_SSD_COLS].to_csv(TRAIN_LST_FILE, sep="\t", float_format="%.4f", header=None)
val_df[IM2REC_SSD_COLS].to_csv(VAL_LST_FILE, sep="\t", float_format="%.4f", header=None)
else:
train_df = full_df[(full_df.is_training_image == 1)]
train_df[IM2REC_SSD_COLS].to_csv(TRAIN_LST_FILE, sep="\t", float_format="%.4f", header=None)
val_df = full_df[(full_df.is_training_image == 0)]
val_df[IM2REC_SSD_COLS].to_csv(VAL_LST_FILE, sep="\t", float_format="%.4f", header=None)
print("num train: " + str(train_df.shape[0]))
print("num val: " + str(val_df.shape[0]))
return train_df, val_df
train_df, val_df = gen_list_files()
```
Here we take a look at a few records from the training list file to understand better what is being fed to the RecordIO files.
The first column is the image number or index. The second column indicates that the label is made up of 2 columns (column 2 and column 3). The third column specifies the label width of a single object. In our case, the value 5 indicates each image has 5 numbers to describe its label information: the class index, and the 4 bounding box coordinates. If there are multiple objects within one image, all the label information should be listed in one line. Our dataset contains only one bounding box per image.
The fourth column is the class label. This identifies the bird species using a zero-based class id. The next four columns represent the bounding box for where the bird is found in this image.
The classes should be labeled with successive numbers and start with 0. The bounding box coordinates are ratios of its top-left (xmin, ymin) and bottom-right (xmax, ymax) corner indices to the overall image size. Note that the top-left corner of the entire image is the origin (0, 0). The last column specifies the relative path of the image file within the images directory.
```
!tail -3 $TRAIN_LST_FILE
```
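To make the layout concrete, here is how one such row could be assembled by hand for a hypothetical image. The index, class, box values, and file name below are made up for illustration only; the real rows are produced by `gen_list_files` above.
```
# Hypothetical example: image index 42, zero-based class 3, absolute box
# (x=60, y=50, width=200, height=150) inside a 500x375 pixel image.
index, zero_based_id = 42, 3.0
img_w, img_h = 500, 375
x_abs, y_abs, bbox_w, bbox_h = 60, 50, 200, 150

row = [
    index,
    2,                                   # header_cols: the header spans 2 columns
    5,                                   # label_width: class id + 4 box coordinates
    zero_based_id,
    round(x_abs / img_w, 4),             # xmin
    round(y_abs / img_h, 4),             # ymin
    round((x_abs + bbox_w) / img_w, 4),  # xmax
    round((y_abs + bbox_h) / img_h, 4),  # ymax
    "042.Hypothetical_Species/Hypothetical_Species_0001.jpg",
]
print("\t".join(str(c) for c in row))
```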
## Step 3. Convert data into RecordIO format
Now we create im2rec databases (.rec files) for training and validation based on the list files created earlier.
```
!python tools/im2rec.py --resize $RESIZE_SIZE --pack-label birds_ssd_sample $BASE_DIR/images/
```
## Step 4. Upload RecordIO files to S3
Upload the training and validation data to the S3 bucket. We do this in multiple channels. Channels are simply directories in the bucket that differentiate the types of data provided to the algorithm. For the object detection algorithm, we call these directories `train` and `validation`.
```
# Upload the RecordIO files to train and validation channels
train_channel = prefix + "/train"
validation_channel = prefix + "/validation"
sess.upload_data(path="birds_ssd_sample_train.rec", bucket=bucket, key_prefix=train_channel)
sess.upload_data(path="birds_ssd_sample_val.rec", bucket=bucket, key_prefix=validation_channel)
s3_train_data = "s3://{}/{}".format(bucket, train_channel)
s3_validation_data = "s3://{}/{}".format(bucket, validation_channel)
```
# Train the model
Next we define an output location in S3, where the model artifacts will be placed on completion of the training. These artifacts are the output of the algorithm's training job. We also get the URI to the Amazon SageMaker Object Detection docker image. This ensures the estimator uses the correct algorithm from the current region.
```
from sagemaker.amazon.amazon_estimator import get_image_uri
training_image = get_image_uri(sess.boto_region_name, "object-detection", repo_version="latest")
print(training_image)
s3_output_location = "s3://{}/{}/output".format(bucket, prefix)
od_model = sagemaker.estimator.Estimator(
training_image,
role,
train_instance_count=1,
train_instance_type="ml.p3.2xlarge",
train_volume_size=50,
train_max_run=360000,
input_mode="File",
output_path=s3_output_location,
sagemaker_session=sess,
)
```
## Define hyperparameters
The object detection algorithm at its core is the [Single-Shot Multi-Box detection algorithm (SSD)](https://arxiv.org/abs/1512.02325). This algorithm uses a `base_network`, which is typically a [VGG](https://arxiv.org/abs/1409.1556) or a [ResNet](https://arxiv.org/abs/1512.03385). The Amazon SageMaker object detection algorithm supports VGG-16 and ResNet-50. It also has a number of hyperparameters that help configure the training job. The next step in our training is to set up these hyperparameters and data channels for training the model. See the SageMaker Object Detection [documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/object-detection.html) for more details on its specific hyperparameters.
One of the hyperparameters here for example is `epochs`. This defines how many passes of the dataset we iterate over and drives the training time of the algorithm. Based on our tests, we can achieve 70% accuracy on a sample mix of 5 species with 100 epochs. When using the full 200 species, we can achieve 52% accuracy with 1,200 epochs.
Note that Amazon SageMaker also provides [Automatic Model Tuning](https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning.html). Automatic model tuning, also known as hyperparameter tuning, finds the best version of a model by running many training jobs on your dataset using the algorithm and ranges of hyperparameters that you specify. It then chooses the hyperparameter values that result in a model that performs the best, as measured by a metric that you choose. When [tuning an Object Detection](https://docs.aws.amazon.com/sagemaker/latest/dg/object-detection-tuning.html) algorithm for example, the tuning job could find the best `validation:mAP` score by trying out various values for certain hyperparameters such as `mini_batch_size`, `weight_decay`, and `momentum`.
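As a rough illustration of what such a tuning job could look like with the SageMaker Python SDK used in this notebook, here is a sketch that is not executed here. The hyperparameter ranges and job counts are assumptions chosen only for illustration, and it presumes the `od_model` estimator, its static hyperparameters, and the `data_channels` dictionary defined in the cells below.
```
# Sketch only (not run in this notebook): hyperparameter tuning for the object detection estimator.
# The ranges and job counts below are illustrative assumptions, not recommendations.
from sagemaker.tuner import HyperparameterTuner, ContinuousParameter

hyperparameter_ranges = {
    "learning_rate": ContinuousParameter(0.0001, 0.01),
    "momentum": ContinuousParameter(0.8, 0.99),
    "weight_decay": ContinuousParameter(0.0001, 0.001),
}

tuner = HyperparameterTuner(
    estimator=od_model,
    objective_metric_name="validation:mAP",
    objective_type="Maximize",
    hyperparameter_ranges=hyperparameter_ranges,
    max_jobs=8,
    max_parallel_jobs=2,
)

# tuner.fit(inputs=data_channels)
```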
```
def set_hyperparameters(num_epochs, lr_steps):
num_classes = classes_df.shape[0]
num_training_samples = train_df.shape[0]
print("num classes: {}, num training images: {}".format(num_classes, num_training_samples))
od_model.set_hyperparameters(
base_network="resnet-50",
use_pretrained_model=1,
num_classes=num_classes,
mini_batch_size=16,
epochs=num_epochs,
learning_rate=0.001,
lr_scheduler_step=lr_steps,
lr_scheduler_factor=0.1,
optimizer="sgd",
momentum=0.9,
weight_decay=0.0005,
overlap_threshold=0.5,
nms_threshold=0.45,
image_shape=512,
label_width=350,
num_training_samples=num_training_samples,
)
set_hyperparameters(100, "33,67")
```
Now that the hyperparameters are set up, we define the data channels to be passed to the algorithm. To do this, we need to create the `sagemaker.session.s3_input` objects from our data channels. These objects are then put in a simple dictionary, which the algorithm consumes. Note that you could add a third channel named `model` to perform incremental training (continue training from where you had left off with a prior model).
```
train_data = sagemaker.session.s3_input(
s3_train_data,
distribution="FullyReplicated",
content_type="application/x-recordio",
s3_data_type="S3Prefix",
)
validation_data = sagemaker.session.s3_input(
s3_validation_data,
distribution="FullyReplicated",
content_type="application/x-recordio",
s3_data_type="S3Prefix",
)
data_channels = {"train": train_data, "validation": validation_data}
```
## Submit training job
We have our `Estimator` object, we have set the hyperparameters for this object, and we have our data channels linked with the algorithm. The only remaining thing to do is to train the algorithm using the `fit` method. This will take more than 10 minutes in our example.
The training process involves a few steps. First, the instances that we requested while creating the `Estimator` classes are provisioned and setup with the appropriate libraries. Then, the data from our channels are downloaded into the instance. Once this is done, the actual training begins. The provisioning and data downloading will take time, depending on the size of the data. Therefore it might be a few minutes before our training job logs show up in CloudWatch. The logs will also print out Mean Average Precision (mAP) on the validation data, among other losses, for every run of the dataset (once per epoch). This metric is a proxy for the accuracy of the model.
Once the job has finished, a `Job complete` message will be printed. The trained model artifacts can be found in the S3 bucket that was set up as `output_path` in the estimator.
```
%%time
od_model.fit(inputs=data_channels, logs=True)
```
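As a quick check once training has completed, the estimator exposes the S3 location of the packaged model artifacts:
```
# Print the S3 URI of the trained model artifacts (model.tar.gz)
print(od_model.model_data)
```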
Now that the training job is complete, you can also see the job listed in the `Training jobs` section of your SageMaker console. Note that the job name is uniquely identified by the name of the algorithm concatenated with the date and time stamp. You can click on the job to see the details including the hyperparameters, the data channel definitions, and the full path to the resulting model artifacts. You could even clone the job from the console, and tweak some of the parameters to generate a new training job.
Without having to go to the CloudWatch console, you can see how the job progressed in terms of the key object detection algorithm metric, mean average precision (mAP). The function below prepares a simple chart of that metric against the epochs.
```
import boto3
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
%matplotlib inline
client = boto3.client("logs")
BASE_LOG_NAME = "/aws/sagemaker/TrainingJobs"
def plot_object_detection_log(model, title):
logs = client.describe_log_streams(
logGroupName=BASE_LOG_NAME, logStreamNamePrefix=model._current_job_name
)
cw_log = client.get_log_events(
logGroupName=BASE_LOG_NAME, logStreamName=logs["logStreams"][0]["logStreamName"]
)
mAP_accs = []
for e in cw_log["events"]:
msg = e["message"]
if "validation mAP <score>=" in msg:
num_start = msg.find("(")
num_end = msg.find(")")
mAP = msg[num_start + 1 : num_end]
mAP_accs.append(float(mAP))
print(title)
print("Maximum mAP: %f " % max(mAP_accs))
fig, ax = plt.subplots()
plt.xlabel("Epochs")
plt.ylabel("Mean Avg Precision (mAP)")
(val_plot,) = ax.plot(range(len(mAP_accs)), mAP_accs, label="mAP")
plt.legend(handles=[val_plot])
ax.yaxis.set_ticks(np.arange(0.0, 1.05, 0.1))
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter("%0.2f"))
plt.show()
plot_object_detection_log(od_model, "mAP tracking for job: " + od_model._current_job_name)
```
# Host the model
Once the training is done, we can deploy the trained model as an Amazon SageMaker real-time hosted endpoint. This lets us make predictions (or inferences) from the model. Note that we don't have to host using the same type of instance that we used to train. Training is a prolonged, compute-heavy job with compute and memory requirements that hosting typically does not share. In our case we chose the `ml.p3.2xlarge` instance to train, but we choose to host the model on the less expensive CPU instance, `ml.m4.xlarge`. The endpoint deployment takes several minutes, and can be accomplished with a single line of code calling the `deploy` method.
Note that some use cases require large sets of inferences on a predefined body of images. In those cases, you do not need to make the inferences in real time. Instead, you could use SageMaker's [batch transform jobs](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-batch.html).
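For completeness, a batch transform job could be set up along these lines. This is only a sketch and is not executed in this notebook; the batch input and output S3 prefixes are assumptions.
```
# Sketch only (not run here): batch inference instead of a real-time endpoint.
# The S3 prefixes below are illustrative assumptions.
transformer = od_model.transformer(
    instance_count=1,
    instance_type="ml.m4.xlarge",
    output_path="s3://{}/{}/batch-output".format(bucket, prefix),
)

# transformer.transform(
#     data="s3://{}/{}/batch-input".format(bucket, prefix),
#     content_type="image/jpeg",
# )
# transformer.wait()
```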
```
%%time
object_detector = od_model.deploy(initial_instance_count=1, instance_type="ml.m4.xlarge")
```
# Test the model
Now that the trained model is deployed at an endpoint that is up-and-running, we can use this endpoint for inference. The results of a call to the inference endpoint are in a format that is similar to the .lst format, with the addition of a confidence score for each detected object. The format of the output can be represented as `[class_index, confidence_score, xmin, ymin, xmax, ymax]`. Typically, we don't visualize low-confidence predictions.
We have provided a script to easily visualize the detection outputs. You can visualize the high-confidence predictions with their bounding boxes by filtering out low-confidence detections using the script below:
```
def visualize_detection(img_file, dets, classes=[], thresh=0.6):
"""
visualize detections in one image
Parameters:
----------
img : numpy.array
image, in bgr format
dets : numpy.array
ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...])
each row is one object
classes : tuple or list of str
class names
thresh : float
score threshold
"""
import random
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img = mpimg.imread(img_file)
plt.imshow(img)
height = img.shape[0]
width = img.shape[1]
colors = dict()
num_detections = 0
for det in dets:
(klass, score, x0, y0, x1, y1) = det
if score < thresh:
continue
num_detections += 1
cls_id = int(klass)
if cls_id not in colors:
colors[cls_id] = (random.random(), random.random(), random.random())
xmin = int(x0 * width)
ymin = int(y0 * height)
xmax = int(x1 * width)
ymax = int(y1 * height)
rect = plt.Rectangle(
(xmin, ymin),
xmax - xmin,
ymax - ymin,
fill=False,
edgecolor=colors[cls_id],
linewidth=3.5,
)
plt.gca().add_patch(rect)
class_name = str(cls_id)
if classes and len(classes) > cls_id:
class_name = classes[cls_id]
print("{},{}".format(class_name, score))
plt.gca().text(
xmin,
ymin - 2,
"{:s} {:.3f}".format(class_name, score),
bbox=dict(facecolor=colors[cls_id], alpha=0.5),
fontsize=12,
color="white",
)
print("Number of detections: " + str(num_detections))
plt.show()
```
Now we use our endpoint to try to detect objects within an image. Since the image is a jpeg, we use the appropriate content_type to run the prediction. The endpoint returns a JSON object that we can simply load and peek into. We have packaged the prediction code into a function to make it easier to test other images. Note that we are defaulting the confidence threshold to 30% in our example, as a couple of the birds in our sample images were not being detected as clearly. Defining an appropriate threshold is entirely dependent on your use case.
```
OBJECT_CATEGORIES = classes_df["class_id"].values.tolist()
def show_bird_prediction(filename, ep, thresh=0.40):
b = ""
with open(filename, "rb") as image:
f = image.read()
b = bytearray(f)
endpoint_response = runtime.invoke_endpoint(EndpointName=ep, ContentType="image/jpeg", Body=b)
results = endpoint_response["Body"].read()
detections = json.loads(results)
visualize_detection(filename, detections["prediction"], OBJECT_CATEGORIES, thresh)
```
Here we download images that the algorithm has not yet seen.
```
!wget -q -O multi-goldfinch-1.jpg https://t3.ftcdn.net/jpg/01/44/64/36/500_F_144643697_GJRUBtGc55KYSMpyg1Kucb9yJzvMQooW.jpg
!wget -q -O northern-flicker-1.jpg https://upload.wikimedia.org/wikipedia/commons/5/5c/Northern_Flicker_%28Red-shafted%29.jpg
!wget -q -O northern-cardinal-1.jpg https://cdn.pixabay.com/photo/2013/03/19/04/42/bird-94957_960_720.jpg
!wget -q -O blue-jay-1.jpg https://cdn12.picryl.com/photo/2016/12/31/blue-jay-bird-feather-animals-b8ee04-1024.jpg
!wget -q -O hummingbird-1.jpg http://res.freestockphotos.biz/pictures/17/17875-hummingbird-close-up-pv.jpg
def test_model():
show_bird_prediction("hummingbird-1.jpg", object_detector.endpoint)
show_bird_prediction("blue-jay-1.jpg", object_detector.endpoint)
show_bird_prediction("multi-goldfinch-1.jpg", object_detector.endpoint)
show_bird_prediction("northern-flicker-1.jpg", object_detector.endpoint)
show_bird_prediction("northern-cardinal-1.jpg", object_detector.endpoint)
test_model()
```
# Clean up
Here we delete the SageMaker endpoint, as we will no longer be performing any inferences. This is an important step, as your account is billed for the amount of time an endpoint is running, even when it is idle.
```
sagemaker.Session().delete_endpoint(object_detector.endpoint)
```
# Improve the model
## Define Function to Flip the Images Horizontally (on the X Axis)
```
from PIL import Image
def flip_images():
print("Flipping images...")
SIZE_COLS = ["idx", "width", "height"]
IMAGE_COLS = ["image_pretty_name", "image_file_name"]
LABEL_COLS = ["image_pretty_name", "class_id"]
BBOX_COLS = ["image_pretty_name", "x_abs", "y_abs", "bbox_width", "bbox_height"]
SPLIT_COLS = ["image_pretty_name", "is_training_image"]
images_df = pd.read_csv(BASE_DIR + "images.txt", sep=" ", names=IMAGE_COLS, header=None)
image_class_labels_df = pd.read_csv(
BASE_DIR + "image_class_labels.txt", sep=" ", names=LABEL_COLS, header=None
)
bboxes_df = pd.read_csv(BASE_DIR + "bounding_boxes.txt", sep=" ", names=BBOX_COLS, header=None)
split_df = pd.read_csv(
BASE_DIR + "train_test_split.txt", sep=" ", names=SPLIT_COLS, header=None
)
NUM_ORIGINAL_IMAGES = images_df.shape[0]
rows_list = []
bbox_rows_list = []
size_rows_list = []
label_rows_list = []
split_rows_list = []
idx = 0
full_df = images_df.copy()
full_df.reset_index(inplace=True)
full_df = pd.merge(full_df, image_class_labels_df, on="image_pretty_name")
full_df = pd.merge(full_df, bboxes_df, on="image_pretty_name")
full_df = pd.merge(full_df, split_df, on="image_pretty_name")
full_df.sort_values(by=["index"], inplace=True)
if SAMPLE_ONLY:
# grab a small subset of species for testing
criteria = full_df["class_id"].isin(CLASSES)
full_df = full_df[criteria]
for rel_image_fn in full_df["image_file_name"]:
idx += 1
full_img_content = full_df[(full_df.image_file_name == rel_image_fn)]
class_id = full_img_content.iloc[0].class_id
img = Image.open(IMAGES_DIR + rel_image_fn)
width, height = img.size
new_idx = idx + NUM_ORIGINAL_IMAGES
flip_core_file_name = rel_image_fn[:-4] + "_flip.jpg"
flip_full_file_name = IMAGES_DIR + flip_core_file_name
img_flip = img.transpose(Image.FLIP_LEFT_RIGHT)
img_flip.save(flip_full_file_name)
# append a new image
dict = {"image_pretty_name": new_idx, "image_file_name": flip_core_file_name}
rows_list.append(dict)
# append a new split, use same flag for flipped image from original image
is_training_image = full_img_content.iloc[0].is_training_image
split_dict = {"image_pretty_name": new_idx, "is_training_image": is_training_image}
split_rows_list.append(split_dict)
# append a new image class label
label_dict = {"image_pretty_name": new_idx, "class_id": class_id}
label_rows_list.append(label_dict)
# add a size row for the original and the flipped image, same height and width
size_dict = {"idx": idx, "width": width, "height": height}
size_rows_list.append(size_dict)
size_dict = {"idx": new_idx, "width": width, "height": height}
size_rows_list.append(size_dict)
# append bounding box for flipped image
x_abs = full_img_content.iloc[0].x_abs
y_abs = full_img_content.iloc[0].y_abs
bbox_width = full_img_content.iloc[0].bbox_width
bbox_height = full_img_content.iloc[0].bbox_height
flipped_x_abs = width - bbox_width - x_abs
bbox_dict = {
"image_pretty_name": new_idx,
"x_abs": flipped_x_abs,
"y_abs": y_abs,
"bbox_width": bbox_width,
"bbox_height": bbox_height,
}
bbox_rows_list.append(bbox_dict)
print("Done looping through original images")
images_df = images_df.append(rows_list)
images_df[IMAGE_COLS].to_csv(IMAGE_FILE, sep=" ", index=False, header=None)
bboxes_df = bboxes_df.append(bbox_rows_list)
bboxes_df[BBOX_COLS].to_csv(BBOX_FILE, sep=" ", index=False, header=None)
split_df = split_df.append(split_rows_list)
split_df[SPLIT_COLS].to_csv(SPLIT_FILE, sep=" ", index=False, header=None)
sizes_df = pd.DataFrame(size_rows_list)
sizes_df[SIZE_COLS].to_csv(SIZE_FILE, sep=" ", index=False, header=None)
image_class_labels_df = image_class_labels_df.append(label_rows_list)
image_class_labels_df[LABEL_COLS].to_csv(LABEL_FILE, sep=" ", index=False, header=None)
print("Done saving metadata in text files")
```
## Re-train the model with the expanded dataset
```
%%time
BBOX_FILE = BASE_DIR + "bounding_boxes_with_flip.txt"
IMAGE_FILE = BASE_DIR + "images_with_flip.txt"
LABEL_FILE = BASE_DIR + "image_class_labels_with_flip.txt"
SIZE_FILE = BASE_DIR + "sizes_with_flip.txt"
SPLIT_FILE = BASE_DIR + "train_test_split_with_flip.txt"
# add a set of flipped images
flip_images()
# show the new full set of images for a species
show_species("017.Cardinal")
# create new sizes file
gen_image_size_file()
# re-create and re-deploy the RecordIO files with the updated set of images
train_df, val_df = gen_list_files()
!python tools/im2rec.py --resize $RESIZE_SIZE --pack-label birds_ssd_sample $BASE_DIR/images/
sess.upload_data(path="birds_ssd_sample_train.rec", bucket=bucket, key_prefix=train_channel)
sess.upload_data(path="birds_ssd_sample_val.rec", bucket=bucket, key_prefix=validation_channel)
# account for the new number of training images
set_hyperparameters(100, "33,67")
# re-train
od_model.fit(inputs=data_channels, logs=True)
# check out the new accuracy
plot_object_detection_log(od_model, "mAP tracking for job: " + od_model._current_job_name)
```
## Re-deploy and test
```
# host the updated model
object_detector = od_model.deploy(initial_instance_count=1, instance_type="ml.m4.xlarge")
# test the new model
test_model()
```
## Final cleanup
Here we delete the SageMaker endpoint, as we will no longer be performing any inferences. This is an important step, as your account is billed for the amount of time an endpoint is running, even when it is idle.
```
# delete the new endpoint
sagemaker.Session().delete_endpoint(object_detector.endpoint)
```
# Test
```
import fastai.train
import pandas as pd
import torch
import torch.nn as nn
from captum.attr import LayerIntegratedGradients
# --- Model Setup ---
# Load a fast.ai `Learner` trained to predict IMDB review category `[negative, positive]`
awd = fastai.train.load_learner(".", "imdb_fastai_trained_lm_clf.pth")
awd.model[0].bptt = 200
# getting to the actual layer that holds embeddings
embedding_layer = awd.model[0]._modules["module"]._modules["encoder_dp"]
# working around the model prediction - first output only, apply softmax
forward_func = lambda x: torch.softmax(awd.model(x)[0], dim=-1)
# make integrated gradients instance
lig = LayerIntegratedGradients(forward_func, embedding_layer)
# Explainer logic
def get_attributions_for_sentence(
sentence,
awd_model=awd,
lig_instance=lig,
target=None,
lig_n_steps=200,
baseline_token="\n \n ",
):
awd = awd_model
lig = lig_instance
vocab = awd.data.x.vocab
sentence_tokens = awd.data.one_item(sentence)[0]
reversed_tokens = [vocab.itos[w] for w in sentence_tokens[0]]
baseline = (
torch.ones_like(sentence_tokens) * vocab.stoi[baseline_token]
) # see "how to choose a good baseline"
baseline[0, 0] = vocab.stoi["xxbos"] # beginning of sentence is always #1
y = awd.predict(sentence)
if target is None:
target = y[1].item()
attrs = lig.attribute(sentence_tokens, baseline, target, n_steps=lig_n_steps)
a = attrs.sum(-1)
a = a / torch.norm(a)
return (pd.Series(a.numpy()[0], index=reversed_tokens), y)
# https://www.imdb.com/review/rw5384922/?ref_=tt_urv
review_1917 = """I sat in a packed yet silent theater this morning and watched, what I believe to be, the next Academy Award winner for the Best Picture."""
"""I'm not at all a fan of war movies but I am a fan of great movies... and 1917 is a great movie. I have never been so mesmerized by set design and direction, the mass human emotion of this film is astonishingly captured and embedded magically in the audience. It keeps running through my mind...the poetry and beauty intertwined with the raw misery of war. Treat yourself... see this movie!
""";
import ipyvuetify as v
import ipywidgets as w
class Chip(v.Chip):
positive = "0, 255, 0"
negative = "255, 0, 0"
def __init__(self, word, attribution):
direction = self.positive if attribution >= 0 else self.negative
color = f"rgba({direction}, {abs(attribution):.2f})"
super().__init__(
class_="mx-0 px-1",
children=[word],
color=color,
value=attribution,
label=True,
small=True,
)
def saliency_chips(attributions: pd.Series) -> v.ChipGroup:
children = [Chip(w, a) for w, a in attributions.iteritems()]
return v.ChipGroup(column=True, children=children)
@w.interact_manual(
sentence=w.Textarea(review_1917),
target=[None, 0, 1],
baseline_token=["\n \n", ".", "<BOS>"],
)
def display_attributions(sentence="Great film", target=None, baseline_token="\n \n "):
attributions, prediction = get_attributions_for_sentence(sentence)
return saliency_chips(attributions)
```
## Load the data
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime as dt
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
import matplotlib as mat
import matplotlib.font_manager as fonm
font_list = [font.name for font in fonm.fontManager.ttflist]
# for f in font_list:
# print(f"{f}.ttf")
mat.rcParams['font.family'] = 'Hancom Gothic'
def str_col(df):
col = []
for i in range(0,len(df.dtypes)):
if str(df.dtypes[i]) == 'object':
col.append(df.dtypes.index[i])
print(col)
return col
def int_col(df):
col = []
for i in range(0,len(df.dtypes)):
if str(df.dtypes[i]) != 'object':
col.append(df.dtypes.index[i])
print(col)
return col
def p_100(a, b):
print( round( (a/(a+b))*100,2), "%" )
def extraction_func(df, col_name, num_list):
temp = pd.DataFrame()
for i in num_list:
temp = pd.concat([ temp, df.loc[df[col_name] == i ] ],axis=0)
return temp
def unique_check(df):
for i in range(0,len(df.columns)):
if df[df.columns[i]].isnull().sum() > 0:
print("Column has missing values and cannot be processed:", df.columns[i])
col_1 = []
col_2 = []
for i in range(0,len(df.columns)):
if type(df[df.columns[i]][0]) == str:
col_1.append(df.columns[i])
if df[df.columns[i]].nunique() > 5:
col_2.append(df.columns[i])
print(df.columns[i], "column has", df[df.columns[i]].nunique(), "unique values")
return col_1, col_2
insurance = pd.read_csv('./temp_data/insurance.csv',encoding='utf-8')
print(insurance.shape)
print(insurance.dtypes)
print(insurance.isnull().sum())
insurance.tail(5)
insurance = insurance.astype({'RESI_TYPE_CODE': str,
'MINCRDT':str,
'MAXCRDT':str,
'ACCI_DVSN':str,
'DMND_RESN_CODE':str,
'CUST_ROLE':str})
```
## Copy the data
```
copy_insurance = insurance.copy()
```
## Drop de-identified columns and columns with many unique values
- Columns with many unique values are difficult to encode, so the corresponding columns are dropped
- When these columns were kept, encoding blew the dimensionality up to around 60,000 columns
```
col_1, col_2 = unique_check(copy_insurance)
col_2.remove('RESI_TYPE_CODE')
col_2.remove('OCCP_GRP_1')
col_2.remove('MINCRDT')
col_2.remove('MAXCRDT')
col_2.remove('DMND_RESN_CODE')
col_2.remove('CUST_ROLE')
# Use CUST_ID as the index
copy_insurance.set_index('CUST_ID', inplace=True)
copy_insurance.drop(col_2, axis=1, inplace=True)
```
## Understand the data
#### Check the correlations between variables
```
### Import the required modules
#%matplotlib inline # show visualizations directly in the Jupyter Notebook
# import matplotlib.pyplot as plt # import the module
### Correlation table
corr = copy_insurance.corr() # store the correlation table of the dataset as 'corr'
### Draw the correlation heatmap
# set the heatmap size
plt.figure(figsize = (20, 15))
# define the heatmap shape: a triangle here (True for the upper triangle, False for the lower triangle)
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
# draw the heatmap
sns.heatmap(data = corr, # 'corr' = correlation table
annot = True, # show values on the heatmap
mask=mask, # heatmap shape, here the triangle mask defined above
fmt = '.2f', # value format: two decimal places
linewidths = 1., # draw separating lines between cells
cmap = 'RdYlBu_r') # colormap to use (search 'python colormap')
plt.title('Correlation heatmap')
plt.show()
```
##### Remove highly correlated columns
```
copy_insurance = copy_insurance[copy_insurance.columns.difference(['LTBN_CHLD_AGE','JPBASE_HSHD_INCM'])]
```
#### Check whether the data follows a normal distribution
- Min-max normalization: puts all features on the same scale, but does not handle outliers well. (X - MIN) / (MAX - MIN)
- Z-score normalization (standardization): handles outliers well, but does not produce data normalized to exactly the same scale. (X - mean) / standard deviation (a small worked example of both follows below)
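A tiny worked example of the two scalings on a toy array, for illustration only (the actual preprocessing below uses scikit-learn's `MinMaxScaler`):
```
import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0, 100.0])      # 100 acts as an outlier

min_max = (x - x.min()) / (x.max() - x.min())   # squeezed into [0, 1]
z_score = (x - x.mean()) / x.std()              # mean 0, standard deviation 1

print("min-max:", np.round(min_max, 3))
print("z-score:", np.round(z_score, 3))
```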
```
plot_target = int_col(copy_insurance)
import scipy.stats as stats
for i in plot_target:
print(i, "- checking for a Gaussian distribution")
fig = plt.figure(figsize=(15,3))
ax1 = fig.add_subplot(1,2,1)
ax2 = fig.add_subplot(1,2,2)
stats.probplot(copy_insurance[i], dist=stats.norm,plot=ax1)
mu = copy_insurance[i].mean()
variance = copy_insurance[i].var()
sigma = variance ** 0.5
x=np.linspace(mu - 3*sigma, mu + 3*sigma, 100)
ax2.plot(x, stats.norm.pdf(x,mu,sigma), color="blue",label="theoretical")
sns.distplot(ax=ax2, a=copy_insurance[i], bins=100, color="red", label="observed")
ax2.legend()
plt.show()
print()
```
#### Hypothesis testing with stats.kstest
- The null hypothesis is that the data follows a normal distribution.
```
for i in plot_target:
print(i, "- checking whether the null hypothesis is rejected")
test_state, p_val = stats.kstest(copy_insurance[i],'norm',args=(copy_insurance[i].mean(), copy_insurance[i].var()**0.5) )
print("Test-statistics : {:.5f}, p-value : {:.5f}".format(test_state, p_val))
print()
```
##### Since every column except AGE does not follow a normal distribution, apply normalization with MinMaxScaler
```
from sklearn.preprocessing import MinMaxScaler
int_data = copy_insurance[plot_target]
# keep the index aside
index = int_data.index
# create a MinMaxScaler object
scaler = MinMaxScaler()
# transform the dataset with MinMaxScaler by calling .fit() and .transform()
scaler.fit(int_data)
data_scaled = scaler.transform(int_data)
# int_data.loc[:,:] = data_scaled
# transform() returns the scaled dataset as a numpy ndarray, so convert it back to a DataFrame
data_scaled = pd.DataFrame(data=data_scaled, columns=int_data.columns, index=index)
print('Minimum of the normalized features')
print(data_scaled.min())
print('\nMaximum of the normalized features')
print(data_scaled.max())
```
##### One-hot encode the remaining categorical columns, excluding the label column
```
onehot_target = str_col(copy_insurance)
onehot_target.remove('SIU_CUST_YN')
str_data = copy_insurance[onehot_target]
onehot_data = pd.get_dummies(str_data)
```
#### Concatenate the encoded data, the scaled data and the label, then save
```
concat_data = pd.concat([data_scaled, onehot_data, copy_insurance['SIU_CUST_YN']], axis=1)
concat_data.to_csv('./temp_data/save_scaled_insurance.csv',index = True)
```
# Repertoire classification subsampling
When training a classifier to assign repertoires to the subject from which they were obtained, we need a set of subsampled sequences. The sequences have been condensed to just the V- and J-gene assignments and the CDR3 length (VJ-CDR3len). Subsample sizes range from 10 to 10,000 sequences per biological replicate.
The [`abutils`](https://www.github.com/briney/abutils) Python package is required for this notebook, and can be installed by running `pip install abutils`.
*NOTE: this notebook requires the use of the Unix command line tool `shuf`. Thus, it requires a Unix-based operating system to run correctly (MacOS and most flavors of Linux should be fine). Running this notebook on Windows 10 may be possible using the [Windows Subsystem for Linux](https://docs.microsoft.com/en-us/windows/wsl/about) but we have not tested this.*
```
from __future__ import print_function, division
from collections import Counter
import os
import subprocess as sp
import sys
import tempfile
from abutils.utils.pipeline import list_files, make_dir
```
## Subjects, subsample sizes, and directories
The `input_dir` should contain deduplicated clonotype sequences. The datafiles are too large to be included in the Github repository, but may be downloaded [**here**](http://burtonlab.s3.amazonaws.com/GRP_github_data/techrep-merged_vj-cdr3len_no-header.tar.gz). If downloading the data (which will be downloaded as a compressed archive), decompress the archive in the `data` directory (in the same parent directory as this notebook) and you should be ready to go. If you want to store the downloaded data in some other location, adjust the `input_dir` path below as needed.
By default, subsample sizes increase by 10 from 10 to 100, by 100 from 100 to 1,000, and by 1,000 from 1,000 to 10,000.
```
with open('./data/subjects.txt') as f:
subjects = sorted(f.read().split())
subsample_sizes = list(range(10, 100, 10)) + list(range(100, 1000, 100)) + list(range(1000, 11000, 1000))
input_dir = './data/techrep-merged_vj-cdr3len_no-header/'
subsample_dir = './data/repertoire_classification/user-created_subsamples_vj-cdr3len'
make_dir(subsample_dir)
```
## Subsampling
```
def subsample(infile, outfile, n_seqs, iterations):
with open(outfile, 'w') as f:
f.write('')
shuf_cmd = 'shuf -n {} {}'.format(n_seqs, infile)
p = sp.Popen(shuf_cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
stdout, stderr = p.communicate()
with open(outfile, 'a') as f:
for iteration in range(iterations):
seqs = ['_'.join(s.strip().split()) for s in stdout.strip().split('\n') if s.strip()]
counts = Counter(seqs)
count_strings = []
for k, v in counts.items():
count_strings.append('{}:{}'.format(k, v))
f.write(','.join(count_strings) + '\n')
for subject in subjects:
print(subject)
files = list_files(os.path.join(input_dir, subject))
for file_ in files:
for subsample_size in subsample_sizes:
num = os.path.basename(file_).split('_')[0]
ofile = os.path.join(subsample_dir, '{}_{}-{}'.format(subject, subsample_size, num))
subsample(file_, ofile, subsample_size, 50)
```
# Strata objects: Legend and Column
Strata is stratigraphic data.
The main object of `strata` submodule is `mplStrater.strata.Column` which represents the single stratigraphic column.
This example shows the structure of the class and how to use it.
First, import all required packages and load the example dataset.
```
%load_ext autoreload
%autoreload 2
from mplStrater.data import StrataFrame
from mplStrater.strata import Column,Legend
import pandas as pd
import matplotlib.pyplot as plt
df=pd.read_csv("../../../data/example.csv")
df.head()
```
Then, instantiate a `StrataFrame`, providing a `pandas.DataFrame` and specifying its `epsg` code.
```
sf=StrataFrame(
df=df,
epsg=32633)
```
## Define a `Legend`.
This is done by providing a dictionary of value-specification pairs for the `fill_dict` parameter and for the `hatch_dict` parameter.
The dictionary matches dataframe `fill` and `hatch` column values to either a *matplotlib encoded color* or *encoded hatch* string.
The example uses the following dictionaries.
```
fill_dict={
'Terreno conforme': 'lightgreen',
'Riporto conforme': 'darkgreen',
'Riporto non conforme': 'orange',
'Rifiuto': 'red',
'Assenza campione': 'white'
}
hatch_dict={
'Non pericoloso': '',
'Pericoloso': 'xxxxxxxxx',
'_': ''
}
l=Legend(
fill_dict=fill_dict,
hatch_dict=hatch_dict
)
```
## Plot stand-alone `Column` objects
Imagine we need to inspect a column closely. We might not be able to do that clearly on the map, with all the other elements (labels, basemap, ...) around it, and exporting the map as a high-resolution PDF and opening the local file just for that would take far too long. Therefore the `Column` object has its own `plot()` method.
Let's plot the first three columns of the strataframe.
```
sf.strataframe[:3]
```
Plot the first three columns contained in the `StrataFrame`.
```
#create figure
f,axes=plt.subplots(1,4,figsize=(5,3),dpi=200,frameon=False)
for ax,i in zip(axes,range(4)):
ax.axis('off')
#instantiate class
c=Column(
#figure
ax,l,
#id
sf.strataframe.loc[i,"ID"],
#coords
(0.9,0.9),
#scale
sf.strataframe.loc[i,"scale"],
3,
#stratigraphic data
sf.strataframe.loc[i,"layers"],
sf.strataframe.loc[i,"fill_list"],
sf.strataframe.loc[i,"hatch_list"],
#labels
sf.strataframe.loc[i,"lbl1_list"],
sf.strataframe.loc[i,"lbl2_list"],
sf.strataframe.loc[i,"lbl3_list"])
ax.set_title(c.id)
c.fill_column()
c.set_inset_params()
c.label_column(hardcoding=None)
```
Sometimes it is useful to take a random choice between two or more options.
Numpy has a function for that, called `random.choice`:
```
import numpy as np
```
Say we want to choose randomly between 0 and 1. We want an equal probability of getting 0 and getting 1. We could do it like this:
```
np.random.randint(0, 2)
```
If we do that lots of times, we see that we have a roughly 50% chance of getting 0 (and therefore, a roughly 50% chance of getting 1).
```
# Make 10000 random numbers that can be 0 or 1, with equal probability.
lots_of_0_1 = np.random.randint(0, 2, size=10000)
# Count the proportion that are 1.
np.count_nonzero(lots_of_0_1) / 10000
```
Run the cell above a few times to confirm you get numbers very close to 0.5.
Another way of doing this is to use `np.random.choice`.
As usual, check the arguments that the function expects with `np.random.choice?` in a notebook cell.
The first argument is a sequence, like a list, with the options that Numpy should choose from.
For example, we can ask Numpy to choose randomly from the list `[0, 1]`:
```
np.random.choice([0, 1])
```
A second `size` argument to the function says how many items to choose:
```
# Ten numbers, where each has a 50% chance of 0 and 50% chance of 1.
np.random.choice([0, 1], size=10)
```
By default, Numpy will choose each item in the sequence with equal probability. In this case, Numpy will choose 0 with 50% probability, and 1 with 50% probability:
```
# Use choice to make another 10000 random numbers that can be 0 or 1,
# with equal probability.
more_0_1 = np.random.choice([0, 1], size=10000)
# Count the proportion that are 1.
np.count_nonzero(more_0_1) / 10000
```
If you want, you can change these proportions with the `p` argument:
```
# Use choice to make another 10000 random numbers that can be 0 or 1,
# where 0 has probability 0.25, and 1 has probability 0.75.
weighted_0_1 = np.random.choice([0, 1], size=10000, p=[0.25, 0.75])
# Count the proportion that are 1.
np.count_nonzero(weighted_0_1) / 10000
```
There can be more than two choices:
```
# Use choice to make another 10000 random numbers that can be 0 or 10 or 20, or
# 30, where each has probability 0.25.
multi_nos = np.random.choice([0, 10, 20, 30], size=10000)
multi_nos[:10]
np.count_nonzero(multi_nos == 30) / 10000
```
The choices don't have to be numbers:
```
np.random.choice(['Heads', 'Tails'], size=10)
```
You can also do choices *without replacement*, so once you have chosen an element, all subsequent choices cannot choose that element again. For example, this *must* return all the elements from the choices, but in random order:
```
np.random.choice([0, 10, 20, 30], size=4, replace=False)
```
```
package_jar = '../target/spark-data-repair-plugin_2.12_spark3.2_0.1.0-EXPERIMENTAL-with-dependencies.jar'
import numpy as np
import pandas as pd
from pyspark.sql import *
from pyspark.sql.types import *
from pyspark.sql import functions as f
spark = SparkSession.builder \
.config('spark.jars', package_jar) \
.config('spark.driver.memory', '8g') \
.enableHiveSupport() \
.getOrCreate()
# Suppresses user warning messages in Python
import warnings
warnings.simplefilter("ignore", UserWarning)
# Suppresses `WARN` messages in JVM
spark.sparkContext.setLogLevel("ERROR")
from repair.api import Scavenger
Scavenger().version()
spark.read.option("header", True).csv("../testdata/adult.csv").createOrReplaceTempView("adult")
spark.table('adult').printSchema()
import altair as alt
charts = []
pdf = spark.table('adult').toPandas()
for c in [c for c in pdf.columns if c != 'tid']:
charts.append(alt.Chart(pdf).mark_bar().encode(x=alt.X(c), y=alt.Y('count()', axis=alt.Axis(title='freq'))).properties(width=300, height=300))
alt.hconcat(*charts)
from repair.detectors import NullErrorDetector, ConstraintErrorDetector
error_detectors = [
ConstraintErrorDetector(constraint_path="../testdata/adult_constraints.txt"),
NullErrorDetector()
]
from repair.model import RepairModel
model = RepairModel().setTableName('adult').setRowId('tid')
noisy_cells_df, noisy_columns = model.setErrorDetectors(error_detectors)._detect_errors('adult', 8, 20)
import altair as alt
pdf = noisy_cells_df.toPandas()
alt.Chart(pdf).mark_bar().encode(x=alt.X('attribute'), y=alt.Y('count()', axis=alt.Axis(title='freq'))).properties(width=400, height=400)
discretized_table, discretized_columns, distinct_stats = model._discretize_attrs('adult')
discretized_columns
target_columns = list(filter(lambda c: c in discretized_columns, noisy_columns))
target_columns
cell_domain, pairwise_stats = model._analyze_error_cell_domain(noisy_cells_df, discretized_table, [], target_columns, discretized_columns, 20)
import altair as alt
charts = []
for target, cols in pairwise_stats.items():
pdf = pd.DataFrame(cols, columns=[target, 'cor'])
pdf['cor'] = pdf['cor'].astype('float')
charts.append(alt.Chart(pdf).mark_bar().encode(x=alt.X(target), y=alt.Y('cor')).properties(width=200, height=200))
alt.hconcat(*charts)
error_cells_df, weak_labeled_cells_df_opt = model._extract_error_cells(noisy_cells_df, cell_domain, 20, 8)
repair_base_df = model._prepare_repair_base_cells('adult', noisy_cells_df, target_columns, 20, 8)
repair_base_df = model._repair_attrs(weak_labeled_cells_df_opt, repair_base_df)
import altair as alt
charts = []
pdf = repair_base_df.toPandas()
for c in [c for c in pdf.columns if c != 'tid']:
charts.append(alt.Chart(pdf).mark_bar().encode(x=alt.X(c), y=alt.Y('count()', axis=alt.Axis(title='freq'))).properties(width=300, height=300))
alt.hconcat(*charts)
target = 'Sex'
pdf = repair_base_df.toPandas()
pdf = pdf.dropna()
X = pdf.drop(['tid', target], axis=1).reset_index(drop=True)
y = pdf[target].reset_index(drop=True)
import category_encoders as ce
se = ce.OrdinalEncoder(handle_unknown='impute')
X = se.fit_transform(X)
X
import altair as alt
pdf = pd.concat([X, y], axis=1)
alt.Chart(pdf).mark_circle().encode(
alt.X(alt.repeat("column"), type='quantitative'),
alt.Y(alt.repeat("row"), type='quantitative'),
color=f'{target}:N'
).properties(width=200, height=200).repeat(row=X.columns.tolist(), column=X.columns.tolist())
# One of non-linear embedding in sklearn
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, random_state=0)
_X = tsne.fit_transform(X)
tsne.kl_divergence_
import altair as alt
_X = pd.DataFrame({'tSNE-X': _X[:, 0], 'tSNE-Y': _X[:, 1], target: y})
alt.Chart(_X).mark_point().encode(x='tSNE-X', y='tSNE-Y', color=f'{target}:N').properties(width=600, height=400).interactive()
from sklearn.ensemble import RandomForestClassifier
from boruta import BorutaPy
rf = RandomForestClassifier(n_jobs=-1, max_depth=5)
rf.fit(X, y)
print('SCORE with ALL Features: %1.2f\n' % rf.score(X, y))
rf = RandomForestClassifier(n_jobs=-1, max_depth=5)
fs = BorutaPy(rf, n_estimators='auto', random_state=0)
fs.fit(X.values, y.values)
selected = fs.support_
print('Selected Features: %s' % ','.join(X.columns[selected]))
X_selected = X[X.columns[selected]]
rf = RandomForestClassifier(n_jobs=-1, max_depth=5)
rf.fit(X_selected, y)
print('SCORE with selected Features: %1.2f' % rf.score(X_selected, y))
```
# Capsule Network
In this notebook I will try to explain and implement a Capsule Network. MNIST images will be used as input.
To implement a Capsule Network, we first need to understand what capsules are and what advantages they have compared to a convolutional neural network.
### So what are capsules?
* Briefly, capsules are small groups of neurons where each neuron in a capsule represents various properties of a particular image part.
* Capsules represent relationships between parts of a whole object by using **dynamic routing** to weight the connections between one layer of capsules and the next, creating strong connections between spatially-related object parts (dynamic routing will be discussed later).
* The output of each capsule is a vector; this vector has a magnitude and an orientation.
* Magnitude: it indicates whether that particular image part is present or not. Basically we can summarize it as the probability that the part exists, so it has to be between 0 and 1 (see the short squash-function sketch after this list).
* Orientation: it changes if one of the properties of that particular image part changes.
Let us have an example to understand it more and make it clear.
As shown in the following image, capsules will detect a cat's face. As shown in the image the capsule consists of neurals with properties like the position,color,width and etc.. .Then we get a vector output with magnitude 0.9 which means we have 90% confidence that this is a cat face and we will get an orientation as well.

(image from : https://cezannec.github.io/Capsule_Networks/)
But what if we change these properties, for example by flipping the cat's face? What will happen? Will it still detect the cat's face?
Yes, it will still detect the cat's face with 90% confidence (magnitude 0.9), but there will be a change in the orientation (theta) to indicate a change in the properties.

(image from: https://cezannec.github.io/Capsule_Networks/ )
### What advantages does it have compared to a Convolutional Neural Network (CNN)?
* A CNN looks for key features regardless of their position. As shown in the following image, a CNN will detect the left image as a face, while a capsule network will not, because it checks whether the parts are in the correct position.

(image from:https://kndrck.co/posts/capsule_networks_explained/)
* A capsule network is more robust to affine transformations of the data. If translation or rotation is applied to the test data, a trained capsule network will perform better and give higher accuracy than a normal CNN.
# Model Architecture
The capsule network consists of two main parts:
* A convolutional encoder.
* A fully connected, linear decoder.

(image from :[Hinton's paper(capsule networks orignal paper)](https://arxiv.org/pdf/1710.09829.pdf) )
In this explanation and implementation I will follow the architecture from [Hinton's paper (capsule networks original paper)](https://arxiv.org/pdf/1710.09829.pdf).
# 1)Encoder
The encoder consists of three main layers, as shown in the following image, plus the input layer, which is a 28x28 MNIST image.
Please notice the difference between this image and the previous one, where the last layer shown in the previous image is the decoder.

## A)The convolutional layer
In Hinton's paper, a kernel of size 9x9 is applied to the input layer. This kernel has a depth of 256, stride = 1 and padding = 0. This gives us an output of dimension 20x20.
**Note**:
You can calculate the output dimension with this equation: output = [(w-k+2p)/s]+1 (a quick numeric check follows the list below), where:
- w is the input size
- k is the kernel size
- p is padding
- s is stride
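As a quick numeric check (a minimal sketch that simply plugs in the layer sizes quoted above), the formula reproduces the 20x20 output:
```
# output-size formula with the values used above:
# input 28x28, kernel 9x9, padding 0, stride 1
w, k, p, s = 28, 9, 0, 1
output = (w - k + 2 * p) // s + 1
print(output)  # 20, matching the 20x20 feature maps
```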
So to clarify this more:
- The input's dimension is (28,28,1) where the 28x28 is the input size and 1 is the number of channels.
- The kernel's dimension is (9,9,1,256), where 9x9 is the kernel size, 1 is the number of input channels and 256 is the depth of the kernel.
- The output's dimension is (20,20,256), where 20x20 is the output size and 256 is the number of stacked filtered images.
I think we are ready to start implementing the code now, so let us start by obtaining the MNIST data and creating our DataLoaders for training and testing purposes.
```
# import resources
import numpy as np
import torch
# random seed (for reproducibility)
seed = 1
# set random seed for numpy
np.random.seed(seed)
# set random seed for pytorch
torch.manual_seed(seed)
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to Tensors
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data,
batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data,
batch_size=batch_size,
num_workers=num_workers)
```
The next step is to create the convolutional layer as explained above:
```
import torch.nn as nn
import torch.nn.functional as F
class ConvLayer(nn.Module):
def __init__(self, in_channels=1, out_channels=256):
'''Constructs the ConvLayer with a specified input and output size.
These sizes has initial values from the paper.
param input_channel: input depth of an image, default value = 1
param output_channel: output depth of the convolutional layer, default value = 256
'''
super(ConvLayer, self).__init__()
# defining a convolutional layer of the specified size
self.conv = nn.Conv2d(in_channels, out_channels,
kernel_size=9, stride=1, padding=0)
def forward(self, x):
# applying a ReLu activation to the outputs of the conv layer
output = F.relu(self.conv(x)) # we will have dimensions (batch_size, 256, 20, 20)
return output
```
## B)Primary capsules
This layer is tricky, but I will try to simplify it as much as I can.
We would like to convolve the first layer into a new layer with 8 primary capsules.
To do so we will follow the steps from Hinton's paper:
- The first step is to convolve our first convolutional layer, which has a dimension of (20, 20, 256), with a kernel of dimension (9,9,256,256), in which 9 is the kernel size, the first 256 is the number of channels coming from the first layer and the second 256 is the number of filters (the depth of the kernel). Using a stride of 2, we get an output with a dimension of (6,6,256).
- The second step is to reshape this output to (6,6,8,32), where 8 is the number of capsules and 32 is the depth of each capsule.
- Now the output of each capsule has a dimension of (6,6,32), and we reshape it to (32x6x6, 1) = (1152, 1) for each capsule.
- The final step is to squash the output so that it has a magnitude between 0 and 1, as discussed earlier, using the following equation:

where $v_j$ is the normalized output vector of capsule j and $s_j$ is the total input of capsule j (the weighted sum of the output vectors coming from the capsules in the layer below).
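As a small sanity check (a made-up 2-D vector, purely for intuition), squashing always yields a magnitude below 1 while preserving direction:
```
import torch

# toy check of the squash nonlinearity with an arbitrary example vector
s = torch.tensor([3.0, 4.0])                # |s| = 5
squared_norm = (s ** 2).sum()
v = (squared_norm / (1 + squared_norm)) * s / torch.sqrt(squared_norm)
print(v, v.norm())                          # magnitude 25/26 ~ 0.96 < 1
```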
We will use an nn.ModuleList container to loop over each capsule we create.
```
class PrimaryCaps(nn.Module):
def __init__(self, num_capsules=8, in_channels=256, out_channels=32):
'''Constructs a list of convolutional layers to be used in
creating capsule output vectors.
param num_capsules: number of capsules to create
param in_channels: input depth of features, default value = 256
param out_channels: output depth of the convolutional layers, default value = 32
'''
super(PrimaryCaps, self).__init__()
# creating a list of convolutional layers for each capsule I want to create
# all capsules have a conv layer with the same parameters
self.capsules = nn.ModuleList([
nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=9, stride=2, padding=0)
for _ in range(num_capsules)])
def forward(self, x):
'''Defines the feedforward behavior.
param x: the input; features from a convolutional layer
return: a set of normalized, capsule output vectors
'''
# get batch size of inputs
batch_size = x.size(0)
# reshape convolutional layer outputs to be (batch_size, vector_dim=1152, 1)
u = [capsule(x).view(batch_size, 32 * 6 * 6, 1) for capsule in self.capsules]
# stack up output vectors, u, one for each capsule
u = torch.cat(u, dim=-1)
# squashing the stack of vectors
u_squash = self.squash(u)
return u_squash
def squash(self, input_tensor):
'''Squashes an input Tensor so it has a magnitude between 0-1.
param input_tensor: a stack of capsule inputs, s_j
return: a stack of normalized, capsule output vectors, v_j
'''
squared_norm = (input_tensor ** 2).sum(dim=-1, keepdim=True)
scale = squared_norm / (1 + squared_norm) # normalization coeff
output_tensor = scale * input_tensor / torch.sqrt(squared_norm)
return output_tensor
```
## C)Digit capsules
As we have 10 digit classes from 0 to 9, this layer will have 10 capsules, one for each digit.
Each capsule takes as input a batch of 1152-dimensional vectors, while the output is ten 16-dimensional vectors.
### Dynamic Routing
Dynamic routing is used to find the best connections between the capsules in a child layer and the capsules in the possible parent layer. Its main component is the iterative update of the coupling between capsules.
To make it easier, we can think of capsule routing as playing a role analogous to backpropagation: we use it to obtain the probability that a certain capsule's output should go to a particular parent capsule in the next layer.
As shown in the following figure, the first child capsule is connected to $s_{1}$, the first possible parent capsule, and to $s_{2}$, the second possible parent capsule. In the beginning the coupling coefficients have equal values; we then apply dynamic routing to adjust them. We might find, for example, that the coupling coefficient connected with $s_{1}$ is 0.9 and the one connected with $s_{2}$ is 0.1, which means the first child capsule's output should most likely go to the first parent capsule in the next layer.

**Notes**
- Across all connections between one child capsule and all possible parent capsules, the coupling coefficients should sum to 1. This means that $c_{11} + c_{12} = 1$.
- As shown in the following figure, $s_{1}$ is the total input of the parent capsule (the weighted sum of the output vectors from the capsules in the layer below).
- To check the similarity between the total input $s_{1}$ and each input vector, we calculate the dot product between them; in this example we find that $s_{1}$ is more similar to $u_{1}$ than to $u_{2}$ or $u_{3}$. This similarity is called the agreement.

### Dynamic Routing Algorithm
The following algorithm is from [Hinton's paper (capsule networks original paper)](https://arxiv.org/pdf/1710.09829.pdf)

We can explain the algorithm simply as follows:
- First we initialize the logits $b_{ij}$ of the softmax function to zero.
- Calculate the coupling coefficients using the softmax equation:
$$c_{ij} = \frac{e^{b_{ij}}}{\sum_{k} e^{b_{ik}}}$$
- Calculate the total capsule inputs $s_{j}$.
**Note**
- $s_j = \sum_i c_{ij} \, \hat{u}_i$
- $\hat{u} = W u$, where $W$ is the weight matrix and $u$ is the input vector
- Squash to get a normalized vector output $v_{j}$.
- The last step is composed of two parts: calculating the agreement and the new $b_{ij}$. The agreement is the similarity we discussed before, which is the dot product between the prediction vector $\hat{u}$ and the parent capsule's output vector $v_{j}$. The second part is to update $b_{ij}$:
$$\hat{u} = W u \qquad a = v \cdot \hat{u} \qquad b_{ij} = b_{ij} + a$$
```
def softmax(input_tensor, dim=1):  # softmax along an arbitrary dimension, used for the coupling coefficients c_ij
# transpose input
transposed_input = input_tensor.transpose(dim, len(input_tensor.size()) - 1)
# calculate softmax
softmaxed_output = F.softmax(transposed_input.contiguous().view(-1, transposed_input.size(-1)), dim=-1)
# un-transpose result
return softmaxed_output.view(*transposed_input.size()).transpose(dim, len(input_tensor.size()) - 1)
# dynamic routing
def dynamic_routing(b_ij, u_hat, squash, routing_iterations=3):
'''Performs dynamic routing between two capsule layers.
param b_ij: initial log probabilities that capsule i should be coupled to capsule j
param u_hat: input, weighted capsule vectors, W u
param squash: given, normalizing squash function
param routing_iterations: number of times to update coupling coefficients
return: v_j, output capsule vectors
'''
# update b_ij, c_ij for number of routing iterations
for iteration in range(routing_iterations):
# softmax calculation of coupling coefficients, c_ij
c_ij = softmax(b_ij, dim=2)
# calculating total capsule inputs, s_j = sum(c_ij*u_hat)
s_j = (c_ij * u_hat).sum(dim=2, keepdim=True)
# squashing to get a normalized vector output, v_j
v_j = squash(s_j)
# if not on the last iteration, calculate agreement and new b_ij
if iteration < routing_iterations - 1:
# agreement
a_ij = (u_hat * v_j).sum(dim=-1, keepdim=True)
# new b_ij
b_ij = b_ij + a_ij
return v_j # return latest v_j
```
After implementing the dynamic routing we are ready to implement the DigitCaps class, which consists of the following:
- This layer is composed of 10 "digit" capsules, one for each of our digit classes 0-9.
- Each capsule takes, as input, a batch of 1152-dimensional vectors produced by our 8 primary capsules, above.
- Each of these 10 capsules is responsible for producing a 16-dimensional output vector.
- We will initialize the weight matrix randomly.
```
# it will also be relevant, in this model, to see if I can train on gpu
TRAIN_ON_GPU = torch.cuda.is_available()
if(TRAIN_ON_GPU):
print('Training on GPU!')
else:
print('Only CPU available')
class DigitCaps(nn.Module):
def __init__(self, num_capsules=10, previous_layer_nodes=32*6*6,
in_channels=8, out_channels=16):
'''Constructs an initial weight matrix, W, and sets class variables.
param num_capsules: number of capsules to create
param previous_layer_nodes: dimension of input capsule vector, default value = 1152
param in_channels: number of capsules in previous layer, default value = 8
param out_channels: dimensions of output capsule vector, default value = 16
'''
super(DigitCaps, self).__init__()
# setting class variables
self.num_capsules = num_capsules
self.previous_layer_nodes = previous_layer_nodes # vector input (dim=1152)
self.in_channels = in_channels # previous layer's number of capsules
# starting out with a randomly initialized weight matrix, W
# these will be the weights connecting the PrimaryCaps and DigitCaps layers
self.W = nn.Parameter(torch.randn(num_capsules, previous_layer_nodes,
in_channels, out_channels))
def forward(self, u):
'''Defines the feedforward behavior.
param u: the input; vectors from the previous PrimaryCaps layer
return: a set of normalized, capsule output vectors
'''
# adding batch_size dims and stacking all u vectors
u = u[None, :, :, None, :]
# 4D weight matrix
W = self.W[:, None, :, :, :]
# calculating u_hat = W*u
u_hat = torch.matmul(u, W)
# getting the correct size of b_ij
# setting them all to 0, initially
b_ij = torch.zeros(*u_hat.size())
# moving b_ij to GPU, if available
if TRAIN_ON_GPU:
b_ij = b_ij.cuda()
# update coupling coefficients and calculate v_j
v_j = dynamic_routing(b_ij, u_hat, self.squash, routing_iterations=3)
return v_j # return final vector outputs
def squash(self, input_tensor):
'''Squashes an input Tensor so it has a magnitude between 0-1.
param input_tensor: a stack of capsule inputs, s_j
return: a stack of normalized, capsule output vectors, v_j
'''
# same squash function as before
squared_norm = (input_tensor ** 2).sum(dim=-1, keepdim=True)
scale = squared_norm / (1 + squared_norm) # normalization coeff
output_tensor = scale * input_tensor / torch.sqrt(squared_norm)
return output_tensor
```
# 2)Decoder
As shown in the following figure from [Hinton's paper (capsule networks original paper)](https://arxiv.org/pdf/1710.09829.pdf), the decoder is made of three fully-connected, linear layers. The first layer sees the ten 16-dimensional output vectors from the digit capsule layer and produces hidden_dim = 512 outputs. The next hidden layer has 1024 units, and the third and final linear layer produces an output of 784 values, which can be reshaped into a 28x28 image!

```
class Decoder(nn.Module):
def __init__(self, input_vector_length=16, input_capsules=10, hidden_dim=512):
'''Constructs an series of linear layers + activations.
param input_vector_length: dimension of input capsule vector, default value = 16
param input_capsules: number of capsules in previous layer, default value = 10
param hidden_dim: dimensions of hidden layers, default value = 512
'''
super(Decoder, self).__init__()
# calculate input_dim
input_dim = input_vector_length * input_capsules
# define linear layers + activations
self.linear_layers = nn.Sequential(
nn.Linear(input_dim, hidden_dim), # first hidden layer
nn.ReLU(inplace=True),
nn.Linear(hidden_dim, hidden_dim*2), # second, twice as deep
nn.ReLU(inplace=True),
nn.Linear(hidden_dim*2, 28*28), # can be reshaped into 28*28 image
nn.Sigmoid() # sigmoid activation to get output pixel values in a range from 0-1
)
def forward(self, x):
'''Defines the feedforward behavior.
param x: the input; vectors from the previous DigitCaps layer
return: two things, reconstructed images and the class scores, y
'''
classes = (x ** 2).sum(dim=-1) ** 0.5
classes = F.softmax(classes, dim=-1)
# find the capsule with the maximum vector length
# here, vector length indicates the probability of a class' existence
_, max_length_indices = classes.max(dim=1)
# create a sparse class matrix
sparse_matrix = torch.eye(10) # 10 is the number of classes
if TRAIN_ON_GPU:
sparse_matrix = sparse_matrix.cuda()
# get the class scores from the "correct" capsule
y = sparse_matrix.index_select(dim=0, index=max_length_indices.data)
# create reconstructed pixels
x = x * y[:, :, None]
# flatten image into a vector shape (batch_size, vector_dim)
flattened_x = x.contiguous().view(x.size(0), -1)
# create reconstructed image vectors
reconstructions = self.linear_layers(flattened_x)
# return reconstructions and the class scores, y
return reconstructions, y
```
Now let us collect all these layers (the classes we have created, i.e. ConvLayer, PrimaryCaps, DigitCaps, Decoder) in one class called CapsuleNetwork.
```
class CapsuleNetwork(nn.Module):
def __init__(self):
'''Constructs a complete Capsule Network.'''
super(CapsuleNetwork, self).__init__()
self.conv_layer = ConvLayer()
self.primary_capsules = PrimaryCaps()
self.digit_capsules = DigitCaps()
self.decoder = Decoder()
def forward(self, images):
'''Defines the feedforward behavior.
param images: the original MNIST image input data
return: output of DigitCaps layer, reconstructed images, class scores
'''
primary_caps_output = self.primary_capsules(self.conv_layer(images))
caps_output = self.digit_capsules(primary_caps_output).squeeze().transpose(0,1)
reconstructions, y = self.decoder(caps_output)
return caps_output, reconstructions, y
```
Let us now instantiate the model and print it.
```
# instantiate and print net
capsule_net = CapsuleNetwork()
print(capsule_net)
# move model to GPU, if available
if TRAIN_ON_GPU:
capsule_net = capsule_net.cuda()
```
# Loss
The loss for a capsule network is a weighted combination of two losses:
1. Reconstruction loss
2. Margin loss
### Reconstruction Loss
- It measures how different the reconstructed image produced by the decoder is from the original input image.
- It is calculated using mean squared error, which is nn.MSELoss in PyTorch.
- In [Hinton's paper (capsule networks original paper)](https://arxiv.org/pdf/1710.09829.pdf) the reconstruction loss is weighted with a coefficient of 0.0005 so that it doesn't overpower the margin loss.
### Margin Loss
```
from IPython.display import Image
Image(filename='images/margin_loss.png')
```
Margin Loss is a classification loss (we can think of it as cross entropy) which is based on the length of the output vectors coming from the DigitCaps layer.
Let us elaborate with our example. Say we have an output vector (x) coming from the DigitCaps layer; this output vector represents a certain digit from 0 to 9, since we are using MNIST. We then compute the length (the square root of the summed squared values) of that digit capsule's output vector, $v_k = \sqrt{x^2}$. The correct capsule should have an output vector length greater than or equal to 0.9 ($v_k \geq 0.9$), while the other capsules should output lengths smaller than or equal to 0.1 ($v_k \leq 0.1$).
So, if we have an input image of a 0, then the "correct," zero-detecting, digit capsule should output a vector of magnitude 0.9 or greater! For all the other digits (1-9, in this example) the corresponding digit capsule output vectors should have a magnitude that is 0.1 or less.
The margin-loss formula (shown in the image above) sums the contributions from both sides of the 0.9 and 0.1 thresholds, where k indexes the digit capsules,
$T_k = 1$ if a digit of class k is present,
and $m^{+}$ = 0.9 and $m^{-}$ = 0.1. The λ down-weighting
of the loss for absent digit classes stops the initial learning from shrinking the lengths of the activity vectors of all the digit capsules. In the paper they chose λ = 0.5.
**Note** :
The total loss is simply the sum of the losses of all digit capsules.
```
class CapsuleLoss(nn.Module):
def __init__(self):
'''Constructs a CapsuleLoss module.'''
super(CapsuleLoss, self).__init__()
self.reconstruction_loss = nn.MSELoss(reduction='sum') # cumulative loss, equiv to size_average=False
def forward(self, x, labels, images, reconstructions):
'''Defines how the loss compares inputs.
param x: digit capsule outputs
param labels:
param images: the original MNIST image input data
param reconstructions: reconstructed MNIST image data
return: weighted margin and reconstruction loss, averaged over a batch
'''
batch_size = x.size(0)
## calculate the margin loss ##
# get magnitude of digit capsule vectors, v_c
v_c = torch.sqrt((x**2).sum(dim=2, keepdim=True))
# calculate "correct" and incorrect loss
left = F.relu(0.9 - v_c).view(batch_size, -1)
right = F.relu(v_c - 0.1).view(batch_size, -1)
# sum the losses, with a lambda = 0.5
margin_loss = labels * left + 0.5 * (1. - labels) * right
margin_loss = margin_loss.sum()
## calculate the reconstruction loss ##
images = images.view(reconstructions.size()[0], -1)
reconstruction_loss = self.reconstruction_loss(reconstructions, images)
# return a weighted, summed loss, averaged over a batch size
return (margin_loss + 0.0005 * reconstruction_loss) / images.size(0)
```
Now we instantiate the custom loss class we have implemented, and we use the Adam optimizer as in the paper.
```
import torch.optim as optim
# custom loss
criterion = CapsuleLoss()
# Adam optimizer with default params
optimizer = optim.Adam(capsule_net.parameters())
```
# Train the network
These are the standard steps for training on a batch of data:
1. Clear the gradients of all optimized variables, by making them zero.
2. Forward pass: compute predicted outputs by passing inputs to the model
3. Calculate the loss .
4. Backward pass: compute gradient of the loss with respect to model parameters
5. Perform a single optimization step (parameter update)
6. Update average training loss
```
def train(capsule_net, criterion, optimizer,
n_epochs, print_every=300):
'''Trains a capsule network and prints out training batch loss statistics.
Saves model parameters if *validation* loss has decreased.
param capsule_net: trained capsule network
param criterion: capsule loss function
param optimizer: optimizer for updating network weights
param n_epochs: number of epochs to train for
param print_every: number of batches between printing/recording the training loss, default = 300
return: list of recorded training losses
'''
# track training loss over time
losses = []
# one epoch = one pass over all training data
for epoch in range(1, n_epochs+1):
# initialize training loss
train_loss = 0.0
capsule_net.train() # set to train mode
# get batches of training image data and targets
for batch_i, (images, target) in enumerate(train_loader):
# reshape and get target class
target = torch.eye(10).index_select(dim=0, index=target)
if TRAIN_ON_GPU:
images, target = images.cuda(), target.cuda()
# zero out gradients
optimizer.zero_grad()
# get model outputs
caps_output, reconstructions, y = capsule_net(images)
# calculate loss
loss = criterion(caps_output, target, images, reconstructions)
# perform backpropagation and optimization
loss.backward()
optimizer.step()
train_loss += loss.item() # accumulated training loss
# print and record training stats
if batch_i != 0 and batch_i % print_every == 0:
avg_train_loss = train_loss/print_every
losses.append(avg_train_loss)
print('Epoch: {} \tTraining Loss: {:.8f}'.format(epoch, avg_train_loss))
train_loss = 0 # reset accumulated training loss
return losses
# training for 5 epochs
n_epochs = 5
losses = train(capsule_net, criterion, optimizer, n_epochs=n_epochs)
```
Now let us plot the training loss to get a better feeling for how the loss evolves:
```
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(losses)
plt.title("Training Loss")
plt.show()
```
# Test the trained network
Test the trained network on unseen data:
```
def test(capsule_net, test_loader):
'''Prints out test statistics for a given capsule net.
param capsule_net: trained capsule network
param test_loader: test dataloader
return: returns last batch of test image data and corresponding reconstructions
'''
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
test_loss = 0 # loss tracking
capsule_net.eval() # eval mode
for batch_i, (images, target) in enumerate(test_loader):
target = torch.eye(10).index_select(dim=0, index=target)
batch_size = images.size(0)
if TRAIN_ON_GPU:
images, target = images.cuda(), target.cuda()
# forward pass: compute predicted outputs by passing inputs to the model
caps_output, reconstructions, y = capsule_net(images)
# calculate the loss
loss = criterion(caps_output, target, images, reconstructions)
# update average test loss
test_loss += loss.item()
# convert output probabilities to predicted class
_, pred = torch.max(y.data.cpu(), 1)
_, target_shape = torch.max(target.data.cpu(), 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target_shape.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(batch_size):
label = target_shape.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# avg test loss
avg_test_loss = test_loss/len(test_loader)
print('Test Loss: {:.8f}\n'.format(avg_test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
# return last batch of capsule vectors, images, reconstructions
return caps_output, images, reconstructions
# call test function and get reconstructed images
caps_output, images, reconstructions = test(capsule_net, test_loader)
```
Now it is time to display the reconstructions:
```
def display_images(images, reconstructions):
'''Plot one row of original MNIST images and another row (below)
of their reconstructions.'''
# convert to numpy images
images = images.data.cpu().numpy()
reconstructions = reconstructions.view(-1, 1, 28, 28)
reconstructions = reconstructions.data.cpu().numpy()
# plot the first ten input images and then reconstructed images
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(26,5))
# input images on top row, reconstructions on bottom
for images, row in zip([images, reconstructions], axes):
for img, ax in zip(images, row):
ax.imshow(np.squeeze(img), cmap='gray')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display original and reconstructed images, in rows
display_images(images, reconstructions)
```
| github_jupyter |
```
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:60% !important; }</style>"))
```
# Monte Carlo Control
So far, we assumed that we know the underlying model of the environment and that the agent has access to it.
Now, we consider the case in which we do not have access to the full MDP. That is, we now do __model-free control__.
To illustrate this, we implement the blackjack example from RL Lecture 5 by David Silver on Monte Carlo Control [see example](https://youtu.be/0g4j2k_Ggc4?t=2193)
We use Monte-Carlo policy evaluation based on the action-value function $Q=q_\pi$ and then an $\epsilon$-greedy exploration strategy (greedy action selection with probability $\epsilon$ of choosing a random move).
Remember: $G_t = R_{t+1} + \gamma R_{t+2} + \dots = \sum_{k=0}^{\infty} \gamma^k R_{t+k+1}$
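As a small illustration (the rewards and discount factor below are made up for this example), the return of a short episode can be computed directly:
```
# toy computation of the discounted return G_t, assuming gamma = 0.9
# and rewards R_{t+1}, R_{t+2}, R_{t+3} = 0, 0, 1
gamma = 0.9
rewards = [0.0, 0.0, 1.0]
G = sum(gamma**k * r for k, r in enumerate(rewards))
print(G)  # 0.81 = 0.9**2 * 1
```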
__Algorithm:__
* Update $Q(s,a)$ incrementally after each episode
* For each state-action pair $(S_t, A_t)$ with return $G_t$ do:
  * $N(S_t,A_t) \gets N(S_t,A_t) + 1$
  * $Q(S_t,A_t) \gets Q(S_t,A_t) + \frac{1}{N(S_t,A_t)} \cdot (G_t - Q(S_t,A_t))$
  * The update is driven by the _actual return_ ($G_t$) minus the _estimated return_ ($Q(S_t,A_t)$)
  * $\frac{1}{N(S_t,A_t)}$ is a weighting factor that lets us forget old episodes slowly
* Improve the policy based on the new action-value function
  * $\epsilon \gets \frac{1}{k}$
  * $\pi \gets \epsilon\text{-greedy}(Q)$
MC converges to the solution with minimum mean-squared error.
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import plotting
from operator import itemgetter
plotting.set_layout(drawing_size=15)
```
## The Environment
For this example we use the python package [gym](https://gym.openai.com/docs/) which provides a ready-to-use implementation of a BlackJack environment.
The states are stored in this tuple format: (agent's score, dealer's visible score, whether or not the agent has a usable ace).
Here, we can look at the number of different states:
```
import gym
env = gym.make('Blackjack-v0')
env.observation_space
```
And the number of actions we can take:
```
env.action_space
```
To start a game, call `env.reset()`, which returns the initial observation:
```
env.reset()
```
We can take two different actions: `hit` = 1 or `stay` = 0.
The result of this function call shows the _observation_, the reward (winning = +1, losing = -1) and whether the game is over:
```
env.step(1)
```
## Define the Agent
```
class agents():
""" This class defines the agent
"""
def __init__(self, state_space, action_space, ):
""" TODO """
# Store the discount factor
self.gamma = 0.7
# Store the epsilon parameters
self.epsilon = 1
n_player_states = state_space[0].n
n_dealer_states = state_space[1].n
n_usable_ace = state_space[2].n  # the usable-ace flag has two states
# two available actions stay (0) and hit (1)
self.actions = list(range(action_space.n))
# Store the action value function for each state and action
self.q = np.zeros((n_player_states,n_dealer_states,n_usable_ace, action_space.n))
# incremental counter for a state
self.N = np.zeros((n_player_states,n_dealer_states,n_usable_ace,action_space.n))
def greedy_move(self,s, k_episode):
# given a state return the next move according to epsilon greedy algorithm
# find optimal action a^*
v_a = []
for i_a,a in enumerate(self.actions):
# get value for action state pair
s2 = 1 if s[2] else 0
v = self.q[s[0],s[1],s2,a]
v_a.append((v,a))
# get action with maximal value
a_max = max(v_a,key=itemgetter(0))[1]
# with probability 1-eps execute the best action, otherwise choose the other action
if np.random.rand() < (1-self.epsilon):
a = a_max
else:
a = int(not a_max)
# decrement epsilon
self.epsilon = 1/(k_episode)
return a
def incre_counter(self, state, action):
# Increments the counter for a given state and action
# convert the true/false state to 0/1
s2 = 1 if state[2] else 0
# increment the counter for that state
self.N[state[0],state[1],s2,action] += 1
def get_counter(self, state, action):
# convert the true/false state to 0/1
s2 = 1 if state[2] else 0
# increment the counter for that state
return self.N[state[0],state[1],s2,action]
def policy_evaluation(self,all_states,all_rewards, all_actions):
# Update Q(s,a) incrementally for each visited state-action pair
for i_s,s in enumerate(all_states):
# get corresponding action for given state
a = all_actions[i_s]
# convert the true/false state to 0/1
s2 = 1 if s[2] else 0
# Get the value function for that state
Q_s = self.q[s[0],s[1],s2,a]
# calculate the discounted return G_t from this step onwards
G = np.sum([self.gamma**k * r for k, r in enumerate(all_rewards[i_s:])])
# Update the action-value function
self.q[s[0],s[1],s2,a] = Q_s + 1/self.get_counter(s,a) * (G - Q_s)
# how many episodes should be played
n_episodes = 500000
# initialize the agent. let it know the number of states and actions
agent = agents(env.observation_space, env.action_space)
# Incremental MC updates
# Play one episode then update V(s)
for i in range(n_episodes):
all_states = []
all_rewards = []
all_actions = []
# start the game
s = env.reset()
# play until environment tells you that the game is over
game_ended = False
while not game_ended:
# increment counter
# choose a movement according to eps-greedy algorithm and update policy
move = agent.greedy_move(s,i+1)
# use the old state for evaluation
all_states.append(s)
# increment the counter for a given state and action
agent.incre_counter(s,move)
# move
s,r,game_ended,_ = env.step(move)
# save everything
# all_states.append(s)
all_rewards.append(r)
all_actions.append(move)
# Evaluate policy
agent.policy_evaluation(all_states,all_rewards,all_actions)
### END OF EPISODE ###
```
## Plotting
```
fig = plt.figure(figsize=(10,5))
axes = fig.subplots(1,2,squeeze=False)
ax = axes[0,0]
c = ax.pcolormesh(agent.q[13:22,1:,0,:].max(2),vmin=-1,vmax=1)
ax.set_yticklabels(range(13,22))
ax.set_xticklabels(range(1,11,2))
ax.set_xlabel('Dealer Showing')
ax.set_ylabel('Player Sum')
ax.set_title('No Usable Aces')
# plt.colorbar(c)
ax = axes[0,1]
c = ax.pcolormesh(agent.q[13:22,1:,1,:].max(2),vmin=-1,vmax=1)
ax.set_yticklabels(range(13,22))
ax.set_xticklabels(range(1,11,2))
ax.set_title('Usable Aces')
ax.set_xlabel('Dealer Showing')
plt.colorbar(c)
plt.show()
fig = plt.figure(figsize=(10,5))
axes = fig.subplots(1,2,squeeze=False)
ax = axes[0,0]
c = ax.contour(agent.q[13:22,1:,0,:].max(2),levels=1,vmin=-1,vmax=1)
ax.set_yticklabels(range(13,22))
ax.set_xticklabels(range(1,11,2))
ax.set_xlabel('Dealer Showing')
ax.set_ylabel('Player Sum')
ax.set_title('No Usable Aces')
# plt.colorbar(c)
ax = axes[0,1]
c = ax.contour(agent.q[13:22,1:,1,:].max(2),levels=1,vmin=-1,vmax=1)
ax.set_yticklabels(range(13,22))
ax.set_xticklabels(range(1,11,2))
ax.set_title('Usable Aces')
ax.set_xlabel('Dealer Showing')
plt.colorbar(c)
plt.show()
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import glob
import os
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
display(HTML("<style>div.output_scroll { height: 44em; }</style>"))
def get_meta(path):
"""Returns (threads, num_jsons, repeats)"""
props = os.path.splitext(os.path.basename(path))[0].split('_')
values = [int(x[1:]) for x in props[1:]]
return {'max_value':values[0],
'max_num_values':values[1],
'threads':values[2],
'input_size_approx':values[3],
'repeats':values[4]}
def load(file):
"""Load the experiment data from a CSV file with converter metrics."""
schema = {
'num_threads': np.int64(),
'num_jsons_converted': np.int64(),
'num_json_bytes_converted': np.int64(),
'num_recordbatch_bytes': np.int64(),
'num_ipc': np.int64(),
'ipc_bytes': np.int64(),
'num_buffers_converted': np.int64(),
't_parse': np.float64(),
't_resize': np.float64(),
't_serialize': np.float64(),
't_thread': np.float64(),
't_enqueue': np.float64(),
'status': np.int64()
}
df = pd.read_csv(file, dtype=schema)
meta = get_meta(file)
for key, value in meta.items():
df.insert(0, key, value)
# Make sure there were no errors for converters.
assert(df['status'].sum() == len(df.index))
return df
def analyze(df):
"""Analyze the experiment data, deriving various metrics such as throughput."""
# Calculate time spent within the thread as 'other'.
df['t_other'] = df['t_thread'] - df[['t_parse', 't_resize', 't_serialize', 't_enqueue']].sum(axis=1)
# Calculate the throughput per thread
df['Parse throughput (in)'] = df['num_json_bytes_converted'] / df['t_parse']
df['Parse throughput (out)'] = df['num_recordbatch_bytes'] / df['t_parse']
return df
def aggr_counts(digit_counts):
total = 0
for n, d in digit_counts:
total = total + n
return total
def avg_number_of_decimals(max_value):
''' Return avg number of decimals of uniform random numbers from 0 up to max_value. '''
digits = 1
digit_counts = []
while (pow(10, digits) < max_value):
nums = pow(10,digits) - aggr_counts(digit_counts)
digit_counts.append((nums, digits))
digits = digits + 1
digit_counts.append((max_value - aggr_counts(digit_counts), digits))
avg_num_digits = 0
for n, d in digit_counts:
avg_num_digits += n/max_value * d
return avg_num_digits
def summarize(df):
"""Summarize the data from one run into one row with averages."""
assert(len(pd.unique(df['max_value'])) == 1)
assert(len(pd.unique(df['max_num_values'])) == 1)
assert(len(pd.unique(df['threads'])) == 1)
assert(len(pd.unique(df['input_size_approx'])) == 1)
assert(df['num_threads'].sum()==pd.unique(df['threads'])[0])
repeats = pd.unique(df['repeats'])[0]
# Avg. value bytes per JSON is the average array size (which is half the max, it is uniform random)
# times the average number of bytes for uniform random numbers between 0 and max value
max_value = pd.unique(df['max_value'])[0]
max_num_values = pd.unique(df['max_num_values'])[0]
value_bytes = avg_number_of_decimals(max_value) * max_num_values / 2
row = {'Max. value': max_value,
'Max. number of values': max_num_values,
'Value bytes': value_bytes,
'Input size': pd.unique(df['input_size_approx'])[0],
'Repeats': pd.unique(df['repeats'])[0],
'Threads': df['num_threads'].sum(),
'JSONs': df['num_jsons_converted'].sum() / repeats,
'Bytes (in)': df['num_json_bytes_converted'].sum() / repeats,
'RecordBatch bytes': df['num_recordbatch_bytes'].sum() / repeats,
'IPC messages': df['num_ipc'].sum() / repeats,
'IPC bytes': df['ipc_bytes'].sum() / repeats,
'Buffers converted': df['num_buffers_converted'].sum() / repeats,
# For time, we use the max time of all threads,
# since the throughput is determined by the slowest thread in the pool,
# and they all start operating simultaneously
'Parse time': df['t_parse'].max(),
'Resize time': df['t_resize'].max(),
'Serialize time': df['t_serialize'].max(),
'Enqueue time': df['t_enqueue'].max(),
'Other time': df['t_other'].max(),
'Thread time': df['t_thread'].max(),
'Parse throughput (in)': df['num_json_bytes_converted'].sum() / df['t_parse'].max(),
'Parse throughput (out)': df['num_recordbatch_bytes'].sum() / df['t_parse'].max()}
return row
def get_all_data(data_path, schema, impl):
path = '{}/{}/latency/threads/metrics/{}/'.format(data_path, schema, impl.lower())
csv_files = []
for file in glob.glob("{}*.csv".format(path)):
csv_files.append(file)
print("Found {} files in {}".format(len(csv_files), path))
records = []
for file in csv_files:
records.append(summarize(analyze(load(file))))
df = pd.DataFrame.from_records(records)
df.sort_values(by=['Threads', 'JSONs'], inplace=True)
df.insert(0,'Implementation', impl)
# Use only max value
df = df[df['Max. value'] == 18446744073709551615]
display(pd.unique(df['Max. number of values']))
# Print max throughput
display('{} max: {}'.format(impl, df['Parse throughput (in)'].max() * 1e-9))
# Print mean throughput of highest throughput per input size
display('{} mean: {}'.format(impl, df.groupby(['Implementation', 'Input size']).agg({'Parse throughput (in)': 'max'})['Parse throughput (in)'].mean() * 1e-9))
return df
def get_max_throughput_for_max_size(df):
df = df[df.JSONs == df.JSONs.max()]
#df.set_index('Threads', inplace=True)
result = df[df['Parse throughput (in)'] == df['Parse throughput (in)'].max()]
return result
import matplotlib.pyplot as plt
from utils import lighten_color
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.serif": ["Palatino"],
"font.size": 14
})
colors = ['#4878d0', '#6acc64', '#d65f5f', '#d5bb67', '#dc7ec0', '#8c613c']
markers = ['o', 's', 'd']
d_impls = []
d_impls.append(get_all_data('../experiments/data-p9-battery', 'battery', 'Arrow'))
d_impls.append(get_all_data('../experiments/data-p9-battery', 'battery', 'Custom'))
d_impls.append(get_all_data('../experiments/data-p9-battery', 'battery', 'FPGA'))
#d_impls.append(get_all_data('../experiments/data-intel-battery', 'battery', 'Arrow'))
#d_impls.append(get_all_data('../experiments/data-intel-battery', 'battery', 'Custom'))
#d_impls.append(get_all_data('../experiments/data-intel-battery', 'battery', 'FPGA'))
df = pd.concat(d_impls)
#with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# Average the throughput of various number of max. array sizes
df = df.groupby(['Implementation', 'Threads', 'Input size']).agg({'Parse throughput (in)': 'mean'})
df = df.reset_index()
display(df)
max_tp = df['Parse throughput (in)'].max()
# Get all dimensions for plots
#max_values = pd.unique(df['Max. value'])
#max_num_values = pd.unique(df['Max. number of values'])
#value_bytes = np.sort(pd.unique(df['Value bytes']))
input_sizes = np.sort(pd.unique(df['Input size']))
threads = np.sort(pd.unique(df['Threads']))
impls = pd.unique(df['Implementation'])
#print("Value bytes :", value_bytes)
print("Input sizes :", input_sizes)
print("Threads :", threads)
print("Impls :", impls)
fig, axs = plt.subplots(ncols=len(input_sizes), figsize=[10, 3], sharey=True, sharex=True)
handles = {}
for xa, inps in enumerate(input_sizes):
ax = axs[xa]
for i, impl in enumerate(impls):
# Prepare plotting data
dl = df[(df['Input size'] == inps) & (df['Implementation'] == impl)]
y = dl['Parse throughput (in)'] * 1e-9
x = dl['Threads']
# Plot FPGA data
handles[impl], = ax.plot(x, y, c=lighten_color(colors[i],0.3), marker=markers[i], mfc=colors[i], mec=colors[i], linewidth=3)
if impl == 'FPGA':
handles['FPGA max.'] = ax.axhline(y=max(y.to_numpy()), color=lighten_color(colors[i],0.7), ls='--')
# Set inline
ax.annotate("Input size:{:.0f} MiB".format(inps / (1<<20)),
xycoords='axes fraction',
xy=(0.05, 0.875),
fontsize=12,
backgroundcolor='#FFFFFF80')
ax.set_xticks(threads)
ax.set_xticklabels(threads, rotation=0, fontsize=8)
ax.set_yticks(range(0, 25,2))
ax.set_ylim(0, 1.25*max_tp * 1e-9)
ax.grid(which='both')
if xa == 0:
ax.set_xlabel('Threads / Parser instances')
ax.set_ylabel('Throughput (GB/s)')
leg_handles = [v for k,v in handles.items()]
leg_labels = [k for k,v in handles.items()]
fig.legend(leg_handles, leg_labels, ncol=4, bbox_to_anchor=(-0.17, 0.93, 1.0, 0.1), frameon=False)
plt.subplots_adjust(hspace = .1, wspace = .075, bottom=0.15)
fig.savefig("throughput-battery-p9.pdf")
```
| github_jupyter |
# Scenario Analysis: Pop Up Shop

Kürschner (talk) 17:51, 1 December 2020 (UTC), CC0, via Wikimedia Commons
```
# install Pyomo and solvers for Google Colab
import sys
if "google.colab" in sys.modules:
!wget -N -q https://raw.githubusercontent.com/jckantor/MO-book/main/tools/install_on_colab.py
%run install_on_colab.py
```
## The problem
There is an opportunity to operate a pop-up shop to sell a unique commemorative item for events held at a famous location. The items cost 12 € each and will sell for 40 €. Unsold items can be returned to the supplier at a value of only 2 € due to their commemorative nature.
| Parameter | Symbol | Value |
| :---: | :---: | :---: |
| sales price | $r$ | 40 € |
| unit cost | $c$ | 12 € |
| salvage value | $w$ | 2 € |
Profit will increase with sales. Demand for these items, however, will be high only if the weather is good. Historical data suggests the following scenarios.
| Scenario ($s$) | Demand ($d_s$) | Probability ($p_s$) |
| :---: | :-----: | :----------: |
| Sunny Skies | 650 | 0.10 |
| Good Weather | 400 | 0.60 |
| Poor Weather | 200 | 0.30 |
The problem is to determine how many items to order for the pop-up shop.
The dilemma is that the weather won't be known until after the order is placed. Ordering enough items to meet demand for a good weather day results in a financial penalty on returned goods if the weather is poor. But ordering just enough items to satisfy demand on a poor weather day leaves "money on the table" if the weather is good.
How many items should be ordered for sale?
## Expected value for the mean scenario (EVM)
A naive solution to this problem is to place an order equal to the expected demand. The expected demand is given by
$$
\begin{align*}
\mathbb E[D] & = \sum_{s\in S} p_s d_s
\end{align*}
$$
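With the scenario data above this works out to
$$
\begin{align*}
\mathbb E[D] & = 0.10 \cdot 650 + 0.60 \cdot 400 + 0.30 \cdot 200 = 365
\end{align*}
$$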
Choosing an order size $x = \mathbb E[D]$ results in an expected profit we call the **expected value of the mean scenario (EVM)**.
Variable $y_s$ is the actual number of items sold if scenario $s$ should occur. The number sold is the lesser of the demand $d_s$ and the order size $x$.
$$
\begin{align*}
y_s & = \min(d_s, x) & \forall s \in S
\end{align*}
$$
Any unsold inventory $x - y_s$ remaining after the event will be sold at the salvage price $w$. Taking into account the revenue from sales $r y_s$, the salvage value of the unsold inventory $w(x - y_s)$, and the cost of the order $c x$, the profit $f_s$ for scenario $s$ is given by
$$
\begin{align*}
f_s & = r y_s + w (x - y_s) - c x & \forall s \in S
\end{align*}
$$
The average or expected profit is given by
$$
\begin{align*}
\text{EVM} = \mathbb E[f] & = \sum_{s\in S} p_s f_s
\end{align*}
$$
These calculations can be executed using operations on the pandas dataframe. Let's begin by calculating the expected demand.
Below we create a pandas DataFrame object to store the scenario data.
```
import numpy as np
import pandas as pd
# price information
r = 40
c = 12
w = 2
# scenario information
scenarios = {
"sunny skies" : {"probability": 0.10, "demand": 650},
"good weather": {"probability": 0.60, "demand": 400},
"poor weather": {"probability": 0.30, "demand": 200},
}
df = pd.DataFrame.from_dict(scenarios).T
display(df)
expected_demand = sum(df["probability"] * df["demand"])
print(f"Expected demand = {expected_demand}")
```
Subsequent calculations can be done directly with the pandas dataframe holding the scenario data.
```
df["order"] = expected_demand
df["sold"] = df[["demand", "order"]].min(axis=1)
df["salvage"] = df["order"] - df["sold"]
df["profit"] = r * df["sold"] + w * df["salvage"] - c * df["order"]
EVM = sum(df["probability"] * df["profit"])
print(f"Mean demand = {expected_demand}")
print(f"Expected value of the mean demand (EVM) = {EVM}")
display(df)
```
## Expected value of the stochastic solution (EVSS)
The optimization problem is to find the order size $x$ that maximizes expected profit subject to operational constraints on the decision variables. The variables $x$ and $y_s$ are non-negative integers, while $f_s$ is a real number that can take either positive or negative values. The number of goods sold in scenario $s$ can be no larger than the order size $x$ or the customer demand $d_s$.
The problem to be solved is
$$
\begin{align*}
\text{EV} = & \max_{x, y_s} \mathbb E[F] = \sum_{s\in S} p_s f_s \\
\text{subject to:} \\
f_s & = r y_s + w(x - y_s) - c x & \forall s \in S\\
y_s & \leq x & \forall s \in S \\
y_s & \leq d_s & \forall s \in S
\end{align*}
$$
where $S$ is the set of all scenarios under consideration.
```
import pyomo.environ as pyo
import pandas as pd
# price information
r = 40
c = 12
w = 2
# scenario information
scenarios = {
"sunny skies" : {"demand": 650, "probability": 0.1},
"good weather": {"demand": 400, "probability": 0.6},
"poor weather": {"demand": 200, "probability": 0.3},
}
# create model instance
m = pyo.ConcreteModel('Pop-up Shop')
# set of scenarios
m.S = pyo.Set(initialize=scenarios.keys())
# decision variables
m.x = pyo.Var(domain=pyo.NonNegativeIntegers)
m.y = pyo.Var(m.S, domain=pyo.NonNegativeIntegers)
m.f = pyo.Var(m.S, domain=pyo.Reals)
# objective
@m.Objective(sense=pyo.maximize)
def EV(m):
return sum([scenarios[s]["probability"]*m.f[s] for s in m.S])
# constraints
@m.Constraint(m.S)
def profit(m, s):
return m.f[s] == r*m.y[s] + w*(m.x - m.y[s]) - c*m.x
@m.Constraint(m.S)
def sales_less_than_order(m, s):
return m.y[s] <= m.x
@m.Constraint(m.S)
def sales_less_than_demand(m, s):
return m.y[s] <= scenarios[s]["demand"]
# solve
solver = pyo.SolverFactory('glpk')
results = solver.solve(m)
# display solution using Pandas
print("Solver Termination Condition:", results.solver.termination_condition)
print("Expected Profit:", m.EV())
print()
for s in m.S:
scenarios[s]["order"] = m.x()
scenarios[s]["sold"] = m.y[s]()
scenarios[s]["salvage"] = m.x() - m.y[s]()
scenarios[s]["profit"] = m.f[s]()
df = pd.DataFrame.from_dict(scenarios).T
display(df)
```
Optimizing over all scenarios provides an expected profit of 8,920 €, an increase of 581 € over the base case of simply ordering the expected number of items sold. The new solution places a larger order. In poor weather conditions there will be more returns and lower profit that is more than compensated by the increased profits in good weather conditions.
The additional value that results from solving this planning problem is called the **Value of the Stochastic Solution (VSS)**: the extra profit compared to simply ordering to meet expected demand. In this case,
$$\text{VSS} = \text{EV} - \text{EVM} = 8,920 - 8,339 = 581$$
## Expected value with perfect information (EVPI)
Maximizing expected profit requires the size of the order to be decided before knowing which scenario will unfold. The decision for $x$ has to be made "here and now" with probabilistic information about the future, but without specific information on which future will actually transpire.
Nevertheless, we can perform the hypothetical calculation of what profit would be realized if we could know the future. We are still subject to the variability of the weather; what is different is that we know what the weather will be at the time the order is placed.
The resulting value for the expected profit is called the **Expected Value of Perfect Information (EVPI)**. The difference EVPI - EV is the extra profit due to having perfect knowledge of the future.
To compute the expected profit with perfect information, we let the order variable $x$ be indexed by the scenario that will subsequently unfold. Given decision variables $x_s$, the model for EVPI becomes
$$
\begin{align*}
\text{EVPI} = & \max_{x_s, y_s} \mathbb E[f] = \sum_{s\in S} p_s f_s \\
\text{subject to:} \\
f_s & = r y_s + w(x_s - y_s) - c x_s & \forall s \in S\\
y_s & \leq x_s & \forall s \in S \\
y_s & \leq d_s & \forall s \in S
\end{align*}
$$
The following implementation is a variation of the prior cell.
```
import pyomo.environ as pyo
import pandas as pd
# price information
r = 40
c = 12
w = 2
# scenario information
scenarios = {
"sunny skies" : {"demand": 650, "probability": 0.1},
"good weather": {"demand": 400, "probability": 0.6},
"poor weather": {"demand": 200, "probability": 0.3},
}
# create model instance
m = pyo.ConcreteModel('Pop-up Shop')
# set of scenarios
m.S = pyo.Set(initialize=scenarios.keys())
# decision variables
m.x = pyo.Var(m.S, domain=pyo.NonNegativeIntegers)
m.y = pyo.Var(m.S, domain=pyo.NonNegativeIntegers)
m.f = pyo.Var(m.S, domain=pyo.Reals)
# objective
@m.Objective(sense=pyo.maximize)
def EV(m):
return sum([scenarios[s]["probability"]*m.f[s] for s in m.S])
# constraints
@m.Constraint(m.S)
def profit(m, s):
return m.f[s] == r*m.y[s] + w*(m.x[s] - m.y[s]) - c*m.x[s]
@m.Constraint(m.S)
def sales_less_than_order(m, s):
return m.y[s] <= m.x[s]
@m.Constraint(m.S)
def sales_less_than_demand(m, s):
return m.y[s] <= scenarios[s]["demand"]
# solve
solver = pyo.SolverFactory('glpk')
results = solver.solve(m)
# display solution using Pandas
print("Solver Termination Condition:", results.solver.termination_condition)
print("Expected Profit:", m.EV())
print()
for s in m.S:
scenarios[s]["order"] = m.x[s]()
scenarios[s]["sold"] = m.y[s]()
scenarios[s]["salvage"] = m.x[s]() - m.y[s]()
scenarios[s]["profit"] = m.f[s]()
df = pd.DataFrame.from_dict(scenarios).T
display(df)
```
## Summary
To summarize, we have computed three different solutions to the order-size problem:
* The expected value of the mean solution (EVM) is the expected profit resulting from ordering the number of items expected to be sold across all scenarios.
* The expected value of the stochastic solution (EVSS) is the expected profit found by solving a two-stage optimization problem where the order size is the "here and now" decision made without specific knowledge of which future scenario will transpire.
* The expected value of perfect information (EVPI) is the result of a hypothetical case where knowledge of the future scenario is somehow available when the order has to be placed.
For this example we found
| Solution | Value (€) |
| :------ | ----: |
| Expected Value of the Mean Solution (EVM) | 8,339.0 |
| Expected Value of the Stochastic Solution (EVSS) | 8,920.0 |
| Expected Value of Perfect Information (EVPI) | 10,220.0 |
These results verify our expectation that
$$
\begin{align*}
EVM \leq EVSS \leq EVPI
\end{align*}
$$
The value of the stochastic solution
$$
\begin{align*}
VSS = EVSS - EVM = 581
\end{align*}
$$
The value of perfect information
$$
\begin{align*}
VPI = EVPI - EVSS = 1,300
\end{align*}
$$
As one might expect, there is a cost that results from lack of knowledge about an uncertain future.
| github_jupyter |
<span style="color:#888888">Copyright (c) 2014-2021 National Technology and Engineering Solutions of Sandia, LLC. Under the terms of Contract DE-NA0003525 with National Technology and Engineering Solutions of Sandia, LLC, the U.S. Government retains certain rights in this software. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:</span>
<span style="color:#888888">1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.</span>
<span style="color:#888888">2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.</span>
<span style="color:#888888">THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.</span>
# <span style="color:#0054a8">**Tutorial 1:**</span> <span style="color:#555555">How to Create Trajectory Points from a Delimited File</span>
## Purpose
This notebook demonstrates how to create Tracktable Trajectory Point objects from a delimited (e.g. csv, tsv) data file. A data file must contain the following columns in order to be compatible with Tracktable:
* **<span style="color:#00add0">an identifier</span>** that is unique to each object
* **<span style="color:#00add0">a timestamp</span>**
* **<span style="color:#00add0">longitude</span>**
* **<span style="color:#00add0">latitude</span>**
Both ordering and headers for these columns can vary, but they must exist in the file. Each row of the data file should represent the information for a single trajectory point.
**<span style="color:#81062e">IMPORTANT:</span>** Delimited files must be **sorted by timestamp** to be compatible with Tracktable.
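If your file is not already in timestamp order, one simple way to sort it beforehand is with pandas (a minimal sketch; the file and column names are placeholders, so substitute your own):
```
import pandas as pd

# sort a delimited file by its timestamp column before handing it to Tracktable
# (file name and column name below are hypothetical)
points = pd.read_csv('my_points.csv')
points.sort_values('timestamp').to_csv('my_points_sorted.csv', index=False)
```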
*Note:* This notebook does not cover how to create a Trajectory object (as opposed to a list of Trajectory point objects). Please see [Tutorial 2](Tutorial_02.ipynb) for an example of how to create Trajectory objects from a csv file containing trajectory point information.
## Step 1: Identify your CSV/TSV File
We will use the provided example data $^1$ for this tutorial. If you are using another filename, `data_filename` should be set to the string containing the path to your csv file.
```
from tracktable.core import data_directory
import os.path
data_filename = os.path.join(data_directory(), 'NYHarbor_2020_06_30_first_hour.csv')
```
## Step 2: Create a TrajectoryPointReader object.
We will create a Terrestrial point reader, which will expect **(longitude, latitude)** coordinates. Alternatively, if our data points were in a Cartesian coordinate system, we would import the `TrajectoryPointReader` object from `tracktable.domain.cartesian2d` or `tracktable.domain.cartesian3d`.
```
from tracktable.domain.terrestrial import TrajectoryPointReader
reader = TrajectoryPointReader()
```
## Step 3: Give the TrajectoryPointReader object info about the file.
Have the reader open an input stream to the data file.
```
reader.input = open(data_filename, 'r')
```
### <span style="color:#0f0f0f">*Additional Settings*</span>
Identify the comment character for the data file. Any lines with this as the first non-whitespace character will be ignored. This is optional and defaulted to `#`.
```
reader.comment_character = '#'
```
Identify the file's delimiter. For comma-separated (CSV) files, the delimiter should be set to `,`. For tab-separated files, this should be `\t`. This is optional, and the default value is `,`.
```
reader.field_delimiter = ','
```
Identify the string associated with a null value in a cell. This is optional and defaults to an empty string.
```
reader.null_value = 'NaN'
```
### <span style="color:#0f0f0f">*Required Columns*</span>
We must tell the reader where to find the **<span style="color:#00add0">unique object ID</span>**, **<span style="color:#00add0">timestamp</span>**, **<span style="color:#00add0">longitude</span>** and **<span style="color:#00add0">latitude</span>** columns. Column numbering starts at zero.
If no column numbers are given, the reader will assume they are in the order listed above. Note that terrestrial points are stored as (longitude, latitude) in Tracktable.
```
reader.object_id_column = 3
reader.timestamp_column = 0
reader.coordinates[0] = 1 # longitude
reader.coordinates[1] = 2 # latitude
```
### <span style="color:#0f0f0f">*Optional Columns*</span>
Your data file may contain additional information (e.g. speed, heading, altitude) that you wish to store with your trajectory points. These can be stored as floats, strings, or datetime objects. An example of each is shown below, in that order.
```
reader.set_real_field_column('heading', 6)
reader.set_string_field_column('vessel-name', 7)
reader.set_time_field_column('eta', 17)
```
## Step 4: Convert the Reader to a List of Trajectory Points
```
trajectory_points = list(reader)
```
How many trajectory points do we have?
```
len(trajectory_points)
```
## Step 5: Accessing Trajectory Point Info
The information from the required columns of the csv can be accessed for a single `trajectory_point` object as
* **<span style="color:#00add0">unique object identifier:</span>** `trajectory_point.object_id`
* **<span style="color:#00add0">timestamp:</span>** `trajectory_point.timestamp`
* **<span style="color:#00add0">longitude:</span>** `trajectory_point[0]`
* **<span style="color:#00add0">latitude:</span>** `trajectory_point[1]`
The optional column information is available through the member variable `properties` as follows: `trajectory_point.properties['what-you-named-it']`.
This is demonstrated below for our first ten trajectory points.
```
for traj_point in trajectory_points[:10]:
object_id = traj_point.object_id
timestamp = traj_point.timestamp
longitude = traj_point[0]
latitude = traj_point[1]
heading = traj_point.properties["heading"]
vessel_name = traj_point.properties["vessel-name"]
eta = traj_point.properties["eta"]
print(f'Unique ID: {object_id}')
print(f'Timestamp: {timestamp}')
print(f'Longitude: {longitude}')
print(f'Latitude: {latitude}')
print(f'Heading: {heading}')
print(f'Vessel Name: {vessel_name}')
print(f'ETA: {eta}\n')
```
<span style="color:gray">$^1$ Bureau of Ocean Energy Management (BOEM) and National Oceanic and Atmospheric Administration (NOAA). MarineCadastre.gov. *AIS Data for 2020.* Retrieved February 2021 from [marinecadastre.gov/data](https://marinecadastre.gov/data/). Trimmed down to the first hour of June 30, 2020, restricted to in NY Harbor.</span>
| github_jupyter |
# Optimization of a Voigt profile
```
from exojax.spec.rlpf import rvoigt
import jax.numpy as jnp
import matplotlib.pyplot as plt
```
Let's optimize the Voigt function $V(\nu, \beta, \gamma_L)$ using exojax!
$V(\nu, \beta, \gamma_L)$ is a convolution of a Gaussian with a standard deviation of $\beta$ and a Lorentzian with a width parameter of $\gamma_L$.
Note that we use `spec.rlpf.rvoigt` instead of `spec.voigt`. `rvoigt` is a Voigt profile defined with a custom VJP (reverse-mode rule), while `voigt` is defined via a JVP (forward-mode rule). For now, `rvoigt` is not the default Voigt function, but the plan is to make the VJP version the default in the future.
```
nu=jnp.linspace(-10,10,100)
plt.plot(nu, rvoigt(nu,1.0,2.0)) #beta=1.0, gamma_L=2.0
```
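As a quick cross-check (a sketch, not part of the original notebook; it assumes `scipy` is installed and that `rvoigt` uses the standard normalization of a Voigt line profile, with $\beta$ the Gaussian standard deviation and $\gamma_L$ the Lorentzian HWHM), the curve above can be compared against `scipy.special.voigt_profile`:
```
import numpy as np
from scipy.special import voigt_profile

ref = voigt_profile(np.asarray(nu), 1.0, 2.0)        # sigma=1.0, gamma=2.0
diff = np.max(np.abs(np.asarray(rvoigt(nu, 1.0, 2.0)) - ref))
print(diff)  # should be close to zero if the conventions agree
```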
## optimization of a simple absorption model
Next, we try to fit a simple absorption model to mock data.
The absorption model is
$ f= 1 - e^{-a V(\nu,\beta,\gamma_L)}$
```
def absmodel(nu,a,beta,gamma_L):
    return 1.0 - jnp.exp(-a*rvoigt(nu,beta,gamma_L))  # minus sign so that f = 1 - exp(-a V), as in the formula above
```
Adding noise...
```
from numpy.random import normal
data=absmodel(nu,2.0,1.0,2.0)+normal(0.0,0.01,len(nu))
plt.plot(nu,data,".")
```
Let's optimize the three parameters $a$, $\beta$, and $\gamma_L$ simultaneously.
```
from jax import grad, vmap
```
We define the objective function as $obj = |d - f|^2$
```
# loss or objective function
def obj(a,beta,gamma_L):
f=data-absmodel(nu,a,beta,gamma_L)
g=jnp.dot(f,f)
return g
#These are the derivative of the objective function
h_a=grad(obj,argnums=0)
h_beta=grad(obj,argnums=1)
h_gamma_L=grad(obj,argnums=2)
print(h_a(2.0,1.0,2.0),h_beta(2.0,1.0,2.0),h_gamma_L(2.0,1.0,2.0))
from jax import jit
@jit
def step(t,opt_state):
a,beta,gamma_L=get_params(opt_state)
value=obj(a,beta,gamma_L)
grads_a = h_a(a,beta,gamma_L)
grads_beta = h_beta(a,beta,gamma_L)
grads_gamma_L = h_gamma_L(a,beta,gamma_L)
grads=jnp.array([grads_a,grads_beta,grads_gamma_L])
opt_state = opt_update(t, grads, opt_state)
return value, opt_state
def doopt(r0,opt_init,get_params,Nstep):
opt_state = opt_init(r0)
traj=[r0]
for t in range(Nstep):
value, opt_state = step(t, opt_state)
p=get_params(opt_state)
traj.append(p)
return traj, p
```
Here, we use the ADAM optimizer
```
#adam
from jax.experimental import optimizers
opt_init, opt_update, get_params = optimizers.adam(1.e-1)
r0 = jnp.array([1.5,1.5,1.5])
trajadam, padam=doopt(r0,opt_init,get_params,1000)
```
The optimized values are stored in `padam`:
```
padam
traj=jnp.array(trajadam)
plt.plot(traj[:,0],label="$\\alpha$")
plt.plot(traj[:,1],ls="dashed",label="$\\beta$")
plt.plot(traj[:,2],ls="dotted",label="$\\gamma_L$")
plt.xscale("log")
plt.legend()
plt.show()
plt.plot(nu,data,".",label="data")
plt.plot(nu,absmodel(nu,padam[0],padam[1],padam[2]),label="optimized")
plt.show()
```
Using SGD instead, you need to increase the number of iterations for convergence:
```
#sgd
from jax.experimental import optimizers
opt_init, opt_update, get_params = optimizers.sgd(1.e-1)
r0 = jnp.array([1.5,1.5,1.5])
trajsgd, psgd=doopt(r0,opt_init,get_params,10000)
traj=jnp.array(trajsgd)
plt.plot(traj[:,0],label="$\\alpha$")
plt.plot(traj[:,1],ls="dashed",label="$\\beta$")
plt.plot(traj[:,2],ls="dotted",label="$\\gamma_L$")
plt.xscale("log")
plt.legend()
plt.show()
```
| github_jupyter |
```
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
def load_obj(name ):
with open('ttt/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
results = load_obj("results")
hidden_units = []
def get_avg(list_,n_episodes):
avg = np.abs(np.array(list_[:-n_episodes]).mean())
return avg
def get_avgs(list_, n_samples=250):
avgs = np.array(list_).reshape(-1, n_samples).mean(axis=1)
return avgs
all_losses = []
all_returns = []
for hidden_unit, result in results.items():
hidden_units.append(hidden_unit)
losses = result["loss"]
avg_reward = result["return"][-1]
all_returns.append(avg_reward)
avg_loss = get_avg(losses, 100)
all_losses.append(avg_loss)
print(len(hidden_units))
print(len(all_losses))
plt.figure(figsize=(12,9))
plt.xlabel("Hidden Size")
plt.ylabel("Policy Loss")
plt.title("Policy Loss versus Number of Hidden Units")
plt.scatter(hidden_units, all_losses, marker="x")
plt.savefig("figures/hidden_units_loss.png", dpi=300, bbox_inches="tight")
plt.figure(figsize=(12,9))
plt.xlabel("Hidden Size")
plt.ylabel("Average Return")
plt.title("Average Return versus Number of Hidden Units")
plt.scatter(hidden_units, all_returns, marker="x")
plt.savefig("figures/hidden_units_returns.png", dpi=300, bbox_inches="tight")
results_single_64 = load_obj("hidden_units_single_64")
plt.figure(figsize=(12,9))
episodes = range(1, 50001, 500)
plt.xlabel("Episode")
plt.ylabel("Average Return")
plt.title("Average Return versus Episodes for 64 Hidden Units")
plt.plot(episodes, results_single_64['return'])
plt.savefig("figures/return_learning_64.png", dpi=300, bbox_inches="tight")
plt.figure(figsize=(12,9))
episodes = range(1, 50001, 250)
plt.xlabel("Episode")
plt.ylabel("Loss")
plt.title("Episode Loss versus Episodes for 64 Hidden Units")
plt.scatter(episodes, np.abs(get_avgs(results_single_64['loss'])), marker='x')
plt.savefig("figures/loss_curve_64.png", dpi=300, bbox_inches="tight")
plt.figure(figsize=(12,9))
episodes = range(1, 50001, 250)
plt.xlabel("Episode")
plt.ylabel("Number of Invalid Moves per Episode")
plt.title("Number of Invalid Moves versus Episodes for 64 Hidden Units")
plt.scatter(episodes, get_avgs(results_single_64['invalid']), marker='x')
plt.savefig("figures/invalid_curve_64.png", dpi=300, bbox_inches="tight")
results_single_256 = load_obj("hidden_units_single_256")
plt.figure(figsize=(12,9))
episodes = range(1, 50001, 500)
plt.xlabel("Episode")
plt.ylabel("Average Return")
plt.title("Average Return versus Episodes for 256 Hidden Units")
plt.plot(episodes, results_single_256['return'])
plt.savefig("figures/return_learning_256.png", dpi=300, bbox_inches="tight")
plt.figure(figsize=(12,9))
episodes = range(1, 50001, 250)
plt.xlabel("Episode")
plt.ylabel("Loss")
plt.title("Episode Loss versus Episodes for 256 Hidden Units")
plt.scatter(episodes, get_avgs(results_single_256['loss']), marker='x')
plt.savefig("figures/loss_curve_256.png", dpi=300, bbox_inches="tight")
plt.figure(figsize=(12,9))
episodes = range(1, 50001)
plt.xlabel("Episode")
plt.ylabel("Number of Invalid Moves per Episode")
plt.title("Number of Invalid Moves versus Episodes for 256 Hidden Units")
plt.scatter(episodes, np.abs(results_single_256['invalid']), marker='x')
plt.savefig("figures/invalid_curve_256.png", dpi=300, bbox_inches="tight")
results_single_128 = load_obj("hidden_units_single_128")
results_single_32 = load_obj("hidden_units_single_32")
plt.figure(figsize=(12,9))
episodes = range(1, 50001, 250)
plt.xlabel("Episode")
plt.ylabel("Average Number of Invalid Moves per Episode")
plt.title("Average Number of Invalid Moves versus Episodes for Different Number of Hidden Units")
plt.plot(episodes, get_avgs(results_single_32['invalid']), label='32')
plt.plot(episodes, get_avgs(results_single_64['invalid']), label='64')
plt.plot(episodes, get_avgs(results_single_128['invalid']), label='128')
plt.plot(episodes, get_avgs(results_single_256['invalid']), label='256')
plt.legend()
plt.savefig("figures/invalid_curve.png", dpi=300, bbox_inches="tight")
plt.figure(figsize=(12,9))
n_samples = 500
episodes = range(1, 50001, n_samples)
plt.xlabel("Episode")
plt.ylabel("Average Number of Invalid Moves per Episode")
plt.ylim(0, 0.1)
plt.title("Average Number of Invalid Moves versus Episodes for Different Number of Hidden Units")
plt.plot(episodes, get_avgs(results_single_32['invalid'], n_samples), label='32')
plt.plot(episodes, get_avgs(results_single_64['invalid'], n_samples), label='64')
plt.plot(episodes, get_avgs(results_single_128['invalid'], n_samples), label='128')
plt.plot(episodes, get_avgs(results_single_256['invalid'], n_samples), label='256')
plt.legend()
plt.savefig("figures/invalid_curve_close.png", dpi=300, bbox_inches="tight")
plt.figure(figsize=(12,9))
episodes = range(1, 50001, 500)
plt.xlabel("Episode")
plt.ylabel("Average Return")
plt.title("Average Return versus Episodes for Different Number of Hidden Units")
plt.plot(episodes, results_single_32['return'], label='32')
plt.plot(episodes, results_single_64['return'], label='64')
plt.plot(episodes, results_single_128['return'], label='128')
plt.plot(episodes, results_single_256['return'], label='256')
plt.legend()
plt.savefig("figures/return_learning.png", dpi=300, bbox_inches="tight")
plt.figure(figsize=(12,9))
episodes = range(1, 50001, 500)
plt.xlabel("Episode")
plt.ylabel("Average Return")
plt.ylim(0, 10)
plt.title("Average Return versus Episodes for Different Number of Hidden Units")
plt.plot(episodes, results_single_32['return'], label='32')
plt.plot(episodes, results_single_64['return'], label='64')
plt.plot(episodes, results_single_128['return'], label='128')
plt.plot(episodes, results_single_256['return'], label='256')
plt.legend()
plt.savefig("figures/return_learning_close.png", dpi=300, bbox_inches="tight")
ratio_results = results_single_32['ratio']
win = [result['win'] for result in ratio_results]
lose = [result['lose'] for result in ratio_results]
tie = [result['tie'] for result in ratio_results]
episodes = range(1, 101)
plt.figure(figsize=(12,9))
plt.xlabel("Episodes [% of 50000]")
plt.ylabel("Number of Episodes")
plt.title("Win / Lose / Tie Ratio for 500 Episodes Played Versus Number of Episodes Trained For")
plt.bar(episodes, win, label='win')
plt.bar(episodes, lose, bottom=win, label='lose')
plt.bar(episodes, tie, bottom=np.add(win,lose), label='tie')
plt.legend()
plt.savefig("figures/ratio_graph.png", dpi=300, bbox_inches="tight")
first_distributions = results_single_128["first"]
move = {i: [distr[0, i] for distr in first_distributions] for i in range(9)}
move1 = move[0]
move2 = np.add(move[1], move1)
move3 = np.add(move[2], move2)
move4 = np.add(move[3], move3)
move5 = np.add(move[4], move4)
move6 = np.add(move[5], move5)
move7 = np.add(move[6], move6)
move8 = np.add(move[7], move7)
episodes = range(1, 101)
plt.figure(figsize=(12,9))
plt.xlabel("Episodes [% of 50000]")
plt.ylabel("Number of Episodes")
plt.title("Win / Lose / Tie Ratio for 500 Episodes Played Versus Number of Episodes Trained For")
plt.bar(episodes, move[0], label='1')
plt.bar(episodes, move[1], bottom=move1, label='2')
plt.bar(episodes, move[2], bottom=move2, label='3')
plt.bar(episodes, move[3], bottom=move3, label='4')
plt.bar(episodes, move[4], bottom=move4, label='5')
plt.bar(episodes, move[5], bottom=move5, label='6')
plt.bar(episodes, move[6], bottom=move6, label='7')
plt.bar(episodes, move[7], bottom=move7, label='8')
plt.bar(episodes, move[8], bottom=move8, label='9')
plt.legend()
plt.savefig("figures/moves_distr_graph_128.png", dpi=300, bbox_inches="tight")
```
| github_jupyter |
# Machine Learning Trading Bot
In this Challenge, you’ll assume the role of a financial advisor at one of the top five financial advisory firms in the world. Your firm constantly competes with the other major firms to manage and automatically trade assets in a highly dynamic environment. In recent years, your firm has heavily profited by using computer algorithms that can buy and sell faster than human traders.
The speed of these transactions gave your firm a competitive advantage early on. But, people still need to specifically program these systems, which limits their ability to adapt to new data. You’re thus planning to improve the existing algorithmic trading systems and maintain the firm’s competitive advantage in the market. To do so, you’ll enhance the existing trading signals with machine learning algorithms that can adapt to new data.
## Instructions:
Use the starter code file to complete the steps that the instructions outline. The steps for this Challenge are divided into the following sections:
* Establish a Baseline Performance
* Tune the Baseline Trading Algorithm
* Evaluate a New Machine Learning Classifier
* Create an Evaluation Report
#### Establish a Baseline Performance
In this section, you’ll run the provided starter code to establish a baseline performance for the trading algorithm. To do so, complete the following steps.
Open the Jupyter notebook. Restart the kernel, run the provided cells that correspond with the first three steps, and then proceed to step four.
1. Import the OHLCV dataset into a Pandas DataFrame.
2. Generate trading signals using short- and long-window SMA values.
3. Split the data into training and testing datasets.
4. Use the `SVC` classifier model from SKLearn's support vector machine (SVM) learning method to fit the training data and make predictions based on the testing data. Review the predictions.
5. Review the classification report associated with the `SVC` model predictions.
6. Create a predictions DataFrame that contains columns for “Predicted” values, “Actual Returns”, and “Strategy Returns”.
7. Create a cumulative return plot that shows the actual returns vs. the strategy returns. Save a PNG image of this plot. This will serve as a baseline against which to compare the effects of tuning the trading algorithm.
8. Write your conclusions about the performance of the baseline trading algorithm in the `README.md` file that’s associated with your GitHub repository. Support your findings by using the PNG image that you saved in the previous step.
#### Tune the Baseline Trading Algorithm
In this section, you’ll tune, or adjust, the model’s input features to find the parameters that result in the best trading outcomes. (You’ll choose the best by comparing the cumulative products of the strategy returns.) To do so, complete the following steps:
1. Tune the training algorithm by adjusting the size of the training dataset. To do so, slice your data into different periods. Rerun the notebook with the updated parameters, and record the results in your `README.md` file. Answer the following question: What impact resulted from increasing or decreasing the training window?
> **Hint** To adjust the size of the training dataset, you can use a different `DateOffset` value—for example, six months. Be aware that changing the size of the training dataset also affects the size of the testing dataset.
2. Tune the trading algorithm by adjusting the SMA input features. Adjust one or both of the windows for the algorithm. Rerun the notebook with the updated parameters, and record the results in your `README.md` file. Answer the following question: What impact resulted from increasing or decreasing either or both of the SMA windows?
3. Choose the set of parameters that best improved the trading algorithm returns. Save a PNG image of the cumulative product of the actual returns vs. the strategy returns, and document your conclusion in your `README.md` file.
#### Evaluate a New Machine Learning Classifier
In this section, you’ll use the original parameters that the starter code provided. But, you’ll apply them to the performance of a second machine learning model. To do so, complete the following steps:
1. Import a new classifier, such as `AdaBoost`, `DecisionTreeClassifier`, or `LogisticRegression`. (For the full list of classifiers, refer to the [Supervised learning page](https://scikit-learn.org/stable/supervised_learning.html) in the scikit-learn documentation.)
2. Using the original training data as the baseline model, fit another model with the new classifier.
3. Backtest the new model to evaluate its performance. Save a PNG image of the cumulative product of the actual returns vs. the strategy returns for this updated trading algorithm, and write your conclusions in your `README.md` file. Answer the following questions: Did this new model perform better or worse than the provided baseline model? Did this new model perform better or worse than your tuned trading algorithm?
#### Create an Evaluation Report
In the previous sections, you updated your `README.md` file with your conclusions. To accomplish this section, you need to add a summary evaluation report at the end of the `README.md` file. For this report, express your final conclusions and analysis. Support your findings by using the PNG images that you created.
```
# Imports
import pandas as pd
import numpy as np
from pathlib import Path
import hvplot.pandas
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn import metrics
from sklearn.ensemble import AdaBoostClassifier
from sklearn.preprocessing import StandardScaler
from pandas.tseries.offsets import DateOffset
from sklearn.metrics import classification_report
```
---
## Establish a Baseline Performance
In this section, you’ll run the provided starter code to establish a baseline performance for the trading algorithm. To do so, complete the following steps.
Open the Jupyter notebook. Restart the kernel, run the provided cells that correspond with the first three steps, and then proceed to step four.
### Step 1: Import the OHLCV dataset into a Pandas DataFrame.
```
# Import the OHLCV dataset into a Pandas Dataframe
ohlcv_df = pd.read_csv(
Path("./Resources/emerging_markets_ohlcv.csv"),
index_col='date',
infer_datetime_format=True,
parse_dates=True
)
# Review the DataFrame
ohlcv_df.head()
# Filter the date index and close columns
signals_df = ohlcv_df.loc[:, ["close"]]
# Use the pct_change function to generate returns from close prices
signals_df["Actual Returns"] = signals_df["close"].pct_change()
# Drop all NaN values from the DataFrame
signals_df = signals_df.dropna()
# Review the DataFrame
display(signals_df.head())
display(signals_df.tail())
```
## Step 2: Generate trading signals using short- and long-window SMA values.
```
# Set the short window and long window
short_window = 4
long_window = 100
# Generate the fast and slow simple moving averages (4 and 100 days, respectively)
signals_df['SMA_Fast'] = signals_df['close'].rolling(window=short_window).mean()
signals_df['SMA_Slow'] = signals_df['close'].rolling(window=long_window).mean()
signals_df = signals_df.dropna()
# Review the DataFrame
display(signals_df.head())
display(signals_df.tail())
# Initialize the new Signal column
signals_df['Signal'] = 0.0
# When Actual Returns are greater than or equal to 0, generate signal to buy stock long
signals_df.loc[(signals_df['Actual Returns'] >= 0), 'Signal'] = 1
# When Actual Returns are less than 0, generate signal to sell stock short
signals_df.loc[(signals_df['Actual Returns'] < 0), 'Signal'] = -1
# Review the DataFrame
display(signals_df.head())
display(signals_df.tail())
signals_df['Signal'].value_counts()
# Calculate the strategy returns and add them to the signals_df DataFrame
signals_df['Strategy Returns'] = signals_df['Actual Returns'] * signals_df['Signal'].shift()
# Review the DataFrame
display(signals_df.head())
display(signals_df.tail())
# Plot Strategy Returns to examine performance
(1 + signals_df['Strategy Returns']).cumprod().plot()
```
### Step 3: Split the data into training and testing datasets.
```
# Assign a copy of the sma_fast and sma_slow columns to a features DataFrame called X
X = signals_df[['SMA_Fast', 'SMA_Slow']].shift().dropna()
# Review the DataFrame
X.head()
# Create the target set by selecting the Signal column and assigning it to y
y = signals_df['Signal']
# Review the value counts
y.value_counts()
# Select the start of the training period
training_begin = X.index.min()
# Display the training begin date
print(training_begin)
# Select the ending period for the training data with an offset of 3 months
training_end = X.index.min() + DateOffset(months=3)
# Display the training end date
print(training_end)
# Generate the X_train and y_train DataFrames
X_train = X.loc[training_begin:training_end]
y_train = y.loc[training_begin:training_end]
# Review the X_train DataFrame
X_train.head()
# Generate the X_test and y_test DataFrames
X_test = X.loc[training_end+DateOffset(hours=1):]
y_test = y.loc[training_end+DateOffset(hours=1):]
# Review the X_test DataFrame
X_test.head()
# Scale the features DataFrames
# Create a StandardScaler instance
scaler = StandardScaler()
# Apply the scaler model to fit the X-train data
X_scaler = scaler.fit(X_train)
# Transform the X_train and X_test DataFrames using the X_scaler
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
```
### Step 4: Use the `SVC` classifier model from SKLearn's support vector machine (SVM) learning method to fit the training data and make predictions based on the testing data. Review the predictions.
```
# From SVM, instantiate SVC classifier model instance
svm_model = svm.SVC()
# Fit the model to the data using the training data
svm_model = svm_model.fit(X_train_scaled, y_train)
# Use the testing data to make the model predictions
svm_pred = svm_model.predict(X_test_scaled)
# Review the model's predicted values
svm_pred[:10]
```
### Step 5: Review the classification report associated with the `SVC` model predictions.
```
# Use a classification report to evaluate the model using the predictions and testing data
svm_testing_report = classification_report(y_test, svm_pred)
# Print the classification report
print(svm_testing_report)
```
### Step 6: Create a predictions DataFrame that contains columns for “Predicted” values, “Actual Returns”, and “Strategy Returns”.
```
# Create a new empty predictions DataFrame
predictions_df = pd.DataFrame(index=X_test.index)
# Add the SVM model predictions to the DataFrame
predictions_df['Predicted'] = svm_pred
# Add the actual returns to the DataFrame
predictions_df['Actual Returns'] = signals_df['Actual Returns']
# Add the strategy returns to the DataFrame
predictions_df['Strategy Returns'] = predictions_df['Predicted'] * predictions_df['Actual Returns']
# Review the DataFrame
display(predictions_df.head())
display(predictions_df.tail())
```
### Step 7: Create a cumulative return plot that shows the actual returns vs. the strategy returns. Save a PNG image of this plot. This will serve as a baseline against which to compare the effects of tuning the trading algorithm.
```
# Plot the actual returns versus the strategy returns
baseline_actual_vs_strategy_plot = (1 + predictions_df[['Actual Returns', 'Strategy Returns']]).cumprod().plot(title="Baseline")
baseline_actual_vs_strategy_plot.get_figure().savefig('Baseline_actual_vs_strategy.png',bbox_inches='tight')
(1 + predictions_df[['Actual Returns', 'Strategy Returns']]).cumprod().tail(1)
```
---
## Tune the Baseline Trading Algorithm
In this section, you’ll tune, or adjust, the model’s input features to find the parameters that result in the best trading outcomes. You’ll choose the best by comparing the cumulative products of the strategy returns.
### Step 1: Tune the training algorithm by adjusting the size of the training dataset.
To do so, slice your data into different periods. Rerun the notebook with the updated parameters, and record the results in your `README.md` file.
Answer the following question: What impact resulted from increasing or decreasing the training window?
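For instance, a minimal sketch of widening the training window from three to six months (the six-month offset is only an illustrative value; it reuses the `X`, `y`, `training_begin`, and `DateOffset` objects defined in the baseline cells above):
```
# Example: widen the training window to 6 months, then rerun the scaling,
# fitting, and backtesting cells with these new splits.
training_end = X.index.min() + DateOffset(months=6)

X_train = X.loc[training_begin:training_end]
y_train = y.loc[training_begin:training_end]
X_test = X.loc[training_end + DateOffset(hours=1):]
y_test = y.loc[training_end + DateOffset(hours=1):]
```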
### Step 2: Tune the trading algorithm by adjusting the SMA input features.
Adjust one or both of the windows for the algorithm. Rerun the notebook with the updated parameters, and record the results in your `README.md` file.
Answer the following question: What impact resulted from increasing or decreasing either or both of the SMA windows?
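A sketch of one possible adjustment (the window values here are illustrative, not a recommendation) is to rebuild the signals DataFrame from the raw OHLCV data with new windows and then rerun the downstream cells:
```
# Hypothetical alternative windows
short_window = 10
long_window = 200

# Rebuild signals_df from the raw data, then rerun the signal, split,
# scaling, and model cells that follow it.
signals_df = ohlcv_df.loc[:, ["close"]]
signals_df["Actual Returns"] = signals_df["close"].pct_change()
signals_df = signals_df.dropna()
signals_df['SMA_Fast'] = signals_df['close'].rolling(window=short_window).mean()
signals_df['SMA_Slow'] = signals_df['close'].rolling(window=long_window).mean()
signals_df = signals_df.dropna()
```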
### Step 3: Choose the set of parameters that best improved the trading algorithm returns.
Save a PNG image of the cumulative product of the actual returns vs. the strategy returns, and document your conclusion in your `README.md` file.
---
## Evaluate a New Machine Learning Classifier
In this section, you’ll use the original parameters that the starter code provided. But, you’ll apply them to the performance of a second machine learning model.
### Step 1: Import a new classifier, such as `AdaBoost`, `DecisionTreeClassifier`, or `LogisticRegression`. (For the full list of classifiers, refer to the [Supervised learning page](https://scikit-learn.org/stable/supervised_learning.html) in the scikit-learn documentation.)
```
# Initiate the model instance
abc = AdaBoostClassifier(n_estimators=50)
```
### Step 2: Using the original training data as the baseline model, fit another model with the new classifier.
```
# Fit the model using the training data
model = abc.fit(X_train_scaled, y_train)
# Use the testing dataset to generate the predictions for the new model
abc_pred = model.predict(X_test_scaled)
# Review the model's predicted values
abc_pred[:10]
```
### Step 3: Backtest the new model to evaluate its performance.
Save a PNG image of the cumulative product of the actual returns vs. the strategy returns for this updated trading algorithm, and write your conclusions in your `README.md` file.
Answer the following questions:
Did this new model perform better or worse than the provided baseline model?
Did this new model perform better or worse than your tuned trading algorithm?
```
print("Accuracy:",metrics.accuracy_score(y_test, abc_pred))
# Use a classification report to evaluate the model using the predictions and testing data
abc_testing_report = classification_report(y_test, abc_pred)
# Print the classification report
print(abc_testing_report)
# Create a new empty predictions DataFrame.
abc_pred_df = pd.DataFrame(index=X_test.index)
# Add the ABC model predictions to the DataFrame
abc_pred_df['Predicted'] = abc_pred
# Add the actual returns to the DataFrame
abc_pred_df['Actual Returns'] = signals_df['Actual Returns']
# Add the strategy returns to the DataFrame
abc_pred_df['Strategy Returns'] = abc_pred_df['Predicted'] * abc_pred_df['Actual Returns']
# Review the DataFrame
display(abc_pred_df.head(3))
display(abc_pred_df.tail(3))
# Plot the actual returns versus the strategy returns
abc_strategy_plot = (1 + abc_pred_df[['Actual Returns', 'Strategy Returns']]).cumprod().plot(title="AdaBoost: 3-month Train, SMA 4/100")
abc_strategy_plot.get_figure().savefig('AdaBoost_actual_vs_strategy.png',bbox_inches='tight')
(1 + abc_pred_df[['Actual Returns', 'Strategy Returns']]).cumprod().tail(1)
```
| github_jupyter |
```
!wget --no-check-certificate \
https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip \
-O cats_and_dogs_filtered.zip
! unzip cats_and_dogs_filtered.zip
import keras,os
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D , Flatten
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
trdata = ImageDataGenerator()
traindata = trdata.flow_from_directory(directory="cats_and_dogs_filtered/train",target_size=(224,224))
tsdata = ImageDataGenerator()
testdata = tsdata.flow_from_directory(directory="cats_and_dogs_filtered/validation", target_size=(224,224))
model = Sequential()
model.add(Conv2D(input_shape=(224,224,3),filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(Conv2D(filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Flatten())
model.add(Dense(units=4096,activation="relu"))
model.add(Dense(units=4096,activation="relu"))
model.add(Dense(units=2, activation="softmax"))
from keras.optimizers import Adam
opt = Adam(lr=0.001)
model.compile(optimizer=opt, loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
model.summary()
from keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=20, verbose=1, mode='auto')
hist = model.fit_generator(steps_per_epoch=100,generator=traindata, validation_data= testdata, validation_steps=10,epochs=100,callbacks=[checkpoint,early])
import matplotlib.pyplot as plt
plt.plot(hist.history["acc"])
plt.plot(hist.history['val_acc'])
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title("model accuracy")
plt.ylabel("Accuracy")
plt.xlabel("Epoch")
plt.legend(["Accuracy","Validation Accuracy","loss","Validation Loss"])
plt.show()
from keras.preprocessing import image
img = image.load_img("Pomeranian_01.jpeg",target_size=(224,224))
img = np.asarray(img)
plt.imshow(img)
img = np.expand_dims(img, axis=0)
from keras.models import load_model
saved_model = load_model("vgg16_1.h5")
output = saved_model.predict(img)
if output[0][0] > output[0][1]:
print("cat")
else:
print('dog')
```
| github_jupyter |
<!-- Copyright 2015 Google Inc. All rights reserved. -->
<!-- Licensed under the Apache License, Version 2.0 (the "License"); -->
<!-- you may not use this file except in compliance with the License. -->
<!-- You may obtain a copy of the License at -->
<!-- http://www.apache.org/licenses/LICENSE-2.0 -->
<!-- Unless required by applicable law or agreed to in writing, software -->
<!-- distributed under the License is distributed on an "AS IS" BASIS, -->
<!-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -->
<!-- See the License for the specific language governing permissions and -->
<!-- limitations under the License. -->
# Getting started with the Google Genomics API
In this notebook we'll cover how to make authenticated requests to the [Google Genomics API](https://cloud.google.com/genomics/reference/rest/).
----
NOTE:
* If you're new to notebooks, or want to check out additional samples, check out the full [list](../) of general notebooks.
* For additional Genomics samples, check out the full [list](./) of Genomics notebooks.
## Setup
### Install Python libraries
We'll be using the [Google Python API client](https://github.com/google/google-api-python-client) for interacting with Genomics API. We can install this library, or any other 3rd-party Python libraries from the [Python Package Index (PyPI)](https://pypi.python.org/pypi) using the `pip` package manager.
There are [50+ Google APIs](http://api-python-client-doc.appspot.com/) that you can work against with the Google Python API Client, but we'll focus on the Genomics API in this notebook.
```
!pip install --upgrade google-api-python-client
```
### Create an Authenticated Client
Next we construct a Python object that we can use to make requests.
The following snippet shows how we can authenticate using the service account on the Datalab host. For more detail about authentication from Python, see [Using OAuth 2.0 for Server to Server Applications](https://developers.google.com/api-client-library/python/auth/service-accounts).
```
from httplib2 import Http
from oauth2client.client import GoogleCredentials
credentials = GoogleCredentials.get_application_default()
http = Http()
credentials.authorize(http)
```
And then we create a client for the Genomics API.
```
from apiclient.discovery import build
genomics = build('genomics', 'v1', http=http)
```
### Send a request to the Genomics API
Now that we have a Python client for the Genomics API, we can access a variety of different resources. For details about each available resource, see the python client [API docs here](https://google-api-client-libraries.appspot.com/documentation/genomics/v1/python/latest/index.html).
Using our `genomics` client, we'll demonstrate fetching a Dataset resource by ID (the [1000 Genomes dataset](http://googlegenomics.readthedocs.org/en/latest/use_cases/discover_public_data/1000_genomes.html) in this case).
First, we need to construct a request object.
```
request = genomics.datasets().get(datasetId='10473108253681171589')
```
Next, we'll send this request to the Genomics API by calling the `request.execute()` method.
```
response = request.execute()
```
You will need to enable the Genomics API for your project if you have not done so previously. Click on [this link](https://console.developers.google.com/flows/enableapi?apiid=genomics) to enable the API in your project.
The response object returned is simply a Python dictionary. Let's take a look at the properties returned in the response.
```
for entry in response.items():
    print("%s => %s" % entry)
```
Success! We can see the name of the specified Dataset and a few other pieces of metadata.
Accessing other Genomics API resources will follow this same set of steps. The full [list of available resources within the API is here](https://google-api-client-libraries.appspot.com/documentation/genomics/v1/python/latest/index.html). Each resource has details about the different verbs that can be applied (e.g., [Dataset methods](https://google-api-client-libraries.appspot.com/documentation/genomics/v1/python/latest/genomics_v1.datasets.html)).
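For example, a sketch of listing the datasets in one of your own Cloud projects looks like the following; `your-project-id` is a placeholder, and the exact parameters and response fields are described in the API reference linked above.
```
request = genomics.datasets().list(projectId='your-project-id')
for dataset in request.execute().get('datasets', []):
    print('%s: %s' % (dataset['id'], dataset.get('name')))
```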
## Access Data
In this portion of the notebook, we implement [this same example](https://github.com/googlegenomics/getting-started-with-the-api/tree/master/python) implemented as a python script. First let's define a few constants to use within the examples that follow.
```
dataset_id = '10473108253681171589' # This is the 1000 Genomes dataset ID
sample = 'NA12872'
reference_name = '22'
reference_position = 51003835
```
### Get read bases for a sample at a specific position
First find the read group set ID for the sample.
```
request = genomics.readgroupsets().search(
body={'datasetIds': [dataset_id], 'name': sample},
fields='readGroupSets(id)')
read_group_sets = request.execute().get('readGroupSets', [])
if len(read_group_sets) != 1:
raise Exception('Searching for %s didn\'t return '
'the right number of read group sets' % sample)
read_group_set_id = read_group_sets[0]['id']
```
Once we have the read group set ID, lookup the reads at the position in which we are interested.
```
request = genomics.reads().search(
body={'readGroupSetIds': [read_group_set_id],
'referenceName': reference_name,
'start': reference_position,
'end': reference_position + 1,
'pageSize': 1024},
fields='alignments(alignment,alignedSequence)')
reads = request.execute().get('alignments', [])
```
And we print out the results.
```
# Note: This is simplistic - the cigar should be considered for real code
bases = [read['alignedSequence'][
reference_position - int(read['alignment']['position']['position'])]
for read in reads]
print('%s bases on %s at %d are' % (sample, reference_name, reference_position))
from collections import Counter
for base, count in Counter(bases).items():
    print('%s: %s' % (base, count))
```
### Get variants for a sample at a specific position
First find the call set ID for the sample.
```
request = genomics.callsets().search(
body={'variantSetIds': [dataset_id], 'name': sample},
fields='callSets(id)')
resp = request.execute()
call_sets = resp.get('callSets', [])
if len(call_sets) != 1:
raise Exception('Searching for %s didn\'t return '
'the right number of call sets' % sample)
call_set_id = call_sets[0]['id']
```
Once we have the call set ID, lookup the variants that overlap the position in which we are interested.
```
request = genomics.variants().search(
body={'callSetIds': [call_set_id],
'referenceName': reference_name,
'start': reference_position,
'end': reference_position + 1},
fields='variants(names,referenceBases,alternateBases,calls(genotype))')
variant = request.execute().get('variants', [])[0]
```
And we print out the results.
```
variant_name = variant['names'][0]
genotype = [variant['referenceBases'] if g == 0
else variant['alternateBases'][g - 1]
for g in variant['calls'][0]['genotype']]
print('the called genotype is %s for %s' % (','.join(genotype), variant_name))
```
| github_jupyter |
## Setup
```
from google.colab import drive
drive.mount('/content/drive')
!ls /content/drive/MyDrive/ColabNotebooks/Transformer
!nvcc --version
!pip3 install timm faiss tqdm numpy
!pip3 install torch==1.10.2+cu113 torchvision==0.11.3+cu113 torchaudio==0.10.2+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
!sudo apt-get install libomp-dev
import torch
print(f'torch.__version__ = {torch.__version__}')
print(f'torch.cuda.is_available() = {torch.cuda.is_available()}')
print(f'torch.cuda.current_device() = {torch.cuda.current_device()}')
print(f'torch.cuda.device(0) = {torch.cuda.device(0)}')
print(f'torch.cuda.device_count() = {torch.cuda.device_count()}')
print(f'torch.cuda.get_device_name(0) = {torch.cuda.get_device_name(0)}')
%cd /content/drive/MyDrive/ColabNotebooks/Transformer/LA-Transformer
```
# Testing
```
from __future__ import print_function
import os
import time
import glob
import random
import zipfile
from itertools import chain
import timm
import numpy as np
import pandas as pd
from PIL import Image
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
from torchvision import models
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim.lr_scheduler import StepLR
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, Dataset
import faiss
# from LATransformer.model import ClassBlock, LATransformer, LATransformerTest
# from LATransformer.utils import save_network, update_summary, get_id
from LATransformer.metrics import rank1, rank5, rank10, calc_map  # needed for the evaluation cell below
from osprey import LATransformerTest
def initilize_device(hardware):
# os.environ['CUDA_VISIBLE_DEVICES']='1'
if hardware == "gpu":
device = "cuda"
# if not device.type == "cpu":
print(f'torch.__version__ = {torch.__version__}')
print(f'torch.cuda.is_available() = {torch.cuda.is_available()}')
print(f'torch.cuda.current_device() = {torch.cuda.current_device()}')
print(f'torch.cuda.device(0) = {torch.cuda.device(0)}')
print(f'torch.cuda.device_count() = {torch.cuda.device_count()}')
print(f'torch.cuda.get_device_name(0) = {torch.cuda.get_device_name(0)}')
elif hardware == "cpu":
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") ## Use if CPU
print("Using cpu")
else:
print("Choose either gpu or cpu")
return None
return device
device = initilize_device("gpu")
```
## Load Model
```
batch_size = 8
gamma = 0.7
seed = 42
# Load ViT
vit_base = timm.create_model('vit_base_patch16_224', pretrained=True, num_classes=50)
vit_base= vit_base.to(device)
# Create La-Transformer
osprey_model = LATransformerTest(vit_base, lmbd=8).to(device)
# Load LA-Transformer
# name = "la_with_lmbd_8"
# name = "la_with_lmbd_8_12-03"
# save_path = os.path.join('./model',name,'net_best.pth')
name = "oprey_{}".format(8)
output_dir = "model/" + name
save_path = os.path.join(output_dir, "saves", "model_32.pt")
checkpoint = torch.load(save_path)
osprey_model.load_state_dict(checkpoint['model_state_dict'], strict=False)
# # Load LA-Transformer
# name = "old_weights"
# save_path = os.path.join('./model',name,'small_ds_68_map_net_best.pth')
#Transformer\model\old_weights\small_ds_68_map_net_best.pth
# osprey_model.load_state_dict(torch.load(save_path), strict=False)
# model.eval()
transform_query_list = [
transforms.Resize((224,224), interpolation=3),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
transform_gallery_list = [
transforms.Resize(size=(224,224),interpolation=3), #Image.BICUBIC
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
data_transforms = {
'query': transforms.Compose( transform_query_list ),
'gallery': transforms.Compose(transform_gallery_list),
}
```
# Required functions
```
# device = initilize_device("cpu")
# We had to recreate the get_id() func since they assume the pictures are named in a specific manner.
def get_id_padel(img_path):
labels = []
for path, v in img_path:
filename = os.path.basename(path)
label = filename.split('_')[0]
labels.append(int(label))
return labels
def extract_feature(model,dataloaders):
features = torch.FloatTensor()
count = 0
idx = 0
for data in tqdm(dataloaders):
img, label = data
img, label = img.to(device), label.to(device)
output = model(img)
n, c, h, w = img.size()
count += n
features = torch.cat((features, output.detach().cpu()), 0)
idx += 1
return features
def image_loader(data_dir_path):
image_datasets = {}
# data_dir = "data/The_OspreyChallengerSet"
data_dir = data_dir_path
image_datasets['query'] = datasets.ImageFolder(os.path.join(data_dir, 'query'),
data_transforms['query'])
image_datasets['gallery'] = datasets.ImageFolder(os.path.join(data_dir, 'gallery'),
data_transforms['gallery'])
query_loader = DataLoader(dataset = image_datasets['query'], batch_size=batch_size, shuffle=False)
gallery_loader = DataLoader(dataset = image_datasets['gallery'], batch_size=batch_size, shuffle=False)
return query_loader, gallery_loader, image_datasets
def feature_extraction(model, query_loader, gallery_loader):
# Extract Query Features
query_feature = extract_feature(model, query_loader)
# Extract Gallery Features
gallery_feature = extract_feature(model, gallery_loader)
return query_feature, gallery_feature
def get_labels(image_datasets):
#Retrieve labels
gallery_path = image_datasets['gallery'].imgs
query_path = image_datasets['query'].imgs
gallery_label = get_id_padel(gallery_path)
query_label = get_id_padel(query_path)
return gallery_label, query_label
def calc_gelt_feature(query_feature):
concatenated_query_vectors = []
for query in query_feature:
fnorm = torch.norm(query, p=2, dim=1, keepdim=True)*np.sqrt(14)
query_norm = query.div(fnorm.expand_as(query))
concatenated_query_vectors.append(query_norm.view((-1))) # 14*768 -> 10752
return concatenated_query_vectors
def calc_gelt_gallery(gallery_feature):
concatenated_gallery_vectors = []
for gallery in gallery_feature:
fnorm = torch.norm(gallery, p=2, dim=1, keepdim=True) *np.sqrt(14)
gallery_norm = gallery.div(fnorm.expand_as(gallery))
concatenated_gallery_vectors.append(gallery_norm.view((-1))) # 14*768 -> 10752
return concatenated_gallery_vectors
def calc_faiss(concatenated_gallery_vectors, gallery_label):
index = faiss.IndexIDMap(faiss.IndexFlatIP(10752))
index.add_with_ids(np.array([t.numpy() for t in concatenated_gallery_vectors]), np.array(gallery_label).astype('int64')) # original
return index
def search(query: str, k=1):
encoded_query = query.unsqueeze(dim=0).numpy()
top_k = index.search(encoded_query, k)
return top_k
def osprey_detect(data_dir_path, osprey_model):
query_loader, gallery_loader, image_datasets = image_loader(data_dir_path=data_dir_path)
query_feature, gallery_feature = feature_extraction(model=osprey_model, query_loader=query_loader, gallery_loader=gallery_loader)
gallery_label, query_label = get_labels(image_datasets)
concatenated_query_vectors = calc_gelt_feature(query_feature)
concatenated_gallery_vectors = calc_gelt_gallery(gallery_feature)
index = calc_faiss(concatenated_gallery_vectors, gallery_label)
return concatenated_query_vectors, index
concatenated_query_vectors, index = osprey_detect("data/Osprey_eval", osprey_model)
#For each vector in the query vector list
for query in concatenated_query_vectors:
output = search(query)
print(f"Predicted class: {output[1][0][0]} with {output[0][0][0] * 100} % confidence")
# Helper that returns the predicted class for a single query vector
def predictClass(queryVector):
output = search(queryVector)
print(f"Predicted class: {output[1][0][0]} with {output[0][0][0] * 100} % confidence")
return output[1][0][0]
```
```
#query_loader, gallery_loader, image_datasets = image_loader(data_dir_path="data/The_OspreyChallengerSet")
#load images from folder
query_loader, gallery_loader, image_datasets = image_loader(data_dir_path="data/bim_bam")
#extract features
query_feature, gallery_feature = feature_extraction(model=osprey_model, query_loader=query_loader, gallery_loader=gallery_loader)
#get labels from pictures
gallery_label, query_label = get_labels(image_datasets)
concatenated_query_vectors = calc_gelt_feature(query_feature)
concatenated_gallery_vectors = calc_gelt_gallery(gallery_feature)
index = calc_faiss(concatenated_gallery_vectors, gallery_label)
rank1_score = 0
rank5_score = 0
rank10_score = 0
ap = 0
count = 0
for query, label in zip(concatenated_query_vectors, query_label):
count += 1
label = label
output = search(query, k=10)
# print(output)
rank1_score += rank1(label, output)
rank5_score += rank5(label, output)
rank10_score += rank10(label, output)
print("Correct: {}, Total: {}, Incorrect: {}".format(rank1_score, count, count-rank1_score), end="\r")
ap += calc_map(label, output)
print("Rank1: {}, Rank5: {}, Rank10: {}, mAP: {}".format(rank1_score/len(query_feature),
rank5_score/len(query_feature),
rank10_score/len(query_feature), ap/len(query_feature)))
```
| github_jupyter |
```
# HIDDEN
from datascience import *
from prob140 import *
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
%matplotlib inline
import math
from scipy import stats
from scipy import misc
```
### The Chain at a Fixed Time ###
Let $X_0, X_1, X_2, \ldots $ be a Markov Chain with state space $S$. We will start by setting up notation that will help us express our calculations compactly.
For $n \ge 0$, let $P_n$ be the distribution of $X_n$. That is,
$$
P_n(i) = P(X_n = i), ~~~~ i \in S
$$
Then the distribution of $X_0$ is $P_0$. This is called the *initial distribution* of the chain.
For $n \ge 0$ and $j \in S$,
\begin{align*}
P_{n+1}(j) &= P(X_{n+1} = j) \\
&= \sum_{i \in S} P(X_n = i, X_{n+1} = j) \\
&= \sum_{i \in S} P(X_n = i)P(X_{n+1} = j \mid X_n = i) \\
&= \sum_{i \in S} P_n(i)P(X_{n+1} = j \mid X_n = i)
\end{align*}
The conditional probability $P(X_{n+1} = j \mid X_n = i)$ is called a *one-step transition probability at time $n$*.
For many chains such as the random walk, these one-step transition probabilities depend only on the states $i$ and $j$, not on the time $n$. For example, for the random walk,
\begin{equation}
P(X_{n+1} = j \mid X_n = i) =
\begin{cases}
\frac{1}{2} & \text{if } j = i-1 \text{ or } j = i+1 \\
0 & \text{ otherwise}
\end{cases}
\end{equation}
for every $n$. When one-step transition probabilities don't depend on $n$, they are called *stationary* or *time-homogeneous*. All the Markov Chains that we will study in this course have time-homogeneous transition probabilities.
For such a chain, define the *one-step transition probability*
$$
P(i, j) = P(X_{n+1} = j \mid X_n = i)
$$
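For the random walk above, this one-step transition probability can be written directly as a small Python function (a plain sketch; the `prob140` transition functions used later in this section follow the same pattern):
```
def random_walk_probs(i, j):
    # P(i, j) for the simple random walk: one step left or right,
    # each with chance 1/2.
    if abs(i - j) == 1:
        return 0.5
    else:
        return 0
```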
### The Probability of a Path ###
Given that the chain starts at $i$, what is the chance that the next three values of the chain are $j$, $k$, and $l$, in that order?
We are looking for
$$
P(X_1 = j, X_2 = k, X_3 = l \mid X_0 = i)
$$
By repeated use of the multiplication rule and the Markov property, this is
$$
P(X_1 = j, X_2 = k, X_3 = l \mid X_0 = i) = P(i, j)P(j, k)P(k, l)
$$
In the same way, given that you know the starting point, you can find the probability of any path of finite length by multiplying one-step transition probabilities.
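As a concrete sketch of this multiplication (using the `random_walk_probs` function above and an assumed starting state of 0):
```
def prob_of_path_from(start, path, one_step):
    # P(X_1 = path[0], ..., X_n = path[n-1] | X_0 = start)
    prob = 1
    previous = start
    for state in path:
        prob *= one_step(previous, state)
        previous = state
    return prob

prob_of_path_from(0, [1, 2, 1], random_walk_probs)   # (1/2)**3 = 0.125
```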
### The Distribution of $X_{n+1}$ ###
By our calculation at the start of this section,
\begin{align*}
P_{n+1}(j) &= P(X_{n+1} = j) \\
&= \sum_{i \in S} P_n(i)P(X_{n+1} = j \mid X_n = i) \\
&= \sum_{i \in S} P_n(i)P(i, j)
\end{align*}
The calculation is based on the straightforward observation that for the chain to be at state $j$ at time $n+1$, it had to be at some state $i$ at time $n$ and then get from $i$ to $j$ in one step.
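In matrix terms this update is a row-vector times transition-matrix product. A minimal numpy sketch with a generic two-state chain (the matrix below is made up purely for illustration):
```
import numpy as np

P = np.array([[0.9, 0.1],
              [0.4, 0.6]])       # each row is a conditional distribution
P_0 = np.array([1.0, 0.0])       # start in the first state

P_1 = P_0 @ P                    # P_{n+1}(j) = sum_i P_n(i) P(i, j)
print(P_1)                       # [0.9 0.1]
```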
Let's use all this in examples. You will quickly see that the distribution $P_n$ has interesting properties.
### Lazy Random Walk on a Circle ###
Let the state space be five points arranged on a circle. Suppose the process starts at Point 1, and at each step either stays in place with probability 0.5 (and thus is lazy), or moves to one of the two neighboring points with chance 0.25 each, regardless of the other moves.
This transition behavior can be summed up in a *transition diagram*:

At every step, the next move is determined by a random choice from among three options and by the chain's current location, not by how it got to that location. So the process is a Markov chain. Let's call it $X_0, X_1, X_2, \ldots $.
By our assumption, the initial distribution $P_0$ puts all the probability on Point 1. It is defined in the cell below. We will be using `prob140` Markov Chain methods based on [Pykov](https://github.com/riccardoscalco/Pykov) written by [Riccardo Scalco](http://riccardoscalco.github.io). Note the use of `states` instead of `values`. Please enter the states in ascending order, for technical reasons that we hope to overcome later in the term.
```
s = np.arange(1, 6)
p = [1, 0, 0, 0, 0]
initial = Table().states(s).probability(p)
initial
```
The transition probabilities are:
- For $2 \le i \le 4$, $P(i, i) = 0.5$ and $P(i, i-1) = 0.25 = P(i, i+1)$.
- $P(1, 1) = 0.5$ and $P(1, 5) = 0.25 = P(1, 2)$.
- $P(5, 5) = 0.5$ and $P(5, 4) = 0.25 = P(5, 1)$.
These probabilities are returned by the function `circle_walk_probs` that takes states $i$ and $j$ as its arguments.
```
def circle_walk_probs(i, j):
if i-j == 0:
return 0.5
elif abs(i-j) == 1:
return 0.25
elif abs(i-j) == 4:
return 0.25
else:
return 0
```
All the transition probabilities can be captured in a table, in a process analogous to creating a joint distribution table.
```
trans_tbl = Table().states(s).transition_function(circle_walk_probs)
trans_tbl
```
Just as when we were constructing joint distribution tables, we can better visualize this as a $5 \times 5$ table:
```
circle_walk = trans_tbl.toMarkovChain()
circle_walk
```
This is called the *transition matrix* of the chain.
- For each $i$ and $j$, the $(i, j)$ element of the transition matrix is the one-step transition probability $P(i, j)$.
- For each $i$, the $i$th row of the transition matrix consists of the conditional distribution of $X_{n+1}$ given $X_n = i$, so each row sums to 1 (see the quick check below).
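Each row is a conditional distribution, so the rows of a transition matrix must sum to 1. A quick numpy check, reusing the `circle_walk_probs` function defined above:
```
import numpy as np

states = np.arange(1, 6)
P = np.array([[circle_walk_probs(i, j) for j in states] for i in states])
print(P.sum(axis=1))   # [1. 1. 1. 1. 1.]
```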
#### Probability of a Path ####
What's the probability of the path 1, 1, 2, 1, 2? That's the path $X_0 = 1, X_1 = 1, X_2 = 2, X_3 = 1, X_4 = 2$. We know that the chain is starting at 1, so the chance of the path is
$$
1 \cdot P(1, 1)P(1, 2)P(2, 1)P(1, 2) = 0.5 \times 0.25 \times 0.25 \times 0.25 = 0.0078125
$$
The method `prob_of_path` takes the initial distribution and path as its arguments, and returns the probability of the path:
```
circle_walk.prob_of_path(initial, [1, 1, 2, 1, 2])
```
#### Distribution of $X_n$ ####
Remember that the chain starts at 1. So $P_0$, the distribution of $X_0$ is:
```
initial
```
We know that $P_1$ must place probability 0.5 at Point 1 and 0.25 each at Points 2 and 5. This is confirmed by the `distribution` method that applies to a MarkovChain object. Its first argument is the initial distribution, and its second is the number of steps $n$. It returns a distribution object that is the distribution of $X_n$.
```
P_1 = circle_walk.distribution(initial, 1)
P_1
```
What's the probability that the chain has the value 3 at time 2? That's $P_2(3)$ which we can calculate by conditioning on $X_1$:
$$
P_2(3) = \sum_{i=1}^5 P_1(i)P(i, 3)
$$
The distribution of $X_1$ is $P_1$, given above. Here are those probabilities in an array:
```
P_1.column('Probability')
```
The `3` column of the transition matrix gives us, for each $i$, the chance of getting from $i$ to 3 in one step.
```
circle_walk.column('3')
```
So the probability that the chain has the value 3 at time 2 is $P_2(3)$ which is equal to:
```
sum(P_1.column('Probability')*circle_walk.column('3'))
```
Similarly, $P_2(2)$ is equal to:
```
sum(P_1.column('Probability')*circle_walk.column('2'))
```
And so on. The `distribution` method finds all these probabilities for us.
```
P_2 = circle_walk.distribution(initial, 2)
P_2
```
At time 3, the chain continues to be much more likely to be at 1, 2, or 5 compared to the other two states. That's because it started at Point 1 and is lazy.
```
P_3 = circle_walk.distribution(initial, 3)
P_3
```
But by time 10, something interesting starts to emerge.
```
P_10 = circle_walk.distribution(initial, 10)
P_10
```
The chain is almost equally likely to be at any of the five states. By time 50, it seems to have completely forgotten where it started, and is distributed uniformly on the state space.
```
P_50 = circle_walk.distribution(initial, 50)
P_50
```
As time passes, this chain gets "all mixed up", regardless of where it started. That is perhaps not surprising as the transition probabilities are symmetric over the five states. Let's see what happens when we cut the circle between Points 1 and 5 and lay it out in a line.
### Reflecting Random Walk ###
The state space and transition probabilities remain the same, except when the chain is at the two "edge" states.
- If the chain is at Point 1, then at the next step it either stays there or moves to Point 2 with equal probability: $P(1, 1) = 0.5 = P(1, 2)$.
- If the chain is at Point 5, then at the next step it either stays there or moves to Point 4 with equal probability: $P(5, 5) = 0.5 = P(5, 4)$.
We say that there is *reflection* at the boundaries 1 and 5.

```
def ref_walk_probs(i, j):
if i-j == 0:
return 0.5
elif 2 <= i <= 4:
if abs(i-j) == 1:
return 0.25
else:
return 0
elif i == 1:
if j == 2:
return 0.5
else:
return 0
elif i == 5:
if j == 4:
return 0.5
else:
return 0
trans_tbl = Table().states(s).transition_function(ref_walk_probs)
refl_walk = trans_tbl.toMarkovChain()
print('Transition Matrix')
refl_walk
```
Let the chain start at Point 1 as it did in the last example. That initial distribution was defined as `initial`. At time 1, therefore, the chain is either at 1 or 2, and at times 2 and 3 it is likely to still be around 1.
```
refl_walk.distribution(initial, 1)
refl_walk.distribution(initial, 3)
```
But by time 20, the distribution is settling down:
```
refl_walk.distribution(initial, 20)
```
And by time 100 it has settled into what is called its *steady state*.
```
refl_walk.distribution(initial, 100)
```
This steady state distribution isn't uniform. But it is steady. If you increase the amount of time for which the chain has run, you get the same distribution for the value of the chain at that time.
That's quite remarkable. In the rest of this chapter, we will look more closely at what's going on.
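The same convergence can be checked directly with NumPy, outside the `MarkovChain` machinery. In this minimal sketch (the $5 \times 5$ reflecting-walk matrix is written out by hand from the rules above), raising the transition matrix to a large power makes every row converge to the same steady state vector, which for this walk works out to (1/8, 1/4, 1/4, 1/4, 1/8).
```
import numpy as np

P_refl = np.array([
    [0.50, 0.50, 0.00, 0.00, 0.00],
    [0.25, 0.50, 0.25, 0.00, 0.00],
    [0.00, 0.25, 0.50, 0.25, 0.00],
    [0.00, 0.00, 0.25, 0.50, 0.25],
    [0.00, 0.00, 0.00, 0.50, 0.50],
])
np.linalg.matrix_power(P_refl, 100)[0]   # distribution of X_100 given X_0 = 1
```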
```
import csv
from pprint import pprint
import random
import numpy as np
alphabet = ['',
'ا', 'ب', 'ت', 'ث','ج','ح', 'خ',
'د','ذ','ر','ز', 'س','ش','ص',
'ض','ط','ظ','ع','غ','ف','ق',
'ك','ل','م','ن','ه','و','ي',
'ء','ى','أ','ؤ']
def xalphabetin(char):
nums = list(char.encode('utf8'))
nums[0] = nums[0] - 216
num = (nums[0] * 256) + nums[1]
return num
def alphabetin(char):
if(char == 'ؤ'):
return 29
if(char == 'أ'):
return 29
if(char == 'ى'):
return 1
if(char == 'ئ'):
return 1
return alphabet.index(char)
def alphabetout(num):
return alphabet[num]
def binin(dcty):
x = np.zeros(20*512) # 20 letters max x (from unicode)
y = np.zeros((4*30) + 1) # 4 letters max y (mapped to alphabet) + 1 "no root" flag
lx = 0 # letter index
for letter in list(dcty['word']):
ix = (lx*512) + xalphabetin(letter)
x[ix] = 1
lx += 1
if dcty['rootsize'] > 0:
y[(0*30) + dcty['root1']] = 1
if dcty['rootsize'] > 1:
y[(1*30) + dcty['root2']] = 1
if dcty['rootsize'] > 2:
y[(2*30) + dcty['root3']] = 1
if dcty['rootsize'] > 3:
y[(3*30) + dcty['root4']] = 1
if dcty['rootsize'] == 0:
y[4*30] = 1 # no root
return np.array([x, y])
def binout(by):
root = ''
if by[120] == 1:
return ''
for yix in range(0, 30):
if by[yix] == 1:
lix = yix % 30
root += alphabetout(lix)
break
for yix in range(30, 60):
if by[yix] == 1:
lix = yix % 30
root += alphabetout(lix)
break
for yix in range(60, 90):
if by[yix] == 1:
lix = yix % 30
root += alphabetout(lix)
break
for yix in range(90, 120):
if by[yix] == 1:
lix = yix % 30
root += alphabetout(lix)
break
if len(list(root)) == 2:
root += root[1]
return root
def transformin(row):
if(len(row[1]) == 0):
# null object
dcty = {
'word': row[0],
'rootsize': 0,
'root1': 0,
'root2': 0,
'root3': 0,
'root4': 0
}
binxy = binin(dcty)
dcty['x'] = binxy[0]
dcty['y'] = binxy[1]
return dcty
rootlist = list(row[1])
rootsize = len(rootlist)
if(len(rootlist) == 2):
rootlist += [rootlist[1]]
rootsize = 3
if(rootlist[2] not in alphabet):
# null object
dcty = {
'word': row[0],
'rootsize': 0,
'root1': 0,
'root2': 0,
'root3': 0,
'root4': 0
}
binxy = binin(dcty)
dcty['x'] = binxy[0]
dcty['y'] = binxy[1]
return dcty
if(len(rootlist) == 3):
rootlist += [""]
dcty = {
'word': row[0],
'rootsize': rootsize,
'root1': alphabetin(rootlist[0]),
'root2': alphabetin(rootlist[1]),
'root3': alphabetin(rootlist[2]),
'root4': alphabetin(rootlist[3])
}
binxy = binin(dcty)
dcty['x'] = binxy[0]
dcty['y'] = binxy[1]
return dcty
def transformout(data):
return [data['word'], alphabetout(data['root1']) + alphabetout(data['root2']) + alphabetout(data['root3']) + alphabetout(data['root4'])]
datain = []
with open('roots-all.csv') as csvfile:
readcsv = csv.reader(csvfile, delimiter=',')
next(readcsv)
for row in readcsv:
data = transformin(row)
if(data == False):
continue
datain += [data]
for i in range(3):
    r = random.randint(0, len(datain) - 1)  # randint is inclusive, so avoid indexing past the end
pprint(transformout(datain[r]))
pprint(datain[r])
pprint(binout(datain[r]['y']))
print("\n")
from sklearn.model_selection import train_test_split
X = datain
y = np.array([d['y'] for d in datain])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.025)
DX_train = np.array([d['x'] for d in X_train])
DX_test = np.array([d['x'] for d in X_test])
pprint(np.shape(DX_train))
pprint(np.shape(DX_test))
pprint(np.shape(y_train))
pprint(np.shape(y_test))
from keras.models import Sequential
from keras import regularizers
from keras.layers import Dense
model = Sequential()
model.add(Dense(8000,
input_dim=10240,
kernel_initializer='normal',
activation='sigmoid'))
model.add(Dense(121,
kernel_initializer='normal',
activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(DX_train, y_train, validation_data=(DX_test, y_test), epochs=7, batch_size=200)
loss_and_metrics = model.evaluate(DX_test, y_test, batch_size=128)
pprint(loss_and_metrics)
def ytobin(y):
by = np.zeros(121)
if y[120] == 1:
        by[120] = 1
return by
by[np.argmax(y[0:30])] = 1
by[np.argmax(y[30:60]) + 30] = 1
if np.max(y[60:90]) > 0.02:
by[np.argmax(y[60:90]) + 60] = 1
if np.max(y[90:120]) > 0.01:
by[np.argmax(y[90:120]) + 90] = 1
return by
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
score = []
for r in range(len(X_test)):
r_pred = model.predict(DX_test[r:r+1,:])[0]
if binout(ytobin(r_pred)) == transformout(X_test[r])[1]:
print("Correct: " + str(transformout(X_test[r])))
score += [1]
else:
print("Missed: " + str(transformout(X_test[r])) + " Predicted: " + binout(ytobin(r_pred)))
score += [0]
print("Score: " + str(round(100 * (np.sum(score) / len(score)), 1)) + "%")
x1 = np.array([transformin(["أرحام",""])['x']])
r_pred = model.predict([x1])[0]
print(binout(ytobin(r_pred)))
```
# Twitter Sentiment Analysis
```
import twitter
import pandas as pd
import numpy as np
```
### Source
https://towardsdatascience.com/creating-the-twitter-sentiment-analysis-program-in-python-with-naive-bayes-classification-672e5589a7ed
### Authenticating Twitter API
```
# Authenticating our twitter API credentials
twitter_api = twitter.Api(consumer_key='f2ujCRaUnQJy4PoiZvhRQL4n4',
consumer_secret='EjBSQirf7i83T7CX90D5Qxgs9pTdpIGIsVAhHVs5uvd0iAcw5V',
access_token_key='1272989631404015616-5XMQkx65rKfQU87UWAh40cMf4aCzSq',
access_token_secret='emfWcF8fyfqoyywfPCJnz4jXt6DFXfndro59UK9IMAMgy')
# Test authentication to make sure it was successful
print(twitter_api.VerifyCredentials())
```
### Building the Test Set
```
#We first build the test set, consisting of only 100 tweets for simplicity.
#Note that we can only download 180 tweets every 15min.
def buildTestSet(search_keyword):
try:
tweets_fetched = twitter_api.GetSearch(search_keyword, count = 100)
print("Fetched " + str(len(tweets_fetched)) + " tweets for the term " + search_keyword)
return [{"text":status.text, "label":None} for status in tweets_fetched]
except:
print("Unfortunately, something went wrong..")
return None
#Testing out fetching the test set. The lines below print out the first few tweets in our test set.
search_term = input("Enter a search keyword:")
testDataSet = buildTestSet(search_term)
print(testDataSet[0:4])
testDataSet[0]
#df = pd.DataFrame(list())
#df.to_csv('tweetDataFile.csv')
```
### Building the Training Set
We will be using a downloadable training set, consisting of 5,000 tweets. These tweets have already been labelled as positive/negative. We use this training set to calculate the posterior probabilities of each word appearing and its respective sentiment.
```
#As Twitter doesn't allow the storage of the tweets on personal drives, we have to create a function to download
#the relevant tweets that will be matched to the Tweet IDs and their labels, which we have.
def buildTrainingSet(corpusFile, tweetDataFile, size):
import csv
import time
count = 0
corpus = []
with open(corpusFile,'r') as csvfile:
lineReader = csv.reader(csvfile,delimiter=',', quotechar="\"")
for row in lineReader:
if count <= size:
corpus.append({"tweet_id":row[2], "label":row[1], "topic":row[0]})
count += 1
else:
break
rate_limit = 180
sleep_time = 900/180
trainingDataSet = []
for tweet in corpus:
try:
status = twitter_api.GetStatus(tweet["tweet_id"])
print("Tweet fetched" + status.text)
tweet["text"] = status.text
trainingDataSet.append(tweet)
time.sleep(sleep_time)
except:
continue
# now we write them to the empty CSV file
with open(tweetDataFile,'w') as csvfile:
linewriter = csv.writer(csvfile,delimiter=',',quotechar="\"")
for tweet in trainingDataSet:
try:
linewriter.writerow([tweet["tweet_id"], tweet["text"], tweet["label"], tweet["topic"]])
except Exception as e:
print(e)
return trainingDataSet
#This function is used to download the actual tweets. It takes hours to run and we only need to run it once
#in order to get all 5,000 training tweets. The 'size' parameter below is the number of tweets that we want to
#download. If 5,000 => set size=5,000
'''
corpusFile = "corpus.csv"
tweetDataFile = "tweetDataFile.csv"
trainingData = buildTrainingSet(corpusFile, tweetDataFile, 5000)
'''
#When this code stops running, we will have a tweetDataFile.csv full of the tweets that we downloaded.
#This line counts the number of tweets and their labels in the Corpus.csv file that we originally downloaded
corp = pd.read_csv("corpus.csv", header = 0 , names = ['topic','label', 'tweet_id'] )
corp['label'].value_counts()
#As a check, we look at the first 5 lines in our new tweetDataFile.csv
trainingData_copied = pd.read_csv("tweetDataFile.csv", header = None, names = ['tweet_id', 'text', 'label', 'topic'])
trainingData_copied.head()
len(trainingData_copied)
#We check the number of tweets by each label in our training set
trainingData_copied['label'].value_counts()
df = trainingData_copied.copy()
lst_labels = df['label'].unique()
count_rows_keep = df['label'].value_counts().min()
neutral_df = df[df['label'] == 'neutral'].sample(n= count_rows_keep , random_state = 3)
irrelevant_df = df[df['label'] == 'irrelevant'].sample(n= count_rows_keep , random_state = 2)
negative_df = df[df['label'] == 'negative'].sample(n= count_rows_keep , random_state = 3)
positive_df = df[df['label'] == 'positive'].sample(n= count_rows_keep , random_state = 3)
lst_df = [neutral_df, irrelevant_df, negative_df, positive_df]
trainingData_copied = pd.concat(lst_df)
trainingData_copied['label'].value_counts()
'''
def oversample(df):
lst_labels = df['label'].unique()
for x in lst_labels:
if len(df[df['label'] == x]) < df['label'].value_counts().max():
df=df.append(df[df['label'] == x]*((df['label'].value_counts().max())/ len(df[df['label'] == 'x'])))
return df
'''
'''
def undersample(df):
lst_labels = df['label'].unique()
for x in lst_labels:
if len(df[df['label'] == 'x']) > df['label'].value_counts().min():
count_rows_keep = df['label'].value_counts().min()
sample = df[df['label'] == 'x'].sample(n= count_rows_keep , random_state = 1)
index_drop = pd.concat([df[df['label'] == 'x'], sample]).drop_duplicates(keep=False).index
df = df.drop(index_drop)
return df
'''
trainingData_copied = trainingData_copied.to_dict('records')
```
### Pre-processing
Here we use the NLTK library to filter for keywords and remove irrelevant words in tweets. We also remove punctuation and things like images (emojis) as they cannot be classified using this model.
```
import re #a library that makes parsing strings and modifying them more efficient
from nltk.tokenize import word_tokenize
from string import punctuation
from nltk.corpus import stopwords
import nltk #Natural Processing Toolkit that takes care of any processing that we need to perform on text
#to change its form or extract certain components from it.
#nltk.download('popular') #We need this if certain nltk libraries are not installed.
class PreProcessTweets:
def __init__(self):
self._stopwords = set(stopwords.words('english') + list(punctuation) + ['AT_USER','URL'])
def processTweets(self, list_of_tweets):
processedTweets=[]
for tweet in list_of_tweets:
processedTweets.append((self._processTweet(tweet["text"]),tweet["label"]))
return processedTweets
def _processTweet(self, tweet):
tweet = tweet.lower() # convert text to lower-case
tweet = re.sub('((www\.[^\s]+)|(https?://[^\s]+))', 'URL', tweet) # remove URLs
tweet = re.sub('@[^\s]+', 'AT_USER', tweet) # remove usernames
tweet = re.sub(r'#([^\s]+)', r'\1', tweet) # remove the # in #hashtag
        tweet = word_tokenize(tweet) # split the tweet into a list of word tokens
return [word for word in tweet if word not in self._stopwords]
#Here we call the function to pre-process both our training and our test set.
tweetProcessor = PreProcessTweets()
preprocessedTrainingSet = tweetProcessor.processTweets(trainingData_copied)
preprocessedTestSet = tweetProcessor.processTweets(testDataSet)
```
### Building the Naive Bayes Classifier
We apply a classifier based on Bayes' Theorem, hence the name. It lets us compute the posterior probability of an event (here, the sentiment of a tweet being positive, neutral, or negative) from probabilities we already know about the words the tweet contains.
The posterior probability is calculated as follows:
$P(A|B) = \frac{P(B|A)\times P(A)}{P(B)}$
The final sentiment is assigned based on the highest probability of the tweet falling in each one.
#### To read more about Bayes Classifier in the context of classification:
https://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
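As a rough illustration of how the rule plays out (this is not the NLTK implementation used below, just a toy sketch with made-up probabilities), the classifier scores each label by combining its prior with the likelihood of every observed word, and then picks the label with the largest score:
```
import math

# toy priors and per-word likelihoods, purely hypothetical
priors = {"positive": 0.5, "negative": 0.5}
likelihoods = {
    "positive": {"love": 0.08, "crash": 0.01},
    "negative": {"love": 0.02, "crash": 0.07},
}

def score(words, label):
    # work in log space to avoid underflow when there are many words
    total = math.log(priors[label])
    for w in words:
        total += math.log(likelihoods[label].get(w, 1e-6))  # small floor for unseen words
    return total

tweet_words = ["love", "crash"]
print(max(priors, key=lambda label: score(tweet_words, label)))  # -> 'negative'
```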
### Build the vocabulary
```
#Here we attempt to build a vocabulary (a list of words) of all words present in the training set.
import nltk
def buildVocabulary(preprocessedTrainingData):
all_words = []
for (words, sentiment) in preprocessedTrainingData:
all_words.extend(words)
wordlist = nltk.FreqDist(all_words)
word_features = wordlist.keys()
return word_features
#This function generates a list of all words (all_words) and then turns it into a frequency distribution (wordlist)
#word_features is the list of distinct words, i.e. the keys of the frequency distribution.
```
### Matching tweets against our vocabulary
Here we go through all the words in the training set (i.e. our word_features list), comparing every word against the tweet at hand, associating a number with the word following:
label 1 (true): if word in vocabulary occurs in tweet
label 0 (false): if word in vocabulary does not occur in tweet
```
def extract_features(tweet):
tweet_words = set(tweet)
features = {}
for word in word_features:
features['contains(%s)' % word] = (word in tweet_words)
return features
```
### Building our feature vector
```
word_features = buildVocabulary(preprocessedTrainingSet)
trainingFeatures = nltk.classify.apply_features(extract_features, preprocessedTrainingSet)
```
This feature vector shows if a particular tweet contains a certain word out of all the words present in the corpus in the training data + label (positive, negative or neutral) of the tweet.
We will feed these feature vectors into the Naive Bayes classifier, which combines the prior probability that a randomly chosen tweet carries a given label with the likelihood of observing each word given that label, and assigns the label with the highest posterior probability.
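To get a feel for what one of these feature vectors looks like, here is a toy illustration with a hypothetical three-word vocabulary (not the real `word_features` built above): each vocabulary word becomes a boolean entry keyed as `contains(<word>)`.
```
# toy vocabulary, purely for illustration (the real word_features is much larger)
toy_vocabulary = ['great', 'terrible', 'phone']
toy_tweet_words = {'great', 'phone', 'battery'}
toy_features = {'contains(%s)' % w: (w in toy_tweet_words) for w in toy_vocabulary}
print(toy_features)
# {'contains(great)': True, 'contains(terrible)': False, 'contains(phone)': True}
```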
### Train the Naive Bayes Classifier
```
#This line trains our Bayes Classifier
NBayesClassifier = nltk.NaiveBayesClassifier.train(trainingFeatures)
```
## Test Classifier
```
#We now run the classifier and test it on 100 tweets previously downloaded in the test set, on our specified keyword.
NBResultLabels = [NBayesClassifier.classify(extract_features(tweet[0])) for tweet in preprocessedTestSet]
# get the majority vote
if NBResultLabels.count('positive') > NBResultLabels.count('negative'):
print("Overall Positive Sentiment")
print("Positive Sentiment Percentage = " + str(100*NBResultLabels.count('positive')/len(NBResultLabels)) + "%")
else:
print("Overall Negative Sentiment")
print("Negative Sentiment Percentage = " + str(100*NBResultLabels.count('negative')/len(NBResultLabels)) + "%")
print("Positive Sentiment Percentage = " + str(100*NBResultLabels.count('positive')/len(NBResultLabels)) + "%")
print("Number of negative comments = " + str(NBResultLabels.count('negative')))
print("Number of positive comments = " + str(NBResultLabels.count('positive')))
print("Number of neutral comments = " + str(NBResultLabels.count('neutral')))
print("Number of irrelevant comments = " + str(NBResultLabels.count('irrelevant')))
len(preprocessedTestSet)
import plotly.graph_objects as go
sentiment = ["Negative","Positive","Neutral", "Irrelevant"]
fig = go.Figure([go.Bar(x=sentiment, y=[str(NBResultLabels.count('negative')), str(NBResultLabels.count('positive')), str(NBResultLabels.count('neutral')), str(NBResultLabels.count('irrelevant'))])])
fig.update_layout(title_text='Sentiment Results for Specific Keyword')
fig.update_layout(template = 'simple_white',
title_text='Twitter Sentiment Results',
yaxis=dict(
        title='Count',
titlefont_size=16,
tickfont_size=14,) ,
)
fig.show()
```
### TBC:
- Retrieve tweets about keyword, not from keyword (username)
```
#yds data mapping 2012 (eastbound and westbound)
```
#### This script maps the 2012 Galway traffic data (Bridge 1)
```
#python list to store csv data as mapping suggest
#Site No Dataset Survey Company Client Project Reference Method of Survey Address Latitude Longtitude Easting Northing Date From Date To Time From Time To Observations Weather Junction Type Vehicle Type Direction Count
#Site No,Dataset,Survey Company,Client,Project Reference,Method of Survey,Address,Latitude,Longtitude,Easting,Northing,Date From,Date To,Time From,Time To,Observations,Weather,Junction Type,Vehicle Type,Direction,Count
header=["Site No","Dataset","Survey Company","Client","Project Reference","Method of Survey","Address","Latitude","Longtitude","Easting","Northing",
"Date From","Date To","Time From","Time To","Observations","Weather","Junction Type","Vehicle Type","Direction","Count"]
full_data_template = ["","Galway 2016 Br. 1","Idaso Ltd.","Galway City Council","2016 Annual Survey","JTC","Quincentenary Bridge",53.282696,-9.06065,495956.4,5903720.6,"","","","","Nothing to report","Sunny and generally dry but there were some light showers","Link","","",""]
data_template = ["","Galway 2012 Br. 1","","Galway City Council","","","Quincentenary Bridge",53.282696,-9.06065,495956.4,5903720.6,"","","","","","","Link","","",""]
directions_alphabet = ["", "", "", "", "", "", "A TO F", "A TO E", "A TO D", "A TO C", "A TO B", "A TO A", "B TO A", "B TO F", "B TO E", "B TO D", "B TO C", "B TO B", "C TO B", "C TO A", "C TO F", "C TO E", "C TO D", "C TO C", "D TO C", "D TO B", "D TO A", "D TO F", "D TO E", "D TO D", "E TO D", "E TO C", "E TO B", "E TO A", "E TO F", "E TO E", "F TO E", "F TO D", "F TO C", "F TO B", "F TO A", "F TO F"]
outputfile_name="data/2012/mapped-final/bridge1_2012_eastbound_verified.csv"
vich_type = ["Motorcycles","Cars","LGV","HGV","Buses"]
directions = ["Westbound","Eastbound"]
counts_in_rows = [3,5,7,9,11]
#times_hourly = ["00:00","01:00","02:00","03:00","04:00","05:00","06:00","07:00","08:00","08:00","09:00","10:00","11:00"]
#Read csv file data row by row
#this file wil only fill sections (0,11,12,13,14,19,20,21)
import csv
with open('data/2012/refined/Br1_Eastbound_2012.csv', 'r') as source:
#write data again acoording to the schema
#import csv
    with open(outputfile_name, 'w', newline='') as output:
csv_sourcereader = csv.reader(source, delimiter=',', quotechar='\"')
outputwriter = csv.writer(output, delimiter=',', quotechar='\"')
#putting the header
outputwriter.writerow(header)
#counter to scape file headers
c = 0
#list to get all possible readings
quinque_data = []
#csv reader object to list
sourcereader = list(csv_sourcereader)
        for r in range(0, len(sourcereader)):
#print ', '.join(row)
            print(sourcereader[r])
            import copy
            # get both possible directions (A-B, B-A)
#data_A_B = copy.deepcopy(data_template)
#data_B_A = copy.deepcopy(data_template)
data = copy.deepcopy(data_template)
#print data
if c > 1 :
                for x in range(0, 5):
#a-b
#data_A_B[0]=row[0] # Site NO
#data_A_B[11]=row[2] # date from
#data_A_B[12]=row[2] # date to
#data_A_B[13]=row[3] # time from
#data_A_B[14]=row[4] # time to
#data_A_B[18]=row[5] # Vehicle Type
#b-a
#data_B_A[0]=row[0] # Site NO
#data_B_A[11]=row[2] # date from
#data_B_A[12]=row[2] # date to
#data_B_A[13]=row[3] # time from
#data_B_A[14]=row[4] # time to
#data_B_A[18]=row[5] # Vehicle Type
data[0]="" # Site NO
data[11]=sourcereader[r][0] # date from
data[12]=sourcereader[r][0] # date to
data[13]="\'"+str(sourcereader[r][1]) # time from
#last one to avoid index out range
if sourcereader[r][1] != "23:00":
data[14]="\'"+str(sourcereader[r+1][1]) # time to
elif sourcereader[r][1] == "23:00":
data[14]="\'24:00" # time to
data[18]=vich_type[x] # Vehicle Type
data[19]=sourcereader[r][13] # direction
data[20]=sourcereader[r][counts_in_rows[x]] # count
#appending data row to the 5 rows batch
quinque_data.append(copy.deepcopy(data))
for data_row in quinque_data:
outputwriter.writerow(data_row)
c = c + 1
#print data
#del data_B_A [:]
#del data_A_B[:]
del data[:]
del quinque_data [:]
```
## Data Preperation for the first Model
Welcome to the first notebook. Here we'll process the data from downloading to what we will be using to train our first model - **'Wh’re Art Thee Min’ral?'**.
The steps we'll be following here are:
- Downloading the SARIG Geochem Data Package. **(~350 Mb)**
- Understanding the data columns in our csv of interest.
- Cleaning and applying some processing.
- Saving our processed file into a csv.
- _And seeing some unnecessary memes in between_.
You can upload this notebook and run it on colab or on Jupyter-Notebook locally.
```
# import the required package - Pandas
import pandas as pd
```
You can simply download the data by clicking the link [here](https://unearthed-exploresa.s3-ap-southeast-2.amazonaws.com/Unearthed_5_SARIG_Data_Package.zip). You can also download it by simply running the cell down below.
We recommend using **Google Colab** and downloading the data there if you have a poor internet connection.

Colab has a decent internet speed of around **~15-20 Mb/s** which is more than enough for the download.
```
# You can simply download the data by running this cell
!wget https://unearthed-exploresa.s3-ap-southeast-2.amazonaws.com/Unearthed_5_SARIG_Data_Package.zip
```
For extracting, if you wish to keep the downloaded file for later use, you can first mount your Google Drive and then extract the files there. You can read more about mounting Google Drive to Colab [here](https://towardsdatascience.com/downloading-datasets-into-google-drive-via-google-colab-bcb1b30b0166).
**Note** - One of the files is really big (~10 GB), so it might take some time to extract as well. *Don't think that it's stuck!*
```
# Let's first create a directory to extract the downloaded zip file.
!mkdir 'GeoChemData'
# Now let's unzip the files into the data directory that we created.
!unzip 'Unearthed_5_SARIG_Data_Package.zip' -d 'GeoChemData/'
# Read the df_details.csv
# We use unicode_escape as the encoding to avoid etf-8 error.
df_details = pd.read_csv('/content/GeoChemData/SARIG_Data_Package3_Exported06072020/sarig_dh_details_exp.csv', encoding= 'unicode_escape')
# Let's view the first few columns
df_details.head()
# Data Column Information
df_details.info()
```
### What columns do we need?
We only need the following three columns from this dataframe ->
- `LONGITUDE_GDA94`: This is the longitude of the mine/mineral location in the **EPSG:4283** Coordinate Reference System (CRS).
- `LATITUDE_GDA94`: This is the latitude of the mine/mineral location in the **EPSG:4283** Coordinate Reference System (CRS).
- `MINERAL_CLASS`: Mineral Class is a column containing **two unique values (Y/N)** representing if there is any mineralization or not.
> *Note - We are using GDA94 over GDA20 because of the former's standardness.* You can understand more about it our glossary's page [here]().
```
# Here the only relevant data we need is the location and the Mineral Class (Yes/No)
df_final = df_details[['LONGITUDE_GDA94','LATITUDE_GDA94', 'MINERAL_CLASS']]
# Drop the rows with null values
df_final = df_final.dropna()
# Lets print out a few rows of the new dataframe.
df_final.head()
# Let's check the data points in both classes
print("Number of rows with Mineral Class Yes is", len(df_final.query('MINERAL_CLASS=="Y"')))
print("Number of rows with Mineral Class No is", len(df_final.query('MINERAL_CLASS=="N"')))
```
The total number of rows in the new dataset is **147407 (Y) + 174436 (N) = 321843**, which is quite sufficient for training our models.
Also, the ratio of Class `'Y'` to Class `'N'` is roughly 0.85 : 1, which is quite _**balanced**_.

Now that we have our csv, let's go ahead and save our progress into a new csv before the session expires!

```
# Create a new directory to save the csv.
!mkdir 'GeoChemData/exported'
# Convert the dataframe into a new csv file.
df_final.to_csv('GeoChemData/exported/mod1_unsampled.csv')
# Finally if you are on google colab, you can simply download using ->
from google.colab import files
files.download('GeoChemData/exported/mod1_unsampled.csv')
```
## Hyperopt
### The Iris Dataset
In this section we will go through four complete examples of using hyperopt to tune parameters on the classic Iris dataset. We will cover K nearest neighbors (KNN), support vector machines (SVM), decision trees, and random forests.
For this task we will use the classic Iris dataset and do some supervised machine learning. The dataset has 4 input features and 3 output classes. The data are labeled as belonging to class 0, 1, or 2, which map to different species of iris. The input has 4 columns: sepal length, sepal width, petal length, and petal width, all in centimeters. We will use these 4 features to learn a model that predicts one of the three output classes. Because the data are provided by sklearn, they come with a nice DESCR attribute that gives detailed information about the dataset. Try the following code for more details.
```
from sklearn import datasets
iris = datasets.load_iris()
print(iris.feature_names) # input names
print(iris.target_names) # output names
print(iris.DESCR) # everything else
```
### K Nearest Neighbors (KNN)
We will now use hyperopt to find the best parameters for a K nearest neighbors (KNN) machine learning model. The KNN model classifies a data point from the test set based on the majority class of the k nearest data points in the training set.
```
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
import matplotlib.pyplot as plt
import numpy as np, pandas as pd
from math import *
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
# load the dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
# loss function
def hyperopt_train_test(params):
clf = KNeighborsClassifier(**params)
return cross_val_score(clf, X, y).mean()
# hp.choice(label, options), where options should be a python list or tuple
# space4knn is the parameter space that will be fed into the loss function
space4knn = {
'n_neighbors': hp.choice('n_neighbors', range(1,100))
}
# define the objective function
def f(params):
acc = hyperopt_train_test(params)
return {'loss': -acc, 'status': STATUS_OK}
# the Trials object lets us store information from each evaluation step
trials = Trials()
# fmin takes a function to minimize; the algo argument selects the search algorithm and max_evals caps the number of evaluations
best = fmin(f, space4knn, algo=tpe.suggest, max_evals=100, trials=trials)
print('best:',best)
print('trials:')
for trial in trials.trials[:2]:
print(trial)
```
Now let's look at a plot of the output. The y-axis is the cross validation score and the x-axis is the number of nearest neighbors k. Here is the code and the resulting plot:
```
f, ax = plt.subplots(1) #, figsize=(10,10))
xs = [t['misc']['vals']['n_neighbors'] for t in trials.trials]
ys = [-t['result']['loss'] for t in trials.trials]
ax.scatter(xs, ys, s=20, linewidth=0.01, alpha=0.5)
ax.set_title('Iris Dataset - KNN', fontsize=18)
ax.set_xlabel('n_neighbors', fontsize=12)
ax.set_ylabel('cross validation accuracy', fontsize=12)
```
The accuracy drops sharply once k exceeds 63. That is because the dataset has only 50 instances of each class. So let's dig deeper by restricting 'n_neighbors' to smaller values.
```
def hyperopt_train_test(params):
clf = KNeighborsClassifier(**params)
return cross_val_score(clf, X, y).mean()
space4knn = {
'n_neighbors': hp.choice('n_neighbors', range(1,50))
}
def f(params):
acc = hyperopt_train_test(params)
return {'loss': -acc, 'status': STATUS_OK}
trials = Trials()
best = fmin(f, space4knn, algo=tpe.suggest, max_evals=100, trials=trials)
print ('best:')
print (best)
f, ax = plt.subplots(1) #, figsize=(10,10))
xs = [t['misc']['vals']['n_neighbors'] for t in trials.trials]
ys = [-t['result']['loss'] for t in trials.trials]
ax.scatter(xs, ys, s=20, linewidth=0.01, alpha=0.5)
ax.set_title('Iris Dataset - KNN', fontsize=18)
ax.set_xlabel('n_neighbors', fontsize=12)
ax.set_ylabel('cross validation accuracy', fontsize=12)
```
The model above did not do any preprocessing. So let's normalize and scale the features and see whether that helps, using the following code:
```
# normalize and scale the features
from sklearn.preprocessing import normalize, scale
iris = datasets.load_iris()
X = iris.data
y = iris.target
def hyperopt_train_test(params):
X_ = X[:]
if 'normalize' in params:
if params['normalize'] == 1:
X_ = normalize(X_)
del params['normalize']
if 'scale' in params:
if params['scale'] == 1:
X_ = scale(X_)
del params['scale']
clf = KNeighborsClassifier(**params)
return cross_val_score(clf, X_, y).mean()
space4knn = {
'n_neighbors': hp.choice('n_neighbors', range(1,50)),
'scale': hp.choice('scale', [0, 1]),
'normalize': hp.choice('normalize', [0, 1])
}
def f(params):
acc = hyperopt_train_test(params)
return {'loss': -acc, 'status': STATUS_OK}
trials = Trials()
best = fmin(f, space4knn, algo=tpe.suggest, max_evals=100, trials=trials)
print('best:',best)
```
Plot the parameters:
```
parameters = ['n_neighbors', 'scale', 'normalize']
cols = len(parameters)
f, axes = plt.subplots(nrows=1, ncols=cols, figsize=(15,5))
cmap = plt.cm.jet
for i, val in enumerate(parameters):
xs = np.array([t['misc']['vals'][val] for t in trials.trials]).ravel()
ys = [-t['result']['loss'] for t in trials.trials]
xs, ys = zip(*sorted(zip(xs, ys)))
ys = np.array(ys)
axes[i].scatter(xs, ys, s=20, linewidth=0.01, alpha=0.75, c=cmap(float(i)/len(parameters)))
```
### Support Vector Machines (SVM)
Since this is a classification task, we will use sklearn's SVC class. The code is as follows:
```
from sklearn.svm import SVC
def hyperopt_train_test(params):
X_ = X[:]
if 'normalize' in params:
if params['normalize'] == 1:
X_ = normalize(X_)
del params['normalize']
if 'scale' in params:
if params['scale'] == 1:
X_ = scale(X_)
del params['scale']
clf = SVC(**params)
return cross_val_score(clf, X_, y).mean()
# The SVM model has two very important parameters, C and gamma. C is the penalty coefficient, i.e. the tolerance for error.
# A larger C means less tolerance for errors and a tendency to overfit; a smaller C tends to underfit. If C is too large or too small, generalization suffers.
space4svm = {
'C': hp.uniform('C', 0, 20),
'kernel': hp.choice('kernel', ['linear', 'sigmoid', 'poly', 'rbf']),
'gamma': hp.uniform('gamma', 0, 20),
'scale': hp.choice('scale', [0, 1]),
'normalize': hp.choice('normalize', [0, 1])
}
def f(params):
acc = hyperopt_train_test(params)
return {'loss': -acc, 'status': STATUS_OK}
trials = Trials()
best = fmin(f, space4svm, algo=tpe.suggest, max_evals=100, trials=trials)
print('best:',best)
```
Again, scaling and normalization did not help. The best choice of kernel was the linear kernel, the best C value was 1.4168540399911616, and the best gamma was 15.04230279483486. This set of parameters gives a classification accuracy of 99.3%.
```
parameters = ['C', 'kernel', 'gamma', 'scale', 'normalize']
cols = len(parameters)
f, axes = plt.subplots(nrows=1, ncols=cols, figsize=(20,5))
cmap = plt.cm.jet
for i, val in enumerate(parameters):
xs = np.array([t['misc']['vals'][val] for t in trials.trials]).ravel()
ys = [-t['result']['loss'] for t in trials.trials]
xs, ys = zip(*sorted(zip(xs, ys)))
axes[i].scatter(xs, ys, s=20, linewidth=0.01, alpha=0.25, c=cmap(float(i)/len(parameters)))
axes[i].set_title(val)
axes[i].set_ylim([0.9, 1.0])
```
### Decision Trees
We will try to optimize only a few of the decision tree's parameters. The code is as follows.
```
from sklearn.tree import DecisionTreeClassifier
def hyperopt_train_test(params):
X_ = X[:]
if 'normalize' in params:
if params['normalize'] == 1:
X_ = normalize(X_)
del params['normalize']
if 'scale' in params:
if params['scale'] == 1:
X_ = scale(X_)
del params['scale']
clf = DecisionTreeClassifier(**params)
return cross_val_score(clf, X, y).mean()
space4dt = {
'max_depth': hp.choice('max_depth', range(1,20)),
'max_features': hp.choice('max_features', range(1,5)),
'criterion': hp.choice('criterion', ["gini", "entropy"]),
'scale': hp.choice('scale', [0, 1]),
'normalize': hp.choice('normalize', [0, 1])
}
def f(params):
acc = hyperopt_train_test(params)
return {'loss': -acc, 'status': STATUS_OK}
trials = Trials()
best = fmin(f, space4dt, algo=tpe.suggest, max_evals=100, trials=trials)
print('best:',best)
```
### Random Forests
Let's look at an ensemble classifier, the random forest, which is simply a collection of decision trees.
```
from sklearn.ensemble import RandomForestClassifier
def hyperopt_train_test(params):
X_ = X[:]
if 'normalize' in params:
if params['normalize'] == 1:
X_ = normalize(X_)
del params['normalize']
if 'scale' in params:
if params['scale'] == 1:
X_ = scale(X_)
del params['scale']
clf = RandomForestClassifier(**params)
return cross_val_score(clf, X, y).mean()
space4rf = {
'max_depth': hp.choice('max_depth', range(1,20)),
'max_features': hp.choice('max_features', range(1,5)),
'n_estimators': hp.choice('n_estimators', range(1,20)),
'criterion': hp.choice('criterion', ["gini", "entropy"]),
'scale': hp.choice('scale', [0, 1]),
'normalize': hp.choice('normalize', [0, 1])
}
best = 0
def f(params):
global best
acc = hyperopt_train_test(params)
if acc > best:
best = acc
return {'loss': -acc, 'status': STATUS_OK}
trials = Trials()
best = fmin(f, space4rf, algo=tpe.suggest, max_evals=100, trials=trials)
print('best:')
print(best)
```
Again we get 97.3% accuracy, consistent with the decision tree result.
### All Together Now
Automatically tuning one model's parameters at a time (for example, SVM or KNN) is interesting and instructive, but it is more useful to tune all model parameters at once and end up with the best model overall. This lets us compare every model and every parameter in a single search, giving us the best model.
```
from sklearn.naive_bayes import BernoulliNB
def hyperopt_train_test(params):
t = params['type']
del params['type']
if t == 'naive_bayes':
clf = BernoulliNB(**params)
elif t == 'svm':
clf = SVC(**params)
elif t == 'dtree':
clf = DecisionTreeClassifier(**params)
    elif t == 'knn':
        clf = KNeighborsClassifier(**params)
    elif t == 'randomforest':
        # drop the preprocessing flags before passing the rest to the estimator,
        # otherwise this branch would fall through to the `return 0` case
        params.pop('scale', None)
        params.pop('normalize', None)
        clf = RandomForestClassifier(**params)
    else:
        return 0
return cross_val_score(clf, X, y).mean()
space = hp.choice('classifier_type', [
{
'type': 'naive_bayes',
'alpha': hp.uniform('alpha', 0.0, 2.0)
},
{
'type': 'svm',
'C': hp.uniform('C', 0, 10.0),
'kernel': hp.choice('kernel', ['linear', 'rbf']),
'gamma': hp.uniform('gamma', 0, 20.0)
},
{
'type': 'randomforest',
'max_depth': hp.choice('max_depth', range(1,20)),
'max_features': hp.choice('max_features', range(1,5)),
'n_estimators': hp.choice('n_estimators', range(1,20)),
'criterion': hp.choice('criterion', ["gini", "entropy"]),
'scale': hp.choice('scale', [0, 1]),
'normalize': hp.choice('normalize', [0, 1])
},
{
'type': 'knn',
'n_neighbors': hp.choice('knn_n_neighbors', range(1,50))
}
])
count = 0
best = 0
def f(params):
global best, count
count += 1
acc = hyperopt_train_test(params.copy())
if acc > best:
print ('new best:', acc, 'using', params['type'])
best = acc
if count % 50 == 0:
print ('iters:', count, ', acc:', acc, 'using', params)
return {'loss': -acc, 'status': STATUS_OK}
trials = Trials()
best = fmin(f, space, algo=tpe.suggest, max_evals=50, trials=trials)
print('best:')
print(best)
```
# Black-Scholes Algorithm Using Numba-dppy
## Sections
- [Black-Scholes algorithm](#Black-Scholes-algorithm)
- _Code:_ [Implementation of Black Scholes targeting CPU using Numba JIT](#Implementation-of-Black-Scholes-targeting-CPU-using-Numba-JIT)
- _Code:_ [Implementation of Black Scholes targeting GPU using Kernels](#Implementation-of-Black-Scholes-targeting-GPU-using-Kernels)
- _Code:_ [Implementation of Black Scholes targeting GPU using Numpy](#Implementation-of-Black-Scholes-targeting-GPU-using-Numpy)
## Learning Objectives
* Build a Numba implementation of Black Scholes targeting CPU and GPU using Numba Jit
* Build a Numba-DPPY implementation of Black Scholes on CPU and GPU using Kernel approach
* Build a Numba-DPPY implementation of Black Scholes on GPU using Numpy approach
## numba-dppy
Numba-dppy is a standalone extension to the Numba JIT compiler that adds SYCL programming capabilities to Numba. Numba-dppy is packaged as part of the IDP that comes with oneAPI base toolkit, and you don’t need to install any specific Conda packages. The support for SYCL is via DPC++'s SYCL runtime and other SYCL compilers are not supported by Numba-dppy.
## Black-Scholes algorithm
The Black-Scholes program computes the price of a portfolio of options using partial differential equations. The entire computation performed by Black-Scholes is data-parallel, where each option can be priced independent of other options.
The Black-Scholes Model is one of the most important concepts in modern quantitative finance theory. Developed in 1973 by Fischer Black, Robert Merton, and Myron Scholes, it is still widely used today, and regarded as one of the best ways to determine fair prices of financial derivatives.
### Implementation of Black-Scholes Formula
The Black-Scholes formula is used widely in almost every aspect of quantitative finance. The Black-Scholes calculation has essentially permeated every quantitative finance library by traders and quantitative analysts alike.
Let’s look at a hypothetic situation in which a firm has to calculate European options for millions of financial instruments. For each instrument, it has current price, strike price, and option expiration time. For each set of these data, it makes several thousand Black-Scholes calculations, much like the way options of neighboring stock prices, strike prices, and different option expiration times were calculated.
# Implementation of Black Scholes targeting CPU using Numba JIT
In the following example, we introduce a naive Black-Scholes implementation that targets a CPU using the Numba JIT, where we calculate the Black-Scholes formula described below.
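For reference, the closed form that the kernels in this notebook evaluate is the standard European call/put price. Writing $P$ for the current price, $S$ for the strike, $T$ for the time to expiry, $r$ for the rate, $\sigma$ for the volatility, and $N$ for the standard normal CDF:
$$
d_{1,2} = \frac{\ln(P/S) + (r \pm \sigma^2/2)\,T}{\sigma\sqrt{T}}, \qquad
\text{call} = P\,N(d_1) - S\,e^{-rT}\,N(d_2), \qquad
\text{put} = \text{call} - P + S\,e^{-rT}
$$
In the code, $N(x)$ is evaluated as $\tfrac{1}{2} + \tfrac{1}{2}\,\mathrm{erf}(x/\sqrt{2})$, which is why `erf` appears in every implementation below.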
This is the decorator-based approach, where we offload data parallel code sections like parallel-for, and certain NumPy function calls. With the decorator method, a programmer needs to simply identify the most time-consuming parts of the program. If those parts can be parallelized, the programmer needs to just annotate those sections using Numba-DPPy, and can expect those code sections to execute on a GPU.
1. Inspect the code cell below and click run ▶ to save the code to a file.
2. Next run ▶ the cell in the __Build and Run__ section below the code to compile and execute the code.
```
%%writefile lab/black_sholes_jit_cpu.py
# Copyright (C) 2017-2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import dpctl
import base_bs_erf
import numba as nb
from math import log, sqrt, exp, erf
# blackscholes implemented as a parallel loop using numba.prange
@nb.njit(parallel=True, fastmath=True)
def black_scholes_kernel(nopt, price, strike, t, rate, vol, call, put):
mr = -rate
sig_sig_two = vol * vol * 2
for i in nb.prange(nopt):
P = price[i]
S = strike[i]
T = t[i]
a = log(P / S)
b = T * mr
z = T * sig_sig_two
c = 0.25 * z
y = 1.0 / sqrt(z)
w1 = (a - b + c) * y
w2 = (a - b - c) * y
d1 = 0.5 + 0.5 * erf(w1)
d2 = 0.5 + 0.5 * erf(w2)
Se = exp(b) * S
r = P * d1 - Se * d2
call[i] = r
put[i] = r - P + Se
def black_scholes(nopt, price, strike, t, rate, vol, call, put):
# offload blackscholes computation to CPU (toggle level0 or opencl driver).
with dpctl.device_context(base_bs_erf.get_device_selector()):
black_scholes_kernel(nopt, price, strike, t, rate, vol, call, put)
# call the run function to setup input data and performance data infrastructure
base_bs_erf.run("Numba@jit-loop-par", black_scholes)
```
### Build and Run
Select the cell below and click run ▶ to compile and execute the code:
```
! chmod 755 q; chmod 755 run_black_sholes_jit_cpu.sh; if [ -x "$(command -v qsub)" ]; then ./q run_black_sholes_jit_cpu.sh; else ./run_black_sholes_jit_cpu.sh; fi
```
# Implementation of Black Scholes targeting GPU using Numba JIT
In the example below, we introduce a naive Black-Scholes implementation that targets a GPU using the Numba JIT, where we calculate the Black-Scholes formula as described above.
1. Inspect the code cell below and click run ▶ to save the code to a file.
2. Next run ▶ the cell in the __Build and Run__ section below the code to compile and execute the code.
```
%%writefile lab/black_sholes_jit_gpu.py
# Copyright (C) 2017-2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import dpctl
import base_bs_erf_gpu
import numba as nb
from math import log, sqrt, exp, erf
# blackscholes implemented as a parallel loop using numba.prange
@nb.njit(parallel=True, fastmath=True)
def black_scholes_kernel(nopt, price, strike, t, rate, vol, call, put):
mr = -rate
sig_sig_two = vol * vol * 2
for i in nb.prange(nopt):
P = price[i]
S = strike[i]
T = t[i]
a = log(P / S)
b = T * mr
z = T * sig_sig_two
c = 0.25 * z
y = 1.0 / sqrt(z)
w1 = (a - b + c) * y
w2 = (a - b - c) * y
d1 = 0.5 + 0.5 * erf(w1)
d2 = 0.5 + 0.5 * erf(w2)
Se = exp(b) * S
r = P * d1 - Se * d2
call[i] = r
put[i] = r - P + Se
def black_scholes(nopt, price, strike, t, rate, vol, call, put):
# offload blackscholes computation to GPU (toggle level0 or opencl driver).
with dpctl.device_context(base_bs_erf_gpu.get_device_selector()):
black_scholes_kernel(nopt, price, strike, t, rate, vol, call, put)
# call the run function to setup input data and performance data infrastructure
base_bs_erf_gpu.run("Numba@jit-loop-par", black_scholes)
```
### Build and Run
Select the cell below and click run ▶ to compile and execute the code:
```
! chmod 755 q; chmod 755 run_black_sholes_jit_gpu.sh; if [ -x "$(command -v qsub)" ]; then ./q run_black_sholes_jit_gpu.sh; else ./run_black_sholes_jit_gpu.sh; fi
```
# Implementation of Black Scholes targeting GPU using Kernels
## Writing Explicit Kernels in numba-dppy
Writing a SYCL kernel using the `@numba_dppy.kernel` decorator has similar syntax to writing OpenCL kernels. As such, the numba-dppy module provides similar indexing and other functions as OpenCL. The indexing functions supported inside a `numba_dppy.kernel` are:
* numba_dppy.get_local_id : Gets the local ID of the item
* numba_dppy.get_local_size: Gets the local work group size of the device
* numba_dppy.get_group_id : Gets the group ID of the item
* numba_dppy.get_num_groups: Gets the number of work-groups
Refer to https://intelpython.github.io/numba-dppy/latest/user_guides/kernel_programming_guide/index.html for more details.
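For intuition, the global index used by the kernels in this section can be reconstructed from the group-level functions listed above. The following is a minimal sketch, not part of the Black-Scholes example; it assumes these indexing functions take the dimension index as their argument (as `get_global_id(0)` does in the cells below) and that a dpctl device context is active when the kernel is launched:
```
import numba_dppy

@numba_dppy.kernel
def write_global_ids(out):
    # along dimension 0: global id == group id * local size + local id
    gid = numba_dppy.get_group_id(0) * numba_dppy.get_local_size(0) + numba_dppy.get_local_id(0)
    out[gid] = gid   # same value as numba_dppy.get_global_id(0)
```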
In the following example we use the dppy-kernel approach for explicit kernel programming. If the programmer wants to extract further performance from the offloaded code, they can use this explicit approach with dppy-kernels and tune the GPU parameters, taking advantage of the work-groups and work-items of the device.
1. Inspect the code cell below and click run ▶ to save the code to a file.
2. Next run ▶ the cell in the __Build and Run__ section below the code to compile and execute the code.
```
%%writefile lab/black_sholes_kernel.py
# Copyright (C) 2017-2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import dpctl
import base_bs_erf_gpu
import numba_dppy
from math import log, sqrt, exp, erf
# blackscholes implemented using dppy.kernel
@numba_dppy.kernel(
access_types={"read_only": ["price", "strike", "t"], "write_only": ["call", "put"]}
)
def black_scholes(nopt, price, strike, t, rate, vol, call, put):
mr = -rate
sig_sig_two = vol * vol * 2
i = numba_dppy.get_global_id(0)
P = price[i]
S = strike[i]
T = t[i]
a = log(P / S)
b = T * mr
z = T * sig_sig_two
c = 0.25 * z
y = 1.0 / sqrt(z)
w1 = (a - b + c) * y
w2 = (a - b - c) * y
d1 = 0.5 + 0.5 * erf(w1)
d2 = 0.5 + 0.5 * erf(w2)
Se = exp(b) * S
r = P * d1 - Se * d2
call[i] = r
put[i] = r - P + Se
def black_scholes_driver(nopt, price, strike, t, rate, vol, call, put):
# offload blackscholes computation to GPU (toggle level0 or opencl driver).
with dpctl.device_context(base_bs_erf_gpu.get_device_selector()):
black_scholes[nopt, numba_dppy.DEFAULT_LOCAL_SIZE](
nopt, price, strike, t, rate, vol, call, put
)
# call the run function to setup input data and performance data infrastructure
base_bs_erf_gpu.run("Numba@jit-loop-par", black_scholes_driver)
```
### Build and Run
Select the cell below and click run ▶ to compile and execute the code:
```
! chmod 755 q; chmod 755 run_black_sholes_kernel.sh; if [ -x "$(command -v qsub)" ]; then ./q run_black_sholes_kernel.sh; else ./run_black_sholes_kernel.sh; fi
```
## Implementation of Black Scholes targeting GPU using Numpy
In the following example, we implement Black-Scholes with NumPy-style array operations and target the GPU using this NumPy approach.
1. Inspect the code cell below and click run ▶ to save the code to a file.
2. Next run ▶ the cell in the __Build and Run__ section below the code to compile and execute the code.
```
%%writefile lab/black_sholes_numpy_graph.py
# Copyright (C) 2017-2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
# Copyright (C) 2017-2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import dpctl
import base_bs_erf_graph
import numba as nb
import numpy as np
from numpy import log, exp, sqrt
from math import erf
# Numba does not know the erf function from numpy or scipy
@nb.vectorize(nopython=True)
def nberf(x):
return erf(x)
# blackscholes implemented using numpy function calls
@nb.jit(nopython=True, parallel=True, fastmath=True)
def black_scholes_kernel(nopt, price, strike, t, rate, vol, call, put):
mr = -rate
sig_sig_two = vol * vol * 2
P = price
S = strike
T = t
a = log(P / S)
b = T * mr
z = T * sig_sig_two
c = 0.25 * z
y = 1.0 / sqrt(z)
w1 = (a - b + c) * y
w2 = (a - b - c) * y
d1 = 0.5 + 0.5 * nberf(w1)
d2 = 0.5 + 0.5 * nberf(w2)
Se = exp(b) * S
r = P * d1 - Se * d2
call[:] = r # temporary `r` is necessary for faster `put` computation
put[:] = r - P + Se
def black_scholes(nopt, price, strike, t, rate, vol, call, put):
# offload blackscholes computation to GPU (toggle level0 or opencl driver).
with dpctl.device_context(base_bs_erf_graph.get_device_selector()):
black_scholes_kernel(nopt, price, strike, t, rate, vol, call, put)
# call the run function to setup input data and performance data infrastructure
base_bs_erf_graph.run("Numba@jit-numpy", black_scholes)
```
### Build and Run
Select the cell below and click run ▶ to compile and execute the code:
```
! chmod 755 q; chmod 755 run_black_sholes_numpy_graph.sh; if [ -x "$(command -v qsub)" ]; then ./q run_black_sholes_numpy_graph.sh; else ./run_black_sholes_numpy_graph.sh; fi
```
# Plot GPU Results
The code below selects options with strike prices in the range 23.0 to 23.5 and plots their calls and puts versus the current price, as shown in the graph below.
### View the results
Select the cell below and click run ▶ to view the graph:
```
from matplotlib import pyplot as plt
import numpy as np
def read_dictionary(fn):
import pickle
# Load data (deserialize)
with open(fn, 'rb') as handle:
dictionary = pickle.load(handle)
return dictionary
resultsDict = read_dictionary('resultsDict.pkl')
limit = 10
call = resultsDict['call']
put = resultsDict['put']
price = resultsDict['price']
strike = resultsDict['strike']
plt.style.use('dark_background')
priceRange = [23.0, 23.5]
# strikeIndex = np.where((price >= priceRange[0]) & (price < priceRange[1]) )[0]
# plt.scatter(strike[strikeIndex], put[strikeIndex], c= 'r', s = 2, alpha = 1, label = 'puts')
# plt.scatter(strike[strikeIndex], call[strikeIndex], c= 'b', s = 2, alpha = 1, label = 'calls')
# plt.title('Calls and Puts verses Strike for a current price in range {}'.format(priceRange))
# plt.ylabel('Option Price [$]')
# plt.xlabel('Strike Price [$]')
# plt.legend()
# plt.grid()
strikeRange = [23.0, 23.5]
strikeIndex = np.where((strike >= strikeRange[0]) & (strike < strikeRange[1]) )[0]
plt.scatter(price[strikeIndex], put[strikeIndex], c= 'r', s = 2, alpha = 1, label = 'puts')
plt.scatter(price[strikeIndex], call[strikeIndex], c= 'b', s = 2, alpha = 1, label = 'calls')
plt.title('Calls and Puts versus Current price for a strike price in range {}'.format(strikeRange))
plt.ylabel('Option Price [$]')
plt.xlabel('Current Price [$]')
plt.legend()
plt.grid()
```
*If the Jupyter cells are not responsive or if they error out when you compile the code samples, please restart the Jupyter Kernel: "Kernel->Restart Kernel and Clear All Outputs" and compile the code samples again.*
## Summary
In this module you will have learned the following:
* Numba implementation of Black Scholes targeting a CPU and GPU using Numba JIT
* Numba-DPPY implementation of Black Scholes on a CPU and GPU using the kernel approach
* Numba-DPPY implementation of Black Scholes on a GPU using Numpy approach
```
# Pandas for managing datasets
import pandas as pd
# seaborn for plotting and styling
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
# read dataset
tips = sns.load_dataset("tips")
# a preview of the data
tips.head()
# make a copy of the data to create the graphs of
df = tips.copy()
df
# create a column to determine tip percentage
df["tip_percentage"] = df["tip"] / df["total_bill"]
# This plot is a histogram of tip percentages
# The hue argument allows the color to be changed to reflect the categories
sns.histplot(x='tip_percentage', binwidth = 0.05, hue = 'sex', data = df)
# Scatterplot of total bill and tip
# This shows how you can set the style to change the visual style
# The default relplot is a scatterplot
sns.set(style = 'darkgrid')
sns.relplot( x = 'total_bill', y = 'tip', hue = 'smoker', data = df)
# Scatterplot Gender
# This scatterplot is the same with the addition of the size argument
# The size argument is time here
sns.set(style = 'darkgrid')
gender = sns.relplot( x = 'total_bill', y = 'tip', hue = 'sex', size = 'time', data = df)
# Catplot is for categorical data
# The default catplot is a strip plot
sns.catplot(x = 'day', y = 'total_bill', data = df)
# This catplot shows that with the addition of the kind argument,
# we can alter it to another cat plot, in this case, a barplot
sns.catplot(x = 'time', y = 'total_bill', data= df, kind='bar')
# A violin plot is another way of visualizing categorical data
sns.violinplot(x = 'day', y = 'total_bill', hue = 'sex', data = df)
# This violoin plot shows the same data above
# With different arguments, different visuals are created
# Here we set bw to 0.25 and split to True
sns.violinplot(x = 'day', y = 'total_bill', hue = 'sex', bw = .25, split = True, data = df)
# This shows how we can alter the color palette of a violin plot
sns.violinplot(x = 'day', y = 'total_bill', hue = 'sex', bw = .25, split = True, palette = 'Greens', data = df)
# Pairplots allow visualization of many distributions at once
# Seaborn determines the visualizations and the variables to create
# This allows the user to quickly view distributions very easily
sns.set_theme(style="ticks")
sns.pairplot(df, hue='sex')
# This swarm plot is similar to a strip plot but does not allow points to overlap
# The style is whitegrid
sns.swarmplot(y='total_bill', x = 'day', data = df)
sns.set_style('whitegrid')
# Seaborn can also create heatmaps
# This heatmap shows correlation between variables
sns.heatmap(df.corr(), annot = True, cmap = 'viridis')
# This heatmap requires creation of a pivot table
# This shows that Seaborn can work with pivot tables
pivot = df.pivot_table(index = ['day'], columns =['size'], values = 'tip_percentage', aggfunc = np.average)
sns.heatmap(pivot)
# This plot shows Seaborn's ability to create side by side visuals
# The col argument allows for this
pal = dict(Male='#6495ED', Female = '#F08080')
g = sns.lmplot(x='total_bill', y = 'tip_percentage', col = 'sex', hue='sex', data =df,
palette=pal, y_jitter=.02, logistic = True, truncate = True)
# This plot is an example of how you can overlay visualizations
# This is a boxplot with a stripplot on top
sns.stripplot(x='tip', y = 'day', data = df, jitter = True, dodge = True, linewidth=1,
edgecolor = 'gray', palette = 'gray')
colors = ['#78C850', '#F08030', '#6890F0','#F8D030']
sns.boxplot(x='tip', y='day',data = df, fliersize=0, palette = colors)
```
TSG097 - Get BDC stateful sets (Kubernetes)
===========================================
Description
-----------
Steps
-----
### Common functions
Define helper functions used in this notebook.
```
# Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows
import sys
import os
import re
import json
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
retry_hints = {} # Output in stderr known to be transient, therefore automatically retry
error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help
install_hint = {} # The SOP to help install the executable if it cannot be found
first_run = True
rules = None
debug_logging = False
def run(cmd, return_output=False, no_output=False, retry_count=0):
"""Run shell command, stream stdout, print stderr and optionally return output
NOTES:
1. Commands that need this kind of ' quoting on Windows e.g.:
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}
Need to actually pass in as '"':
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name}
The ' quote approach, although correct when pasting into Windows cmd, will hang at the line:
`iter(p.stdout.readline, b'')`
The shlex.split call does the right thing for each platform, just use the '"' pattern for a '
"""
MAX_RETRIES = 5
output = ""
retry = False
global first_run
global rules
if first_run:
first_run = False
rules = load_rules()
# When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see:
#
# ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')
#
if platform.system() == "Windows" and cmd.startswith("azdata sql query"):
cmd = cmd.replace("\n", " ")
# shlex.split is required on bash and for Windows paths with spaces
#
cmd_actual = shlex.split(cmd)
# Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
#
user_provided_exe_name = cmd_actual[0].lower()
# When running python, use the python in the ADS sandbox ({sys.executable})
#
if cmd.startswith("python "):
cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)
# On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
# with:
#
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
#
# Setting it to a default value of "en_US.UTF-8" enables pip install to complete
#
if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
os.environ["LC_ALL"] = "en_US.UTF-8"
# When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`
#
if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ:
cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc")
    # To aid supportability, determine which binary file will actually be executed on the machine
#
which_binary = None
# Special case for CURL on Windows. The version of CURL in Windows System32 does not work to
# get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance
# of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost
# always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
# look for the 2nd installation of CURL in the path)
if platform.system() == "Windows" and cmd.startswith("curl "):
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, "curl.exe")
if os.path.exists(p) and os.access(p, os.X_OK):
if p.lower().find("system32") == -1:
cmd_actual[0] = p
which_binary = p
break
# Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
# seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound)
#
# NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
#
if which_binary == None:
which_binary = shutil.which(cmd_actual[0])
if which_binary == None:
if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
else:
cmd_actual[0] = which_binary
start_time = datetime.datetime.now().replace(microsecond=0)
print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
print(f" cwd: {os.getcwd()}")
# Command-line tools such as CURL and AZDATA HDFS commands output
# scrolling progress bars, which causes Jupyter to hang forever, to
# workaround this, use no_output=True
#
    # Work around an infinite hang when a notebook generates a non-zero return code, break out, and do not wait
#
wait = True
try:
if no_output:
p = Popen(cmd_actual)
else:
p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
with p.stdout:
for line in iter(p.stdout.readline, b''):
line = line.decode()
if return_output:
output = output + line
else:
if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
regex = re.compile(' "(.*)"\: "(.*)"')
match = regex.match(line)
if match:
if match.group(1).find("HTML") != -1:
display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
else:
display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))
wait = False
break # otherwise infinite hang, have not worked out why yet.
else:
print(line, end='')
if rules is not None:
apply_expert_rules(line)
if wait:
p.wait()
except FileNotFoundError as e:
if install_hint is not None:
display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e
exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()
if not no_output:
for line in iter(p.stderr.readline, b''):
try:
line_decoded = line.decode()
except UnicodeDecodeError:
# NOTE: Sometimes we get characters back that cannot be decoded(), e.g.
#
# \xa0
#
# For example see this in the response from `az group create`:
#
# ERROR: Get Token request returned http error: 400 and server
# response: {"error":"invalid_grant",# "error_description":"AADSTS700082:
# The refresh token has expired due to inactivity.\xa0The token was
# issued on 2018-10-25T23:35:11.9832872Z
#
# which generates the exception:
#
# UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte
#
print("WARNING: Unable to decode stderr line, printing raw bytes:")
print(line)
line_decoded = ""
pass
else:
# azdata emits a single empty line to stderr when doing an hdfs cp, don't
# print this empty "ERR:" as it confuses.
#
if line_decoded == "":
continue
print(f"STDERR: {line_decoded}", end='')
if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
exit_code_workaround = 1
# inject HINTs to next TSG/SOP based on output in stderr
#
if user_provided_exe_name in error_hints:
for error_hint in error_hints[user_provided_exe_name]:
if line_decoded.find(error_hint[0]) != -1:
display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))
# apply expert rules (to run follow-on notebooks), based on output
#
if rules is not None:
apply_expert_rules(line_decoded)
# Verify if a transient error, if so automatically retry (recursive)
#
if user_provided_exe_name in retry_hints:
for retry_hint in retry_hints[user_provided_exe_name]:
if line_decoded.find(retry_hint) != -1:
if retry_count < MAX_RETRIES:
print(f"RETRY: {retry_count} (due to: {retry_hint})")
retry_count = retry_count + 1
output = run(cmd, return_output=return_output, retry_count=retry_count)
if return_output:
return output
else:
return
elapsed = datetime.datetime.now().replace(microsecond=0) - start_time
# WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
# don't wait here, if success known above
#
if wait:
if p.returncode != 0:
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
else:
if exit_code_workaround !=0 :
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')
print(f'\nSUCCESS: {elapsed}s elapsed.\n')
if return_output:
return output
def load_json(filename):
"""Load a json file from disk and return the contents"""
with open(filename, encoding="utf8") as json_file:
return json.load(json_file)
def load_rules():
"""Load any 'expert rules' from the metadata of this notebook (.ipynb) that should be applied to the stderr of the running executable"""
# Load this notebook as json to get access to the expert rules in the notebook metadata.
#
try:
j = load_json("tsg097-get-statefulsets.ipynb")
except:
pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename?
else:
if "metadata" in j and \
"azdata" in j["metadata"] and \
"expert" in j["metadata"]["azdata"] and \
"expanded_rules" in j["metadata"]["azdata"]["expert"]:
rules = j["metadata"]["azdata"]["expert"]["expanded_rules"]
rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first.
# print (f"EXPERT: There are {len(rules)} rules to evaluate.")
return rules
def apply_expert_rules(line):
"""Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so
inject a 'HINT' to the follow-on SOP/TSG to run"""
global rules
for rule in rules:
notebook = rule[1]
cell_type = rule[2]
output_type = rule[3] # i.e. stream or error
output_type_name = rule[4] # i.e. ename or name
output_type_value = rule[5] # i.e. SystemExit or stdout
details_name = rule[6] # i.e. evalue or text
expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it!
if debug_logging:
print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.")
if re.match(expression, line, re.DOTALL):
if debug_logging:
print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook))
match_found = True
display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.'))
print('Common functions defined successfully.')
# Hints for binary (transient fault) retry, (known) error and install guide
#
retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond']}
error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']]}
install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb']}
```
### Get the Kubernetes namespace for the big data cluster
Get the namespace of the Big Data Cluster using the kubectl command line
interface.
**NOTE:**
If there is more than one Big Data Cluster in the target Kubernetes
cluster, then either:
- set \[0\] to the correct value for the big data cluster.
- set the environment variable AZDATA\_NAMESPACE, before starting
Azure Data Studio.
```
# Place Kubernetes namespace name for BDC into 'namespace' variable
if "AZDATA_NAMESPACE" in os.environ:
namespace = os.environ["AZDATA_NAMESPACE"]
else:
try:
namespace = run(f'kubectl get namespace --selector=MSSQL_CLUSTER -o jsonpath={{.items[0].metadata.name}}', return_output=True)
except:
from IPython.display import Markdown
print(f"ERROR: Unable to find a Kubernetes namespace with label 'MSSQL_CLUSTER'. SQL Server Big Data Cluster Kubernetes namespaces contain the label 'MSSQL_CLUSTER'.")
display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
raise
print(f'The SQL Server Big Data Cluster Kubernetes namespace is: {namespace}')
```
### Run kubectl to display the Stateful sets
```
run(f"kubectl get statefulset -n {namespace} -o wide")
print('Notebook execution complete.')
```
| github_jupyter |
```
## Importing the libraries
import pandas as pd
import re
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from scipy.sparse import hstack
from sklearn.metrics import f1_score
train = pd.read_csv('trainl.csv')
test = pd.read_csv('testl.csv')
print(train.shape); print(test.shape)
## This is a typical sentiment analysis problem.
# We are given customer tweets related to tech firms that manufacture mobiles and laptops.
# The task is to identify tweets that express negative sentiment towards such companies or products.
train.label.value_counts() #Most of the tweets have positive sentiments.
# train.isna().sum()
## Clearly there are no missing values.
## Data Preprocessing
## Not using deep learning models; using a simple ML algorithm - Logistic Regression.
# So we will simply use frequency-based embeddings like tfidf or count vectorizer.
def clean_text(text):
    # first, convert all the text to lower case
text = text.lower()
text = text.replace('$&@*#', 'bakwas')
text = text.replace('f**k', 'fuck')
text = text.replace('@$$hole', 'asshole')
text = text.replace('f#%*king', 'fucking')
text = text.replace(':@', 'bakwas')
return text
train['tweet']=train['tweet'].apply(lambda x: clean_text(x))
test['tweet']=test['tweet'].apply(lambda x: clean_text(x))
## Since a twitter ID can be '@' followed by some alphanumeric characters, we need to remove them.
# They are just IDs and won't play any role in determining the sentiments.
def remove_user(text):
r = re.findall('@[\w]*', text)
for i in r:
text = re.sub(i, '', text)
return text
train.tweet = train.tweet.apply(lambda x: remove_user(x))
test.tweet = test.tweet.apply(lambda x: remove_user(x))
## Similarly, there are many URLs which we need to remove, as they won't play any role in the sentiments.
def remove_url(text):
text = re.sub('(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?', '', text)
return text
train.tweet = train.tweet.apply(lambda x: remove_url(x))
test.tweet = test.tweet.apply(lambda x: remove_url(x))
## Now we will split our training data into train and validation so that we can do proper regularisation.
X_train, X_valid, y_train, y_valid = train_test_split(train['tweet'], train['label'], test_size = 0.1,
random_state=12)
## Part1 -- using count vectoriser and Naive Bayes Algorithm.
vect = CountVectorizer().fit(X_train)
X_train_vectorized = vect.transform(X_train)
model = MultinomialNB(alpha = 0.0925)
model.fit(X_train_vectorized, y_train)
predictions = model.predict(vect.transform(X_valid))
## Clearly our submissions are evaluated on the basis of F1Score
print(f1_score(y_valid, predictions))
## Part2 -- using tfidf vectorizer and Naive Bayes Algorithm.
tfvect = TfidfVectorizer().fit(X_train)
X_train_vectorized = tfvect.transform(X_train)
model = MultinomialNB(alpha = 0.0955)
model.fit(X_train_vectorized, y_train)
predictions = model.predict(tfvect.transform(X_valid))
print(f1_score(y_valid, predictions))
## Part3 -- using count vectoriser and Logistic Regression Algorithm.
vect = CountVectorizer(min_df=2, ngram_range=(1,3)).fit(X_train)
X_train_vectorized = vect.transform(X_train)
model = LogisticRegression(C = 1.6, solver = 'sag')
model.fit(X_train_vectorized, y_train)
predictions = model.predict(vect.transform(X_valid))
print(f1_score(y_valid, predictions))
## Part4 -- using tfidf vectorizer and Logistic Regression Algorithm.
## Word Level tf idf vectorizer.
text = pd.concat([train.tweet, test.tweet])
Tfword_vectorizer = TfidfVectorizer(sublinear_tf=True,strip_accents='unicode',analyzer='word',ngram_range=(1, 3),max_features=10000).fit(text)
word_train_vectorized = Tfword_vectorizer.transform(X_train)
word_valid_vectorized = Tfword_vectorizer.transform(X_valid)
word_test_vectorized = Tfword_vectorizer.transform(test.tweet)
## Character level tf idf vectoriser.
Tfchar_vectorizer = TfidfVectorizer(sublinear_tf=True,strip_accents='unicode',analyzer='char',ngram_range=(1, 15),max_features=50000).fit(text)
char_train_vectorized = Tfchar_vectorizer.transform(X_train)
char_valid_vectorized = Tfchar_vectorizer.transform(X_valid)
char_test_vectorized = Tfchar_vectorizer.transform(test.tweet)
## Horizontally stacking the tf idf vectorizers.
train_features = hstack([char_train_vectorized, word_train_vectorized])
valid_features = hstack([char_valid_vectorized, word_valid_vectorized])
test_features = hstack([char_test_vectorized, word_test_vectorized])
model = LogisticRegression(max_iter=300,C=2.0,solver='sag')
model.fit(train_features, y_train)
predictions = model.predict(valid_features)
pred_y = model.predict(test_features)
print(f1_score(y_valid, predictions))
```
| github_jupyter |
## Trajectory equations:
```
%matplotlib inline
import matplotlib.pyplot as plt
from sympy import *
init_printing()
Bx, By, Bz, B = symbols("B_x, B_y, B_z, B")
x, y, z = symbols("x, y, z" )
x_0, y_0, z_0 = symbols("x_0, y_0, z_0")
vx, vy, vz, v = symbols("v_x, v_y, v_z, v")
vx_0, vy_0, vz_0 = symbols("v_x0, v_y0, v_z0")
t = symbols("t")
q, m = symbols("q, m")
c, eps0 = symbols("c, epsilon_0")
```
The equation of motion:
$$
\begin{gather*}
m \frac{d^2 \vec{r} }{dt^2} = \frac{q}{c} \left[ \vec{v} \times \vec{B} \right]
\end{gather*}
$$
For the case of a uniform magnetic field along the $z$-axis:
$$ \vec{B} = B_z = B, \quad B_x = 0, \quad B_y = 0 $$
In Cartesian coordinates:
```
eq_x = Eq( Derivative(x(t), t, 2), q / c / m * Bz * Derivative(y(t),t) )
eq_y = Eq( Derivative(y(t), t, 2), - q / c / m * Bz * Derivative(x(t),t) )
eq_z = Eq( Derivative(z(t), t, 2), 0 )
display( eq_x, eq_y, eq_z )
```
Motion is uniform along the $z$-axis:
```
z_eq = dsolve( eq_z, z(t) )
vz_eq = Eq( z_eq.lhs.diff(t), z_eq.rhs.diff(t) )
display( z_eq, vz_eq )
```
The constants of integration can be found from the initial conditions $z(0) = z_0$ and $v_z(0) = v_{z0}$:
```
c1_c2_system = []
initial_cond_subs = [(t, 0), (z(0), z_0), (diff(z(t),t).subs(t,0), vz_0) ]
c1_c2_system.append( z_eq.subs( initial_cond_subs ) )
c1_c2_system.append( vz_eq.subs( initial_cond_subs ) )
c1, c2 = symbols("C1, C2")
c1_c2 = solve( c1_c2_system, [c1, c2] )
c1_c2
```
So that
```
z_sol = z_eq.subs( c1_c2 )
vz_sol = vz_eq.subs( c1_c2 ).subs( [( diff(z(t),t), vz(t) ) ] )
display( z_sol, vz_sol )
```
For some reason I have not been able to solve the system of differential equations for $x$ and $y$ directly
with Sympy's `dsolve` function:
```
#dsolve( [eq_x, eq_y], [x(t),y(t)] )
```
It is necessary to resort to a manual solution. The method is to differentiate one of the equations with respect to
time and substitute the other into it. This results in oscillator-type second-order equations for $v_x$ and $v_y$, whose solutions are known. Integrating once more, it is possible to obtain the laws of motion $x(t)$ and $y(t)$.
```
v_subs = [ (Derivative(x(t),t), vx(t)), (Derivative(y(t),t), vy(t)) ]
eq_vx = eq_x.subs( v_subs )
eq_vy = eq_y.subs( v_subs )
display( eq_vx, eq_vy )
eq_d2t_vx = Eq( diff(eq_vx.lhs,t), diff(eq_vx.rhs,t))
eq_d2t_vx = eq_d2t_vx.subs( [(eq_vy.lhs, eq_vy.rhs)] )
display( eq_d2t_vx )
```
The solution of the last equation is
```
C1, C2, Omega = symbols( "C1, C2, Omega" )
vx_eq = Eq( vx(t), C1 * cos( Omega * t ) + C2 * sin( Omega * t ))
display( vx_eq )
omega_eq = Eq( Omega, Bz * q / c / m )
display( omega_eq )
```
where $\Omega$ is the cyclotron frequency.
```
display( vx_eq )
vy_eq = Eq( vy(t), solve( Eq( diff(vx_eq.rhs,t), eq_vx.rhs ), ( vy(t) ) )[0] )
vy_eq = vy_eq.subs( [(Omega*c*m / Bz / q, omega_eq.rhs * c * m / Bz / q)]).simplify()
display( vy_eq )
```
For initial conditions $v_x(0) = v_{x0}, v_y(0) = v_{y0}$:
```
initial_cond_subs = [(t,0), (vx(0), vx_0), (vy(0), vy_0) ]
vx0_eq = vx_eq.subs( initial_cond_subs )
vy0_eq = vy_eq.subs( initial_cond_subs )
display( vx0_eq, vy0_eq )
c1_c2 = solve( [vx0_eq, vy0_eq] )
c1_c2_subs = [ ("C1", c1_c2[c1]), ("C2", c1_c2[c2]) ]
vx_eq = vx_eq.subs( c1_c2_subs )
vy_eq = vy_eq.subs( c1_c2_subs )
display( vx_eq, vy_eq )
```
These equations can be integrated to obtain the laws of motion:
```
x_eq = vx_eq.subs( vx(t), diff(x(t),t))
x_eq = dsolve( x_eq )
y_eq = vy_eq.subs( vy(t), diff(y(t),t))
y_eq = dsolve( y_eq ).subs( C1, C2 )
display( x_eq, y_eq )
```
For nonzero $\Omega$:
```
x_eq = x_eq.subs( [(Omega, 123)] ).subs( [(123, Omega)] ).subs( [(Rational(1,123), 1/Omega)] )
y_eq = y_eq.subs( [(Omega, 123)] ).subs( [(123, Omega)] ).subs( [(Rational(1,123), 1/Omega)] )
display( x_eq, y_eq )
```
For initial conditions $x(0) = x_0, y(0) = y_0$:
```
initial_cond_subs = [(t,0), (x(0), x_0), (y(0), y_0) ]
x0_eq = x_eq.subs( initial_cond_subs )
y0_eq = y_eq.subs( initial_cond_subs )
display( x0_eq, y0_eq )
c1_c2 = solve( [x0_eq, y0_eq] )
c1_c2_subs = [ ("C1", c1_c2[0][c1]), ("C2", c1_c2[0][c2]) ]
x_eq = x_eq.subs( c1_c2_subs )
y_eq = y_eq.subs( c1_c2_subs )
display( x_eq, y_eq )
x_eq = x_eq.simplify()
y_eq = y_eq.simplify()
x_eq = x_eq.expand().collect(Omega)
y_eq = y_eq.expand().collect(Omega)
display( x_eq, y_eq )
```
Finally
```
display( x_eq, y_eq, z_sol )
display( vx_eq, vy_eq, vz_sol )
display( omega_eq )
```
| github_jupyter |
# Manpower Planning
Level: Advanced
## Objective and Prerequisites
This model is an example of a staffing problem. In staffing planning problems, choices must be made about recruitment, training, layoffs (redundancy), and the scheduling of working hours. Staffing problems are widespread in both manufacturing and service industries.
### What You Will Learn
In this example, we will model and solve a manpower planning problem. We have three types of workers with different skills levels. For each year in the planning horizon, the forecasted number of required workers with specific skills is given. It is possible to recruit new people, train workers to improve their skills, or shift them to a part-time working arrangement. The aim is to create an optimal multi-period operation plan that achieves one of the following two objectives: minimizing the total number of layoffs over the whole horizon or minimizing total costs.
More information on this type of model can be found in example #5 of the fifth edition of Model Building in Mathematical Programming, by H. Paul Williams on pages 256-257 and 303-304.
This modeling example is at the advanced level, where we assume that you know Python and the Gurobi Python API and that you have advanced knowledge of building mathematical optimization models. Typically, the objective function and/or constraints of these examples are complex or require advanced features of the Gurobi Python API.
**Note:** You can download the repository containing this and other examples by clicking [here](https://github.com/Gurobi/modeling-examples/archive/master.zip). In order to run this Jupyter Notebook properly, you must have a Gurobi license. If you do not have one, you can request an [evaluation license](https://www.gurobi.com/downloads/request-an-evaluation-license/?utm_source=Github&utm_medium=website_JupyterME&utm_campaign=CommercialDataScience) as a *commercial user*, or download a [free license](https://www.gurobi.com/academia/academic-program-and-licenses/?utm_source=Github&utm_medium=website_JupyterME&utm_campaign=AcademicDataScience) as an *academic user*.
---
## Problem Description
A company is changing how it runs its business, and therefore its staffing needs are expected to change.
Through the purchase of new machinery, it is expected that there will be less need for unskilled labor and more need for skilled and semi-skilled labor. In addition, a lower sales forecast — driven by an economic slowdown that is predicted to happen in the next year — is expected to further reduce labor needs across all categories.
The forecast for labor needs over the next three years is as follows:
| <i></i> | Unskilled | Semi-skilled | Skilled |
| --- | --- | --- | --- |
| Current Strength | 2000 | 1500 | 1000 |
| Year 1 | 1000 | 1400 | 1000 |
| Year 2 | 500 | 2000 | 1500 |
| Year 3 | 0 | 2500 | 2000 |
The company needs to determine the following for each of the next three years:
- Recruitment
- Retraining
- Layoffs (redundancy)
- Part-time vs. full-time employees
It is important to note that labor is subject to a certain level of natural attrition each year. The rate of attrition is relatively high in the first year after a new employee is hired and relatively low in subsequent years. The expected attrition rates are as follows:
| <i></i> | Unskilled (%)| Semi-skilled (%) | Skilled (%) |
| --- | --- | --- | --- |
| $< 1$ year of service | 25 | 20 | 10 |
| $\geq 1$ year of service | 10 | 5 | 5 |
All of the current workers have been with the company for at least one year.
### Recruitment
Each year, it is possible to hire a limited number of employees in each classification from outside the company as follows:
| Unskilled | Semi-skilled | Skilled |
| --- | --- | --- |
| 500 | 800 | 500 |
### Retraining
Each year, it is possible to train up to 200 unskilled workers to make them into semi-skilled workers. This training costs the company $\$400$ per worker.
In addition, it is possible to train semi-skilled workers to make them into skilled workers. However, this number cannot exceed 25% of the current skilled labor force, and this training costs $\$500$ per worker.
Lastly, workers can be downgraded to a lower skill level. However, 50% of the downgraded workers will leave the company, in addition to the natural attrition described above.
### Layoffs
Each laid-off worker is entitled to a separation payment at the rate of $\$200$ per unskilled worker and $\$500$ per semi-skilled or skilled worker.
### Excess Employees
It is possible to have workers in excess of the actual number needed, up to 150 workers in total in any given year, but this will result in the following additional cost per excess employee per year.
| Unskilled | Semi-skilled | Skilled |
| --- | --- | --- |
| $\$1500$ | $\$2000$ | $\$3000$ |
### Part-time Workers
Up to 50 employees of each skill level can be assigned to part-time work. The cost of doing so (per employee, per year) is as follows:
| Unskilled | Semi-skilled | Skilled |
| --- | --- | --- |
| $\$500$ | $\$400$ | $\$400$ |
**Note:** A part-time employee is half as productive as a full-time employee.
If the company’s objective is to minimize layoffs, what plan should they adopt in order to do this?
If their objective is to minimize costs, how much could they further reduce costs?
How can they determine the annual savings possible across each job?
---
## Model Formulation
### Sets and Indices
$t \in \text{Years}=\{1,2,3\}$: Set of years.
$s \in \text{Skills}=\{s_1: \text{unskilled},s_2: \text{semi_skilled},s_3: \text{skilled}\}$: Set of skills.
### Parameters
$\text{rookie_attrition} \in [0,1] \subset \mathbb{R}^+$: Percentage of workers who leave within the first year of service.
$\text{veteran_attrition} \in [0,1] \subset \mathbb{R}^+$: Percentage of workers who leave after the first year of service.
$\text{demoted_attrition} \in [0,1] \subset \mathbb{R}^+$: Percentage of workers who leave the company after a demotion.
$\text{parttime_cap} \in [0,1] \subset \mathbb{R}^+$: Productivity of part-time workers with respect to full-time workers.
$\text{max_train_unskilled} \in \mathbb{N}$: Maximum number of unskilled workers that can be trained on any given year.
$\text{max_train_semiskilled} \in [0,1] \subset \mathbb{R}^+$: Maximum proportion of semi-skilled workers (w.r.t. skilled ones) that can be trained on any given year.
$\text{max_parttime} \in \mathbb{N}$: Maximum number of part-time workers of each skill at any given year.
$\text{max_overmanning} \in \mathbb{N}$: Maximum number of overmanned workers at any given year.
$\text{max_hiring}_s \in \mathbb{N}$: Maximum number of workers of skill $s$ that can be hired any given year.
$\text{training_cost}_s \in \mathbb{R}^+$: Cost for training a worker of skill $s$ to the next level.
$\text{layoff_cost}_s \in \mathbb{R}^+$: Cost for laying off a worker of skill $s$.
$\text{parttime_cost}_s \in \mathbb{R}^+$: Cost for assigning a worker of skill $s$ to part-time work.
$\text{overmanning_cost}_s \in \mathbb{R}^+$: Yearly cost for having excess manpower of skill $s$.
$\text{curr_workforce}_s \in \mathbb{N}$: Current manpower of skill $s$ at the beginning of the planning horizon.
$\text{demand}_{t,s} \in \mathbb{N}$: Required manpower of skill $s$ in year $t$.
### Decision Variables
$\text{hire}_{t,s} \in [0,\text{max_hiring}_s] \subset \mathbb{R}^+$: Number of workers of skill $s$ to hire in year $t$.
$\text{part_time}_{t,s} \in [0,\text{max_parttime}] \subset \mathbb{R}^+$: Number of part-time workers of skill $s$ working in year $t$.
$\text{workforce}_{t,s} \in \mathbb{R}^+$: Number of workers of skill $s$ that are available in year $t$.
$\text{layoff}_{t,s} \in \mathbb{R}^+$: Number of workers of skill $s$ that are laid off in year $t$.
$\text{excess}_{t,s} \in \mathbb{R}^+$: Number of workers of skill $s$ that are overmanned in year $t$.
$\text{train}_{t,s,s'} \in \mathbb{R}^+$: Number of workers of skill $s$ to retrain to skill $s'$ in year $t$.
### Objective Function
- **Layoffs:** Minimize the total layoffs during the planning horizon.
\begin{equation}
\text{Minimize} \quad Z = \sum_{t \in \text{Years}}\sum_{s \in \text{Skills}}{\text{layoff}_{t,s}}
\end{equation}
- **Cost:** Minimize the total cost (in USD) incurred by training, overmanning, part-time workers, and layoffs in the planning horizon.
\begin{equation}
\text{Minimize} \quad W = \sum_{t \in \text{Years}}{\{\text{training_cost}_{s_1}*\text{train}_{t,s1,s2} + \text{training_cost}_{s_2}*\text{train}_{t,s2,s3}\}}
\end{equation}
\begin{equation}
+ \sum_{t \in \text{Years}}\sum_{s \in \text{Skills}}{\{\text{parttime_cost}*\text{part_time}_{t,s} + \text{layoff_cost}_s*\text{layoff}_{t,s} + \text{overmanning_cost}_s*\text{excess}_{t,s}\}}
\end{equation}
### Constraints
- **Initial Balance:** Workforce $s$ available in year $t=1$ is equal to the workforce of the previous year, recent hires, promoted and demoted workers (after accounting for attrition), minus layoffs and transferred workers.
\begin{equation}
\text{workforce}_{1,s} = (1-\text{veteran_attrition}_s)*\text{curr_workforce}_s + (1-\text{rookie_attrition}_s)*\text{hire}_{1,s}
\end{equation}
\begin{equation}
+ \sum_{s' \in \text{Skills} | s' < s}{\{(1-\text{veteran_attrition})*\text{train}_{1,s',s} - \text{train}_{1,s,s'}\}}
\end{equation}
\begin{equation}
+ \sum_{s' \in \text{Skills} | s' > s}{\{(1-\text{demoted_attrition})*\text{train}_{1,s',s} - \text{train}_{1,s,s'}\}} - \text{layoff}_{1,s} \qquad \forall s \in \text{Skills}
\end{equation}
- **Balance:** Workforce $s$ available in year $t > 1$ is equal to the workforce of the previous year, recent hires, promoted and demoted workers (after accounting for attrition), minus layoffs and transferred workers.
\begin{equation}
\text{workforce}_{t,s} = (1-\text{veteran_attrition}_s)*\text{workforce}_{t-1,s} + (1-\text{rookie_attrition}_s)*\text{hire}_{t,s}
\end{equation}
\begin{equation}
+ \sum_{s' \in \text{Skills} | s' < s}{\{(1-\text{veteran_attrition})*\text{train}_{t,s',s} - \text{train}_{t,s,s'}\}}
\end{equation}
\begin{equation}
+ \sum_{s' \in \text{Skills} | s' > s}{\{(1-\text{demoted_attrition})*\text{train}_{t,s',s} - \text{train}_{t,s,s'}\}} - \text{layoff}_{t,s} \quad \forall (t > 1,s) \in \text{Years} \times \text{Skills}
\end{equation}
- **Unskilled Training:** Unskilled workers trained in year $t$ cannot exceed the maximum allowance. Unskilled workers cannot be immediately transformed into skilled workers.
\begin{equation}
\text{train}_{t,s_1,s_2} \leq 200 \quad \forall t \in \text{Years}
\end{equation}
\begin{equation}
\text{train}_{t,s_1,s_3} = 0 \quad \forall t \in \text{Years}
\end{equation}
- **Semi-skilled Training:** Semi-skilled workers trained in year $t$ cannot exceed the maximum allowance.
\begin{equation}
\text{train}_{t,s_2,s_3} \leq 0.25*\text{workforce}_{t,s_3} \quad \forall t \in \text{Years}
\end{equation}
- **Overmanning:** Excess workers in year $t$ cannot exceed the maximum allowance.
\begin{equation}
\sum_{s \in \text{Skills}}{\text{excess}_{t,s}} \leq \text{max_overmanning} \quad \forall t \in \text{Years}
\end{equation}
- **Demand:** Workforce $s$ available in year $t$ equals the required number of workers plus the excess workers and the part-time workers.
\begin{equation}
\text{workforce}_{t,s} = \text{demand}_{t,s} + \text{excess}_{t,s} + \text{parttime_cap}*\text{part_time}_{t,s} \quad \forall (t,s) \in \text{Years} \times \text{Skills}
\end{equation}
---
## Python Implementation
We import the Gurobi Python Module and other Python libraries.
```
import gurobipy as gp
import numpy as np
import pandas as pd
from gurobipy import GRB
# tested with Python 3.7.0 & Gurobi 9.0
```
## Input Data
We define all the input data of the model.
```
# Parameters
years = [1, 2, 3]
skills = ['s1', 's2', 's3']
curr_workforce = {'s1': 2000, 's2': 1500, 's3': 1000}
demand = {
(1, 's1'): 1000,
(1, 's2'): 1400,
(1, 's3'): 1000,
(2, 's1'): 500,
(2, 's2'): 2000,
(2, 's3'): 1500,
(3, 's1'): 0,
(3, 's2'): 2500,
(3, 's3'): 2000
}
rookie_attrition = {'s1': 0.25, 's2': 0.20, 's3': 0.10}
veteran_attrition = {'s1': 0.10, 's2': 0.05, 's3': 0.05}
demoted_attrition = 0.50
max_hiring = {
(1, 's1'): 500,
(1, 's2'): 800,
(1, 's3'): 500,
(2, 's1'): 500,
(2, 's2'): 800,
(2, 's3'): 500,
(3, 's1'): 500,
(3, 's2'): 800,
(3, 's3'): 500
}
max_overmanning = 150
max_parttime = 50
parttime_cap = 0.50
max_train_unskilled = 200
max_train_semiskilled = 0.25
training_cost = {'s1': 400, 's2': 500}
layoff_cost = {'s1': 200, 's2': 500, 's3': 500}
parttime_cost = {'s1': 500, 's2': 400, 's3': 400}
overmanning_cost = {'s1': 1500, 's2': 2000, 's3': 3000}
```
## Model Deployment
We create a model and the variables. For each of the three skill levels and for each year, we will create variables for the number of workers that get recruited, transferred into part-time work, are available as workers, are laid off, or are overmanned. For each pair of skill levels and each year, we have a variable for the number of workers that get retrained to a higher/lower skill level. The numbers of part-time workers and of new recruits are bounded from above.
```
manpower = gp.Model('Manpower planning')
hire = manpower.addVars(years, skills, ub=max_hiring, name="Hire")
part_time = manpower.addVars(years, skills, ub=max_parttime,
name="Part_time")
workforce = manpower.addVars(years, skills, name="Available")
layoff = manpower.addVars(years, skills, name="Layoff")
excess = manpower.addVars(years, skills, name="Overmanned")
train = manpower.addVars(years, skills, skills, name="Train")
```
Next, we insert the constraints. The balance constraints ensure that, per skill level and per year, the available workforce equals the workforce of the previous year (or the current strength in the first year) after natural attrition, plus the new recruits (also reduced by attrition), plus the workers retrained into this level (after attrition), minus the workers retrained from this level to a different one, minus the layoffs. This constraint describes the change in the total number of employed workers.
```
#1.1 & 1.2 Balance
Balance = manpower.addConstrs(
(workforce[year, level] == (1-veteran_attrition[level])*(curr_workforce[level] if year == 1 else workforce[year-1, level])
+ (1-rookie_attrition[level])*hire[year, level] + gp.quicksum((1- veteran_attrition[level])* train[year, level2, level]
-train[year, level, level2] for level2 in skills if level2 < level)
+ gp.quicksum((1- demoted_attrition)* train[year, level2, level] -train[year, level, level2] for level2 in skills if level2 > level)
- layoff[year, level] for year in years for level in skills), "Balance")
```
The unskilled training constraints ensure that, per year, at most 200 workers can be retrained from unskilled to semi-skilled, due to capacity limitations. Also, no one can be trained within one year from unskilled directly to skilled.
```
#2.1 & 2.2 Unskilled training
UnskilledTrain1 = manpower.addConstrs((train[year, 's1', 's2'] <= max_train_unskilled for year in years), "Unskilled_training1")
UnskilledTrain2 = manpower.addConstrs((train[year, 's1', 's3'] == 0 for year in years), "Unskilled_training2")
```
The semi-skilled training constraints state that the retraining of semi-skilled workers to skilled workers is limited to no more than one quarter of the skilled labor force at that time, due to capacity limitations.
```
#3. Semi-skilled training
SemiskilledTrain = manpower.addConstrs((train[year,'s2', 's3'] <= max_train_semiskilled * workforce[year,'s3'] for year in years), "Semiskilled_training")
```
The overmanning constraints ensure that the total overmanning over all skill levels in one year is no more than 150.
```
#4. Overmanning
Overmanning = manpower.addConstrs((excess.sum(year, '*') <= max_overmanning for year in years), "Overmanning")
```
The demand constraints ensure that the number of workers of each level and year equals the required number of workers plus the Overmanned workers and the number of workers who are working part-time.
```
#5. Demand
Demand = manpower.addConstrs((workforce[year, level] ==
demand[year,level] + excess[year, level] + parttime_cap * part_time[year, level]
for year in years for level in skills), "Requirements")
```
The first objective is to minimize the total number of laid-off workers. This can be stated as:
```
#0.1 Objective Function: Minimize layoffs
obj1 = layoff.sum()
manpower.setObjective(obj1, GRB.MINIMIZE)
```
The second alternative objective is to minimize the total cost of all employed workers and costs for retraining:
```
obj2 = gp.quicksum((training_cost[level]*train[year, level, skills[skills.index(level)+1]] if level < 's3' else 0)
+ layoff_cost[level]*layoff[year, level]
+ parttime_cost[level]*part_time[year, level]
+ overmanning_cost[level] * excess[year, level] for year in years for level in skills)
```
Next we start the optimization with the objective function of minimizing layoffs, and Gurobi finds the optimal solution.
```
manpower.optimize()
```
## Analysis
The minimum number of layoffs is 841.80. The optimal policies to achieve this minimum number of layoffs are given below.
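The figure above can be read directly from the solved model (a minimal sketch, assuming the `manpower.optimize()` call above finished with an optimal solution):
```
print(f"Minimum total layoffs: {manpower.ObjVal:.2f}")
```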
### Hiring Plan
This plan determines the number of new workers to hire at each year of the planning horizon (rows) and each skill level (columns). For example, at year 2 we are going to hire 649.3 Semi-skilled workers.
```
rows = years.copy()
columns = skills.copy()
hire_plan = pd.DataFrame(columns=columns, index=rows, data=0.0)
for year, level in hire.keys():
if (abs(hire[year, level].x) > 1e-6):
hire_plan.loc[year, level] = np.round(hire[year, level].x, 1)
hire_plan
```
### Training and Demotions Plan
This plan defines the number of workers to promote by training (or demote) at each year of the planning horizon. For example, in year 1 we are going to demote 168.4 skilled (s3) workers to the level of semi-skilled (s2).
```
rows = years.copy()
columns = ['{0} to {1}'.format(level1, level2) for level1 in skills for level2 in skills if level1 != level2]
train_plan = pd.DataFrame(columns=columns, index=rows, data=0.0)
for year, level1, level2 in train.keys():
col = '{0} to {1}'.format(level1, level2)
if (abs(train[year, level1, level2].x) > 1e-6):
train_plan.loc[year, col] = np.round(train[year, level1, level2].x, 1)
train_plan
```
### Layoffs Plan
This plan determines the number of workers of each skill level to lay off at each year of the planning horizon. For example, we are going to lay off 232.5 Unskilled workers in year 3.
```
rows = years.copy()
columns = skills.copy()
layoff_plan = pd.DataFrame(columns=columns, index=rows, data=0.0)
for year, level in layoff.keys():
if (abs(layoff[year, level].x) > 1e-6):
layoff_plan.loc[year, level] = np.round(layoff[year, level].x, 1)
layoff_plan
```
### Part-time Plan
This plan defines the number of part-time workers of each skill level working at each year of the planning horizon. For example, in year 1, we have 50 part-time skilled workers.
```
rows = years.copy()
columns = skills.copy()
parttime_plan = pd.DataFrame(columns=columns, index=rows, data=0.0)
for year, level in part_time.keys():
if (abs(part_time[year, level].x) > 1e-6):
parttime_plan.loc[year, level] = np.round(part_time[year, level].x, 1)
parttime_plan
```
### Overmanning Plan
This plan determines the number of excess workers of each skill level working at each year of the planning horizon. For example, we have 150 Unskilled excess workers in year 3.
```
rows = years.copy()
columns = skills.copy()
excess_plan = pd.DataFrame(columns=columns, index=rows, data=0.0)
for year, level in excess.keys():
if (abs(excess[year, level].x) > 1e-6):
excess_plan.loc[year, level] = np.round(excess[year, level].x, 1)
excess_plan
```
By minimizing the cost instead, we could implement policies that would cost $\$498,677.29$ over the three-year period and result in 1,423.7 layoffs. Alternative optimal solutions could be considered to reduce layoffs without increasing cost. If we minimize costs instead of layoffs, we can save $\$942,712.51$ at the expense of 581.9 additional layoffs. Thus, the cost of saving each job, when minimizing layoffs, could be regarded as $\$1,620.06$.
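To reproduce the cost-minimizing figures quoted above, one can switch the objective and re-solve. This is a minimal sketch, assuming the model, the `obj2` expression, and the `layoff` variables defined in the cells above:
```
# Re-solve with the alternative cost objective
manpower.setObjective(obj2, GRB.MINIMIZE)
manpower.optimize()
print(f"Total cost: {manpower.ObjVal:.2f}")
print(f"Total layoffs under the cost-minimizing plan: {layoff.sum().getValue():.1f}")
```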
**Note:** If you want to write your solution to a file, rather than print it to the terminal, you can use the model.write() command. An example implementation is:
`manpower.write("manpower-planning-output.sol")`
---
## References
H. Paul Williams, Model Building in Mathematical Programming, fifth edition.
Copyright © 2020 Gurobi Optimization, LLC
| github_jupyter |
```
import os, sys
from LossJLearn.utils.plot import show_prediction_face_comparison, show_linear_point, show_regressor_linear
from LossJLearn.datasets import load_linear_data
import numpy as np
from matplotlib import pyplot as plt
import sklearn
from sklearn.linear_model import LinearRegression, Ridge, RidgeCV, Lasso, SGDRegressor
from sklearn.preprocessing import PolynomialFeatures, StandardScaler, Normalizer
from sklearn.datasets import fetch_olivetti_faces, fetch_california_housing, load_diabetes
from sklearn.model_selection import train_test_split
print("python version: ", sys.version_info)
print(sklearn.__name__, sklearn.__version__)
```
## 1. Basic Regression
### 1.1 Linear Regression
#### 1.1.1 sklearn.linear_model.LinearRegression
https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html#sklearn.linear_model.LinearRegression
```
X_data, y_data = load_linear_data(point_count=500, max_=10, w=3.2412, b=-5.2941, random_state=10834)
X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, random_state=19332)
rgs = LinearRegression(fit_intercept=True, normalize=False, copy_X=True, n_jobs=None)
rgs.fit(X_train, y_train)
rgs.coef_, rgs.intercept_
rgs.score(X_test, y_test)
show_regressor_linear(X_test, y_test, rgs.coef_, rgs.intercept_)
```
##### Normalization with Normalizer
Compute the norm of each sample, then divide each feature of that sample by the norm.
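As a quick illustration (a minimal sketch on a small made-up array, independent of the data used below), each transformed row ends up with unit L2 norm:
```
import numpy as np
from sklearn.preprocessing import Normalizer

X = np.array([[3.0, 4.0], [1.0, 1.0]])
X_norm = Normalizer(norm="l2").fit_transform(X)
print(X_norm)                                          # [[0.6, 0.8], [0.707..., 0.707...]]
print(np.allclose(np.linalg.norm(X_norm, axis=1), 1))  # True: every row has unit L2 norm
```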
```
norm = Normalizer(norm="l2", copy=True)
X_train_norm = norm.fit_transform(X_train)
X_test_norm = norm.transform(X_test)
rgs = LinearRegression()
rgs.fit(X_train_norm, y_train)
rgs.coef_, rgs.intercept_
rgs.score(X_test_norm, y_test)
X_train_norm[:10], X_test_norm[:10]
X_train[:5]
rgs = LinearRegression(fit_intercept=True,
                       normalize=True, # bool. Only takes effect when fit_intercept is True. If True, the regressors X are normalized before regression by subtracting the mean and dividing by the l2-norm.
copy_X=False,
n_jobs=None)
rgs.fit(X_train, y_train)
X_train[:5]
X_test[:5]
rgs.score(X_test, y_test)
X_test[:5]
rgs.coef_, rgs.intercept_
%%timeit
rgs = LinearRegression(n_jobs=2)
rgs.fit(X_train, y_train)
%%timeit
rgs = LinearRegression(n_jobs=-1)
rgs.fit(X_train, y_train)
%%timeit
rgs = LinearRegression(n_jobs=None)
rgs.fit(X_train, y_train)
```
#### 1.1.2 sklearn.linear_model.SGDRegressor
https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDRegressor.html#sklearn.linear_model.SGDRegressor
```
X_data, y_data = load_linear_data(point_count=500, max_=10, w=3.2412, b=-5.2941, random_state=10834)
X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, random_state=19332)
rgs = SGDRegressor(random_state=10190)
rgs.fit(X_train, y_train)
rgs.score(X_test, y_test)
```
##### Standardization with StandardScaler
https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html#sklearn.preprocessing.StandardScaler
z = (x - u) / s, where u is the mean and s is the standard deviation.
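A minimal sanity check (small made-up data, independent of the cells below) that this formula matches StandardScaler's output:
```
import numpy as np
from sklearn.preprocessing import StandardScaler

X = np.array([[1.0], [2.0], [3.0], [4.0]])
z_sklearn = StandardScaler().fit_transform(X)
z_manual = (X - X.mean(axis=0)) / X.std(axis=0)  # StandardScaler uses the population std (ddof=0)
print(np.allclose(z_sklearn, z_manual))          # True
```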
```
scaler = StandardScaler(copy=True, with_mean=True, with_std=True)
X_train_scaler = scaler.fit_transform(X_train)
X_test_scaler = scaler.transform(X_test)
scaler.mean_, scaler.scale_
rgs = SGDRegressor(
loss='squared_loss', # ‘squared_loss’, ‘huber’, ‘epsilon_insensitive’, or ‘squared_epsilon_insensitive’
    penalty='l2', # penalty (regularization) term
    alpha=0.0001, # regularization coefficient
fit_intercept=True,
max_iter=100,
tol=0.001,
shuffle=True,
verbose=0,
epsilon=0.1,
random_state=10190,
learning_rate='invscaling',
eta0=0.01,
power_t=0.25,
early_stopping=True,
validation_fraction=0.1,
n_iter_no_change=5,
warm_start=False,
average=False
)
rgs.fit(X_train_scaler, y_train)
rgs.coef_, rgs.intercept_
rgs.score(X_test_scaler, y_test)
show_regressor_linear(X_test_scaler, y_test, pred_coef=rgs.coef_, pred_intercept=rgs.intercept_)
```
### 1.2 Polynomial Regression
```
def load_data_from_func(func=lambda X_data: 0.1383 * np.square(X_data) - 1.2193 * X_data + 2.4096,
x_min=0, x_max=10, n_samples=500, loc=0, scale=1, random_state=None):
if random_state is not None and isinstance(random_state, int):
np.random.seed(random_state)
x = np.random.uniform(x_min, x_max, n_samples)
y = func(x)
noise = np.random.normal(loc=loc, scale=scale, size=n_samples)
y += noise
return x.reshape([-1, 1]), y
X_data, y_data = load_data_from_func(n_samples=500, random_state=10392)
```
https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html#sklearn.preprocessing.PolynomialFeatures/
```
X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, random_state=10319)
poly = PolynomialFeatures() # [1, a, b, a^2, ab, b^2]
X_train_poly = poly.fit_transform(X_train)
X_test_poly = poly.transform(X_test)
X_train_poly.shape
rgs = LinearRegression()
rgs.fit(X_train_poly, y_train)
rgs.score(X_test_poly, y_test)
y_pred = rgs.predict(X_test_poly)
def show_regression_line(X_data, y_data, y_pred):
plt.figure(figsize=[10, 5])
plt.xlabel("x")
plt.ylabel("y")
if X_data.ndim == 2:
X_data = X_data.reshape(-1)
plt.scatter(X_data, y_data)
idx = np.argsort(X_data)
X_data = X_data[idx]
y_pred = y_pred[idx]
plt.plot(X_data, y_pred, color="darkorange")
plt.show()
show_regression_line(X_test, y_test, y_pred)
```
## 2. California Housing Dataset
```
df = fetch_california_housing(data_home="./data", as_frame=True)
X_data = df['data']
X_data.describe()
X_train, X_test, y_train, y_test = train_test_split(X_data, df.target, random_state=1, shuffle=True)
```
### 2.1 Linear Regression
```
rgs = LinearRegression()
rgs.fit(X_train, y_train)
rgs.score(X_test, y_test)
scaler = StandardScaler()
X_train_scaler = scaler.fit_transform(X_train)
X_test_scaler = scaler.transform(X_test)
rgs = LinearRegression()
rgs.fit(X_train_scaler, y_train)
rgs.score(X_test_scaler, y_test)
```
### 2.2 Ridge Regression
https://scikit-learn.org/stable/modules/linear_model.html#ridge-regression-and-classification
https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html#sklearn.linear_model.Ridge
```
rgs = Ridge(alpha=1.0, solver="auto")
rgs.fit(X_train, y_train)
rgs.score(X_test, y_test)
rgs.coef_
```
https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html#sklearn.linear_model.RidgeCV
#### 2.2.1 Cross-Validation
```
rgs = RidgeCV(
alphas=(0.001, 0.01, 0.1, 1.0, 10.0),
fit_intercept=True,
normalize= False,
    scoring=None, # if None, the negative mean squared error is used when cv is 'auto' or None, otherwise the r2 score; signature: scorer(estimator, X, y)
    cv=None, # int, cross-validation generator or an iterable, default=None
    gcv_mode='auto', # {'auto', 'svd', 'eigen'}, default='auto'
    store_cv_values=None, # bool, whether to store the cross-validation values for each alpha in the cv_values_ attribute; only effective when cv=None
)
rgs.fit(X_train, y_train)
rgs.best_score_
rgs.score(X_test, y_test)
rgs = RidgeCV(
alphas=(0.001, 0.01, 0.1, 1.0, 10.0),
fit_intercept=True,
normalize= False,
    scoring=None, # if None, the negative mean squared error is used when cv is 'auto' or None, otherwise the r2 score; signature: scorer(estimator, X, y)
    cv=10, # int, cross-validation generator or an iterable, default=None
    gcv_mode='auto', # {'auto', 'svd', 'eigen'}, default='auto'
    store_cv_values=None, # bool, whether to store the cross-validation values for each alpha in the cv_values_ attribute; only effective when cv=None
)
rgs.fit(X_train, y_train)
rgs.best_score_, rgs.score(X_test, y_test)
```
### 2.3 Lasso Regression
https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Lasso.html#sklearn.linear_model.Lasso
https://scikit-learn.org/stable/modules/linear_model.html#lasso
```
rgs = Lasso()
rgs.fit(X_train, y_train)
rgs.score(X_test, y_test)
rgs.coef_
```
### 2.4 Polynomial Regression
https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html?highlight=polynomialfeatures#sklearn.preprocessing.PolynomialFeatures
```
poly = PolynomialFeatures(degree=2, interaction_only=False, include_bias=True)
X_train_poly = poly.fit_transform(X_train) # [1, a, b, a^2, ab, b^2]
X_train_poly.shape
poly.get_feature_names()
X_test_poly = poly.transform(X_test)
rgs = LinearRegression()
rgs.fit(X_train_poly, y_train)
rgs.score(X_test_poly, y_test)
poly = PolynomialFeatures(degree=2,
                          interaction_only=True, # whether to keep only interaction features, dropping the pure power terms
include_bias=True,
order="C") # Order of output array in the dense case. ‘F’ order is faster to compute, but may slow down subsequent estimators.
X_train_poly = poly.fit_transform(X_train)
X_test_poly = poly.transform(X_test)
X_train_poly.shape
poly.get_feature_names()
rgs = LinearRegression()
rgs.fit(X_train_poly, y_train)
rgs.score(X_test_poly, y_test)
```
## Summary
1. sklearn's linear-regression-related models live under sklearn.linear_model
> from sklearn.linear_model import LinearRegression, Ridge, RidgeCV, Lasso, SGDRegressor
2. Tuning the parameters
> LinearRegression(fit_intercept=True, normalize=False, copy_X=True, n_jobs=None)
>
> SGDRegressor(loss='squared_loss',
penalty='l2',
alpha=0.0001,
fit_intercept=True,
max_iter=1000,
tol=0.001,
shuffle=True,
epsilon=0.1,
random_state=None,
learning_rate='invscaling',
eta0=0.01,
early_stopping=False,
validation_fraction=0.1,
n_iter_no_change=5)
>
> Ridge(alpha=1.0, fit_intercept=True,
normalize=False,
copy_X=True,
max_iter=None,
tol=0.001,
solver='auto',
random_state=None)
>
> Lasso(alpha=1.0,
fit_intercept=True,
normalize=False,
precompute=False,
copy_X=True,
max_iter=1000,
tol=0.0001,
random_state=None)
>
> RidgeCV(alphas=(0.1, 1.0, 10.0),
*,
fit_intercept=True,
normalize=False,
scoring=None,
cv=None,
gcv_mode=None,
store_cv_values=False,)
3. Polynomial regression is implemented by using PolynomialFeatures for feature engineering
> from sklearn.preprocessing import PolynomialFeatures
> poly = PolynomialFeatures(degree=2, interaction_only=False, include_bias=True, order='C')
> X_train_poly = poly.fit_transform(X_train)
> X_test_poly = poly.transform(X_test)
4. Normalization (Normalizer) and standardization (StandardScaler)
> from sklearn.preprocessing import StandardScaler, Normalizer
>
> scaler = StandardScaler(copy=True, with_mean=True, with_std=True)
> X_train_scaler = scaler.fit_transform(X_train)
> X_test_scaler = scaler.transform(X_test)
>
> norm = Normalizer(norm="l2", copy=True)
> X_train_norm = norm.fit_transform(X_train)
> X_test_norm = norm.transform(X_test)
>
## Exercises
1. Get familiar with the parameters of each model
2. What are the differences between the three scaling methods? When should you use Normalizer, when StandardScaler, and when MinMaxScaler?
3. Try implementing PolynomialFeatures with numpy (see the sketch below)
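A minimal sketch for exercise 3 (a hand-rolled degree-2 expansion; the helper name `poly2_features` is made up here, and the column order is chosen to match sklearn's [1, a, b, a^2, ab, b^2] for two features):
```
import numpy as np

def poly2_features(X):
    """Degree-2 polynomial features with bias for X of shape (n_samples, n_features)."""
    n, d = X.shape
    cols = [np.ones(n)]                      # bias column
    cols.extend(X[:, j] for j in range(d))   # linear terms
    for i in range(d):
        for j in range(i, d):
            cols.append(X[:, i] * X[:, j])   # squares and interaction terms
    return np.column_stack(cols)

print(poly2_features(np.array([[2.0, 3.0]])))  # [[1. 2. 3. 4. 6. 9.]]
```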
## Related Links
<a href="./02.1.LinearRegression.ipynb" style=""> 2.1 Principles of linear regression, ridge regression, Lasso, SGD, and locally weighted linear regression</a>
<a href="./02.3.LinearRegression-numpy.ipynb" style=""> 2.3 Linear regression, ridge regression, and SGD regression implemented with numpy</a>
<a href="./02.4.LinearRegression-tf2.ipynb"> 2.4 Linear regression, ridge regression, and SGD regression implemented with TensorFlow 2 </a>
<a href="./02.5.LinearRegression-torch1.ipynb"> 2.5 Linear regression, ridge regression, and SGD regression implemented with PyTorch 1 </a>
## Project Source Code
https://github.com/LossJ
After opening the page, click Statistic-Machine-Learning
| github_jupyter |
```
%matplotlib inline
%reload_ext autoreload
%autoreload 2
from ipyexperiments import *
from lib.fastai.imports import *
from lib.fastai.structured import *
import pandas as pd
import numpy as np
import lightgbm as lgb
from scipy.sparse import vstack, csr_matrix, save_npz, load_npz
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import StratifiedKFold
from datetime import datetime
from path import Path
import re2 as re
import joblib
## Dainis's work
def display_n(df, n=250):
with pd.option_context("display.max_rows", n):
with pd.option_context("display.max_columns", n):
display(df)
def add_datepart(df, fldname, drop=False, time=False):
"Helper function that adds columns relevant to a date."
fld = df[fldname]
fld_dtype = fld.dtype
if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
fld_dtype = np.datetime64
if not np.issubdtype(fld_dtype, np.datetime64):
df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True)
targ_pre = re.sub('[Dd]ate$', '', fldname)
attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']
if time: attr = attr + ['Hour', 'Minute', 'Second']
for n in attr: df[targ_pre + n] = getattr(fld.dt, n.lower())
df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10 ** 9
if drop: df.drop(fldname, axis=1, inplace=True)
## Pietro and Wojtek work
def add_timestamps(df):
"Funection that loads time values from numpy files"
datedictAS = np.load('dates/AvSigVersionTimestamps.npy')[()]
df['DateAS'] = df['AvSigVersion'].map(datedictAS)
datedictOS = np.load('dates/OSVersionTimestamps.npy')[()]
df['DateOS'] = df['Census_OSVersion'].map(datedictOS)
# BL timestamp
def convert(x):
try:
d = datetime.strptime(x.split('.')[4],'%y%m%d-%H%M')
except:
d = np.nan
return d
df['DateBL'] = df['OsBuildLab'].map(convert)
dtypes = {
'MachineIdentifier': 'category',
'ProductName': 'category',
'EngineVersion': 'category',
'AppVersion': 'category',
'AvSigVersion': 'category',
'IsBeta': 'int8',
'RtpStateBitfield': 'float16',
'IsSxsPassiveMode': 'int8',
'DefaultBrowsersIdentifier': 'float16',
'AVProductStatesIdentifier': 'float32',
'AVProductsInstalled': 'float16',
'AVProductsEnabled': 'float16',
'HasTpm': 'int8',
'CountryIdentifier': 'int16',
'CityIdentifier': 'float32',
'OrganizationIdentifier': 'float16',
'GeoNameIdentifier': 'float16',
'LocaleEnglishNameIdentifier': 'int8',
'Platform': 'category',
'Processor': 'category',
'OsVer': 'category',
'OsBuild': 'int16',
'OsSuite': 'int16',
'OsPlatformSubRelease': 'category',
'OsBuildLab': 'category',
'SkuEdition': 'category',
'IsProtected': 'float16',
'AutoSampleOptIn': 'int8',
'PuaMode': 'category',
'SMode': 'float16',
'IeVerIdentifier': 'float16',
'SmartScreen': 'category',
'Firewall': 'float16',
'UacLuaenable': 'float32',
'Census_MDC2FormFactor': 'category',
'Census_DeviceFamily': 'category',
'Census_OEMNameIdentifier': 'float16',
'Census_OEMModelIdentifier': 'float32',
'Census_ProcessorCoreCount': 'float16',
'Census_ProcessorManufacturerIdentifier': 'float16',
'Census_ProcessorModelIdentifier': 'float16',
'Census_ProcessorClass': 'category',
'Census_PrimaryDiskTotalCapacity': 'float32',
'Census_PrimaryDiskTypeName': 'category',
'Census_SystemVolumeTotalCapacity': 'float32',
'Census_HasOpticalDiskDrive': 'int8',
'Census_TotalPhysicalRAM': 'float32',
'Census_ChassisTypeName': 'category',
'Census_InternalPrimaryDiagonalDisplaySizeInInches': 'float16',
'Census_InternalPrimaryDisplayResolutionHorizontal': 'float16',
'Census_InternalPrimaryDisplayResolutionVertical': 'float16',
'Census_PowerPlatformRoleName': 'category',
'Census_InternalBatteryType': 'category',
'Census_InternalBatteryNumberOfCharges': 'float32',
'Census_OSVersion': 'category',
'Census_OSArchitecture': 'category',
'Census_OSBranch': 'category',
'Census_OSBuildNumber': 'int16',
'Census_OSBuildRevision': 'int32',
'Census_OSEdition': 'category',
'Census_OSSkuName': 'category',
'Census_OSInstallTypeName': 'category',
'Census_OSInstallLanguageIdentifier': 'float16',
'Census_OSUILocaleIdentifier': 'int16',
'Census_OSWUAutoUpdateOptionsName': 'category',
'Census_IsPortableOperatingSystem': 'int8',
'Census_GenuineStateName': 'category',
'Census_ActivationChannel': 'category',
'Census_IsFlightingInternal': 'float16',
'Census_IsFlightsDisabled': 'float16',
'Census_FlightRing': 'category',
'Census_ThresholdOptIn': 'float16',
'Census_FirmwareManufacturerIdentifier': 'float16',
'Census_FirmwareVersionIdentifier': 'float32',
'Census_IsSecureBootEnabled': 'int8',
'Census_IsWIMBootEnabled': 'float16',
'Census_IsVirtualDevice': 'float16',
'Census_IsTouchEnabled': 'int8',
'Census_IsPenCapable': 'int8',
'Census_IsAlwaysOnAlwaysConnectedCapable': 'float16',
'Wdft_IsGamer': 'float16',
'Wdft_RegionIdentifier': 'float16',
'HasDetections': 'int8'
}
# Uncomment the following block on the first run
'''
with IPyExperimentsCPU():
print('Download Train and Test Data.\n')
# Pietro, uncomment the following line and comment out the next one
# INPUT_DIR = Path('E:/malware_microsoft' )
INPUT_DIR = Path('./input' )
train = pd.read_csv(Path(INPUT_DIR / 'train.csv'), dtype=dtypes, low_memory=True)
train['MachineIdentifier'] = train.index.astype('uint32')
test = pd.read_csv(Path(INPUT_DIR /'test.csv'), dtype=dtypes, low_memory=True)
test['MachineIdentifier'] = test.index.astype('uint32')
add_timestamps(train)
add_timestamps(test)
joblib.dump(train, 'data/train_w_time_origin.pkl')
joblib.dump(test, 'data/test_w_time_origin.pkl')
'''
def versioning(df, fldname, drop=False):
    "Helper function that splits a dot-separated version string into one column per component."
    versions = df[fldname].str.split('.', expand=True)
    for i, v in enumerate(versions):
        df[fldname+'V'+str(i)] = versions[v]
    if drop: df.drop(fldname, axis=1, inplace=True)
def versioning(df, fldname, categorical_vars=None, drop=False):
    "Helper function that splits a comma-separated version string into one column per component."
    # NOTE: this definition overrides the dot-separated variant above; categorical_vars is made
    # optional so that the two-argument calls further down keep working.
    versions = df[fldname].str.split(',', expand=True)
    for i, v in enumerate(versions):
        newfld = fldname+'V'+str(i)
        df[newfld] = versions[v]
        if categorical_vars is not None:
            categorical_vars.append(newfld)
    if drop: df.drop(fldname, axis=1, inplace=True)
with IPyExperimentsCPU() as preprocess:
categorical_vars = [
'MachineIdentifier',
'ProductName',
'EngineVersion',
'AppVersion',
'AvSigVersion',
'Platform',
'Processor',
'OsVer',
'OsPlatformSubRelease',
'OsBuildLab',
'SkuEdition',
'PuaMode',
'SmartScreen',
'Census_MDC2FormFactor',
'Census_DeviceFamily',
'Census_ProcessorClass',
'Census_PrimaryDiskTypeName',
'Census_ChassisTypeName',
'Census_PowerPlatformRoleName',
'Census_InternalBatteryType',
'Census_OSVersion',
'Census_OSArchitecture',
'Census_OSBranch',
'Census_OSEdition',
'Census_OSSkuName',
'Census_OSInstallTypeName',
'Census_OSWUAutoUpdateOptionsName',
'Census_GenuineStateName',
'Census_ActivationChannel',
'Census_FlightRing',
]
train=joblib.load('data/train_w_time_origin.pkl')
test=joblib.load('data/test_w_time_origin.pkl')
test['HasDetections'] = -1
add_datepart(train, 'DateAS', drop=False, time=True)
add_datepart(train, 'DateOS', drop=False, time=True)
add_datepart(train, 'DateBL', drop=False, time=True)
add_datepart(test, 'DateAS', drop=False, time=True)
add_datepart(test, 'DateOS', drop=False, time=True)
add_datepart(test, 'DateBL', drop=False, time=True)
preprocess.keep_var_names('train', 'test', 'categorical_vars')
joblib.dump(categorical_vars, 'val/categorical.pkl')
with pd.option_context("display.max_rows", 100):
with pd.option_context("display.max_columns", 100):
display(train[categorical_vars].head())
versioned = ['EngineVersion','AppVersion','AvSigVersion','OsVer','Census_OSVersion','OsBuildLab']
with IPyExperimentsCPU() as vsplits:
for ver in versioned:
versioning(train, ver)
versioning(test, ver)
df_raw = pd.concat([train, test], sort=False)
train_cats(df_raw)
df, y, nas = proc_df(df_raw)
train = df.head(len(train)).reset_index(drop=True)
test = df.tail(len(test)).reset_index(drop=True)
joblib.dump(train,'data/train_dainis.pkl')
joblib.dump(test,'data/test_dainis.pkl')
with IPyExperimentsCPU() as transform:
'''
print('Transform all features to category.\n')
for i, usecol in enumerate(categorical_vars):
print(str(i) + " / " + str(len(categorical_vars)))
train[usecol] = train[usecol].astype('str')
test[usecol] = test[usecol].astype('str')
train[usecol] = train[usecol].astype('str')
test[usecol] = test[usecol].astype('str')
#Fit LabelEncoder
le = LabelEncoder().fit(
np.unique(train[usecol].unique().tolist()+
test[usecol].unique().tolist()))
#At the end 0 will be used for dropped values
train[usecol] = le.transform(train[usecol])+1
test[usecol] = le.transform(test[usecol])+1
agg_tr = (train
.groupby([usecol])
.aggregate({'MachineIdentifier':'count'})
.reset_index()
.rename({'MachineIdentifier':'Train'}, axis=1))
agg_te = (test
.groupby([usecol])
.aggregate({'MachineIdentifier':'count'})
.reset_index()
.rename({'MachineIdentifier':'Test'}, axis=1))
agg = pd.merge(agg_tr, agg_te, on=usecol, how='outer').replace(np.nan, 0)
#Select values with more than 1000 observations
agg = agg[(agg['Train'] > 1000)].reset_index(drop=True)
agg['Total'] = agg['Train'] + agg['Test']
#Drop unbalanced values
agg = agg[(agg['Train'] / agg['Total'] > 0.2) & (agg['Train'] / agg['Total'] < 0.8)]
agg[usecol+'Copy'] = agg[usecol]
train[usecol+'bis'] = (pd.merge(train[[usecol]],
agg[[usecol, usecol+'Copy']],
on=usecol, how='left')[usecol+'Copy']
.replace(np.nan, 0).astype('int').astype('category'))
test[usecol+'bis'] = (pd.merge(test[[usecol]],
agg[[usecol, usecol+'Copy']],
on=usecol, how='left')[usecol+'Copy']
.replace(np.nan, 0).astype('int').astype('category'))
del le, agg_tr, agg_te, agg, usecol
'''
EXP_TAG=Path('dainis0')
train_ids = train.index
test_ids = test.index
y_train = np.array(train['HasDetections'])
# Fulfill contract with evaluator notebook
joblib.dump(categorical_vars, Path('val' / EXP_TAG / 'categorical.pkl'))
joblib.dump(train, Path('val' / EXP_TAG / 'train-original.pkl'))
joblib.dump(test, Path('val' / EXP_TAG / 'test-original.pkl'))
joblib.dump(y_train, Path('val' / EXP_TAG / 'y_train-original.pkl'))
joblib.dump(train_ids,Path( 'val' / EXP_TAG / 'train_ids-original.pkl'))
joblib.dump(test_ids, Path('val' / EXP_TAG / 'test_ids-original.pkl'))
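# The evaluator notebook is expected to reload these artifacts later, e.g. (sketch):
#   train = joblib.load(Path('val' / EXP_TAG / 'train-original.pkl'))
#   y_train = joblib.load(Path('val' / EXP_TAG / 'y_train-original.pkl'))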
```
| github_jupyter |
```
#Goal: obtain a universal time, in Julian Date from a local time in the header of the fits images
from astropy.io import fits #work with fits images
from astropy.time import Time #work with time in header
import glob #work with files in the directory
import yaml #work with yaml files
import numpy as np
import sys
import os
%matplotlib inline
import matplotlib.pyplot as plt #plot library
def init_plotting():
plt.rcParams['figure.figsize'] = (14.0,8.0)
plt.rcParams['font.size'] = 10
#plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['axes.labelsize'] = plt.rcParams['font.size']
plt.rcParams['axes.titlesize'] = 2*plt.rcParams['font.size']
plt.rcParams['legend.fontsize'] = 0.65*plt.rcParams['font.size']
plt.rcParams['xtick.labelsize'] = plt.rcParams['font.size']
plt.rcParams['ytick.labelsize'] = plt.rcParams['font.size']
plt.rcParams['xtick.major.size'] = 3
plt.rcParams['xtick.minor.size'] = 3
plt.rcParams['xtick.major.width'] = 1
plt.rcParams['xtick.minor.width'] = 1
plt.rcParams['ytick.major.size'] = 3
plt.rcParams['ytick.minor.size'] = 3
plt.rcParams['ytick.major.width'] = 1
plt.rcParams['ytick.minor.width'] = 1
plt.rcParams['legend.frameon'] = True
plt.rcParams['legend.loc'] = 'best'
plt.rcParams['axes.linewidth'] = 1
init_plotting()
#BAR Progress function to visualize the progress status:
def update_progress(progress):
"""
Progress Bar to visualize the status of a procedure
___
INPUT:
progress: percent of the data
___
Example:
print ""
print "progress : 0->1"
for i in range(100):
time.sleep(0.1)
update_progress(i/100.0)
"""
barLength = 10 # Modify this to change the length of the progress bar
status = ""
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\r\n"
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(barLength*progress))
text = "\rPercent: [{0}] {1}% {2}".format( "#"*block + "-"*(barLength-block), progress*100, status)
sys.stdout.write(text)
sys.stdout.flush()
save_path = u'C:\\Users\\walte\\Desktop\\exoplanet\\data\\xo2b\\xo2b.b\\teste_pyraf'
data_path = u'C:\\Users\\walte\\Desktop\\exoplanet\\data\\xo2b\\xo2b.b'
pwd
cd C:/Users/walte/Desktop/exoplanet/data/xo2b/xo2b.b/teste_pyraf/
images = glob.glob('ABxo2b*.fits')
print images
print len(images)
im,hdr = fits.getdata(images[0],header=True) #reading the fits image (data + header)
hdr
```
# Local Time
```
hdr['LOCTIME'] #local time at start of exposure in header
images_time = []
for i in range(len(images)):
im,hdr = fits.getdata(images[i],header=True) #reading the fits image (data + header)
images_time.append(hdr['LOCTIME'])
update_progress((i+1.)/len(images))
print images_time #our local time series
```
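Before looking at the FITS creation time, here is a minimal sketch of the conversion this notebook is after: combining a date and local start time (like `DATE-OBS` + `LOCTIME`) and shifting by an assumed UTC offset to obtain a Julian Date with `astropy.time`. The time string and the 7-hour offset below are placeholders, not values read from these images.
```
from astropy.time import Time, TimeDelta

local = Time('2012-12-09 22:55:27', scale='utc')   # hypothetical DATE-OBS + LOCTIME string
utc = local + TimeDelta(7 * 3600, format='sec')    # assumed UTC-7 time zone for the site
print(utc.jd)                                      # Julian Date of the exposure start
```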
# FITS Time
```
fits_time = []
for i in range(len(images)):
im,hdr = fits.getdata(images[i],header=True) #reading the fits image (data + header)
fits_time.append(hdr['DATE'])
update_progress((i+1.)/len(images))
print fits_time
```
# Observatory (location)
```
#geting the observatory
im,hdr = fits.getdata(images[0],header=True) #reading the fits image (data + header)
observatory_loc = hdr['OBSERVAT']
print observatory_loc
```
# Obtain UT using local time and observatory
```
#time formats
print list(Time.FORMATS)
#Let's using fits time
teste = Time(fits_time[0],format=u'fits')
teste
teste.jd #convert my object test in fits date to julian date
#Let's make to all time series
serie = np.zeros(len(fits_time))
for i in range(len(fits_time)):
serie[i] = Time(fits_time[i],format=u'fits').jd
serie
#Let's confirm our serie
hjd = np.loadtxt('../Results/hjd') #original data
hjd
```
# Error 404: Date not found!
Yes, and I know why! The date in the ABxo2b*.fits images is the date when they were created. Because of that, we need to extract the date from the original images!
```
os.chdir('../')
images = glob.glob('xo2b*.fits')
os.chdir(save_path)
print images
fits_time = []
os.chdir(data_path)
for i in range(len(images)):
im,hdr = fits.getdata(images[i],header=True) #reading the fits image (data + header)
fits_time.append(hdr['DATE'])
update_progress((i+1.)/len(images))
os.chdir(save_path)
print fits_time
#Let's make to all time series
serie = np.zeros(len(fits_time))
for i in range(len(fits_time)):
serie[i] = Time(fits_time[i],format=u'fits').jd
serie
hjd
diff = serie-hjd
plt.figure()
plt.grid()
plt.scatter(hjd,diff)
plt.ylim(min(diff),max(diff))
im,hdr = fits.getdata('../'+images[0],header=True)
hdr
hdr['LOCTIME'],hdr['DATE-OBS']
tempo_imagem = hdr['DATE-OBS']+' '+hdr['LOCTIME']
print tempo_imagem
teste = Time(tempo_imagem,format=u'iso')
teste.jd #Nope
hjd[0]
#****** change time
hdr['UT']
location = '+32:24:59.3 110:44:04.3'
teste = Time(hdr['DATE-OBS']+'T'+hdr['UT'],format='isot',scale='utc')
teste
teste.jd
hjd[0]
hdr.keys()  # list the available header keywords
```
# Working with date in header following Kyle's subroutine stcoox.cl in ExoDRPL
```
import yaml
file = yaml.load(open('C:/Users/walte/MEGA/work/codes/iraf_task/input_path.yaml'))
RA,DEC, epoch = file['RA'],file['DEC'],file['epoch']
print RA,DEC,epoch
hdr['DATE-OBS'], hdr['UT']
local_time = Time(hdr['DATE-OBS']+'T'+hdr['ut'],format='isot')
print local_time.jd
teste_loc_time = Time('2012-12-09'+'T'+hdr['ut'],format='isot')
print teste_loc_time.jd
hdr['DATE']
Time(hdr['DATE'],format='fits',scale='tai')
hjd[0]
Time(hdr['DATE'],format='fits',scale='tai').jd2000
hdr
import datetime
hdr['DATE-OBS'],hdr['DATE'],hdr['LOCTIME'],hdr['TIME-OBS'],hdr['TIMESYS']
Time(hdr['DATE'],format='fits',scale='utc')
print Time(hdr['DATE'],scale='utc',format='isot').jd
print Time(hdr['DATE-OBS']+'T'+hdr['TIME-OBS'],scale='utc',format='isot').jd
hjd[0], len(hjd)
hdr['UTC-OBS']
Time(hdr['IRAF-TLM'],scale='utc',format='isot').jd
diff = (Time(hdr['IRAF-TLM'],scale='utc',format='isot').jd - Time(hdr['DATE'],scale='utc',format='isot').jd)/2
print diff
print Time(hdr['IRAF-TLM'],scale='utc',format='isot').jd - diff
```
# Local Time to sidereal time
```
local_time = Time(hdr['DATE-OBS']+'T'+hdr['Time-obs'],format='isot',scale='utc')
time_sd = local_time.sidereal_time('apparent',longitude=file['lon-obs'])#with precession and nutation
print time_sd
time_sd.T.hms[0],time_sd.T.hms[1],time_sd.T.hms[2]
local_time.sidereal_time('mean',longitude=file['lon-obs']) #with precession
file['observatory'],file['lon-obs']
time_sd.deg, time_sd.hour
```
# Change degrees to hours...
```
from astropy.coordinates import SkyCoord
from astropy import units as unit
from astropy.coordinates import Angle
RA = Angle(file['RA']+file['u.RA'])
DEC = Angle(file['DEC']+file['u.DEC'])
coordenadas = SkyCoord(RA,DEC,frame='fk5')
coordenadas
coordenadas.ra.hour, coordenadas.dec.deg,coordenadas.equinox,coordenadas.equinox.value
local_time
local_time.hjd
#airmass
airmass = np.loadtxt('../Results/XYpos+Airmass.txt',unpack=True)
airmass[2]
hdr['DATE-OBS'],hdr['UTC-OBS']
file['time-zone'] = 7
file['time-zone']
local_time
import string
hdr['DATE-OBS'].split('-')
float(hdr['DATE-OBS'].split('-')[2])
hdr['UTC-OBS'].split(':'),hdr['UTC-OBS'].split(':')[0]
if float(hdr['UTC-OBS'].split(':')[0]) < file['time-zone']:
new_date = float(hdr['DATE-OBS'].split('-')[2]) - 1
hdr['DATE-OBS'] = hdr['DATE-OBS'].split('-')[0]+'-'+hdr['DATE-OBS'].split('-')[1]+'-'+str(int(new_date))
new_date
hdr['DATE-OBS']
```
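For reference, here is a small self-contained sketch of the degrees-to-hours conversion with `astropy.coordinates`; the RA/DEC strings below are placeholders, not the values from the YAML file.
```
from astropy.coordinates import Angle, SkyCoord

ra = Angle('07h48m06s')            # hypothetical right ascension
dec = Angle('+50d13m33s')          # hypothetical declination
coord = SkyCoord(ra, dec, frame='fk5')
print(coord.ra.hour, coord.ra.deg, coord.dec.deg)
```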
| github_jupyter |
**This notebook is an exercise in the [Geospatial Analysis](https://www.kaggle.com/learn/geospatial-analysis) course. You can reference the tutorial at [this link](https://www.kaggle.com/alexisbcook/interactive-maps).**
---
# Introduction
You are an urban safety planner in Japan, and you are analyzing which areas of Japan need extra earthquake reinforcement. Which areas are both high in population density and prone to earthquakes?
<center>
<img src="https://i.imgur.com/Kuh9gPj.png" width="450"><br/>
</center>
Before you get started, run the code cell below to set everything up.
```
import pandas as pd
import geopandas as gpd
import folium
from folium import Choropleth
from folium.plugins import HeatMap
from learntools.core import binder
binder.bind(globals())
from learntools.geospatial.ex3 import *
```
We define a function `embed_map()` for displaying interactive maps. It accepts two arguments: the variable containing the map, and the name of the HTML file where the map will be saved.
This function ensures that the maps are visible [in all web browsers](https://github.com/python-visualization/folium/issues/812).
```
def embed_map(m, file_name):
from IPython.display import IFrame
m.save(file_name)
return IFrame(file_name, width='100%', height='500px')
```
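As a quick check (not part of the graded exercises), you can display an empty base map of Japan with this helper; the file name below is arbitrary.
```
m = folium.Map(location=[35,136], tiles='cartodbpositron', zoom_start=5)
embed_map(m, 'm_test.html')
```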
# Exercises
### 1) Do earthquakes coincide with plate boundaries?
Run the code cell below to create a DataFrame `plate_boundaries` that shows global plate boundaries. The "coordinates" column is a list of (latitude, longitude) locations along the boundaries.
```
plate_boundaries = gpd.read_file("../input/geospatial-learn-course-data/Plate_Boundaries/Plate_Boundaries/Plate_Boundaries.shp")
plate_boundaries['coordinates'] = plate_boundaries.apply(lambda x: [(b,a) for (a,b) in list(x.geometry.coords)], axis='columns')
plate_boundaries.drop('geometry', axis=1, inplace=True)
plate_boundaries.head()
```
Next, run the code cell below without changes to load the historical earthquake data into a DataFrame `earthquakes`.
```
# Load the data and print the first 5 rows
earthquakes = pd.read_csv("../input/geospatial-learn-course-data/earthquakes1970-2014.csv", parse_dates=["DateTime"])
earthquakes.head()
```
The code cell below visualizes the plate boundaries on a map. Use all of the earthquake data to add a heatmap to the same map, to determine whether earthquakes coincide with plate boundaries.
```
# Create a base map with plate boundaries
m_1 = folium.Map(location=[35,136], tiles='cartodbpositron', zoom_start=5)
for i in range(len(plate_boundaries)):
folium.PolyLine(locations=plate_boundaries.coordinates.iloc[i], weight=2, color='black').add_to(m_1)
# Your code here: Add a heatmap to the map
HeatMap(data=earthquakes[['Latitude', 'Longitude']], radius=10).add_to(m_1)
# Uncomment to see a hint
#q_1.a.hint()
# Show the map
embed_map(m_1, 'q_1.html')
# Get credit for your work after you have created a map
q_1.a.check()
# Uncomment to see our solution (your code may look different!)
q_1.a.solution()
```
So, given the map above, do earthquakes coincide with plate boundaries?
```
# View the solution (Run this code cell to receive credit!)
q_1.b.solution()
```
### 2) Is there a relationship between earthquake depth and proximity to a plate boundary in Japan?
You recently read that the depth of earthquakes tells us [important information](https://www.usgs.gov/faqs/what-depth-do-earthquakes-occur-what-significance-depth?qt-news_science_products=0#qt-news_science_products) about the structure of the earth. You're interested to see if there are any interesting global patterns, and you'd also like to understand how depth varies in Japan.
```
# Create a base map with plate boundaries
m_2 = folium.Map(location=[35,136], tiles='cartodbpositron', zoom_start=5)
for i in range(len(plate_boundaries)):
folium.PolyLine(locations=plate_boundaries.coordinates.iloc[i], weight=2, color='black').add_to(m_2)
# Your code here: Add a map to visualize earthquake depth
# Custom function to assign a color to each circle
def color_producer(val):
if val < 50:
return 'forestgreen'
elif val < 100:
return 'darkorange'
else:
return 'darkred'
# Add a map to visualize earthquake depth
for i in range(0,len(earthquakes)):
folium.Circle(
location=[earthquakes.iloc[i]['Latitude'], earthquakes.iloc[i]['Longitude']],
radius=2000,
color=color_producer(earthquakes.iloc[i]['Depth'])).add_to(m_2)
# Uncomment to see a hint
#q_2.a.hint()
# View the map
embed_map(m_2, 'q_2.html')
# Get credit for your work after you have created a map
q_2.a.check()
# Uncomment to see our solution (your code may look different!)
q_2.a.solution()
```
Can you detect a relationship between proximity to a plate boundary and earthquake depth? Does this pattern hold globally? In Japan?
```
# View the solution (Run this code cell to receive credit!)
q_2.b.solution()
```
### 3) Which prefectures have high population density?
Run the next code cell (without changes) to create a GeoDataFrame `prefectures` that contains the geographical boundaries of Japanese prefectures.
```
# GeoDataFrame with prefecture boundaries
prefectures = gpd.read_file("../input/geospatial-learn-course-data/japan-prefecture-boundaries/japan-prefecture-boundaries/japan-prefecture-boundaries.shp")
prefectures.set_index('prefecture', inplace=True)
prefectures.head()
```
The next code cell creates a DataFrame `stats` containing the population, area (in square kilometers), and population density (per square kilometer) for each Japanese prefecture. Run the code cell without changes.
```
# DataFrame containing population of each prefecture
population = pd.read_csv("../input/geospatial-learn-course-data/japan-prefecture-population.csv")
population.set_index('prefecture', inplace=True)
# Calculate area (in square kilometers) of each prefecture
area_sqkm = pd.Series(prefectures.geometry.to_crs(epsg=32654).area / 10**6, name='area_sqkm')
stats = population.join(area_sqkm)
# Add density (per square kilometer) of each prefecture
stats['density'] = stats["population"] / stats["area_sqkm"]
stats.head()
```
Use the next code cell to create a choropleth map to visualize population density.
```
# Create a base map
m_3 = folium.Map(location=[35,136], tiles='cartodbpositron', zoom_start=5)
# Your code here: create a choropleth map to visualize population density
Choropleth(geo_data=prefectures['geometry'].__geo_interface__,
data=stats['density'],
key_on="feature.id",
fill_color='YlGnBu',
legend_name='Population density (per square kilometer)'
).add_to(m_3)
# Uncomment to see a hint
# q_3.a.hint()
# View the map
embed_map(m_3, 'q_3.html')
# Get credit for your work after you have created a map
q_3.a.check()
# Uncomment to see our solution (your code may look different!)
q_3.a.solution()
```
Which three prefectures have relatively higher density than the others? Are they spread throughout the country, or all located in roughly the same geographical region? (*If you're unfamiliar with Japanese geography, you might find [this map](https://en.wikipedia.org/wiki/Prefectures_of_Japan) useful to answer the questions.)*
```
# View the solution (Run this code cell to receive credit!)
q_3.b.solution()
```
### 4) Which high-density prefecture is prone to high-magnitude earthquakes?
Create a map to suggest one prefecture that might benefit from earthquake reinforcement. Your map should visualize both density and earthquake magnitude.
```
# Create a base map
m_4 = folium.Map(location=[35,136], tiles='cartodbpositron', zoom_start=5)
# Your code here: create a map
def color_producer(magnitude):
if magnitude > 6.5:
return 'red'
else:
return 'green'
Choropleth(
geo_data=prefectures['geometry'].__geo_interface__,
data=stats['density'],
key_on="feature.id",
fill_color='BuPu',
legend_name='Population density (per square kilometer)').add_to(m_4)
for i in range(0,len(earthquakes)):
folium.Circle(
location=[earthquakes.iloc[i]['Latitude'], earthquakes.iloc[i]['Longitude']],
popup=("{} ({})").format(
earthquakes.iloc[i]['Magnitude'],
earthquakes.iloc[i]['DateTime'].year),
radius=earthquakes.iloc[i]['Magnitude']**5.5,
color=color_producer(earthquakes.iloc[i]['Magnitude'])).add_to(m_4)
# Uncomment to see a hint
#q_4.a.hint()
# View the map
embed_map(m_4, 'q_4.html')
# Get credit for your work after you have created a map
q_4.a.check()
# Uncomment to see our solution (your code may look different!)
q_4.a.solution()
```
Which prefecture do you recommend for extra earthquake reinforcement?
```
# View the solution (Run this code cell to receive credit!)
q_4.b.solution()
```
# Keep going
Learn how to convert names of places to geographic coordinates with **[geocoding](https://www.kaggle.com/alexisbcook/manipulating-geospatial-data)**. You'll also explore special ways to join information from multiple GeoDataFrames.
---
*Have questions or comments? Visit the [course discussion forum](https://www.kaggle.com/learn/geospatial-analysis/discussion) to chat with other learners.*
| github_jupyter |
# Classification with Neural Network for Yoga poses detection
## Import Dependencies
```
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import classification_report, log_loss, accuracy_score
from sklearn.model_selection import train_test_split
```
## Getting the data (images) and labels
```
# Data path
train_dir = 'pose_recognition_data/dataset'
# Getting the folder names to be able to label the data
Name=[]
for file in os.listdir(train_dir):
Name+=[file]
print(Name)
print(len(Name))
N=[]
for i in range(len(Name)):
N+=[i]
normal_mapping=dict(zip(Name,N))
reverse_mapping=dict(zip(N,Name))
def mapper(value):
return reverse_mapping[value]
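# e.g. mapper(0) returns Name[0], the folder/pose label that was assigned index 0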
dataset=[]
testset=[]
count=0
for file in os.listdir(train_dir):
t=0
path=os.path.join(train_dir,file)
for im in os.listdir(path):
image=load_img(os.path.join(path,im), grayscale=False, color_mode='rgb', target_size=(40,40))
image=img_to_array(image)
image=image/255.0
if t<60:
dataset+=[[image,count]]
else:
testset+=[[image,count]]
t+=1
count=count+1
data,labels0=zip(*dataset)
test,testlabels0=zip(*testset)
labels1=to_categorical(labels0)
labels=np.array(labels1)
# Transforming the data into numerical arrays
data=np.array(data)
test=np.array(test)
trainx,testx,trainy,testy=train_test_split(data,labels,test_size=0.2,random_state=44)
print(trainx.shape)
print(testx.shape)
print(trainy.shape)
print(testy.shape)
# Data augmentation
datagen = ImageDataGenerator(horizontal_flip=True,vertical_flip=True,rotation_range=20,zoom_range=0.2,
width_shift_range=0.2,height_shift_range=0.2,shear_range=0.1,fill_mode="nearest")
# Loading the pretrained model , here DenseNet201
pretrained_model3 = tf.keras.applications.DenseNet201(input_shape=(40,40,3),include_top=False,weights='imagenet',pooling='avg')
pretrained_model3.trainable = False
inputs3 = pretrained_model3.input
x3 = tf.keras.layers.Dense(128, activation='relu')(pretrained_model3.output)
outputs3 = tf.keras.layers.Dense(107, activation='softmax')(x3)
model = tf.keras.Model(inputs=inputs3, outputs=outputs3)
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
his=model.fit(datagen.flow(trainx,trainy,batch_size=32),validation_data=(testx,testy),epochs=50)
y_pred=model.predict(testx)
pred=np.argmax(y_pred,axis=1)
ground = np.argmax(testy,axis=1)
print(classification_report(ground,pred))
#Checking accuracy of our model
get_acc = his.history['accuracy']
value_acc = his.history['val_accuracy']
get_loss = his.history['loss']
validation_loss = his.history['val_loss']
epochs = range(len(get_acc))
plt.plot(epochs, get_acc, 'r', label='Accuracy of Training data')
plt.plot(epochs, value_acc, 'b', label='Accuracy of Validation data')
plt.title('Training vs validation accuracy')
plt.legend(loc=0)
plt.figure()
plt.show()
# Checking the loss of data
epochs = range(len(get_loss))
plt.plot(epochs, get_loss, 'r', label='Loss of Training data')
plt.plot(epochs, validation_loss, 'b', label='Loss of Validation data')
plt.title('Training vs validation loss')
plt.legend(loc=0)
plt.figure()
plt.show()
load_img("pose_recognition_data/dataset/adho mukha svanasana/95. downward-facing-dog-pose.png",target_size=(40,40))
image = load_img("pose_recognition_data/dataset/adho mukha svanasana/95. downward-facing-dog-pose.png",target_size=(40,40))
image=img_to_array(image)
image=image/255.0
prediction_image=np.array(image)
prediction_image= np.expand_dims(image, axis=0)
prediction=model.predict(prediction_image)
value=np.argmax(prediction)
move_name=mapper(value)
print("Prediction is {}.".format(move_name))
print(test.shape)
pred2=model.predict(test)
print(pred2.shape)
PRED=[]
for item in pred2:
value2=np.argmax(item)
PRED+=[value2]
ANS=testlabels0
accuracy=accuracy_score(ANS,PRED)
print(accuracy)
```
| github_jupyter |
TVAE Model
===========
In this guide we will go through a series of steps that will let you
discover functionalities of the `TVAE` model, including how to:
- Create an instance of `TVAE`.
- Fit the instance to your data.
- Generate synthetic versions of your data.
- Use `TVAE` to anonymize PII information.
- Specify hyperparameters to improve the output quality.
What is TVAE?
--------------
The `sdv.tabular.TVAE` model is based on the VAE-based Deep Learning
data synthesizer which was presented at the NeurIPS 2019 conference by
the paper titled [Modeling Tabular data using Conditional
GAN](https://arxiv.org/abs/1907.00503).
Let\'s now discover how to learn a dataset and later on generate
synthetic data with the same format and statistical properties by using
the `TVAE` class from SDV.
Quick Usage
-----------
We will start by loading one of our demo datasets, the
`student_placements`, which contains information about MBA students that
applied for placements during the year 2020.
<div class="alert alert-warning">
**Warning**
In order to follow this guide you need to have `tvae` installed on your
system. If you have not done it yet, please install `tvae` now by
executing the command `pip install sdv` in a terminal.
</div>
```
from sdv.demo import load_tabular_demo
data = load_tabular_demo('student_placements')
data.head()
```
As you can see, this table contains information about students which
includes, among other things:
- Their id and gender
- Their grades and specializations
- Their work experience
- The salary that they were offered
- The duration and dates of their placement
You will notice that there is data with the following characteristics:
- There are float, integer, boolean, categorical and datetime values.
- There are some variables that have missing data. In particular, all
the data related to the placement details is missing in the rows
where the student was not placed.
Let us use `TVAE` to learn this data and then sample synthetic data
about new students to see how well the model captures the characteristics
indicated above. In order to do this you will need to:
- Import the `sdv.tabular.TVAE` class and create an instance of it.
- Call its `fit` method passing our table.
- Call its `sample` method indicating the number of synthetic rows
that you want to generate.
```
from sdv.tabular import TVAE
model = TVAE()
model.fit(data)
```
<div class="alert alert-info">
**Note**
Notice that the model `fitting` process took care of transforming the
different fields using the appropriate [Reversible Data
Transforms](http://github.com/sdv-dev/RDT) to ensure that the data has a
format that the underlying TVAESynthesizer class can handle.
</div>
### Generate synthetic data from the model
Once the modeling has finished you are ready to generate new synthetic
data by calling the `sample` method from your model passing the number
of rows that we want to generate.
```
new_data = model.sample(num_rows=200)
```
This will return a table identical to the one which the model was fitted
on, but filled with new data which resembles the original one.
```
new_data.head()
```
<div class="alert alert-info">
**Note**
There are a number of other parameters in this method that you can use to
optimize the process of generating synthetic data. Use ``output_file_path``
to directly write results to a CSV file, ``batch_size`` to break up sampling
into smaller pieces & track their progress and ``randomize_samples`` to
determine whether to generate the same synthetic data every time.
See the <a href=https://sdv.dev/SDV/api_reference/tabular/api/sdv.tabular.ctgan.TVAE.sample>API Section</a>
for more details.
</div>
### Save and Load the model
In many scenarios it will be convenient to generate synthetic versions
of your data directly in systems that do not have access to the original
data source. For example, if you may want to generate testing data on
the fly inside a testing environment that does not have access to your
production database. In these scenarios, fitting the model with real
data every time that you need to generate new data is not feasible, so you
will need to fit a model in your production environment, save the fitted
model into a file, send this file to the testing environment and then
load it there to be able to `sample` from it.
Let\'s see how this process works.
#### Save and share the model
Once you have fitted the model, all you need to do is call its `save`
method passing the name of the file in which you want to save the model.
Note that the extension of the filename is not relevant, but we will be
using the `.pkl` extension to highlight that the serialization protocol
used is [pickle](https://docs.python.org/3/library/pickle.html).
```
model.save('my_model.pkl')
```
This will have created a file called `my_model.pkl` in the same
directory in which you are running SDV.
<div class="alert alert-info">
**Important**
If you inspect the generated file you will notice that its size is much
smaller than the size of the data that you used to generate it. This is
because the serialized model contains **no information about the
original data**, other than the parameters it needs to generate
synthetic versions of it. This means that you can safely share this
`my_model.pkl` file without the risk of disclosing any of your real
data!
</div>
#### Load the model and generate new data
The file you just generated can be sent over to the system where the
synthetic data will be generated. Once it is there, you can load it
using the `TVAE.load` method, and then you are ready to sample new data
from the loaded instance:
```
loaded = TVAE.load('my_model.pkl')
new_data = loaded.sample(num_rows=200)
```
<div class="alert alert-warning">
**Warning**
Notice that the system where the model is loaded needs to also have
`sdv` and `tvae` installed, otherwise it will not be able to load the
model and use it.
</div>
### Specifying the Primary Key of the table
One of the first things that you may have noticed when looking at the demo
data is that there is a `student_id` column which acts as the primary
key of the table, and which is supposed to have unique values. Indeed,
if we look at the number of times that each value appears, we see that
all of them appear at most once:
```
data.student_id.value_counts().max()
```
However, if we look at the synthetic data that we generated, we observe
that there are some values that appear more than once:
```
new_data[new_data.student_id == new_data.student_id.value_counts().index[0]]
```
This happens because the model was not notified at any point about the
fact that the `student_id` had to be unique, so when it generates new
data it will provoke collisions sooner or later. In order to solve this,
we can pass the argument `primary_key` to our model when we create it,
indicating the name of the column that is the index of the table.
```
model = TVAE(
primary_key='student_id'
)
model.fit(data)
new_data = model.sample(200)
new_data.head()
```
As a result, the model will learn that this column must be unique and
generate a unique sequence of values for the column:
```
new_data.student_id.value_counts().max()
```
### Anonymizing Personally Identifiable Information (PII)
There will be many cases where the data will contain Personally
Identifiable Information which we cannot disclose. In these cases, we
will want our Tabular Models to replace the information within these
fields with fake, simulated data that looks similar to the real one but
does not contain any of the original values.
Let\'s load a new dataset that contains a PII field, the
`student_placements_pii` demo, and try to generate synthetic versions of
it that do not contain any of the PII fields.
<div class="alert alert-info">
**Note**
The `student_placements_pii` dataset is a modified version of the
`student_placements` dataset with one new field, `address`, which
contains PII information about the students. Notice that this additional
`address` field has been simulated and does not correspond to data from
the real users.
</div>
```
data_pii = load_tabular_demo('student_placements_pii')
data_pii.head()
```
If we use our tabular model on this new data we will see how the
synthetic data that it generates discloses the addresses from the real
students:
```
model = TVAE(
primary_key='student_id',
)
model.fit(data_pii)
new_data_pii = model.sample(200)
new_data_pii.head()
```
More specifically, we can see how all the addresses that have been
generated actually come from the original dataset:
```
new_data_pii.address.isin(data_pii.address).sum()
```
In order to solve this, we can pass an additional argument
`anonymize_fields` to our model when we create the instance. This
`anonymize_fields` argument will need to be a dictionary that contains:
- The name of the field that we want to anonymize.
- The category of the field that we want to use when we generate fake
values for it.
The complete list of possible categories can be seen in the [Faker
Providers](https://faker.readthedocs.io/en/master/providers.html) page,
and it contains a huge list of concepts such as:
- name
- address
- country
- city
- ssn
- credit_card_number
- credit_card_expire
- credit_card_security_code
- email
- telephone
- \...
In this case, since the field is an address, we will pass a
dictionary indicating the category `address`
```
model = TVAE(
primary_key='student_id',
anonymize_fields={
'address': 'address'
}
)
model.fit(data_pii)
```
As a result, we can see how the real `address` values have been replaced
by other fake addresses:
```
new_data_pii = model.sample(200)
new_data_pii.head()
```
Which means that none of the original addresses can be found in the
sampled data:
```
data_pii.address.isin(new_data_pii.address).sum()
```
As we can see, these modifications changed the obtained results slightly,
but they did not introduce dramatic changes in performance.
### Conditional Sampling
As the name implies, conditional sampling allows us to sample from a conditional distribution using the `TVAE` model, which means we can generate only values that satisfy certain conditions. These conditional values can be passed to the `sample_conditions` method as a list of `sdv.sampling.Condition` objects or to the `sample_remaining_columns` method as a dataframe.
When specifying a `sdv.sampling.Condition` object, we can pass in the desired conditions as a dictionary, as well as specify the number of desired rows for that condition.
```
from sdv.sampling import Condition
condition = Condition({
'gender': 'M'
}, num_rows=5)
model.sample_conditions(conditions=[condition])
```
It's also possible to condition on multiple columns, such as `gender = M, 'experience_years': 0`.
```
condition = Condition({
'gender': 'M',
'experience_years': 0
}, num_rows=5)
model.sample_conditions(conditions=[condition])
```
In the `sample_remaining_columns` method, `conditions` is passed as a dataframe. In that case, the model will generate one sample for each row of the dataframe, sorted in the same order. Since the model already knows how many samples to generate, passing it as a parameter is unnecessary. For example, if we want to generate three samples where `gender = M` and three samples with `gender = F`, we can do the following:
```
import pandas as pd
conditions = pd.DataFrame({
'gender': ['M', 'M', 'M', 'F', 'F', 'F'],
})
model.sample_remaining_columns(conditions)
```
`TVAE` also supports conditioning on continuous values, as long as the values are within the range of seen numbers. For example, if all the values of the dataset are within 0 and 1, `TVAE` will not be able to set this value to 1000.
```
condition = Condition({
'degree_perc': 70.0
}, num_rows=5)
model.sample_conditions(conditions=[condition])
```
<div class="alert alert-info">
**Note**
Currently, conditional sampling works through a rejection sampling process, where
rows are sampled repeatedly until one that satisfies the conditions is found.
In case you are not able to sample enough valid rows, update the related parameters:
increasing ``max_tries`` or increasing ``batch_size_per_try``.
More information about these parameters can be found in the
<a href=https://sdv.dev/SDV/api_reference/tabular/api/sdv.tabular.ctgan.TVAE.sample_conditions.html> API section</a>.
If you have many conditions that cannot easily be satisfied, consider switching
to the <a href=https://sdv.dev/SDV/user_guides/single_table/gaussian_copula.html>GaussianCopula model</a>, which is able to handle conditional
sampling more efficiently.
</div>
### How do I specify constraints?
If you look closely at the data you may notice that some properties were
not completely captured by the model. For example, you may have seen
that sometimes the model produces an `experience_years` number greater
than `0` while also indicating that `work_experience` is `False`. These
types of properties are what we call `Constraints` and can also be
handled using `SDV`. For further details about them please visit the
[Handling Constraints](04_Handling_Constraints.ipynb) guide.
### Can I evaluate the Synthetic Data?
A very common question when someone starts using **SDV** to generate
synthetic data is: *\"How good is the data that I just generated?\"*
In order to answer this question, **SDV** has a collection of metrics
and tools that allow you to compare the *real* data that you provided and the
*synthetic* data that you generated using **SDV** or any other tool.
You can read more about this in the [Evaluating Synthetic Data Generators](
05_Evaluating_Synthetic_Data_Generators.ipynb) guide.
| github_jupyter |
```
!pip install kornia
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
import os
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
from scipy import io
import torch.utils.data
import scipy
from scipy.stats import entropy
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import math
from sklearn.metrics import mean_squared_error
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
!pip install -U spectral
!pip install pytorch_ssim
from pytorch_ssim import ssim
if not (os.path.isfile('/content/Salinas_corrected.mat')):
!wget https://github.com/gokriznastic/HybridSN/raw/master/data/Salinas_corrected.mat
if not (os.path.isfile('/content/Salinas_gt.mat')):
!wget https://github.com/gokriznastic/HybridSN/raw/master/data/Salinas_gt.mat
from torch.nn import Module, Sequential, Conv2d, ReLU,AdaptiveMaxPool2d, AdaptiveAvgPool2d, \
NLLLoss, BCELoss, CrossEntropyLoss, AvgPool2d, MaxPool2d, Parameter, Linear, Sigmoid, Softmax, Dropout, Embedding
from torch.nn import functional as F
import scipy.io as sio
def loadData():
data = sio.loadmat('Salinas_corrected.mat')['salinas_corrected']
labels = sio.loadmat('Salinas_gt.mat')['salinas_gt']
return data, labels
def padWithZeros(X, margin=2):
## From: https://github.com/gokriznastic/HybridSN/blob/master/Hybrid-Spectral-Net.ipynb
newX = np.zeros((X.shape[0] + 2 * margin, X.shape[1] + 2* margin, X.shape[2]))
x_offset = margin
y_offset = margin
newX[x_offset:X.shape[0] + x_offset, y_offset:X.shape[1] + y_offset, :] = X
return newX
def createImageCubes(X, y, windowSize=5, removeZeroLabels = True):
## From: https://github.com/gokriznastic/HybridSN/blob/master/Hybrid-Spectral-Net.ipynb
margin = int((windowSize - 1) / 2)
zeroPaddedX = padWithZeros(X, margin=margin)
# split patches
patchesData = np.zeros((X.shape[0] * X.shape[1], windowSize, windowSize, X.shape[2]), dtype=np.uint8)
patchesLabels = np.zeros((X.shape[0] * X.shape[1]), dtype=np.uint8)
patchIndex = 0
for r in range(margin, zeroPaddedX.shape[0] - margin):
for c in range(margin, zeroPaddedX.shape[1] - margin):
patch = zeroPaddedX[r - margin:r + margin + 1, c - margin:c + margin + 1]
patchesData[patchIndex, :, :, :] = patch
patchesLabels[patchIndex] = y[r-margin, c-margin]
patchIndex = patchIndex + 1
if removeZeroLabels:
patchesData = patchesData[patchesLabels>0,:,:,:]
patchesLabels = patchesLabels[patchesLabels>0]
patchesLabels -= 1
return patchesData, patchesLabels
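# Example (sketch): for the Salinas cube returned by loadData(),
#   X, y = loadData()                                 # X: (n_row, n_col, 204), y: (n_row, n_col)
#   patches, patch_labels = createImageCubes(X, y, windowSize=5)
# gives one 5x5x204 patch per labelled pixel, with zero-labelled pixels removed.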
class HyperSpectralDataset(Dataset):
"""HyperSpectral dataset."""
def __init__(self,data_url,label_url):
self.data = np.array(scipy.io.loadmat('/content/'+data_url.split('/')[-1])['salinas_corrected'])
self.targets = np.array(scipy.io.loadmat('/content/'+label_url.split('/')[-1])['salinas_gt'])
self.data, self.targets = createImageCubes(self.data,self.targets, windowSize=5)
self.data = torch.Tensor(self.data)
self.data = self.data.permute(0,3,1,2)
print(self.data.shape)
def __len__(self):
return self.data.shape[0]
def __getitem__(self, idx):
return self.data[idx,:,:,:] , self.targets[idx]
data_train = HyperSpectralDataset('Salinas_corrected.mat','Salinas_gt.mat')
train_loader = DataLoader(data_train, batch_size=16, shuffle=True)
print(data_train.__getitem__(0)[0].shape)
print(data_train.__len__())
class PAM_Module(Module):
""" Position attention module https://github.com/junfu1115/DANet/blob/master/encoding/nn/attention.py"""
#Ref from SAGAN
def __init__(self, in_dim):
super(PAM_Module, self).__init__()
self.chanel_in = in_dim
self.query_conv = Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
self.key_conv = Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
self.value_conv = Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
self.gamma = Parameter(torch.zeros(1))
self.softmax = Softmax(dim=-1)
def forward(self, x):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value + input feature
attention: B X (HxW) X (HxW)
"""
m_batchsize, C, height, width = x.size()
proj_query = self.query_conv(x).view(m_batchsize, -1, width*height).permute(0, 2, 1)
proj_key = self.key_conv(x).view(m_batchsize, -1, width*height)
energy = torch.bmm(proj_query, proj_key)
attention = self.softmax(energy)
proj_value = self.value_conv(x).view(m_batchsize, -1, width*height)
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, height, width)
out = self.gamma*out + x
#out = F.avg_pool2d(out, out.size()[2:4])
return out
class CAM_Module(Module):
""" Channel attention module https://github.com/junfu1115/DANet/blob/master/encoding/nn/attention.py"""
def __init__(self):
super(CAM_Module, self).__init__()
#self.chanel_in = in_dim
self.gamma = Parameter(torch.zeros(1))
self.softmax = Softmax(dim=-1)
def forward(self,x):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value + input feature
attention: B X C X C
"""
m_batchsize, C, height, width = x.size()
proj_query = x.view(m_batchsize, C, -1)
proj_key = x.view(m_batchsize, C, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy)-energy
attention = self.softmax(energy_new)
proj_value = x.view(m_batchsize, C, -1)
out = torch.bmm(attention, proj_value)
out = out.view(m_batchsize, C, height, width)
out = self.gamma*out + x
#out = F.avg_pool2d(out, out.size()[2:4])
return out
class RecNet(nn.Module):
def __init__(self):
super(RecNet, self).__init__()
self.conv3d_1 = nn.Sequential(nn.Conv3d(1, 128, (1, 3, 3), 1),
nn.BatchNorm3d(128),
nn.PReLU())
self.conv3d_2 = nn.Sequential(nn.Conv3d(128, 64, (1, 3, 3), 1),
nn.BatchNorm3d(64),
nn.PReLU())
self.pool3d = nn.MaxPool3d((1, 1, 1), (1, 1, 1))
self.deconv3d_1 = nn.Sequential(nn.ConvTranspose3d(64, 128, (1, 3, 3), 1),
nn.BatchNorm3d(128),
nn.PReLU())
self.deconv3d_2 = nn.Sequential(nn.ConvTranspose3d(128, 1, (1, 3, 3), 1),
nn.BatchNorm3d(1))
def forward(self, x):
x = self.conv3d_1(x)
x = self.conv3d_2(x)
x = self.pool3d(x)
x = self.deconv3d_1(x)
x = self.deconv3d_2(x)
return x.squeeze(1)
class DANet(Module):
def __init__(self):
super(DANet,self).__init__()
self.PAM_Module = PAM_Module(204)
self.CAM_Module = CAM_Module()
self.RecNet = RecNet()
def forward(self,x):
P = self.PAM_Module(x)
C = self.CAM_Module(x)
#B,Ch,H,W = P.size()
J = P + C
J = J.unsqueeze(1)
ret = self.RecNet(J)
return ret
danet_model = DANet().to(device)
from torchsummary import summary
summary(danet_model,input_size=(204,5,5))
!nvidia-smi
#model = BSNET_Conv().to(device)
optimizer = optim.SGD(danet_model.parameters(), lr=0.005, momentum=0.9)
top = 20
import skimage
import kornia
global bsnlist
ssim = kornia.losses.SSIM(5, reduction='none')
psnr = kornia.losses.PSNRLoss(2500)
from skimage import measure
ssim_list = []
psnr_list = []
l1_list = []
channel_weight_list = []
def train(epoch):
danet_model.train()
ENTROPY = torch.zeros(204)
for batch_idx, (data, __) in enumerate(train_loader):
data = data.to(device)
optimizer.zero_grad()
output = danet_model(data)
loss = F.l1_loss(output,data)
loss.backward()
optimizer.step()
D = output.detach().cpu().numpy()
for i in range(0,204):
ENTROPY[i]+=skimage.measure.shannon_entropy(D[:,i,:,:])
if batch_idx % (0.5*len(train_loader)) == 0:
L1 = loss.item()
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader),L1))
l1_list.append(L1)
ssim_val = torch.mean(ssim(data,output))
print("SSIM: {}".format(ssim_val))
ssim_list.append(ssim_val)
psnr_val = psnr(data,output)
print("PSNR: {}".format(psnr_val))
psnr_list.append(psnr_val)
ENTROPY = np.array(ENTROPY)
bsnlist = np.asarray(ENTROPY.argsort()[-top:][::-1])
print('Top {} bands with Entropy ->'.format(top),list(bsnlist))
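# Band ranking rationale: ENTROPY accumulates the Shannon entropy of each reconstructed
# band over the epoch, so the 'top' bands are those whose reconstructions carry the most
# information and are kept as the selected band subset.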
for epoch in range(0, 10):
train(epoch)
x,xx,xxx = psnr_list,ssim_list,l1_list
print(len(x)),print(len(xx)),print(len(xxx))
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
np.save('psnr_SV.npy',np.asarray(x))
np.save('ssim_SV.npy',np.asarray(xx))
np.save('l1_SV.npy',np.asarray(xxx))
plt.figure(figsize=(20,10))
plt.xlabel('Epoch',fontsize=50)
plt.ylabel('PSNR',fontsize=50)
plt.xticks(fontsize=40)
plt.yticks(np.arange(0,100 , 10.0),fontsize=40)
plt.ylim(10,100)
plt.plot(x,linewidth=5.0)
plt.savefig('PSNR-SV.pdf')
plt.show()
plt.figure(figsize=(20,10))
plt.xlabel('Epoch',fontsize=50)
plt.ylabel('SSIM',fontsize=50)
plt.xticks(fontsize=40)
plt.yticks(fontsize=40)
plt.ylim(0,0.6)
plt.plot(xx,linewidth=5.0)
plt.savefig('SSIM-SV.pdf')
plt.show()
plt.figure(figsize=(20,10))
plt.xlabel('Epoch',fontsize=50)
plt.ylabel('L1 Reconstruction loss',fontsize=50)
plt.xticks(fontsize=40)
plt.yticks(fontsize=40)
plt.ylim(0,160)
plt.plot(xxx,linewidth=5.0)
plt.savefig('L1-SV.pdf')
plt.show()
from google.colab import files
files.download('SSIM-SV.pdf')
files.download('PSNR-SV.pdf')
files.download('L1-SV.pdf')
!wget https://raw.githubusercontent.com/ucalyptus/Double-Branch-Dual-Attention-Mechanism-Network/master/SV.csv
dabsrecnet = [24, 42, 63, 77, 57, 49, 35, 68, 64, 69, 50, 44, 43, 15, 90, 37, 48, 72, 54, 79]
bsnetconv = [116,153,19,189,97,179,171,141,95,144,142,46,104,203,91,18,176,108,150,194]
pca = [169,67,168,63,68,78,167,166,165,69,164,163,77,162,70,62,160,161,76,158]
spabs = [0,79,166,80,203,78,77,76,55,81,97,5,23,75,2,82,56,74,143,85]
snmf = [24,1,105,196,203,0,39,116,38,60,89,104,198,147,158,3,146,4,93,88]
issc = [141,182,106,147,107,146,108,202,203,109,145,148,112,201,110,113,144,149,105,154]
def MeanSpectralDivergence(band_subset):
n_row, n_column, n_band = band_subset.shape
N = n_row * n_column
hist = []
for i in range(n_band):
hist_, _ = np.histogram(band_subset[:, :, i], 256)
hist.append(hist_ / N)
hist = np.asarray(hist)
hist[np.nonzero(hist <= 0)] = 1e-20
# entropy_lst = entropy(hist.transpose())
info_div = 0
# band_subset[np.nonzero(band_subset <= 0)] = 1e-20
for b_i in range(n_band):
for b_j in range(n_band):
band_i = hist[b_i].reshape(-1)/np.sum(hist[b_i])
band_j = hist[b_j].reshape(-1)/np.sum(hist[b_j])
entr_ij = entropy(band_i, band_j)
entr_ji = entropy(band_j, band_i)
entr_sum = entr_ij + entr_ji
info_div += entr_sum
msd = info_div * 2 / (n_band * (n_band - 1))
return msd
def MeanSpectralAngle(band_subset):
"""
Spectral Angle (SA) is defined as the angle between two bands.
We use Mean SA (MSA) to quantify the redundancy among a band set.
i-th band B_i, and j-th band B_j,
SA = arccos [B_i^T * B_j / ||B_i|| * ||B_j||]
MSA = 2/n*(n-1) * sum(SA_ij)
Ref:
[1] GONG MAOGUO, ZHANG MINGYANG, YUAN YUAN. Unsupervised Band Selection Based on Evolutionary Multiobjective
Optimization for Hyperspectral Images [J]. IEEE Transactions on Geoscience and Remote Sensing, 2016, 54(1): 544-57.
:param band_subset: with shape (n_row, n_clm, n_band)
:return:
"""
n_row, n_column, n_band = band_subset.shape
spectral_angle = 0
for i in range(n_band):
for j in range(n_band):
band_i = band_subset[i].reshape(-1)
band_j = band_subset[j].reshape(-1)
lower = np.sum(band_i ** 2) ** 0.5 * np.sum(band_j ** 2) ** 0.5
higher = np.dot(band_i, band_j)
if higher / lower > 1.:
angle_ij = np.arccos(1. - 1e-16)
# print('1-higher-lower', higher - lower)
# elif higher / lower < -1.:
# angle_ij = np.arccos(1e-8 - 1.)
# print('2-higher-lower', higher - lower)
else:
angle_ij = np.arccos(higher / lower)
spectral_angle += angle_ij
msa = spectral_angle * 2 / (n_band * (n_band - 1))
return msa
def MSA(bsnlist):
X, _ = loadData()
print('[',end=" ")
for a in range(2,len(bsnlist)):
band_subset_list = []
for i in bsnlist[:a]:
band_subset_list.append(X[:,:,i])
band_subset = np.array(band_subset_list)
band_subset = np.stack(band_subset,axis =2)
print(MeanSpectralAngle(band_subset),end=" ")
if a!= len(bsnlist)-1:
print(",",end=" ")
print(']')
MSA(dabsrecnet)
MSA(bsnetconv)
MSA(pca)
MSA(spabs)
MSA(snmf)
MSA(issc)
def MSD(bsnlist):
X, _ = loadData()
print('[',end=" ")
for a in range(2,len(bsnlist)):
band_subset_list = []
for i in bsnlist[:a]:
band_subset_list.append(X[:,:,i])
band_subset = np.array(band_subset_list)
band_subset = np.stack(band_subset,axis =2)
print(MeanSpectralDivergence(band_subset),end=" ")
if a!= len(bsnlist)-1:
print(",",end=" ")
print(']')
MSD(dabsrecnet)
MSD(bsnetconv)
MSD(pca)
MSD(spabs)
MSD(snmf)
MSD(issc)
import skimage
from skimage import measure
def sumentr(band_subset,X):
nbands = len(band_subset)
ENTROPY=np.ones(nbands)
for i in range(0,len(band_subset)):
ENTROPY[i]+=skimage.measure.shannon_entropy(X[:,:,band_subset[i]])
return np.sum(ENTROPY)
def EntropySum(bsnlist):
X, _ = loadData()
print('[',end=" ")
for a in range(2,len(bsnlist)):
band_subset_list = []
for i in bsnlist[:a]:
band_subset_list.append(X[:,:,i])
band_subset = np.array(band_subset_list)
band_subset = np.stack(band_subset,axis =2)
print(sumentr(bsnlist[:a],X),end=" ")
if a!= len(bsnlist)-1:
print(",",end=" ")
print(']')
EntropySum(dabsrecnet)
EntropySum(bsnetconv)
EntropySum(pca)
EntropySum(spabs)
EntropySum(snmf)
EntropySum(issc)
if not (os.path.isfile('/content/SV.csv')):
!wget https://raw.githubusercontent.com/ucalyptus/Double-Branch-Dual-Attention-Mechanism-Network/master/SV.csv
import pandas as pd
import re
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv("/content/SV.csv")
import matplotlib.pyplot as plt
X, _ = loadData()
n_row,n_column,n_band= X.shape
N = n_row * n_column
hist = []
Entropy = []
for i in range(n_band):
hist_, _ = np.histogram(X[:, :, i], 256)
hist.append(hist_ / N)
band_i = hist[i].reshape(-1)/np.sum(hist[i])
entr_i = entropy(band_i)
Entropy.append(entr_i)
for i in range(0,len(df['Selected Bands'])):
df['Selected Bands'][i] = re.findall('[0-9]+', df['Selected Bands'][i])
df['Selected Bands'][i] = [int(k) for k in df['Selected Bands'][i]]
meth = ["BS-Net-Conv","SpaBS","PCA","SNMF","DARecNet-BS"]
cols = ['b','y','g','r','m']
fig1,(ax1,ax2) = plt.subplots(2,sharex='col',figsize=(37,20))
ax1.grid(True)
ax1.yaxis.grid(False)
ax1.set_xticks([0,7,15,30,45,60,75,90,105,120,135,150,165,180,195,205])
ax1.yaxis.set_tick_params(labelsize=55)
plt.ylabel(meth)
scatar = []
for i in range(0,len(meth)):
ax1.hlines(y = meth[i],xmin=min(df['Selected Bands'][i]),xmax=max(df['Selected Bands'][i]),colors=cols[i],linewidth=7)
SCATTER = ax1.scatter(x=df['Selected Bands'][i],y = [i]*20,edgecolors=cols[i-1],linewidths=14)
scatar.append(SCATTER)
ax2.grid(True)
ax2.yaxis.grid(False)
ax2.set_yticks([1,2,3,4,5])
ax2.set_ylabel("Value of Entropy",fontsize=55)
ax2.set_xlabel("Spectral Band",fontsize=55)
ax2.xaxis.set_tick_params(labelsize=55)
ax2.yaxis.set_tick_params(labelsize=55)
ax2.plot(Entropy,linewidth=7)
plt.savefig('Entropy_SV.pdf')
```
| github_jupyter |
## _*H2 ground state energy computation using Iterative QPE*_
This notebook demonstrates using Qiskit Chemistry to plot graphs of the ground state energy of the Hydrogen (H2) molecule over a range of inter-atomic distances using IQPE (Iterative Quantum Phase Estimation) algorithm. It is compared to the same energies as computed by the ExactEigensolver
This notebook populates a dictionary that is a programmatic representation of an input file in order to drive the qiskit_chemistry stack. Such a dictionary can be manipulated programmatically, and this is indeed the case here, where we alter the molecule supplied to the driver in each loop.
This notebook has been written to use the PYSCF chemistry driver. See the PYSCF chemistry driver readme if you need to install the external PySCF library that this driver requires.
```
import numpy as np
import pylab
from qiskit import LegacySimulators
from qiskit_chemistry import QiskitChemistry
import time
# Input dictionary to configure Qiskit Chemistry for the chemistry problem.
qiskit_chemistry_dict = {
'driver': {'name': 'PYSCF'},
'PYSCF': {'atom': '', 'basis': 'sto3g'},
'operator': {'name': 'hamiltonian', 'transformation': 'full', 'qubit_mapping': 'parity'},
'algorithm': {'name': ''},
'initial_state': {'name': 'HartreeFock'},
}
molecule = 'H .0 .0 -{0}; H .0 .0 {0}'
algorithms = [
{
'name': 'IQPE',
'num_iterations': 16,
'num_time_slices': 3000,
'expansion_mode': 'trotter',
'expansion_order': 1,
},
{
'name': 'ExactEigensolver'
}
]
backends = [
LegacySimulators.get_backend('qasm_simulator'),
None
]
start = 0.5 # Start distance
by = 0.5 # How much to increase distance by
steps = 20 # Number of steps to increase by
energies = np.empty([len(algorithms), steps+1])
hf_energies = np.empty(steps+1)
distances = np.empty(steps+1)
import concurrent.futures
import multiprocessing as mp
import copy
def subroutine(i, j, qiskit_chemistry_dict, d, backend, algorithm):
solver = QiskitChemistry()
qiskit_chemistry_dict['PYSCF']['atom'] = molecule.format(d/2)
qiskit_chemistry_dict['algorithm'] = algorithm
result = solver.run(qiskit_chemistry_dict, backend=backend)
return i, j, d, result['energy'], result['hf_energy']
start_time = time.time()
max_workers = max(4, mp.cpu_count())
with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
futures = []
for j in range(len(algorithms)):
algorithm = algorithms[j]
backend = backends[j]
for i in range(steps+1):
d = start + i*by/steps
future = executor.submit(
subroutine,
i,
j,
copy.deepcopy(qiskit_chemistry_dict),
d,
backend,
algorithm
)
futures.append(future)
for future in concurrent.futures.as_completed(futures):
i, j, d, energy, hf_energy = future.result()
energies[j][i] = energy
hf_energies[i] = hf_energy
distances[i] = d
print(' --- complete')
print('Distances: ', distances)
print('Energies:', energies)
print('Hartree-Fock energies:', hf_energies)
print("--- %s seconds ---" % (time.time() - start_time))
pylab.plot(distances, hf_energies, label='Hartree-Fock')
for j in range(len(algorithms)):
pylab.plot(distances, energies[j], label=algorithms[j]['name'])
pylab.xlabel('Interatomic distance')
pylab.ylabel('Energy')
pylab.title('H2 Ground State Energy')
pylab.legend(loc='upper right')
pylab.show()
pylab.plot(distances, np.subtract(hf_energies, energies[1]), label='Hartree-Fock')
pylab.plot(distances, np.subtract(energies[0], energies[1]), label='IQPE')
pylab.xlabel('Interatomic distance')
pylab.ylabel('Energy')
pylab.title('Energy difference from ExactEigensolver')
pylab.legend(loc='upper right')
pylab.show()
```
| github_jupyter |
# ML Pipeline Preparation
Follow the instructions below to help you create your ML pipeline.
### 1. Import libraries and load data from database.
- Import Python libraries
- Load dataset from database with [`read_sql_table`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_table.html)
- Define feature and target variables X and Y
```
# import necessary libraries
import pandas as pd
import numpy as np
import os
import pickle
import nltk
import re
from sqlalchemy import create_engine
import sqlite3
from nltk.tokenize import word_tokenize, RegexpTokenizer
from nltk.stem import WordNetLemmatizer
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier,AdaBoostClassifier
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer, accuracy_score, f1_score, fbeta_score, classification_report
from sklearn.metrics import precision_recall_fscore_support
from scipy.stats import hmean
from scipy.stats.mstats import gmean
from nltk.corpus import stopwords
nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger', 'stopwords'])
import matplotlib.pyplot as plt
%matplotlib inline
# load data from database
engine = create_engine('sqlite:///InsertDatabaseName.db')
df = pd.read_sql("SELECT * FROM InsertTableName", engine)
df.head()
# View types of unque 'genre' attribute
genre_types = df.genre.value_counts()
genre_types
# check for attributes with missing values/elements
df.isnull().mean().head()
# drop rows with missing values (note: dropna() is not applied in place, so df keeps its NaNs)
df.dropna()
df.head()
# load data from database with 'X' as attributes for message column
X = df["message"]
# load data from database with 'Y' attributes for the last 36 columns
Y = df.drop(['id', 'message', 'original', 'genre'], axis = 1)
```
### 2. Write a tokenization function to process your text data
```
# Proprocess text by removing unwanted properties
def tokenize(text):
'''
input:
text: input text data containing attributes
output:
clean_tokens: cleaned text without unwanted texts
'''
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
detected_urls = re.findall(url_regex, text)
for url in detected_urls:
text = text.replace(url, "urlplaceholder")
# take out all punctuation while tokenizing
tokenizer = RegexpTokenizer(r'\w+')
tokens = tokenizer.tokenize(text)
# lemmatize as shown in the lesson
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
```
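A quick sanity check on a made-up message (the text below is only an illustration): URLs should be replaced with `urlplaceholder`, punctuation dropped, and tokens lemmatized and lower-cased.
```
sample = "Please, we need tents and water at http://example.com as soon as possible!"
print(tokenize(sample))
```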
### 3. Build a machine learning pipeline
This machine pipeline should take in the `message` column as input and output classification results on the other 36 categories in the dataset. You may find the [MultiOutputClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html) helpful for predicting multiple target variables.
```
pipeline = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(RandomForestClassifier())),
])
# Visualize model parameters
pipeline.get_params()
```
### 4. Train pipeline
- Split data into train and test sets
- Train pipeline
```
# use sklearn split function to split dataset into train and 20% test sets
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2)
# Train pipeline using RandomForest Classifier algorithm
pipeline.fit(X_train, y_train)
```
### 5. Test your model
Report the f1 score, precision and recall for each output category of the dataset. You can do this by iterating through the columns and calling sklearn's classification_report on each.
```
# Output result metrics of trained RandomForest Classifier algorithm
def evaluate_model(model, X_test, y_test):
'''
Input:
        model: trained model (e.g. the RandomForest pipeline)
        X_test: test set features
        y_test: test set target variables
    Output:
        None:
        Prints precision, recall, f1-score and support for each output category
'''
y_pred = model.predict(X_test)
for item, col in enumerate(y_test):
print(col)
print(classification_report(y_test[col], y_pred[:, item]))
# classification_report to display model precision, recall, f1-score, support
evaluate_model(pipeline, X_test, y_test)
```
### 6. Improve your model
Use grid search to find better parameters.
```
parameters = {'clf__estimator__max_depth': [10, 50, None],
'clf__estimator__min_samples_leaf':[2, 5, 10]}
cv = GridSearchCV(pipeline, parameters)
```
### 7. Test your model
Show the accuracy, precision, and recall of the tuned model.
Since this project focuses on code quality, process, and pipelines, there is no minimum performance metric needed to pass. However, make sure to fine tune your models for accuracy, precision and recall to make your project stand out - especially for your portfolio!
```
# Train pipeline using the improved model
cv.fit(X_train, y_train)
# # classification_report to display model precision, recall, f1-score, support
evaluate_model(cv, X_test, y_test)
cv.best_estimator_
```
### 8. Try improving your model further. Here are a few ideas:
* try other machine learning algorithms
* add other features besides the TF-IDF
```
# Improve model using DecisionTree Classifier
new_pipeline = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(DecisionTreeClassifier()))
])
# Train improved model
new_pipeline.fit(X_train, y_train)
# Run result metric score display function
evaluate_model(new_pipeline, X_test, y_test)
```
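As a hedged illustration of the second idea above (adding features besides the TF-IDF), the sketch below defines a hypothetical `TextLengthExtractor` transformer and combines it with the existing text pipeline through `FeatureUnion`; the class name and the chosen feature are assumptions for illustration, not part of the original project.
```
# Illustrative sketch only: a hypothetical extra feature combined via FeatureUnion
class TextLengthExtractor(BaseEstimator, TransformerMixin):
    '''Outputs the number of tokens in each message as a single numeric feature.'''
    def fit(self, X, y=None):
        return self

    def transform(self, X):
        return pd.DataFrame(pd.Series(X).apply(lambda text: len(tokenize(text))))

feature_pipeline = Pipeline([
    ('features', FeatureUnion([
        ('text_pipeline', Pipeline([
            ('vect', CountVectorizer(tokenizer=tokenize)),
            ('tfidf', TfidfTransformer()),
        ])),
        ('text_length', TextLengthExtractor()),
    ])),
    ('clf', MultiOutputClassifier(RandomForestClassifier())),
])
# feature_pipeline.fit(X_train, y_train)   # train and evaluate like the pipelines above
# evaluate_model(feature_pipeline, X_test, y_test)
```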
### 9. Export your model as a pickle file
```
# save a copy of the trained model to disk
trained_model_file = 'trained_model.sav'
pickle.dump(cv, open(trained_model_file, 'wb'))
```
### 10. Use this notebook to complete `train.py`
Use the template file attached in the Resources folder to write a script that runs the steps above to create a database and export a model based on a new dataset specified by the user.
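A minimal sketch of how the steps above might be organized into such a script is shown below; the function names, command-line arguments, and table name are assumptions for illustration and should be adapted to the provided template. It reuses the imports and the `tokenize`/`evaluate_model` helpers defined above.
```
# Illustrative skeleton only -- adapt to the template in the Resources folder
import sys

def load_data(database_filepath):
    '''Load the cleaned table from the SQLite database and split it into X and Y.'''
    engine = create_engine('sqlite:///' + database_filepath)
    df = pd.read_sql_table('InsertTableName', engine)   # table name is a placeholder
    X = df['message']
    Y = df.drop(['id', 'message', 'original', 'genre'], axis=1)
    return X, Y

def build_model():
    '''Return the text-processing + classification pipeline defined earlier.'''
    return Pipeline([
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(RandomForestClassifier())),
    ])

def main():
    database_filepath, model_filepath = sys.argv[1:3]
    X, Y = load_data(database_filepath)
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
    model = build_model()
    model.fit(X_train, y_train)
    evaluate_model(model, X_test, y_test)
    pickle.dump(model, open(model_filepath, 'wb'))

if __name__ == '__main__':
    main()
```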
| github_jupyter |
```
#using tensorflow kernel
import tensorflow as tf
print(tf.__version__)
!pip list | grep waymo
!pip list | grep torch
!nvidia-smi
import tensorflow.compat.v1 as tf
import math
import numpy as np
import itertools
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from waymo_open_dataset.utils import range_image_utils
from waymo_open_dataset.utils import transform_utils
from waymo_open_dataset.utils import frame_utils
from waymo_open_dataset import dataset_pb2 as open_dataset
#tf.enable_eager_execution()
import os
import argparse
from pathlib import Path
import cv2
import json
import utils
from PIL import Image
from glob import glob
import sys
import datetime
import os
WAYMO_CLASSES = ['unknown', 'vehicle', 'pedestrian', 'sign', 'cyclist']
def get_camera_labels(frame):
if frame.camera_labels:
return frame.camera_labels
return frame.projected_lidar_labels
def extract_segment_frontcamera(tfrecord_files, out_dir, step):
images = []
annotations = []
categories = [{'id': i, 'name': n} for i, n in enumerate(WAYMO_CLASSES)][1:]
image_globeid=0
for segment_path in tfrecord_files:
print(f'extracting {segment_path}')
segment_path=Path(segment_path)#convert str to Path object
segment_name = segment_path.name
print(segment_name)
segment_out_dir = out_dir # remove segment_name as one folder, duplicate with image name
# segment_out_dir = out_dir / segment_name
# print(segment_out_dir)#output path + segment_name(with tfrecord)
# segment_out_dir.mkdir(parents=True, exist_ok=True)
dataset = tf.data.TFRecordDataset(str(segment_path), compression_type='')
for i, data in enumerate(dataset):
if i % step != 0:
continue
print('.', end='', flush=True)
frame = open_dataset.Frame()
frame.ParseFromString(bytearray(data.numpy()))
#get one frame
context_name = frame.context.name
frame_timestamp_micros = str(frame.timestamp_micros)
for index, image in enumerate(frame.images):
if image.name != 1: #Only use front camera
continue
camera_name = open_dataset.CameraName.Name.Name(image.name)
image_globeid = image_globeid + 1
#print("camera name:", camera_name)
img = tf.image.decode_jpeg(image.image).numpy()
image_name='_'.join([frame_timestamp_micros, camera_name])#image name
image_id = '/'.join([context_name, image_name]) #using "/" join, context_name is the folder
#New: do not use sub-folder
image_id = '_'.join([context_name, image_name])
#image_id = '/'.join([context_name, frame_timestamp_micros, camera_name]) #using "/" join
file_name = image_id + '.jpg'
#print(file_name)
filepath = out_dir / file_name
#filepath = segment_out_dir / file_name
#print('Image output path',filepath)
filepath.parent.mkdir(parents=True, exist_ok=True)
#images.append(dict(file_name=file_name, id=image_id, height=img.shape[0], width=img.shape[1], camera_name=camera_name))#new add camera_name
images.append(dict(file_name=file_name, id=image_globeid, height=img.shape[0], width=img.shape[1], camera_name=camera_name))#new add camera_name
print("current image id: ", image_globeid)
cv2.imwrite(str(filepath), img)
for camera_labels in get_camera_labels(frame):
# Ignore camera labels that do not correspond to this camera.
if camera_labels.name == image.name:
# Iterate over the individual labels.
for label in camera_labels.labels:
# object bounding box.
width = int(label.box.length)
height = int(label.box.width)
x = int(label.box.center_x - 0.5 * width)
y = int(label.box.center_y - 0.5 * height)
area = width * height
annotations.append(dict(image_id=image_globeid,
bbox=[x, y, width, height], area=area, category_id=label.type,
object_id=label.id,
tracking_difficulty_level=2 if label.tracking_difficulty_level == 2 else 1,
detection_difficulty_level=2 if label.detection_difficulty_level == 2 else 1))
with (segment_out_dir / 'annotations.json').open('w') as f:
for i, anno in enumerate(annotations):
            anno['id'] = i  # assign a unique id to each annotation
json.dump(dict(images=images, annotations=annotations, categories=categories), f)
def extract_segment_allcamera(tfrecord_files, out_dir, step):
images = []
annotations = []
categories = [{'id': i, 'name': n} for i, n in enumerate(WAYMO_CLASSES)][1:]
image_globeid=0
for segment_path in tfrecord_files:
print(f'extracting {segment_path}')
segment_path=Path(segment_path)#convert str to Path object
segment_name = segment_path.name
print(segment_name)
segment_out_dir = out_dir # remove segment_name as one folder, duplicate with image name
# segment_out_dir = out_dir / segment_name
# print(segment_out_dir)#output path + segment_name(with tfrecord)
# segment_out_dir.mkdir(parents=True, exist_ok=True)
dataset = tf.data.TFRecordDataset(str(segment_path), compression_type='')
for i, data in enumerate(dataset):
if i % step != 0:
continue
print('.', end='', flush=True)
frame = open_dataset.Frame()
frame.ParseFromString(bytearray(data.numpy()))
#get one frame
context_name = frame.context.name
frame_timestamp_micros = str(frame.timestamp_micros)
for index, image in enumerate(frame.images):
camera_name = open_dataset.CameraName.Name.Name(image.name)
image_globeid = image_globeid + 1
#print("camera name:", camera_name)
img = tf.image.decode_jpeg(image.image).numpy()
image_name='_'.join([frame_timestamp_micros, camera_name])#image name
image_id = '/'.join([context_name, image_name]) #using "/" join, context_name is the folder
#New: use sub-folder
#image_id = '_'.join([context_name, image_name])
image_id = '/'.join([context_name, frame_timestamp_micros, camera_name]) #using "/" join
file_name = image_id + '.jpg'
#print(file_name)
filepath = out_dir / file_name
#filepath = segment_out_dir / file_name
#print('Image output path',filepath)
filepath.parent.mkdir(parents=True, exist_ok=True)
#images.append(dict(file_name=file_name, id=image_id, height=img.shape[0], width=img.shape[1], camera_name=camera_name))#new add camera_name
images.append(dict(file_name=file_name, id=image_globeid, height=img.shape[0], width=img.shape[1], camera_name=camera_name))#new add camera_name
print("current image id: ", image_globeid)
cv2.imwrite(str(filepath), img)
for camera_labels in get_camera_labels(frame):
# Ignore camera labels that do not correspond to this camera.
if camera_labels.name == image.name:
# Iterate over the individual labels.
for label in camera_labels.labels:
# object bounding box.
width = int(label.box.length)
height = int(label.box.width)
x = int(label.box.center_x - 0.5 * width)
y = int(label.box.center_y - 0.5 * height)
area = width * height
annotations.append(dict(image_id=image_globeid,
bbox=[x, y, width, height], area=area, category_id=label.type,
object_id=label.id,
tracking_difficulty_level=2 if label.tracking_difficulty_level == 2 else 1,
detection_difficulty_level=2 if label.detection_difficulty_level == 2 else 1))
with (segment_out_dir / 'annotations.json').open('w') as f:
for i, anno in enumerate(annotations):
            anno['id'] = i  # assign a unique id to each annotation
json.dump(dict(images=images, annotations=annotations, categories=categories), f)
def extract_segment_allfrontcamera(PATH,folderslist, out_dir, step):
#folderslist = ["training_0031","training_0030","training_0029","training_0028","training_0027","training_0026"]
#PATH='/data/cmpe295-liu/Waymo'
images = []
annotations = []
categories = [{'id': i, 'name': n} for i, n in enumerate(WAYMO_CLASSES)][1:]
image_globeid=0
for index in range(len(folderslist)):
foldername=folderslist[index]
print("Folder name:", foldername)
tfrecord_files = glob(os.path.join(PATH, foldername, "*.tfrecord")) #[path for path in glob(os.path.join(PATH, foldername, "*.tfrecord"))]
print("Num of tfrecord file:", len(tfrecord_files))
#print(tfrecord_files)
for segment_path in tfrecord_files:
print(f'extracting {segment_path}')
segment_path=Path(segment_path)#convert str to Path object
segment_name = segment_path.name
print(segment_name)
segment_out_dir = out_dir # remove segment_name as one folder, duplicate with image name
# segment_out_dir = out_dir / segment_name
# print(segment_out_dir)#output path + segment_name(with tfrecord)
# segment_out_dir.mkdir(parents=True, exist_ok=True)
dataset = tf.data.TFRecordDataset(str(segment_path), compression_type='')
for i, data in enumerate(dataset):
if i % step != 0:
continue
print('.', end='', flush=True)
frame = open_dataset.Frame()
frame.ParseFromString(bytearray(data.numpy()))
#get one frame
context_name = frame.context.name
frame_timestamp_micros = str(frame.timestamp_micros)
for index, image in enumerate(frame.images):
if image.name != 1: #Only use front camera
continue
camera_name = open_dataset.CameraName.Name.Name(image.name)
image_globeid = image_globeid + 1
#print("camera name:", camera_name)
img = tf.image.decode_jpeg(image.image).numpy()
image_name='_'.join([frame_timestamp_micros, camera_name])#image name
#image_id = '/'.join([context_name, image_name]) #using "/" join, context_name is the folder
#New: do not use sub-folder
image_id = '_'.join([context_name, image_name])
#image_id = '/'.join([context_name, frame_timestamp_micros, camera_name]) #using "/" join
file_name = image_id + '.jpg'
#print(file_name)
file_name = '/'.join([foldername, file_name])
filepath = out_dir / file_name
#filepath = segment_out_dir / file_name
#print('Image output path',filepath)
filepath.parent.mkdir(parents=True, exist_ok=True)
#images.append(dict(file_name=file_name, id=image_id, height=img.shape[0], width=img.shape[1], camera_name=camera_name))#new add camera_name
images.append(dict(file_name=file_name, id=image_globeid, height=img.shape[0], width=img.shape[1], camera_name=camera_name))#new add camera_name
#print("current image id: ", image_globeid)
cv2.imwrite(str(filepath), img)
for camera_labels in get_camera_labels(frame):
# Ignore camera labels that do not correspond to this camera.
if camera_labels.name == image.name:
# Iterate over the individual labels.
for label in camera_labels.labels:
# object bounding box.
width = int(label.box.length)
height = int(label.box.width)
x = int(label.box.center_x - 0.5 * width)
y = int(label.box.center_y - 0.5 * height)
area = width * height
annotations.append(dict(image_id=image_globeid,
bbox=[x, y, width, height], area=area, category_id=label.type,
object_id=label.id,
tracking_difficulty_level=2 if label.tracking_difficulty_level == 2 else 1,
detection_difficulty_level=2 if label.detection_difficulty_level == 2 else 1))
with (segment_out_dir / 'annotations.json').open('w') as f:
for i, anno in enumerate(annotations):
            anno['id'] = i  # assign a unique id to each annotation
json.dump(dict(images=images, annotations=annotations, categories=categories), f)
!rm -r /data/cmpe295-liu/WaymoExport
!rm -r /data/cmpe295-liu/WaymoExportAll/
!mkdir /data/cmpe295-liu/Waymo/WaymoCOCOsmall
!rm -r /data/cmpe295-liu/Waymo/WaymoCOCOsmall/Training
folderslist = ["training_0031","training_0030","training_0029","training_0028","training_0027","training_0026"]
PATH='/data/cmpe295-liu/Waymo'
for index in range(len(folderslist)):
foldername=folderslist[index]
print(foldername)
tfrecord_files = glob(os.path.join(PATH, foldername, "*.tfrecord")) #[path for path in glob(os.path.join(PATH, foldername, "*.tfrecord"))]
print(tfrecord_files)
len(folderslist)
folderslist[1]
foldername="training_0031"
tfrecord_files = glob(os.path.join(PATH, foldername, "*.tfrecord")) #[path for path in glob(os.path.join(PATH, foldername, "*.tfrecord"))]
print(tfrecord_files)
PATH='/data/cmpe295-liu/Waymo'
folderslist = ["training_0031","training_0030","training_0029","training_0028","training_0027","training_0026"]
#folderslist = ["training_0031","training_0030","training_0029","training_0028","training_0027","training_0026","training_0025", "training_0024", "training_0023","training_0022","training_0021","training_0020","training_0019","training_0018","training_0017","training_0016","training_0015","training_0014","training_0013","training_0012","training_0011","training_0010","training_0009","training_0008","training_0007","training_0006","training_0005","training_0004","training_0003","training_0002","training_0001","training_0000"]
tfrecord_files = [path for x in folderslist for path in glob(os.path.join(PATH, x, "*.tfrecord"))]
print(len(tfrecord_files))#total number of tfrecord files
out_dir='/data/cmpe295-liu/Waymo/WaymoCOCOsmall/Training'
step=5 #downsample
out_dir = Path(out_dir)
extract_segment_frontcamera(tfrecord_files, out_dir, step)
PATH='/data/cmpe295-liu/Waymo'
folderslist = ["validation_0007","training_0006"]#,"training_0029","training_0028","training_0027","training_0026"]
#folderslist = ["training_0031","training_0030","training_0029","training_0028","training_0027","training_0026","training_0025", "training_0024", "training_0023","training_0022","training_0021","training_0020","training_0019","training_0018","training_0017","training_0016","training_0015","training_0014","training_0013","training_0012","training_0011","training_0010","training_0009","training_0008","training_0007","training_0006","training_0005","training_0004","training_0003","training_0002","training_0001","training_0000"]
tfrecord_files = [path for x in folderslist for path in glob(os.path.join(PATH, x, "*.tfrecord"))]
print(len(tfrecord_files))#total number of tfrecord files
out_dir='/data/cmpe295-liu/Waymo/WaymoCOCOsmall/Validation'
step=5 #downsample
out_dir = Path(out_dir)
extract_segment_frontcamera(tfrecord_files, out_dir, step)
PATH='/data/cmpe295-liu/Waymo'
#folderslist = ["training_0031","training_0030"]#,"training_0029","training_0028","training_0027","training_0026"]
folderslist = ["training_0031","training_0030","training_0029","training_0028","training_0027","training_0026","training_0025", "training_0024", "training_0023","training_0022","training_0021","training_0020","training_0019","training_0018","training_0017","training_0016","training_0015","training_0014","training_0013","training_0012","training_0011","training_0010","training_0009","training_0008","training_0007","training_0006","training_0005","training_0004","training_0003","training_0002","training_0001","training_0000"]
tfrecord_files = [path for x in folderslist for path in glob(os.path.join(PATH, x, "*.tfrecord"))]
print(len(tfrecord_files))#total number of tfrecord files
out_dir='/data/cmpe295-liu/Waymo/WaymoCOCO/Training'
step=5 #downsample
out_dir = Path(out_dir)
extract_segment_allfrontcamera(PATH,folderslist, out_dir, step)
folderslist = validation_folders = ["validation_0000","validation_0001","validation_0002","validation_0003","validation_0004","validation_0005", "validation_0006", "validation_0007"]
tfrecord_files = [path for x in folderslist for path in glob(os.path.join(PATH, x, "*.tfrecord"))]
print(len(tfrecord_files))#total number of tfrecord files
out_dir='/data/cmpe295-liu/Waymo/WaymoCOCO/Validation'
step=5 #downsample
out_dir = Path(out_dir)
extract_segment_allfrontcamera(PATH,folderslist, out_dir, step)
#extract_segment_frontcamera(tfrecord_files, out_dir, step)
!ls /data/cmpe295-liu/Waymo/WaymoCOCOsmall/Validation
!pwd
!python /home/010796032/PytorchWork/WaymoDetectron2Train.py
FULL_LABEL_CLASSES = ['unknown', 'vehicle', 'pedestrian', 'sign', 'cyclist']
len(FULL_LABEL_CLASSES)
```
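As a hedged sanity check (not part of the original notebook), the exported `annotations.json` should be loadable with the standard COCO API, since it contains the `images`, `annotations`, and `categories` keys; the path below matches the training output directory used above and assumes `pycocotools` is installed.
```
# Illustrative check only -- assumes pycocotools is installed
from pycocotools.coco import COCO

coco = COCO('/data/cmpe295-liu/Waymo/WaymoCOCO/Training/annotations.json')
print('categories:', [c['name'] for c in coco.loadCats(coco.getCatIds())])
print('number of images:', len(coco.getImgIds()))
print('number of annotations:', len(coco.getAnnIds()))
```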
| github_jupyter |
```
from os import environ
from os.path import join
import json
# YES/NO data dictionary
YES = 1
NO = 2
NOT_APPLY = 97
IGNORED = 98
NOT_SPECIFIED = 99
# Laboratory result dictionary
LAB_POSITIVE = 1
LAB_NO_POSITIVE = 2
LAB_PENDING_RESULT = 3
LAB_WRONG_RESULT = 4
LAB_NOT_APPLY = 97  # case without a sample
months = ["",
"Enero",
"Febrero",
"Marzo",
"Abril",
"Mayo",
"Junio",
"Julio",
"Agosto",
"Septiembre",
"Octubre",
"Noviembre",
"Diciembre"]
input_folder = environ.get('CROSSCOMPUTE_INPUT_FOLDER', 'tests/standard/input')
output_folder = environ.get('CROSSCOMPUTE_OUTPUT_FOLDER', 'tests/standard/output')
settings_path = join(input_folder, 'settings.json')
d = json.load(open(settings_path, 'rt'))
d
from datetime import datetime
now = datetime.now()
report_day = f'{now.day} de {months[now.month]} del {now.year}'
with open(join(output_folder, 'report_date.txt'), 'wt') as report_date_file:
report_date_file.write(report_day)
import pandas as pd
pd.options.display.float_format = '{:,.2f}'.format
covid_zip_data = 'data/datos_abiertos_covid19.zip'
covid_pd = pd.read_csv(covid_zip_data, compression='zip', header=0, )
covid_pd.set_index('ID_REGISTRO')
# covid_pd.groupby('RESULTADO_LAB').size()
covid_pd.info()
```
# Total Cases and Mortality by Pre-existing Condition
```
import matplotlib.pyplot as plt
cv19_confirmed_cases = covid_pd[covid_pd['RESULTADO_LAB'] == YES]
pneumonia_confirmed_cases = cv19_confirmed_cases[cv19_confirmed_cases['NEUMONIA'] == YES]
diabetes_confirmed_cases = cv19_confirmed_cases[cv19_confirmed_cases['DIABETES'] == YES]
epoc_confirmed_cases = cv19_confirmed_cases[cv19_confirmed_cases['EPOC'] == YES]
asma_confirmed_cases = cv19_confirmed_cases[cv19_confirmed_cases['ASMA'] == YES]
inmusupr_confirmed_cases = cv19_confirmed_cases[cv19_confirmed_cases['INMUSUPR'] == YES]
hyper_confirmed_cases = cv19_confirmed_cases[cv19_confirmed_cases['HIPERTENSION'] == YES]
# others_confirmed_cases = cv19_confirmed_cases[cv19_confirmed_cases['OTRAS_COM'] == YES]
cardio_confirmed_cases = cv19_confirmed_cases[cv19_confirmed_cases['CARDIOVASCULAR'] == YES]
obesity_confirmed_cases = cv19_confirmed_cases[cv19_confirmed_cases['OBESIDAD'] == YES]
renal_confirmed_cases = cv19_confirmed_cases[cv19_confirmed_cases['RENAL_CRONICA'] == YES]
#
smoking_confirmed_cases = cv19_confirmed_cases[cv19_confirmed_cases['TABAQUISMO'] == YES]
TOTAL_POSITIVE_COV19_CASES = cv19_confirmed_cases.shape[0] # len(list(filter(lambda x: x, covid_pd['RESULTADO_LAB'] == YES)))
TOTAL_PNEUMONIA_CASES = pneumonia_confirmed_cases.shape[0]
print(TOTAL_POSITIVE_COV19_CASES)
def percentage_died(df):
part = who_died(df).shape[0]
whole = df.shape[0]
percentage = 100 * float(part)/float(whole)
return f'{int(percentage)}%'
def who_died(df):
return df[df['FECHA_DEF'] != '9999-99-99']
diseases_dfs = [
diabetes_confirmed_cases,
# pneumonia_confirmed_cases,
epoc_confirmed_cases,
asma_confirmed_cases,
inmusupr_confirmed_cases,
hyper_confirmed_cases,
cardio_confirmed_cases,
obesity_confirmed_cases,
renal_confirmed_cases,
smoking_confirmed_cases,
]
_ = lambda value: '{:,.2f}'.format(value).split('.')[0] if type(value) != str else value
cases_by_disease = pd.DataFrame.from_dict({
'Padecimiento': ['Diabetes',
# 'Neumonía',
'EPOC', 'Asma', 'Inmunosupresión', 'Hipertensión', 'Cardiovascular',
'Obesidad', 'Renal Crónica', 'Tabaquismo'],
'Positivos': [
diabetes_confirmed_cases.shape[0],
# pneumonia_confirmed_cases.shape[0],
epoc_confirmed_cases.shape[0],
asma_confirmed_cases.shape[0],
inmusupr_confirmed_cases.shape[0],
hyper_confirmed_cases.shape[0],
cardio_confirmed_cases.shape[0],
obesity_confirmed_cases.shape[0],
renal_confirmed_cases.shape[0],
smoking_confirmed_cases.shape[0],
],
'Muertes': [
who_died(diabetes_confirmed_cases).shape[0],
# who_died(pneumonia_confirmed_cases).shape[0],
who_died(epoc_confirmed_cases).shape[0],
who_died(asma_confirmed_cases).shape[0],
who_died(inmusupr_confirmed_cases).shape[0],
who_died(hyper_confirmed_cases).shape[0],
who_died(cardio_confirmed_cases).shape[0],
who_died(obesity_confirmed_cases).shape[0],
who_died(renal_confirmed_cases).shape[0],
who_died(smoking_confirmed_cases).shape[0],
],
'Porcentaje de Muerte': [
percentage_died(diabetes_confirmed_cases),
# percentage_died(pneumonia_confirmed_cases),
percentage_died(epoc_confirmed_cases),
percentage_died(asma_confirmed_cases),
percentage_died(inmusupr_confirmed_cases),
percentage_died(hyper_confirmed_cases),
percentage_died(cardio_confirmed_cases),
percentage_died(obesity_confirmed_cases),
percentage_died(renal_confirmed_cases),
percentage_died(smoking_confirmed_cases),
],
})
cases_by_disease = cases_by_disease.set_index('Padecimiento')
# cases_by_disease = cases_by_disease.astype({'Positivos': float, 'Muertes' : float})
cases_by_disease.applymap(_).to_csv(join(output_folder, 'table1.csv'))
cases_by_disease.applymap(_)
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter, StrMethodFormatter
cases_by_disease
ax = cases_by_disease.plot.bar(rot=0, figsize=(15,5))
plt.yticks(fontsize = 13)
plt.xlabel('Casos positivos y defunciones por padecimiento', fontsize = 18)
# add a value label to each bar, displaying its height
for p in ax.patches:
ax.annotate(p.get_height(),
(p.get_x() + p.get_width()/2., p.get_height()),
ha = 'center', va = 'center', xytext = (0,7), textcoords = 'offset points', size=9)
ax.yaxis.set_major_formatter(StrMethodFormatter('{x:,}'))
plt.tight_layout()
# save Figure 1 as an image
plt.savefig(join(output_folder, 'figure1.png'))
from matplotlib_venn import venn3, venn3_circles
from matplotlib.pyplot import gca
major_diseases = [set(diabetes_confirmed_cases['ID_REGISTRO']),
set(hyper_confirmed_cases['ID_REGISTRO']),
set(obesity_confirmed_cases['ID_REGISTRO'])]
major_diseases_deaths = [set(who_died(diabetes_confirmed_cases)['ID_REGISTRO']),
set(who_died(hyper_confirmed_cases)['ID_REGISTRO']),
set(who_died(obesity_confirmed_cases)['ID_REGISTRO'])]
fig, axes = plt.subplots(1, 2, figsize=(15, 15))
venn3(major_diseases,
set_colors=('#3E64AF', '#3EAF5D', '#D74E3B'),
set_labels = ('Diabetes',
'Hipertensión',
'Obesidad',
),
alpha=0.75,
)
venn3_circles(major_diseases, lw=0.7)
plt.subplot(1, 2, 1)
venn3(major_diseases_deaths,
set_colors=('#3E64AF', '#3EAF5D', '#D74E3B'),
set_labels = ('Fallecimientos por \nDiabetes',
'Fallecimientos por \nHipertensión',
'Fallecimientos por \nObesidad'),
alpha=0.75)
venn3_circles(major_diseases_deaths, lw=0.7)
plt.tight_layout()
plt.savefig(join(output_folder, 'figure2.png'), bbox_inches='tight')
plt.show()
axes
fig, axes = plt.subplots(3, 3, figsize=(10, 10), dpi=100)
colors = ['tab:red', 'tab:blue', 'tab:green', 'tab:pink', 'tab:olive']
disease_title = [
'Diabetes',
'EPOC',
'Asma',
'Inmunosuprecion',
'Hipertension',
'Cardiovascular',
'Obesidad',
'Insuficiencia renal',
'Tabaquismo'
]
for i, (ax, df) in enumerate(zip(axes.flatten(), diseases_dfs)):
ax.hist(df['EDAD'], alpha=0.5, bins=100, density=True, stacked=True, label=disease_title[i], color=colors[ i % 4])
ax.set_xlabel("Edad")
ax.set_ylabel("Frecuencia")
ax.legend(loc='upper left', frameon=False)
# ax.set_title(disease_title[i])
ax.set_xlim(0, 90);
plt.suptitle('Afectación de pacientes con enfermedad preexistente por edad', y=1.05, size=16)
plt.tight_layout();
plt.savefig(join(output_folder, 'figure3.png'), bbox_inches='tight')
#diabetes_confirmed_cases
fig, axes = plt.subplots(3, 3, figsize=(10, 10), dpi=100)
diseases_dfs = [
who_died(diabetes_confirmed_cases),
    # who_died(pneumonia_confirmed_cases),  # excluded so the labels match the nine titles and the 3x3 grid
who_died(epoc_confirmed_cases),
who_died(asma_confirmed_cases),
who_died(inmusupr_confirmed_cases),
who_died(hyper_confirmed_cases),
who_died(cardio_confirmed_cases),
who_died(obesity_confirmed_cases),
who_died(renal_confirmed_cases),
who_died(smoking_confirmed_cases),
]
for i, (ax, df) in enumerate(zip(axes.flatten(), diseases_dfs)):
ax.hist(df['EDAD'], alpha=0.5, bins=100, density=True, stacked=True, label=disease_title[i], color=colors[ i % 4])
# ax.set_title(disease_title[i])
ax.set_xlabel("Edad")
ax.set_ylabel("Frecuencia")
ax.legend(loc='upper left', frameon=False)
ax.set_xlim(0, 90);
plt.suptitle('Afectación de fallecidos con enfermedad preexistente por edad', y=1.05, size=16)
plt.tight_layout();
plt.savefig(join(output_folder, 'figure4.png'), bbox_inches='tight')
```
| github_jupyter |
# Computer Vision Nanodegree
## Project: Image Captioning
---
In this notebook, you will learn how to load and pre-process data from the [COCO dataset](http://cocodataset.org/#home). You will also design a CNN-RNN model for automatically generating image captions.
Note that **any amendments that you make to this notebook will not be graded**. However, you will use the instructions provided in **Step 3** and **Step 4** to implement your own CNN encoder and RNN decoder by making amendments to the **models.py** file provided as part of this project. Your **models.py** file **will be graded**.
Feel free to use the links below to navigate the notebook:
- [Step 1](#step1): Explore the Data Loader
- [Step 2](#step2): Use the Data Loader to Obtain Batches
- [Step 3](#step3): Experiment with the CNN Encoder
- [Step 4](#step4): Implement the RNN Decoder
<a id='step1'></a>
## Step 1: Explore the Data Loader
We have already written a [data loader](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) that you can use to load the COCO dataset in batches.
In the code cell below, you will initialize the data loader by using the `get_loader` function in **data_loader.py**.
> For this project, you are not permitted to change the **data_loader.py** file, which must be used as-is.
The `get_loader` function takes as input a number of arguments that can be explored in **data_loader.py**. Take the time to explore these arguments now by opening **data_loader.py** in a new window. Most of the arguments must be left at their default values, and you are only allowed to amend the values of the arguments below:
1. **`transform`** - an [image transform](http://pytorch.org/docs/master/torchvision/transforms.html) specifying how to pre-process the images and convert them to PyTorch tensors before using them as input to the CNN encoder. For now, you are encouraged to keep the transform as provided in `transform_train`. You will have the opportunity later to choose your own image transform to pre-process the COCO images.
2. **`mode`** - one of `'train'` (loads the training data in batches) or `'test'` (for the test data). We will say that the data loader is in training or test mode, respectively. While following the instructions in this notebook, please keep the data loader in training mode by setting `mode='train'`.
3. **`batch_size`** - determines the batch size. When training the model, this is number of image-caption pairs used to amend the model weights in each training step.
4. **`vocab_threshold`** - the minimum number of times that a word must appear in the training captions before it is included in the vocabulary. Words that have fewer than `vocab_threshold` occurrences in the training captions are considered unknown words.
5. **`vocab_from_file`** - a Boolean that decides whether to load the vocabulary from file.
We will describe the `vocab_threshold` and `vocab_from_file` arguments in more detail soon. For now, run the code cell below. Be patient - it may take a couple of minutes to run!
```
# install PixieDebugger - A Visual Python Debugger for Jupyter Notebooks
# https://medium.com/codait/the-visual-python-debugger-for-jupyter-notebooks-youve-always-wanted-761713babc62
# https://www.analyticsvidhya.com/blog/2018/07/pixie-debugger-python-debugging-tool-jupyter-notebooks-data-scientist-must-use/
!pip install pixiedust
# install other toolboxes
!pip install tqdm==4.14 # https://stackoverflow.com/questions/59109313/tqdm-tqdm-tqdmkeyerror-unknown-arguments-unit-divisor-1024
!pip install nltk
!pip install torch==1.2.0 torchvision==0.4.0
!pip install torchsummary
import sys
sys.path.append('/opt/cocoapi/PythonAPI')
from pycocotools.coco import COCO
import nltk
nltk.download('punkt')
from data_loader import get_loader
import torch
print('PyTorch Version:', torch.__version__)
print('CUDA available:', torch.cuda.is_available())
from torchvision import transforms
from torchsummary import summary
import pixiedust
# Define a transform to pre-process the training images.
transform_train = transforms.Compose([
transforms.Resize(256), # smaller edge of image resized to 256
transforms.RandomCrop(224), # get 224x224 crop from random location
transforms.RandomHorizontalFlip(), # horizontally flip image with probability=0.5
transforms.ToTensor(), # convert the PIL Image to a tensor
transforms.Normalize((0.485, 0.456, 0.406), # normalize image for pre-trained model
(0.229, 0.224, 0.225))])
# Set the minimum word count threshold.
vocab_threshold = 5
# Specify the batch size.
batch_size = 64
# Obtain the data loader.
data_loader = get_loader(transform=transform_train,
mode='train',
batch_size=batch_size,
vocab_threshold=vocab_threshold,
vocab_from_file=False)
```
When you ran the code cell above, the data loader was stored in the variable `data_loader`.
You can access the corresponding dataset as `data_loader.dataset`. This dataset is an instance of the `CoCoDataset` class in **data_loader.py**. If you are unfamiliar with data loaders and datasets, you are encouraged to review [this PyTorch tutorial](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html).
### Exploring the `__getitem__` Method
The `__getitem__` method in the `CoCoDataset` class determines how an image-caption pair is pre-processed before being incorporated into a batch. This is true for all `Dataset` classes in PyTorch; if this is unfamiliar to you, please review [the tutorial linked above](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html).
When the data loader is in training mode, this method begins by first obtaining the filename (`path`) of a training image and its corresponding caption (`caption`).
#### Image Pre-Processing
Image pre-processing is relatively straightforward (from the `__getitem__` method in the `CoCoDataset` class):
```python
# Convert image to tensor and pre-process using transform
image = Image.open(os.path.join(self.img_folder, path)).convert('RGB')
image = self.transform(image)
```
After loading the image in the training folder with name `path`, the image is pre-processed using the same transform (`transform_train`) that was supplied when instantiating the data loader.
#### Caption Pre-Processing
The captions also need to be pre-processed and prepped for training. In this example, for generating captions, we are aiming to create a model that predicts the next token of a sentence from previous tokens, so we turn the caption associated with any image into a list of tokenized words, before casting it to a PyTorch tensor that we can use to train the network.
To understand in more detail how COCO captions are pre-processed, we'll first need to take a look at the `vocab` instance variable of the `CoCoDataset` class. The code snippet below is pulled from the `__init__` method of the `CoCoDataset` class:
```python
def __init__(self, transform, mode, batch_size, vocab_threshold, vocab_file, start_word,
end_word, unk_word, annotations_file, vocab_from_file, img_folder):
...
self.vocab = Vocabulary(vocab_threshold, vocab_file, start_word,
end_word, unk_word, annotations_file, vocab_from_file)
...
```
From the code snippet above, you can see that `data_loader.dataset.vocab` is an instance of the `Vocabulary` class from **vocabulary.py**. Take the time now to verify this for yourself by looking at the full code in **data_loader.py**.
We use this instance to pre-process the COCO captions (from the `__getitem__` method in the `CoCoDataset` class):
```python
# Convert caption to tensor of word ids.
tokens = nltk.tokenize.word_tokenize(str(caption).lower()) # line 1
caption = [] # line 2
caption.append(self.vocab(self.vocab.start_word)) # line 3
caption.extend([self.vocab(token) for token in tokens]) # line 4
caption.append(self.vocab(self.vocab.end_word)) # line 5
caption = torch.Tensor(caption).long() # line 6
```
As you will see soon, this code converts any string-valued caption to a list of integers, before casting it to a PyTorch tensor. To see how this code works, we'll apply it to the sample caption in the next code cell.
```
sample_caption = 'A person doing a trick on a rail while riding a skateboard.'
```
In **`line 1`** of the code snippet, every letter in the caption is converted to lowercase, and the [`nltk.tokenize.word_tokenize`](http://www.nltk.org/) function is used to obtain a list of string-valued tokens. Run the next code cell to visualize the effect on `sample_caption`.
```
sample_tokens = nltk.tokenize.word_tokenize(str(sample_caption).lower())
print(sample_tokens)
```
In **`line 2`** and **`line 3`** we initialize an empty list and append an integer to mark the start of a caption. The [paper](https://arxiv.org/pdf/1411.4555.pdf) that you are encouraged to implement uses a special start word (and a special end word, which we'll examine below) to mark the beginning (and end) of a caption.
This special start word (`"<start>"`) is decided when instantiating the data loader and is passed as a parameter (`start_word`). You are **required** to keep this parameter at its default value (`start_word="<start>"`).
As you will see below, the integer `0` is always used to mark the start of a caption.
```
sample_caption = []
start_word = data_loader.dataset.vocab.start_word
print('Special start word:', start_word)
sample_caption.append(data_loader.dataset.vocab(start_word))
print(sample_caption)
```
In **`line 4`**, we continue the list by adding integers that correspond to each of the tokens in the caption.
```
sample_caption.extend([data_loader.dataset.vocab(token) for token in sample_tokens])
print(sample_caption)
```
In **`line 5`**, we append a final integer to mark the end of the caption.
Identical to the case of the special start word (above), the special end word (`"<end>"`) is decided when instantiating the data loader and is passed as a parameter (`end_word`). You are **required** to keep this parameter at its default value (`end_word="<end>"`).
As you will see below, the integer `1` is always used to mark the end of a caption.
```
end_word = data_loader.dataset.vocab.end_word
print('Special end word:', end_word)
sample_caption.append(data_loader.dataset.vocab(end_word))
print(sample_caption)
```
Finally, in **`line 6`**, we convert the list of integers to a PyTorch tensor and cast it to [long type](http://pytorch.org/docs/master/tensors.html#torch.Tensor.long). You can read more about the different types of PyTorch tensors on the [website](http://pytorch.org/docs/master/tensors.html).
```
sample_caption = torch.Tensor(sample_caption).long()
print(sample_caption)
```
And that's it! In summary, any caption is converted to a list of tokens, with _special_ start and end tokens marking the beginning and end of the sentence:
```
[<start>, 'a', 'person', 'doing', 'a', 'trick', 'while', 'riding', 'a', 'skateboard', '.', <end>]
```
This list of tokens is then turned into a list of integers, where every distinct word in the vocabulary has an associated integer value:
```
[0, 3, 98, 754, 3, 396, 207, 139, 3, 753, 18, 1]
```
Finally, this list is converted to a PyTorch tensor. All of the captions in the COCO dataset are pre-processed using this same procedure from **`lines 1-6`** described above.
As you saw, in order to convert a token to its corresponding integer, we call `data_loader.dataset.vocab` as a function. The details of how this call works can be explored in the `__call__` method in the `Vocabulary` class in **vocabulary.py**.
```python
def __call__(self, word):
if not word in self.word2idx:
return self.word2idx[self.unk_word]
return self.word2idx[word]
```
The `word2idx` instance variable is a Python [dictionary](https://docs.python.org/3/tutorial/datastructures.html#dictionaries) that is indexed by string-valued keys (mostly tokens obtained from training captions). For each key, the corresponding value is the integer that the token is mapped to in the pre-processing step.
Use the code cell below to view a subset of this dictionary.
```
# Preview the word2idx dictionary.
dict(list(data_loader.dataset.vocab.word2idx.items())[:10])
```
We also print the total number of keys.
```
# Print the total number of keys in the word2idx dictionary.
print('Total number of tokens in vocabulary:', len(data_loader.dataset.vocab))
```
As you will see if you examine the code in **vocabulary.py**, the `word2idx` dictionary is created by looping over the captions in the training dataset. If a token appears no less than `vocab_threshold` times in the training set, then it is added as a key to the dictionary and assigned a corresponding unique integer. You will have the option later to amend the `vocab_threshold` argument when instantiating your data loader. Note that in general, **smaller** values for `vocab_threshold` yield a **larger** number of tokens in the vocabulary. You are encouraged to check this for yourself in the next code cell by decreasing the value of `vocab_threshold` before creating a new data loader.
```
# Modify the minimum word count threshold.
vocab_threshold = 4
# Obtain the data loader.
data_loader = get_loader(transform=transform_train,
mode='train',
batch_size=batch_size,
vocab_threshold=vocab_threshold,
vocab_from_file=False)
# Print the total number of keys in the word2idx dictionary.
print('Total number of tokens in vocabulary:', len(data_loader.dataset.vocab))
```
There are also a few special keys in the `word2idx` dictionary. You are already familiar with the special start word (`"<start>"`) and special end word (`"<end>"`). There is one more special token, corresponding to unknown words (`"<unk>"`). All tokens that don't appear anywhere in the `word2idx` dictionary are considered unknown words. In the pre-processing step, any unknown tokens are mapped to the integer `2`.
```
unk_word = data_loader.dataset.vocab.unk_word
print('Special unknown word:', unk_word)
print('All unknown words are mapped to this integer:', data_loader.dataset.vocab(unk_word))
```
Check this for yourself below, by pre-processing the provided nonsense words that never appear in the training captions.
```
print(data_loader.dataset.vocab('jfkafejw'))
print(data_loader.dataset.vocab('ieowoqjf'))
```
The final thing to mention is the `vocab_from_file` argument that is supplied when creating a data loader. To understand this argument, note that when you create a new data loader, the vocabulary (`data_loader.dataset.vocab`) is saved as a [pickle](https://docs.python.org/3/library/pickle.html) file in the project folder, with filename `vocab.pkl`.
If you are still tweaking the value of the `vocab_threshold` argument, you **must** set `vocab_from_file=False` to have your changes take effect.
But once you are happy with the value that you have chosen for the `vocab_threshold` argument, you need only run the data loader *one more time* with your chosen `vocab_threshold` to save the new vocabulary to file. Then, you can henceforth set `vocab_from_file=True` to load the vocabulary from file and speed the instantiation of the data loader. Note that building the vocabulary from scratch is the most time-consuming part of instantiating the data loader, and so you are strongly encouraged to set `vocab_from_file=True` as soon as you are able.
Note that if `vocab_from_file=True`, then any supplied argument for `vocab_threshold` when instantiating the data loader is completely ignored.
```
# Obtain the data loader (from file). Note that it runs much faster than before!
data_loader = get_loader(transform=transform_train,
mode='train',
batch_size=batch_size,
vocab_from_file=True)
```
In the next section, you will learn how to use the data loader to obtain batches of training data.
<a id='step2'></a>
## Step 2: Use the Data Loader to Obtain Batches
The captions in the dataset vary greatly in length. You can see this by examining `data_loader.dataset.caption_lengths`, a Python list with one entry for each training caption (where the value stores the length of the corresponding caption).
In the code cell below, we use this list to print the total number of captions in the training data with each length. As you will see below, the majority of captions have length 10. Likewise, very short and very long captions are quite rare.
```
from collections import Counter
# Tally the total number of training captions with each length.
counter = Counter(data_loader.dataset.caption_lengths)
lengths = sorted(counter.items(), key=lambda pair: pair[1], reverse=True)
for value, count in lengths:
print('value: %2d --- count: %5d' % (value, count))
```
To generate batches of training data, we begin by first sampling a caption length (where the probability that any length is drawn is proportional to the number of captions with that length in the dataset). Then, we retrieve a batch of size `batch_size` of image-caption pairs, where all captions have the sampled length. This approach for assembling batches matches the procedure in [this paper](https://arxiv.org/pdf/1502.03044.pdf) and has been shown to be computationally efficient without degrading performance.
Run the code cell below to generate a batch. The `get_train_indices` method in the `CoCoDataset` class first samples a caption length, and then samples `batch_size` indices corresponding to training data points with captions of that length. These indices are stored below in `indices`.
These indices are supplied to the data loader, which then is used to retrieve the corresponding data points. The pre-processed images and captions in the batch are stored in `images` and `captions`.
```
import numpy as np
import torch.utils.data as data
# Randomly sample a caption length, and sample indices with that length.
indices = data_loader.dataset.get_train_indices()
print('selected caption length:', set(data_loader.dataset.caption_lengths[i] for i in indices))
print('batch size:', data_loader.dataset.batch_size)
print('sampled indices:', indices)
# Create and assign a batch sampler to retrieve a batch with the sampled indices.
new_sampler = data.sampler.SubsetRandomSampler(indices=indices)
data_loader.batch_sampler.sampler = new_sampler
# Obtain the batch.
images, captions = next(iter(data_loader))
print('images.shape:', images.shape)
print('captions.shape:', captions.shape)
```
Each time you run the code cell above, a different caption length is sampled, and a different batch of training data is returned. Run the code cell multiple times to check this out!
You will train your model in the next notebook in this sequence (**2_Training.ipynb**). This code for generating training batches will be provided to you.
> Before moving to the next notebook in the sequence (**2_Training.ipynb**), you are strongly encouraged to take the time to become very familiar with the code in **data_loader.py** and **vocabulary.py**. **Step 1** and **Step 2** of this notebook are designed to help facilitate a basic introduction and guide your understanding. However, our description is not exhaustive, and it is up to you (as part of the project) to learn how to best utilize these files to complete the project. __You should NOT amend any of the code in either *data_loader.py* or *vocabulary.py*.__
In the next steps, we focus on learning how to specify a CNN-RNN architecture in PyTorch, towards the goal of image captioning.
<a id='step3'></a>
## Step 3: Experiment with the CNN Encoder
Run the code cell below to enable automatic reloading of **model.py**, from which you will import `EncoderCNN` and `DecoderRNN` in the cells that follow.
```
# Watch for any changes in model.py, and re-load it automatically.
%load_ext autoreload
%autoreload 2
```
In the next code cell we define a `device` that you will use to move PyTorch tensors to the GPU (if CUDA is available). Run this code cell before continuing.
```
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
```
Run the code cell below to instantiate the CNN encoder in `encoder`.
The pre-processed images from the batch in **Step 2** of this notebook are then passed through the encoder, and the output is stored in `features`.
```
from model import EncoderCNN
# Specify the dimensionality of the image embedding.
embed_size = 256
#-#-#-# Do NOT modify the code below this line. #-#-#-#
# Initialize the encoder. (Optional: Add additional arguments if necessary.)
encoder = EncoderCNN(embed_size)
# Move the encoder to GPU if CUDA is available.
encoder.to(device)
# Move last batch of images (from Step 2) to GPU if CUDA is available.
images = images.to(device)
# Print encoder summary
summary(encoder, images.cpu().data.numpy().shape[1:])
# Pass the images through the encoder.
features = encoder(images)
print('type(features):', type(features))
print('features.shape:', features.shape)
# Check that your encoder satisfies some requirements of the project! :D
assert type(features)==torch.Tensor, "Encoder output needs to be a PyTorch Tensor."
assert (features.shape[0]==batch_size) & (features.shape[1]==embed_size), "The shape of the encoder output is incorrect."
```
The encoder that we provide to you uses the pre-trained ResNet-50 architecture (with the final fully-connected layer removed) to extract features from a batch of pre-processed images. The output is then flattened to a vector, before being passed through a `Linear` layer to transform the feature vector to have the same size as the word embedding.

You are welcome (and encouraged) to amend the encoder in **model.py**, to experiment with other architectures. In particular, consider using a [different pre-trained model architecture](http://pytorch.org/docs/master/torchvision/models.html). You may also like to [add batch normalization](http://pytorch.org/docs/master/nn.html#normalization-layers).
> You are **not** required to change anything about the encoder.
For this project, you **must** incorporate a pre-trained CNN into your encoder. Your `EncoderCNN` class must take `embed_size` as an input argument, which will also correspond to the dimensionality of the input to the RNN decoder that you will implement in Step 4. When you train your model in the next notebook in this sequence (**2_Training.ipynb**), you are welcome to tweak the value of `embed_size`.
If you decide to modify the `EncoderCNN` class, save **model.py** and re-execute the code cell above. If the code cell returns an assertion error, then please follow the instructions to modify your code before proceeding. The assert statements ensure that `features` is a PyTorch tensor with shape `[batch_size, embed_size]`.
<a id='step4'></a>
## Step 4: Implement the RNN Decoder
Before executing the next code cell, you must write `__init__` and `forward` methods in the `DecoderRNN` class in **model.py**. (Do **not** write the `sample` method yet - you will work with this method when you reach **3_Inference.ipynb**.)
> The `__init__` and `forward` methods in the `DecoderRNN` class are the only things that you **need** to modify as part of this notebook. You will write more implementations in the notebooks that appear later in the sequence.
Your decoder will be an instance of the `DecoderRNN` class and must accept as input:
- the PyTorch tensor `features` containing the embedded image features (outputted in Step 3, when the last batch of images from Step 2 was passed through `encoder`), along with
- a PyTorch tensor corresponding to the last batch of captions (`captions`) from Step 2.
Note that the way we have written the data loader should simplify your code a bit. In particular, every training batch will contain pre-processed captions where all have the same length (`captions.shape[1]`), so **you do not need to worry about padding**.
> While you are encouraged to implement the decoder described in [this paper](https://arxiv.org/pdf/1411.4555.pdf), you are welcome to implement any architecture of your choosing, as long as it uses at least one RNN layer, with hidden dimension `hidden_size`.
Although you will test the decoder using the last batch that is currently stored in the notebook, your decoder should be written to accept an arbitrary batch (of embedded image features and pre-processed captions [where all captions have the same length]) as input.

In the code cell below, `outputs` should be a PyTorch tensor with size `[batch_size, captions.shape[1], vocab_size]`. Your output should be designed such that `outputs[i,j,k]` contains the model's predicted score, indicating how likely the `j`-th token in the `i`-th caption in the batch is the `k`-th token in the vocabulary. In the next notebook of the sequence (**2_Training.ipynb**), we provide code to supply these scores to the [`torch.nn.CrossEntropyLoss`](http://pytorch.org/docs/master/nn.html#torch.nn.CrossEntropyLoss) optimizer in PyTorch.
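One possible shape for such a decoder (an illustrative sketch, not the required solution) embeds the captions, prepends the image features as the first time step, and runs a single LSTM followed by a linear layer over the vocabulary:
```python
# Illustrative decoder sketch satisfying the shape requirements described above
import torch
import torch.nn as nn

class DecoderRNNSketch(nn.Module):
    def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
        super(DecoderRNNSketch, self).__init__()
        self.word_embeddings = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, vocab_size)

    def forward(self, features, captions):
        # Drop the last token, embed, and prepend the image feature vector,
        # so the sequence length stays equal to captions.shape[1]
        embeddings = self.word_embeddings(captions[:, :-1])
        inputs = torch.cat((features.unsqueeze(1), embeddings), dim=1)
        lstm_out, _ = self.lstm(inputs)   # (batch_size, captions.shape[1], hidden_size)
        return self.fc(lstm_out)          # (batch_size, captions.shape[1], vocab_size)
```
With fixed-length caption batches, dropping the final token before embedding keeps the output length equal to `captions.shape[1]`, which is exactly what the assert in the cell below checks.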
```
from model import DecoderRNN
# Specify the number of features in the hidden state of the RNN decoder.
hidden_size = 512
#-#-#-# Do NOT modify the code below this line. #-#-#-#
# Store the size of the vocabulary.
vocab_size = len(data_loader.dataset.vocab)
# Initialize the decoder.
decoder = DecoderRNN(embed_size, hidden_size, vocab_size)
# Move the decoder to GPU if CUDA is available.
decoder.to(device)
# Move last batch of captions (from Step 1) to GPU if CUDA is available
captions = captions.to(device)
# Pass the encoder output and captions through the decoder.
print('features.shape:', features.shape)
print('captions.shape:', captions.shape)
print(decoder)
outputs = decoder(features, captions)
print('type(outputs):', type(outputs))
print('outputs.shape:', outputs.shape)
# Check that your decoder satisfies some requirements of the project! :D
assert type(outputs)==torch.Tensor, "Decoder output needs to be a PyTorch Tensor."
assert (outputs.shape[0]==batch_size) & (outputs.shape[1]==captions.shape[1]) & (outputs.shape[2]==vocab_size), "The shape of the decoder output is incorrect."
```
When you train your model in the next notebook in this sequence (**2_Training.ipynb**), you are welcome to tweak the value of `hidden_size`.
| github_jupyter |
```
from scipy.signal import savgol_filter
from math import factorial
from sklearn.cluster import KMeans
import os
import numpy as np
from spectral import *
import matplotlib.pyplot as plt
import math
from scipy.io import loadmat
from sklearn.decomposition import PCA
from sklearn import preprocessing
import pickle
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
DATASTORE = 'D:\\TCC\\Datasets\\bacterias_new'
SAVESTORE = 'D:\\TCC\\Datasets\\preprocess_bac_new'
spectral.settings.envi_support_nonlowercase_params = True
Join = os.path.join
# PLOT_COLORS = ['b','g','r','c','m','y','k']
# PLOT_SHAPES = ['-',':','--','-.','+']
LABELS = ['Bacillusscereus', 'Bacillussubtilis', 'Coryniumbacteriumlutaminum',
'Enterobactearerogenes', 'Enterobactercloacal', 'Enterococcusfaecalis', 'Escheriachiacoli',
'Klesbsialapneumonial', 'Micrococcusluteus', 'Proteusmirabilis', 'Pseudomonasaeoruginosas', 'Salmonellaspp',
'Serratiamarcences', 'Staphylococcusaureus_6538', 'Staphylococcusaureus_25923', 'Staphylococcusepidemides']
COLORS = {
'Bacillusscereus': '#ff1900',
'Bacillussubtilis': '#c27c51',
'Coryniumbacteriumlutaminum': '#7d5e20',
'Enterobactearerogenes': '#dbcf5c',
'Enterobactercloacal': '#9db031',
'Enterococcusfaecalis': '#9dff00',
'Escheriachiacoli': '#b58ad4',
'Klesbsialapneumonial': '#f200ff',
'Micrococcusluteus': '#6e9669',
'Proteusmirabilis': '#11521d',
'Pseudomonasaeoruginosas': '#85868c',
'Salmonellaspp': '#17e68f',
'Serratiamarcences': '#4ad9d9',
'Staphylococcusaureus_6538': '#1aaeb0',
'Staphylococcusaureus_25923': '#9117cf',
'Staphylococcusepidemides': '#bf324b',
}
def get_fewer_lines(mat, ammount):
n_mat = []
r, _, _ = mat.shape
for i in range(0, r, int(r/ammount)):
n_mat.append(mat[i, :, :])
return np.array(n_mat)
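# Reflectance-to-absorbance calibration: each raw pixel signal S is corrected
# with the mean white (W) and dark (D) reference measurements and converted as
# A = -log10((S - D) / (W - D)); pixels with an invalid ratio are set to 0.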
def calibration(I, W, D):
row,column,wave = I.shape
arr = np.copy(I)
meanw = np.mean(W, axis=0)
meand = np.mean(D, axis=0)
for z in range(wave):
if (z % 30 == 0):
            print('LAYERS {}-{}'.format(z, 256 if z+30>256 else z+30))
for x in range(row):
for y in range(column):
w = meanw[0,y,z]
d = meand[0,y,z]
s = I[x,y,z]
den = w-d
num = s-d
if den and num/den > 0:
arr[x,y,z] = -math.log10(num / den)
else:
arr[x,y,z] = 0
return arr
def hsi2matrix(arr):
    if len(arr.shape) != 3:
        raise ValueError('Input must have 3 dimensions')
r, c, w = arr.shape
return np.reshape(arr, (r*c, w))
def mat2hsi(mat, shape):
return np.reshape(mat, (-1, shape[1], shape[2]))
def pca_95(x):
scaled_data = preprocessing.scale(x)
return PCA(n_components=0.95).fit_transform(scaled_data)
def get_clusters(x):
pca_data = pca_95(x)
km = KMeans(n_clusters=2).fit(pca_data)
return km
def get_layer(hsi, layer):
return hsi[:,:,layer]
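# Manual Savitzky-Golay filter (least-squares polynomial smoothing/derivative):
# builds the polynomial design matrix for the window, uses the deriv-th row of
# its pseudo-inverse as a convolution kernel, and pads the signal by mirroring
# its end points before convolving.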
def savitzky_golay_filter(y, window_size, order, deriv=0, rate=1):
order_range = range(order+1)
half_window = (window_size - 1) // 2
b = np.mat([[k**i for i in order_range]
for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
firstvals = y[0] - np.abs(y[1:half_window+1][::-1] - y[0])
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve(m[::-1], y, mode='valid')
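# Standard Normal Variate (SNV): centers every spectrum (matrix row) on its own
# mean and scales it by its own standard deviation to reduce scatter effects.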
def snv_filter(mat):
nmat = np.copy(mat)
mean = np.mean(mat, axis=1)
std = np.std(mat, axis=1)
for i in range(mat.shape[0]):
nmat[i] = (nmat[i] - mean[i])/std[i]
return nmat
def remove_pixels(cube, side, amount):
cpy_cube = np.copy(cube)
if side == 'top':
cpy_cube[0:amount,:,:]=0
elif side == 'left':
cpy_cube[:, 0:amount, :] = 0
elif side == 'right':
cpy_cube[:,-amount:,:]=0
else:
cpy_cube[-amount:, :, :] = 0
return cpy_cube
def remove_pixels_from_all_dir(cube, ammount_top, ammount_left, ammount_right, ammount_down):
cpy_cube = np.copy(cube)
if ammount_top != 0:
cpy_cube = remove_pixels(cpy_cube, 'top', ammount_top)
if ammount_left != 0:
cpy_cube = remove_pixels(cpy_cube, 'left', ammount_left)
if ammount_right != 0:
cpy_cube = remove_pixels(cpy_cube, 'right', ammount_right)
if ammount_down != 0:
cpy_cube = remove_pixels(cpy_cube, 'down', ammount_down)
return cpy_cube
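# Splits the pixel matrix into two copies according to the two KMeans cluster
# labels, zeroing the pixels assigned to the opposite cluster in each copy, so
# the caller can decide which cluster (bacteria vs. background) to keep.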
def apply_mask(km,mat):
mask1 = np.copy(mat)
mask2 = np.copy(mat)
lab = km.labels_
for i in range(mat.shape[0]):
if lab[i] == 0:
mask1[i,:] = 0
else:
mask2[i,:] = 0
    return (mask1, mask2)  # return the 2D masks; callers reshape them back to hypercubes
def hsi_remove_background(mat):
mat_cpy = apply_filters(mat)
km = get_clusters(mat_cpy)
m1, m2 = apply_mask(km, mat)
return (m1,m2)
def which_cluster_to_mantain(mask1, mask2):
plt.figure()
plt.title("FIGURE 1")
plt.imshow(get_layer(mask1,10), cmap='gray')
plt.figure()
plt.title("FIGURE 2")
plt.imshow(get_layer(mask2, 10), cmap='gray')
plt.show()
    resp = int(input('Which cluster do you want to keep? (1/2)'))
if resp != 1 and resp != 2:
raise BaseException("Selected option not available.")
return resp - 1
def get_hsi_data(path):
orig_name = [a for a in os.listdir(path) if '.hdr' in a and 'DARK' not in a and 'WHITE' not in a]
dark_name = [a for a in os.listdir(path) if '.hdr' in a and 'DARK' in a]
white_name = [a for a in os.listdir(path) if '.hdr' in a and 'WHITE' in a]
I = open_image(os.path.join(path, orig_name[0]))
W = open_image(os.path.join(path, white_name[0]))
D = open_image(os.path.join(path, dark_name[0]))
return (I.load(), W.load(), D.load())
def get_no_background_pixels(mat):
return np.where(mat != 0)
def apply_filters(mat):
mat_cpy = np.copy(mat)
for i in range(mat.shape[0]):
mat_cpy[i] = savgol_filter(mat_cpy[i], 21, 2, 1)
# mat_cpy[i] = savgol_filter(mat_cpy[i], 25, 3, 2)
return snv_filter(mat_cpy)
def preprocess_training_data_full(choose_bac: int, semipath: str):
"""
    choose_bac is the index of the bacterium to process (processing all of them at once takes too long)
    returns the calibrated array (based on the dark and white HDRs) and the background-free preprocessed cube for that bacterium
"""
bac_dirs = os.listdir(DATASTORE)
for ind, bac in enumerate(bac_dirs):
if (choose_bac == ind):
individual_bac_dir = os.path.join(os.path.join(DATASTORE, bac), semipath)
I, W, D = get_hsi_data(individual_bac_dir)
W = get_fewer_lines(W, 25)
D = get_fewer_lines(D, 25)
arr_calib = calibration(I, W, D)
cube = preprocess_training_data_from_calibration(arr_calib)
return [arr_calib, cube]
def get_file_cube_from_folder_to_train(folder, bac_index, filename = 'calib.pickle'):
bacs = os.path.join(SAVESTORE, folder)
for i, bac in enumerate(os.listdir(bacs)):
if i == bac_index:
ind_bac_dir = os.path.join(bacs, bac)
calib = load_pickle(filename, ind_bac_dir)
return calib
def preprocess_training_data_from_calibration(arr_calib):
cube = replace_median(arr_calib)
mat = hsi2matrix(cube)
mask1, mask2 = hsi_remove_background(mat)
mask1 = mat2hsi(mask1, arr_calib.shape)
mask2 = mat2hsi(mask2, arr_calib.shape)
cluster = which_cluster_to_mantain(mask1, mask2)
retCube = mask1
if cluster == 1:
retCube = mask2
return retCube[:, :, 1:256-14]
def replace_zero_in_background(originalCube, maskedCube):
cubecpy = np.copy(originalCube)
for i in range(cubecpy.shape[0]):
for j in range(cubecpy.shape[1]):
if maskedCube[i,j,0] == 0:
cubecpy[i,j,:] = 0
return cubecpy
def preprocess_training_data_from_calibration_no_filters(arr_calib):
cube = replace_median(arr_calib)
mat = hsi2matrix(cube)
mask1, mask2 = hsi_remove_background(mat)
mask1 = mat2hsi(mask1, arr_calib.shape)
mask2 = mat2hsi(mask2, arr_calib.shape)
cluster = which_cluster_to_mantain(mask1, mask2)
retCube = cube
if cluster == 0:
retCube = replace_zero_in_background(retCube, mask1)
else:
retCube = replace_zero_in_background(retCube, mask2)
return retCube[:, :, 1:256-14]
def replace_median(cube):
x,y,z = cube.shape
for i in range(z):
rows, cols = np.where(cube[:,:,i] == 0)
for j in range(len(rows)):
if rows[j] > 1 and cols[j] > 1 and rows[j] < x - 1 and cols[j] < y - 1:
wdn = cube[rows[j]-1:rows[j]+2, cols[j]-1: cols[j]+2, i]
r, _ = np.where(wdn == 0)
if len(r) == 1:
                    # take the median of the non-zero neighbours (np.where would return indices, not values)
                    cube[rows[j], cols[j], i] = np.median(wdn[wdn != 0])
return cube
def remove_mean_of_spectre(mat):
return mat - np.mean(mat)
################################ HELPERS #######################################
def save_pickle(path, filename, p):
pickle_out = open(os.path.join(path, filename), "wb")
pickle.dump(p, pickle_out)
pickle_out.close()
def save_all(path, calib, masked):
try:
os.makedirs(path)
except:
print("Skipped - Directory already created!")
save_pickle(path, 'calib.pickle', calib)
save_pickle(path, 'masked.pickle', masked)
def load_pickle(filename, dirpath):
path = os.path.join(dirpath, filename)
pickle_in = open(path, "rb")
return pickle.load(pickle_in)
def plot_dif_spectrum_refs(refs: list,labels:list, ismat=False, plotTest=True,onlyCurves=False, saveDir = None):
    mats = refs
    if not ismat:
        # build a new list; appending to refs while iterating over it would loop forever
        mats = [hsi2matrix(i) for i in refs]
xmin = mats[0].shape[0]
for i in mats:
xmin = min(xmin, i.shape[0])
means = []
for i in range(len(mats)):
mats[i] = mats[i][:xmin,:]
# mats[i] = mats[i] - np.mean(mats[i])
means.append(np.mean(mats[i], axis=0))
s = ""
if not onlyCurves:
for i in range(0,len(mats),2):
s += "BAC: {}\n".format(labels[i//2])
s += "RMSE: {}\nMean: {}\n\n".format(
math.sqrt(np.mean(np.square(mats[i] - mats[i+1]))), np.mean(mats[i]) - np.mean(mats[i+1]))
plt.figure(figsize=(10,10))
x = np.linspace(0, mats[0].shape[1], mats[0].shape[1])
for i in range(len(means)):
# line, name = "-", "Spt"
if plotTest:
line = '--' if i % 2 == 0 else '-'
name = 'Train' if i % 2 == 0 else 'Test'
plt.plot(x, means[i], line, color=COLORS[labels[i//2]],linewidth=2,
label='{}-{}'.format(name,labels[i//2]))
plt.figlegend(bbox_to_anchor=(1.05, 1), loc='upper left',
borderaxespad=0., fontsize=12)
plt.text(175, -0.25, s, size=12)
# s = "{}".format(labels[0])
# for i in range(1,len(labels)):
# s += "-x-{}".format(labels[i])
plt.title(s)
    if saveDir is not None:
        plt.savefig(saveDir)  # save before show(), otherwise the figure is already cleared
    plt.show()
def get_cube_by_index(path, index, filename):
bac = get_dir_name(path, index)
return load_pickle(filename, Join(path, bac))
def get_dir_name(path, index):
return os.listdir(path)[index]
def show_img_on_wave(cube, layer):
mat = get_layer(cube, layer)
plt.imshow(mat, cmap='gray')
plt.show()
def plot_spectre(cube, isCube=True):
mat = cube
if isCube:
mat = hsi2matrix(cube)
nn = np.mean(mat, axis=0)
x = np.linspace(0, mat.shape[1], mat.shape[1])
plt.xlabel("Comprimento de onda (nm)")
plt.ylabel("Pseudo-absortância")
plt.plot(x,nn)
def remove_blank_lines(mat):
return mat[~np.all(mat == 0, axis=1)]
def remove_spectrum(x, s=-1,f=-1):
ss, ff = 50,210
if s != -1:
ss = s
if f != -1:
ff = f
return x[:,ss:ff]
testpath = Join(SAVESTORE, 'Test')
trainpath = Join(SAVESTORE, 'Train')
indx = [7]
bac_names = []
mats = []
for i in indx:
tr = get_cube_by_index(trainpath, i, 'mat_nobg.pickle')
tt = get_cube_by_index(testpath, i, 'mat_nobg.pickle')
mats.append((tr))
mats.append((tt))
for i in indx:
bac_names.append(LABELS[i])
plot_dif_spectrum_refs(mats, bac_names, ismat=True, plotTest=True, onlyCurves=True)
```
| github_jupyter |
**Student BENREKIA Mohamed Ali (IASD 2021-2022)**
```
%matplotlib inline
import numpy as np
from scipy.linalg import norm
import matplotlib.pyplot as plt
import seaborn as sns
%load_ext autoreload
%autoreload 2
```
# Loading data
```
!wget https://raw.githubusercontent.com/nishitpatel01/predicting-age-of-abalone-using-regression/master/Abalone_data.csv
# Use this code to read from a CSV file.
import pandas as pd
U = pd.read_csv('/content/Abalone_data.csv')
U.shape
U.info()
U.head()
U.tail()
U.Sex=U.Sex.astype('category').cat.codes
U.head()
U.describe(include='all')
U.sample(10)
U.isnull().sum()
U.dtypes
U.hist(figsize=(10,15))
corr = U.corr()
corr
sns.heatmap(corr, annot=False)
# split train - validation
shuffle_df = U.sample(frac=1)
# Define a size for your train set
train_size = int(0.8 * len(U))
# Split your dataset
train_set = shuffle_df[:train_size]
valid_set = shuffle_df[train_size:]
#split feature target
x_train = train_set.drop("Rings",axis=1).to_numpy()
y_train = train_set["Rings"]
x_valid = valid_set.drop("Rings",axis=1)
y_valid = valid_set["Rings"]
#no need
mA = x_train.mean(axis=0)
sA = x_train.std(axis=0)
x_train = (x_train-mA)/sA
x_valid = (x_valid-mA)/sA
# no need
m = y_train.mean()
y_train = y_train-m
y_valid = y_valid-m
x_train.shape[1]
```
# Problem definition (Linear regression)
```
class RegPb(object):
'''
A class for regression problems with linear models.
Attributes:
X: Data matrix (features)
y: Data vector (labels)
n,d: Dimensions of X
loss: Loss function to be considered in the regression
'l2': Least-squares loss
lbda: Regularization parameter
'''
# Instantiate the class
def __init__(self, X, y,lbda=0,loss='l2'):
self.X = X
self.y = y
self.n, self.d = X.shape
self.loss = loss
self.lbda = lbda
# Objective value
def fun(self, w):
if self.loss=='l2':
return np.square(self.X.dot(w) - self.y).mean() + self.lbda * norm(w) ** 2
else:
return np.square(self.X.dot(w) - self.y).mean()
"""
# Partial objective value
def f_i(self, i, w):
if self.loss=='l2':
return norm(self.X[i].dot(w) - self.y[i]) ** 2 / (2.) + self.lbda * norm(w) ** 2
else:
return norm(self.X[i].dot(w) - self.y[i]) ** 2 / (2.)
"""
# Full gradient computation
def grad(self, w):
if self.loss=='l2':
return self.X.T.dot(self.X.dot(w) - self.y) * (2/self.n) + 2 * self.lbda * w
else:
return self.X.T.dot(self.X.dot(w) - self.y) * (2/self.n)
# Partial gradient
def grad_i(self,i,w):
x_i = self.X[i]
if self.loss=='l2':
return (2/self.n) * (x_i.dot(w) - self.y[i]) * x_i + 2 * self.lbda*w
else:
return (2/self.n) * (x_i.dot(w) - self.y[i]) * x_i
"""
# Lipschitz constant for the gradient
def lipgrad(self):
if self.loss=='l2':
L = norm(self.X, ord=2) ** 2 / self.n + self.lbda
"""
lda = 1. / x_train.shape[0] ** (0.5)
pblinreg = RegPb(x_train, y_train, lbda=lda, loss='l2')
```
**PCA**
```
U, s, V = np.linalg.svd(x_train.T.dot(x_train))
eig_values, eig_vectors = s, U
explained_variance=(eig_values / np.sum(eig_values))*100
plt.figure(figsize=(8,4))
plt.bar(range(8), explained_variance, alpha=0.6)
plt.ylabel('Percentage of explained variance')
plt.xlabel('Dimensions')
# calculating our new axis
pc1 = x_train.dot(eig_vectors[:,0])
pc2 = x_train.dot(eig_vectors[:,1])
plt.plot(pc1, pc2, '.')
plt.axis('equal');
```
# Batch Gradient Descent
```
def batch_grad(w0,problem, stepchoice=0, lr= 0.01, n_iter=1000,verbose=False):
# objective history
objvals = []
# Number of samples
n = problem.n
# Initial value of current iterate
w = w0.copy()
nw = norm(w)
# Current objective
obj = problem.fun(w)
objvals.append(obj);
# Initialize iteration counter
k=0
# Plot initial quantities of interest
if verbose:
print("Gradient Descent")
print(' | '.join([name.center(8) for name in ["iter", "MSE_Loss"]]))
print(' | '.join([("%d" % k).rjust(8),("%.2e" % obj).rjust(8)]))
# Main loop
while (k < n_iter ):#and nw < 10**100
# gradient calculation
gr = np.zeros(d)
gr = problem.grad(w)
if stepchoice==0:
w[:] = w - lr * gr
elif stepchoice>0:
if (k*nb*10) % n == 0:
sk = float(lr/stepchoice)
w[:] = w - sk * gr
nw = norm(w) #Computing the norm to measure divergence
obj = problem.fun(w)
k += 1
# Plot quantities of interest at the end of every epoch only
objvals.append(obj)
if verbose:
print(' | '.join([("%d" % k).rjust(8),("%.2e" % obj).rjust(8)]))
# End of main loop
#################
# Plot quantities of interest for the last iterate (if needed)
if k % n_iter > 0:
objvals.append(obj)
if verbose:
print(' | '.join([("%d" % k).rjust(8),("%.2e" % obj).rjust(8)]))
# Outputs
w_output = w.copy()
return w_output, np.array(objvals)
```
**Different Learning rates**
```
nb_epochs = 100
n = pblinreg.n
d = pblinreg.d
w0 = np.zeros(d)
valsstep0 = [0.1,0.01,0.001,0.0001,0.00001]
nvals = len(valsstep0)
objs = np.zeros((nvals,nb_epochs+1))
for val in range(nvals):
w_temp, objs_temp = batch_grad(w0,pblinreg, lr=valsstep0[val], n_iter=nb_epochs)
objs[val] = objs_temp
epochs = range(1,102)
plt.figure(figsize=(7, 5))
for val in range(nvals):
plt.plot(epochs, objs[val], label="BG - "+str(valsstep0[val]), lw=2)
plt.title("Convergence plot", fontsize=16)
plt.xlabel("#epochs", fontsize=14)
plt.ylabel("Objective", fontsize=14)
plt.legend()
plt.show()
```
# Accelerated Gradient Descent
```
def accelerated_grad(w0,problem,lr=0.001,method="nesterov",momentum=None,n_iter=100,verbose=False):
"""
A generic code for Nesterov's accelerated gradient method.
Inputs:
w0: Initial vector
problem: Problem structure
lr: Learning rate
method: Type of acceleration technique that is used
'nesterov': Accelerated gradient for convex functions (Nesterov)
momentum: Constant value for the momentum parameter (only used if method!='nesterov')
n_iter: Number of iterations
verbose: Boolean value indicating whether the outcome of every iteration should be displayed
Outputs:
z_output: Final iterate of the method
objvals: History of function values in z (output as a Numpy array of length n_iter+1)
"""
############
# Initial step: Compute and plot some initial quantities
# objective history
objvals = []
# Initial value of current and next iterates
w = w0.copy()
w_new = w0.copy()
z = w0.copy()
if method=='nesterov':
# Initialize parameter sequence
tk = 0
tkp1 = 1
momentum = 0
# Initialize iteration counter
k=0
# Initial objective
obj = problem.fun(z)
objvals.append(obj);
# Plot the initial values if required
if verbose:
print("Accelerated Gradient/"+method)
print(' | '.join([name.center(8) for name in ["iter", "fval"]]))
print(' | '.join([("%d" % k).rjust(8),("%.2e" % obj).rjust(8)]))
#######################
# Main loop
while (k < n_iter):
# Perform the accelerated iteration
# Gradient step
g = problem.grad(z)
w_new[:] = z - lr * g
# Momentum step
z[:] = w_new + momentum*(w_new-w)
# Update sequence
w[:] = w_new[:]
# Adjusting the momentum parameter if needed
if method=='nesterov':
tkp1 = 0.5*(1+np.sqrt(1+4*(tk**2)))
momentum = (tk-1)/tkp1
tk = tkp1
# Compute and plot the new objective value and distance to the minimum
obj = problem.fun(z)
objvals.append(obj)
# Plot these values if required
if verbose:
print(' | '.join([("%d" % k).rjust(8),("%.2e" % obj).rjust(8)]))
# Increment the iteration counter
k += 1
# End loop
#######################
# Output
z_output = z.copy()
return z_output, np.array(objvals)
```
**GD Vs NAGD**
```
nb_epochs = 100
n = pblinreg.n
d = pblinreg.d
w0 = np.zeros(d)
learning_rate = 0.01
w_g, obj_g = batch_grad(w0,pblinreg, lr=learning_rate, n_iter=nb_epochs)
w_n, obj_n = accelerated_grad(w0,pblinreg, lr=learning_rate, n_iter=nb_epochs)
epochs = range(1,102)
plt.figure(figsize=(7, 5))
plt.plot(epochs, obj_g, label="GD", lw=2)
plt.plot(epochs, obj_n, label="NAGD", lw=2)
plt.title("Convergence plot", fontsize=16)
plt.xlabel("#epochs", fontsize=14)
plt.ylabel("Objective", fontsize=14)
plt.legend()
plt.show()
```
# Stochastic gradient Descent
```
def stoch_grad(w0,problem, stepchoice=0, lr= 0.01, n_iter=1000,nb=1,average=0,scaling=0,with_replace=False,verbose=False):
"""
A code for gradient descent with various step choices.
Inputs:
w0: Initial vector
problem: Problem structure
problem.fun() returns the objective function, which is assumed to be a finite sum of functions
problem.n returns the number of components in the finite sum
problem.grad_i() returns the gradient of a single component f_i
stepchoice: Strategy for computing the stepsize
0: Constant step size equal to lr
1: Step size decreasing in lr/ stepchoice
lr: Learning rate
n_iter: Number of iterations, used as stopping criterion
nb: Number of components drawn per iteration/Batch size
1: Classical stochastic gradient algorithm (default value)
problem.n: Classical gradient descent (default value)
average: Indicates whether the method computes the average of the iterates
0: No averaging (default)
1: With averaging
scaling: Use a diagonal scaling
0: No scaling (default)
1: Average of magnitudes (RMSProp)
2: Normalization with magnitudes (Adagrad)
with_replace: Boolean indicating whether components are drawn with or without replacement
True: Components drawn with replacement
False: Components drawn without replacement (Default)
verbose: Boolean indicating whether information should be plot at every iteration (Default: False)
Outputs:
w_output: Final iterate of the method (or average if average=1)
objvals: History of function values (Numpy array of length n_iter at most)
"""
############
# Initial step: Compute and plot some initial quantities
# objective history
objvals = []
# iterates distance to the minimum history
normits = []
"""
# Lipschitz constant
L = problem.lipgrad()
"""
# Number of samples
n = problem.n
# Initial value of current iterate
w = w0.copy()
nw = norm(w)
# Average (if needed)
if average:
wavg=np.zeros(len(w))
#Scaling values
if scaling>0:
mu=1/(2 *(n ** (0.5)))
v = np.zeros(d)
beta = 0.8
# Initialize iteration counter
k=0
# Current objective
obj = problem.fun(w)
objvals.append(obj);
# Plot initial quantities of interest
if verbose:
print("Stochastic Gradient, batch size=",nb,"/",n)
print(' | '.join([name.center(8) for name in ["iter", "MSE_Loss"]]))
print(' | '.join([("%d" % k).rjust(8),("%.2e" % obj).rjust(8)]))
################
# Main loop
while (k < n_iter ):#and nw < 10**100
# Draw the batch indices
ik = np.random.choice(n,nb,replace=with_replace)# Batch gradient
# Stochastic gradient calculation
sg = np.zeros(d)
for j in range(nb):
gi = problem.grad_i(ik[j],w)
sg = sg + gi
sg = (1/nb)*sg
if scaling>0:
if scaling==1:
# RMSProp update
v = beta*v + (1-beta)*sg*sg
elif scaling==2:
# Adagrad update
v = v + sg*sg
sg = sg/(np.sqrt(v+mu))
if stepchoice==0:
w[:] = w - lr * sg
elif stepchoice>0:
if (k*nb*10) % n == 0:
sk = float(lr/stepchoice)
w[:] = w - sk * sg
nw = norm(w) #Computing the norm to measure divergence
if average:
# If average, compute the average of the iterates
wavg = k/(k+1) *wavg + w/(k+1)
obj = problem.fun(wavg)
else:
obj = problem.fun(w)
k += 1
# Plot quantities of interest at the end of every epoch only
if k % int(n/nb) == 0:
objvals.append(obj)
if verbose:
print(' | '.join([("%d" % k).rjust(8),("%.2e" % obj).rjust(8)]))
# End of main loop
#################
# Plot quantities of interest for the last iterate (if needed)
if (k*nb) % n > 0:
objvals.append(obj)
if verbose:
print(' | '.join([("%d" % k).rjust(8),("%.2e" % obj).rjust(8)]))
# Outputs
if average:
w_output = wavg.copy()
else:
w_output = w.copy()
return w_output, np.array(objvals)
```
**Constant Vs Decreasing LR**
```
nb_epochs = 60
n = pblinreg.n
d = pblinreg.d
w0 = np.zeros(d)
# Run a - GD with constant stepsize
w_a, obj_a = stoch_grad(w0,pblinreg, n_iter=nb_epochs,nb=n)
# Run b - Stochastic gradient with constant stepsize
# The version below may diverge, in which case the bound on norm(w) in the code will be triggered
w_b, obj_b = stoch_grad(w0,pblinreg, lr=0.0001, n_iter=nb_epochs*n,nb=1)
# Run Gradient descent with decreasing stepsize
w_c, obj_c = stoch_grad(w0,pblinreg, stepchoice=0.5, lr=0.2, n_iter=nb_epochs,nb=n)
# Run Stochastic gradient with decreasing stepsize
w_d, obj_d = stoch_grad(w0,pblinreg, stepchoice=0.5, lr=0.2, n_iter=nb_epochs*n,nb=1)
epochs = range(1,62)
plt.figure(figsize=(7, 5))
plt.plot(epochs, obj_a, label="GD - const-lbda", lw=2)
plt.plot(epochs, obj_b, label="SG - const-lbda", lw=2)
plt.plot(epochs, obj_c, label="GD - decr-lbda", lw=2)
plt.plot(epochs, obj_d, label="SG - decr-lbda", lw=2)
plt.title("Convergence plot", fontsize=16)
plt.xlabel("#epochs", fontsize=14)
plt.ylabel("Objective MSE", fontsize=14)
plt.legend()
plt.show()
```
**Different Constant LR**
```
nb_epochs = 60
n = pblinreg.n
d = pblinreg.d
w0 = np.zeros(d)
valsstep0 = [0.01,0.001,0.0001,0.00001]
nvals = len(valsstep0)
objs = np.zeros((nvals,nb_epochs+1))
for val in range(nvals):
w_temp, objs_temp = stoch_grad(w0,pblinreg, lr=valsstep0[val], n_iter=nb_epochs*n,nb=1)
objs[val] = objs_temp
plt.figure(figsize=(7, 5))
for val in range(nvals):
plt.plot(epochs, objs[val], label="SG - "+str(valsstep0[val]), lw=2)
plt.title("Convergence plot", fontsize=16)
plt.xlabel("#epochs", fontsize=14)
plt.ylabel("Objective", fontsize=14)
plt.legend()
plt.show()
```
**Different decreasing LR**
```
nb_epochs = 60
n = pblinreg.n
nbset = 1
w0 = np.zeros(d)
decstep = [1,2,10,20,100]
nvals = len(decstep)
objs = np.zeros((nvals,nb_epochs+1))
for val in range(nvals):
_, objs[val] = stoch_grad(w0,pblinreg,stepchoice=decstep[val],lr=0.02, n_iter=nb_epochs*n,nb=1)
plt.figure(figsize=(7, 5))
for val in range(nvals):
plt.semilogy(epochs, objs[val], label="SG - "+str(decstep[val]), lw=2)
plt.title("Convergence plot", fontsize=16)
plt.xlabel("#epochs", fontsize=14)
plt.ylabel("Objective", fontsize=14)
plt.legend()
plt.show()
```
**Different Batch size**
```
nb_epochs = 100
n = pblinreg.n
w0 = np.zeros(d)
# Stochastic gradient (batch size 1)
w_a, obj_a= stoch_grad(w0,pblinreg, lr=0.0001, n_iter=nb_epochs*n,nb=1)
# Batch stochastic gradient (batch size n/100)
nbset=int(n/100)
w_b, obj_b = stoch_grad(w0,pblinreg, lr=0.0001, n_iter=nb_epochs*100,nb=nbset)
# Batch stochastic gradient (batch size n/10)
nbset=int(n/10)
w_c, obj_c = stoch_grad(w0,pblinreg, lr=0.0001, n_iter=int(nb_epochs*10),nb=nbset)
# Batch stochastic gradient (batch size n/2)
nbset=int(n/2)
w_d, obj_d = stoch_grad(w0,pblinreg, lr=0.0001, n_iter=int(nb_epochs*2),nb=nbset)
# Gradient descent (batch size n, taken without replacement)
w_f, obj_f = stoch_grad(w0,pblinreg, lr=0.0001, n_iter=int(nb_epochs),nb=n)
nbset=int(n/100)
w_b, obj_b = stoch_grad(w0,pblinreg, lr=0.0001, n_iter=int(nb_epochs*100),nb=nbset,verbose=True)
print(len(obj_b))
epochs = range(1,102)
plt.figure(figsize=(7, 5))
plt.semilogy(epochs, obj_a, label="SG (batch=1)", lw=2)
plt.semilogy(epochs, obj_b, label="Batch SG - n/100", lw=2)
plt.semilogy(epochs, obj_c, label="Batch SG - n/10", lw=2)
plt.semilogy(epochs, obj_d, label="Batch SG - n/2", lw=2)
plt.semilogy(epochs, obj_f, label="GD", lw=2)
plt.title("Convergence plot", fontsize=16)
plt.xlabel("#epochs", fontsize=14)
plt.ylabel("Objective", fontsize=14)
plt.legend()
plt.show()
plt.figure(figsize=(7, 5))
plt.plot(epochs, obj_a, label="SG (batch=1)", lw=2)
plt.plot(epochs, obj_b, label="Batch SG - n/100", lw=2)
plt.plot(epochs, obj_c, label="Batch SG - n/10", lw=2)
plt.plot(epochs, obj_d, label="Batch SG - n/2", lw=2)
plt.plot(epochs, obj_f, label="GD", lw=2)
plt.title("Convergence plot", fontsize=16)
plt.xlabel("#epochs", fontsize=14)
plt.ylabel("Objective", fontsize=14)
plt.legend()
plt.show()
```
# Other variants for SGD
**batch with replacement**
```
#Batch with replacement for GD, SGD and Batch SGD
nb_epochs = 100
n = pblinreg.n
w0 = np.zeros(d)
nruns = 3
for i in range(nruns):
# Run standard stochastic gradient (batch size 1)
_, obj_a= stoch_grad(w0,pblinreg, lr=0.0001, n_iter=nb_epochs*n,nb=1,with_replace=True)
# Batch stochastic gradient (batch size n/10)
nbset=int(n/2)
_, obj_b= stoch_grad(w0,pblinreg, lr=0.0001, n_iter=int(nb_epochs*n/nbset),nb=nbset,with_replace=True)
# Batch stochastic gradient (batch size n, with replacement)
nbset=n
_, obj_c=stoch_grad(w0,pblinreg, lr=0.0001, n_iter=int(nb_epochs*n/nbset),nb=nbset,with_replace=True)
if i<nruns-1:
plt.semilogy(obj_a,color='orange',lw=2)
plt.semilogy(obj_b,color='green', lw=2)
plt.semilogy(obj_c,color='blue', lw=2)
plt.semilogy(obj_a,label="SG",color='orange',lw=2)
plt.semilogy(obj_b,label="batch n/2",color='green', lw=2)
plt.semilogy(obj_c,label="batch n",color='blue', lw=2)
plt.title("Convergence plot", fontsize=16)
plt.xlabel("#epochs ", fontsize=14)
plt.ylabel("Objective ", fontsize=14)
plt.legend()
```
**Averaging**
```
# Comparison of stochastic gradient with and without averaging
nb_epochs = 100
n = pblinreg.n
w0 = np.zeros(d)
# Run standard stochastic gradient without averaging
_, obj_a =stoch_grad(w0,pblinreg, lr=0.0001, n_iter=nb_epochs*n,nb=1)
# Run stochastic gradient with averaging
_, obj_b =stoch_grad(w0,pblinreg, lr=0.0001, n_iter=nb_epochs*n,nb=1,average=1)
# Plot the results
plt.figure(figsize=(7, 5))
plt.semilogy(obj_a,label='SG',color='orange',lw=2)
plt.semilogy(obj_b,label='SG+averaging',color='red', lw=2)
plt.title("Convergence plot", fontsize=16)
plt.xlabel("#epochs (log scale)", fontsize=14)
plt.ylabel("Objective (log scale)", fontsize=14)
plt.legend()
```
**Diagonal Scaling**
```
# Comparison of stochastic gradient with and without diagonal scaling
nb_epochs = 60
n = pblinreg.n
w0 = np.zeros(d)
# Stochastic gradient (batch size 1) without diagonal scaling
w_a, obj_a= stoch_grad(w0,pblinreg, lr=0.0001, n_iter=nb_epochs*n,nb=1)
# Stochastic gradient (batch size 1) with RMSProp diagonal scaling
w_b, obj_b = stoch_grad(w0,pblinreg, lr=0.0001, n_iter=nb_epochs*n,nb=1,average=0,scaling=1)
# Stochastic gradient (batch size 1) with Adagrad diagonal scaling - Constant step size
w_c, obj_c = stoch_grad(w0,pblinreg, lr=0.0001, n_iter=nb_epochs*n,nb=1,average=0,scaling=2)
# Stochastic gradient (batch size 1) with Adagrad diagonal scaling - Decreasing step size
w_d, obj_d = stoch_grad(w0,pblinreg, lr=0.0001, n_iter=nb_epochs*n,nb=1,average=0,scaling=2)
# Plot the results - Comparison of stochastic gradient with and without diagonal scaling
# In terms of objective value (logarithmic scale)
plt.figure(figsize=(7, 5))
plt.semilogy(obj_a, label="SG", lw=2)
plt.semilogy(obj_b, label="SG/RMSProp", lw=2)
plt.semilogy(obj_c, label="SG/Adagrad (Cst)", lw=2)
plt.semilogy(obj_d, label="SG/Adagrad (Dec)", lw=2)
plt.title("Convergence plot", fontsize=16)
plt.xlabel("#epochs (log scale)", fontsize=14)
plt.ylabel("Objective (log scale)", fontsize=14)
plt.legend()
plt.show()
```
# Regression (Lasso with iterative soft thresholding)
**Lasso regression with ISTA**
```
# Minimization function with l1 norm (Lasso regression)
def cost(w, X, y, lbda):
return np.square(X.dot(w) - y).mean() + lbda * norm(w,1)
def ista_solve( A, d, lbdaa ):
"""
Iterative soft-thresholding solves the minimization problem
Minimize |Ax-d|_2^2 + lambda*|x|_1 (Lasso regression)
"""
max_iter = 300
objvals = []
tol = 10**(-3)
tau = 1.5/np.linalg.norm(A,2)**2
    n = A.shape[1]
    d = np.asarray(d).reshape(-1, 1)  # make d a column vector so A@w - d has shape (m,1)
    w = np.zeros((n,1))
for j in range(max_iter):
z = w - tau*(A.T@(A@w-d))
w_old = w
w = np.sign(z) * np.maximum(np.abs(z)-tau*lbdaa, np.zeros(z.shape))
if j % 100 == 0:
obj = cost(w,A,d,lbdaa)
objvals.append(obj)
if np.linalg.norm(w - w_old) < tol:
break
return w, objvals
#we iterate over multiple values of lambda
lmbdas = [0.000001, 0.000002, 0.00001, 0.00002, 0.0001, 0.0002, 0.001, 0.002, 0.01, 0.02, 0.1, 0.2, 1, 2, 10, 20]
mse_list=[]
for lda in lmbdas:
    w_star, obj_x = ista_solve( x_train, y_train, lda)
mse_list.append(obj_x[-1])
x_range = range(1,len(lmbdas)+1)
plt.figure(figsize=(7, 5))
plt.plot(x_range,mse_list, label="Lasso-ISTA", lw=2)
plt.title("Best Lambda factor", fontsize=16)
plt.xlabel("Lambda", fontsize=14)
plt.xticks(np.arange(len(lmbdas)),lmbdas,rotation=40)
plt.ylabel("Objective Lasso reg", fontsize=14)
plt.legend()
plt.show()
w_star, obj_x = ista_solve( x_train, y_train, 0.00001)
```
# Performance on Test set
```
#MSE on lasso-ISTA
cost(w_star, x_valid, y_valid, 0.00001)
# MSE on best sgd algo
cost(w_b, x_valid, y_valid, 0.00001)
```
| github_jupyter |
# The Monte Carlo Simulation of Radiation Transport
We will discuss the essential physics and methods needed to do gamma-quanta (photons with high enough energy) radiation transport using Monte Carlo methods. We will cover interaction processes and the basics of radiation passing through matter, as well as the Monte Carlo method itself and how it helps with radiation propagation.
## Glossary
- $h$ Planck's constant
- $\hbar$ reduced Planck's constant, $h/2\pi$
- $\omega$ photon circular frequency,
- $\hbar \omega$ photon energy
- $\lambda$ photon wavelength
- $\theta$ scattering angle, between incoming and outgoing photon
- $\phi$ azimuthal angle
- $c$ speed of light in vacuum
- $m_e$ electron mass
- $r_e$ classical electron radius
- $N_A$ Avogadro Constant, 6.02214076$\times$10$^{23}$ mol$^{-1}$
## Basic physics
We cover the typical energies and wavelengths at which photons behave like point-like particles when interacting with matter.
### Units
A common unit for photon energy is the electron-volt (eV). This is the kinetic energy an electron acquires when it moves in an electric field (say, between the plates of a capacitor) with a potential difference of 1 Volt. This is a very small energy, equal to about $1.6\times10^{-19}$ Joules. Typical energies we are interested in are in the 1 keV to 100 MeV range.
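As a minimal arithmetic check of these magnitudes (the conversion factor is simply the electron charge in Coulombs):
```
# converting the energy range of interest to Joules
eV = 1.6e-19                                   # 1 eV in Joules
for label, E_eV in [("1 keV", 1.0e3), ("100 MeV", 100.0e6)]:
    print(f"{label} = {E_eV*eV:.2e} J")
```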
### Spatial size and wavelength
Photons are massless particles, and it is very easy to compute the photon "size", which is its wavelength.
$$ \lambda = \frac{hc}{E_\gamma} = \frac{hc}{\hbar \omega} = \frac{2 \pi c}{\omega}$$
where $\lambda$ is the wavelength, $h$ is Planck's constant, $c$ is the speed of light and $E_\gamma$ is the photon energy. For example, let's compute the wavelength of a photon with energy 1 eV.
```
h = 6.625e-34
c = 3e8
hw = 1.0 * 1.6e-19 # 1 eV expressed in Joules
λ = h*c/hw
print(f"Photon wavelength = {λ*1.0e9} nanometers")
```
Thus, for a 1 keV photon we get a wavelength of about 1.2 nm, and for a 1 MeV photon we get a wavelength of about $1.2\times10^{-3}$ nm.
For comparison, a typical atom size is from 0.1 nm (He) to 0.4 nm (Fr and other heavy atoms). Therefore, for most interactions between photons and atoms in our energy range we can consider photons as particles, not waves.
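The same formula can be evaluated for these two energies as a quick check (a small sketch reusing `h` and `c` from the cell above):
```
# wavelengths for 1 keV and 1 MeV photons, reusing h and c defined above
for E_eV in (1.0e3, 1.0e6):
    hw = E_eV * 1.6e-19                        # photon energy in Joules
    λ = h * c / hw                             # wavelength in meters
    print(f"E = {E_eV/1.0e6:g} MeV -> wavelength = {λ*1.0e9:.2e} nanometers")
```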
## Basics of Monte Carlo methods
The method was first introduced by the Comte de Buffon as a needle-dropping experiment to calculate the value of $\pi$. Laplace extended Buffon's example by sampling points in a square to calculate the value of $\pi$. It is a very general method of stochastic integration of a function. It was successfully applied to particle transport (neutrons, in that case) by Enrico Fermi. With the growing availability of computers its use is growing exponentially - finance, radiation therapy, machine learning, astrophysics, optimization, you name it.
Let's try to calculate $\pi$ with the Laplace method, namely sample points uniformly in the square $[-1,1]\times[-1,1]$ and count how many fall inside the unit circle.
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
N = 1000 # number of points to sample
x = 2.0*np.random.random(N) - 1.0
y = 2.0*np.random.random(N) - 1.0
unitCircle = plt.Circle((0, 0), 1.0, color='r', fill=False)
fig, ax = plt.subplots(1, 1)
ax.plot(x, y, 'bo', label='Sampling in square')
ax.add_artist(unitCircle)
plt.axhline(0, color='grey')
plt.axvline(0, color='grey')
plt.title("Sampling in square")
plt.show()
r = np.sqrt(x*x + y*y)
#print(r)
pinside = r[r<=1.0]
Ninside = len(pinside)
print(4.0*Ninside/N)
```
The result should be close to $\pi$.
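To get a feeling for the accuracy, the same experiment can be repeated with an increasing number of samples (a small sketch; the statistical error of such a Monte Carlo estimate decreases roughly as $1/\sqrt{N}$):
```
rng = np.random.default_rng(2021)
for N in (10**3, 10**5, 10**7):
    x = 2.0*rng.random(N) - 1.0
    y = 2.0*rng.random(N) - 1.0
    inside = np.count_nonzero(x*x + y*y <= 1.0)   # points falling inside the unit circle
    pi_est = 4.0*inside/N
    print(f"N = {N:>8d}  estimate = {pi_est:.5f}  error = {abs(pi_est - np.pi):.1e}")
```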
## Basic Photon Interactions with atoms
There are several interaction processes of photons with media.
### Compton Scattering
Compton scattering is described by the Klein-Nishina formula, with the energy of the scattered photon directly tied to the incoming energy and the scattering angle
$$
\hbar \omega'=\frac{\hbar\omega}{1+\frac{\hbar \omega}{m_e c^2} (1 - \cos{\theta})}
$$
where the prime marks the particle after scattering. It is easy to see that for a backscattered photon ($\theta=\pi$, $\cos{\theta}=-1$) the energy of the scattered photon reaches its minimum, which means the scattered photon energy has the limits
$$
\frac{\hbar \omega }{1 + 2\hbar\omega/m_ec^2} \le \hbar\omega' \le \hbar\omega
$$
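As a small self-contained illustration of these limits (the same kinematics is implemented as `hw_prime` and `hwp_minimum` in the code below), for a 1 MeV incoming photon:
```
# scattered-photon energy range for a 1 MeV incoming photon (all energies in MeV)
me_c2 = 0.511                         # electron rest energy
hw = 1.0                              # incoming photon energy
hw_min = hw / (1.0 + 2.0*hw/me_c2)    # backscattering, cos(theta) = -1
print(f"{hw_min:.3f} MeV <= hw' <= {hw:.3f} MeV")
```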
The scattering cross-section (you could think of this as an unnormalized probability to be scattered to a given energy) is
$$
\frac{d\sigma}{d\hbar\omega'} = \pi r_e^2 \frac{m_ec^2}{(\hbar\omega)^2} \lbrace \frac{\hbar\omega}{\hbar\omega'} + \frac{\hbar\omega'}{\hbar\omega} +
\left ( \frac{m_ec^2}{\hbar\omega'} - \frac{m_ec^2}{\hbar\omega} \right )^2 -
2m_ec^2 \left ( \frac{1}{\hbar\omega'} - \frac{1}{\hbar\omega} \right ) \rbrace
$$
The full cross-section, where $x=2 \hbar\omega/m_e c^2$ is twice the photon energy relative to the electron rest energy, is
$$
\sigma=2\pi r_e^2\frac{1}{x}\left\lbrace \left ( 1 - \frac{4}{x} - \frac{8}{x^2} \right ) \log{(1+x)} +\frac{1}{2} + \frac{8}{x}-\frac{1}{2(1+x)^2} \right\rbrace
$$
Then we can divide the partial cross-section by the total cross-section and get the probability distribution of the scattered photon energy for different incoming photons. Let's plot a few graphs. As one can see, a cross-section has the dimension of area. They are very small, therefore cross-sections are measured in barns, one barn being $10^{-24}$ square centimeters.
For reference, let's also add the expression for the angular differential cross-section
$$
\frac{d\sigma}{d\omicron'} = \frac{1}{2} r_e^2 \left( \frac{\hbar\omega'}{\hbar\omega}\right)^2 \left(\frac{\hbar\omega}{\hbar\omega'} + \frac{\hbar\omega'}{\hbar\omega} - \sin^2{\theta}\right)
$$
Let's move to more appropriate units: energy will always be in MeV, and the unit of length for cross-sections will be femtometers (1 fm = $10^{-15}$ m). A barn is 100 square femtometers.
```
# useful constants
MeC2 = 0.511 # in MeV
Re = 2.82 # femtometers
# main functions to deal with cross-sections
def hw_prime(hw, cos_theta):
"""computes outgoing photon energy vs cosine of the scattered angle"""
hwp = hw/(1.0 + (1.0 - cos_theta)*hw/MeC2)
return hwp
def cosθ_from_hwp(hw, hwp):
return 1.0 - (MeC2/hwp - MeC2/hw)
def hwp_minimum(hw):
"""Computes minimum scattere energy in MeV given incoming photon energy hw"""
return hw/(1.0 + 2.0*hw/MeC2)
def total_cross_section(hw):
"""Klein-Nishina total cross-section, LDL p.358, eq (86.16)"""
if hw <= 0.0:
raise RuntimeError(f"Photon energy is negative: {hw}")
x = 2.0 * hw / MeC2
q = 1.0/x
z = (1.0 + x)
σ = 2.0*np.pi*Re*Re * q * ((1.0 - 4.0*q - 8.0*q*q)*np.log(z) + 0.5 + 8.0*q - 0.5/z/z)
return σ
def diff_cross_section_dhwp(hw, hwp):
"""Differential cross-section over outgoing photon energy"""
if hw <= 0.0:
raise RuntimeError(f"Photon energy is negative or zero: {hw}")
if hwp <= 0.0:
raise RuntimeError(f"Scattered photon energy is negative or zero: {hwp}")
if hwp < hwp_minimum(hw): # outgoing energy cannot be less than minimum allowed
return 0.0
ei = MeC2/hw
eo = MeC2/hwp
dσ_dhwp = np.pi*Re*Re * (ei/hw) * (ei/eo + eo/ei + (eo-ei)**2 - 2.0*(eo-ei))
return dσ_dhwp
def diff_cross_section_dOp(hw, θ):
"""Differential cross-section over outgoing photon differential angle"""
cst = np.cos(θ)
hwp = hw_prime(hw, cst)
rhw = hwp/hw
dσ_dOp = 0.5*np.pi*Re*Re * rhw*rhw*(rhw + 1.0/rhw - (1.0 - cst)*(1.0 + cst))
return dσ_dOp
def make_energyloss_curve(hw):
N = 101
hwm = hwp_minimum(hw)
hws = np.linspace(0.0, hw-hwm, N)
st = total_cross_section(hw)
sc = np.empty(101)
for k in range(0, len(hws)):
hwp = hw - hws[k]
sc[k] = diff_cross_section_dhwp(hw, hwp)/st
return hws, sc
q_p25, s_p25 = make_energyloss_curve(0.25)
q_p50, s_p50 = make_energyloss_curve(0.50)
q_1p0, s_1p0 = make_energyloss_curve(1.00)
fig, ax = plt.subplots(1, 1)
ax.plot(q_p25, s_p25, 'r-', lw=2, label='Scattering probability vs energy loss, 0.25MeV')
ax.plot(q_p50, s_p50, 'g-', lw=2, label='Scattering probability vs energy loss, 0.50MeV')
ax.plot(q_1p0, s_1p0, 'b-', lw=2, label='Scattering probability vs energy loss, 1.00MeV')
plt.title("Klein-Nishina")
plt.show()
def make_angular_curve(hw):
"""Helper function to make angular probability x,y arrays given incoming photon enenrgy, MeV"""
N = 181
theta_d = np.linspace(0.0, 180.0, N) # angles in degrees
theta_r = theta_d * np.pi / 180.0
st = total_cross_section(hw)
so = np.empty(N)
for k in range(0, len(so)):
so[k] = diff_cross_section_dOp(hw, theta_r[k]) * 2.0*np.pi / st
return theta_d, so
a_p25, s_p25 = make_angular_curve(0.25)
a_p50, s_p50 = make_angular_curve(0.50)
a_1p0, s_1p0 = make_angular_curve(1.00)
fig, ax = plt.subplots(1, 1)
ax.plot(a_p25, s_p25, 'r-', lw=2, label='Scattering angular probability, 0.25MeV')
ax.plot(a_p50, s_p50, 'g-', lw=2, label='Scattering angular probability, 0.50MeV')
ax.plot(a_1p0, s_1p0, 'b-', lw=2, label='Scattering angular probability, 1.00MeV')
plt.title("Klein-Nishina")
plt.show()
```
## Cross-sections
### Microscopic and Macroscopic cross-sections
We learned about so-called microscopic cross-sections, which describe one photon scattering on one electron. They are very small and are measured in barns ($10^{-24}$ cm$^2$). In real life photons interact with material objects measured in grams and kilograms. For that, we need the macroscopic cross-section. To get it, we have to multiply the microscopic one by $N$, the density of scatterers, as well as by the atomic number $Z$ (remember, we are scattering on electrons).
For Compton scattering in water, we could write
$$
\Sigma = \rho Z \frac{N_A}{M} \sigma
$$
where $N_A$ is Avogadro constant, $M$ is molar mass (total mass of $N_A$ molecules) and $\rho$ is the density. Lets check the units. Suppose density is in $g/cm^3$, Avogadro Constant is in mol$^{-1}$ and molar mass is in $g/mol$. Therefore, macroscopic cross-section is measured in $cm^{-1}$ and gives the base for linear attenuation coefficient
$$
P(x) = \exp{(-\Sigma x)}
$$
where one can see that the quantity under the exponent is dimensionless.
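As a rough numerical sketch of these two formulas for water (assuming $Z=10$ electrons per molecule, $M \approx 18\;g/mol$, $\rho = 1\;g/cm^3$, treating every electron as free, and reusing the Klein-Nishina `total_cross_section` defined above, which returns $\sigma$ in fm$^2$):
```
# Compton macroscopic cross-section of water at 1 MeV and the resulting attenuation
N_A = 6.02214076e23                     # Avogadro constant, 1/mol
Z, M, rho = 10, 18.015, 1.0             # electrons/molecule, g/mol, g/cm^3 (assumed water values)
sigma = total_cross_section(1.0)        # microscopic cross-section in fm^2
sigma_cm2 = sigma * 1.0e-26             # 1 fm^2 = 1e-26 cm^2
Sigma = rho * Z * N_A / M * sigma_cm2   # linear attenuation coefficient, 1/cm
print(f"sigma = {sigma/100.0:.3f} barn, Sigma = {Sigma:.4f} 1/cm, mean free path = {1.0/Sigma:.1f} cm")
print(f"P(10 cm) = {np.exp(-Sigma*10.0):.2f}")   # survival probability through 10 cm of water
```
This comes out close to the tabulated total attenuation of water at 1 MeV ($\approx 0.07\;cm^{-1}$), since Compton scattering dominates at this energy.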
### NIST cross-sections database
The National Institute of Standards and Technology provides a lot of precomputed cross-sections for elements and mixtures, for energies from 1 keV up to 10 GeV. One can find cross-sections at the [XCOM database](https://www.nist.gov/pml/xcom-photon-cross-sections-database). One can pick elements, materials, and mixtures and save them into a local file. What is worth mentioning is that XCOM provides the data as
$$
\Sigma = Z \frac{N_A}{M}\sigma
$$
where the density is specifically excluded. This is called the mass attenuation coefficient and is measured in $cm^2/g$. Using such units has certain advantages: e.g. if you compute photon transport in media where the density can change (say, inside a nuclear reactor where, due to heating, the density of water goes from $\sim$ 1$\;g/cm^3$ to about 0.75$\;g/cm^3$), it allows you to keep the interaction physics separate from the density. Multiplying the mass attenuation coefficient by the density gives you back the linear attenuation coefficient.
### Cross-sections for Water
Let's read the water cross-sections and plot them.
```
lines = None
with open('H2o.data', "r") as f:
lines = f.readlines()
header_len = 3
lines = lines[header_len:41] # remove header, and limit energy to 10MeV
energy = np.empty(len(lines)) # energy scale
coh_xs = np.empty(len(lines)) # coherent cross-section
inc_xs = np.empty(len(lines)) # incoherent cross-section
pht_xs = np.empty(len(lines)) # photo-effect cross-section
npp_xs = np.empty(len(lines)) # nuclear pair production
epp_xs = np.empty(len(lines)) # electron pair production
for k in range(0, len(lines)):
s = lines[k].split('|')
energy[k] = float(s[0])
coh_xs[k] = float(s[1])
inc_xs[k] = float(s[2])
pht_xs[k] = float(s[3])
npp_xs[k] = float(s[4])
epp_xs[k] = float(s[5])
```
Now we will plot together photoeffect, coherent, incoherent and total mass attenuation cross-sections.
```
plt.xscale("log")
plt.yscale("log")
plt.plot(energy, coh_xs, 'g-', linewidth=2)
plt.plot(energy, inc_xs, 'r-', linewidth=2)
plt.plot(energy, pht_xs, 'b-', linewidth=2)
plt.plot(energy, pht_xs+coh_xs+inc_xs, 'o-', linewidth=2) # total cross-section
#plt.plot(energy, npp_xs, 'c-', linewidth=2)
#plt.plot(energy, epp_xs, 'm-', linewidth=2)
plt.show()
```
One can see that for all practical purposes, considering only the photo-effect and Compton (aka incoherent) scattering is a good enough approximation.
## Compton Scattering Sampling
We will use Khan's method to sample Compton scattering.
```
def KhanComptonSampling(hw, rng):
"""Sample scattering energy after Compton interaction"""
α = 2.0*hw/MeC2 # double relative incoming photon energy
t = (α + 1.0)/(α + 9.0)
x = 0.0
while True:
y = 1.0 + α*rng.random()
if rng.random() < t:
if rng.random() < 4.0*(1.0 - 1.0/y)/y:
x = y
break
else:
y = (1.0 + α) / y
c = 2.0*y/α + 1.0
if rng.random() < 0.5*(c*c + 1.0/y):
x = y
break
return hw/x # scattered photon energy back
```
Let's test Compton sampling and compare it with microscopic differential cross-section
```
hw = 1.0 # MeV
hwm = hwp_minimum(hw)
Nt = 1000000
hwp = np.empty(Nt)
rng = np.random.default_rng(312345)
for k in range(0, len(hwp)):
hwp[k] = KhanComptonSampling(hw, rng)
```
OK, let's first check the minimum energy among the sampled values; it should be within the allowed range.
```
hwm_sampled = np.min(hwp)
print(f"Minimum allowed scattered energy: {hwm} vs actual sampled minimum {hwm_sampled}")
if hwm_sampled < hwm:
print("We have a problem with kinematics!")
count, bins, ignored = plt.hist(hwp, 20, density=True)
plt.show()
# plotting angular distribution
cosθ = cosθ_from_hwp(hw, hwp)
count, bins, ignored = plt.hist(cosθ, 20, density=True)
plt.show()
```
## Monte Carlo photon transport code
```
# several helper functions and constants
X = 0
Y = 1
Z = 2
def isotropic_source(rng):
cosθ = 2.0*rng.random() - 1.0 # uniform cosine of the azimuth angle
sinθ = np.sqrt((1.0 - cosθ)*(1.0 + cosθ))
φ = 2.0*np.pi*rng.random() # uniform polar angle
return np.array((sinθ*np.cos(φ), sinθ*np.sin(φ), cosθ))
def find_energy_index(scale, hw):
return np.searchsorted(scale, hw, side='right') - 1
def calculate_xs(xs, scale, hw, idx):
q = (hw - scale[idx])/(scale[idx+1] - scale[idx])
return xs[idx]*(1.0 - q) + xs[idx+1]*q
def transform_cosines(wx, wy, wz, cosθ, φ):
"""https://www.scratchapixel.com/lessons/mathematics-physics-for-computer-graphics/monte-carlo-methods-in-practice/monte-carlo-simulation"""
# print(wx, wy, wz, cosθ)
sinθ = np.sqrt((1.0 - cosθ)*(1.0 + cosθ))
cosφ = np.cos(φ)
sinφ = np.sin(φ)
if wz == 1.0:
return np.array((sinθ * cosφ, sinθ * sinφ, cosθ))
if wz == -1.0:
return np.array((sinθ * cosφ, -sinθ * sinφ, -cosθ))
denom = np.sqrt((1.0 - wz)*(1.0 + wz)) # denominator
wzcosφ = wz * cosφ
return np.array((wx * cosθ + sinθ * (wx * wzcosφ - wy * sinφ)/denom,
wy * cosθ + sinθ * (wy * wzcosφ + wx * sinφ)/denom,
wz * cosθ - denom * sinθ * cosφ))
def is_inside(pos):
"""Check is photon is inside world box"""
if pos[X] > 20.0:
return False
if pos[X] < -20.0:
return False
if pos[Y] > 20.0:
return False
if pos[Y] < -20.0:
return False
if pos[Z] > 20.0:
return False
if pos[Z] < -20.0:
return False
return True
# main MC loop
rng = np.random.default_rng(312345) # set RNG seed
Nt = 100 # number of trajectories
hw_src = 1.0 # initial energy, MeV
hw_max = energy[-1] # maximum energy in xs tables
pos_src = (0.0, 0.0, 0.0) # initial position
dir_src = (0.0, 0.0, 1.0) # initial direction
density = 1.0 # g/cm^3
for k in range(0, Nt): # loop over all trajectories
print(f"Particle # {k}")
# set energy, position and direction from source terms
hw = hw_src
gpos = np.array(pos_src, dtype=np.float64)
gdir = np.array(dir_src, dtype=np.float64) # could try isotropic source here
if hw < 0.0:
raise ValueError(f"Energy is negative: {hw}")
if hw > hw_max:
raise ValueError(f"Energy is too large: {hw}")
while True: # infinite loop over single trajectory till photon is absorbed or out of the box or out of energy range
idx = find_energy_index(energy, hw)
if idx < 0: # photon fell below 1keV energy threshold, kill it
break
phxs = calculate_xs(pht_xs, energy, hw, idx) # photo-effect cross-section
inxs = calculate_xs(inc_xs, energy, hw, idx) # incoherent, aka Compton cross-section
toxs = (phxs + inxs) # total cross-section
pathlength = - np.log(1.0 - rng.random()) # exponential distribution
pathlength /= (toxs*density) # path length now in cm, because we move from mass attenuation toxs to linear attenuation
#gpos = (gpos[X] + gdir[X]*pathlength, gpos[Y] + gdir[Y]*pathlength, gpos[Z] + gdir[Z]*pathlength) # move to the next interaction point
gpos = gpos + np.multiply(gdir, pathlength)
if not is_inside(gpos): # check if we are in volume of interest
break # we'out, done with trajectory
p_abs = phxs/toxs # probability of absorbtion
if rng.random() < p_abs: # sample absorbtion
break # photoeffect, photon is gone
# compton scattering
hwp = KhanComptonSampling(hw, rng)
cosθ = cosθ_from_hwp(hw, hwp)
φ = 2.0*np.pi*rng.random() # uniform azimuth angle
gdir = transform_cosines(*gdir, cosθ, φ)
gdir = gdir/np.linalg.norm(gdir) # normalization
hw = hwp
# here we have new energy, new position and new direction
```
| github_jupyter |
# Random Signals
*This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing. Please direct questions and suggestions to [[email protected]](mailto:[email protected]).*
## Auto-Power Spectral Density
The (auto-) [power spectral density](https://en.wikipedia.org/wiki/Spectral_density#Power_spectral_density) (PSD) is defined as the Fourier transformation of the [auto-correlation function](correlation_functions.ipynb) (ACF).
### Definition
For a continuous-amplitude, real-valued, wide-sense stationary (WSS) random signal $x[k]$ the PSD is given as
\begin{equation}
\Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = \mathcal{F}_* \{ \varphi_{xx}[\kappa] \},
\end{equation}
where $\mathcal{F}_* \{ \cdot \}$ denotes the [discrete-time Fourier transformation](https://en.wikipedia.org/wiki/Discrete-time_Fourier_transform) (DTFT) and $\varphi_{xx}[\kappa]$ the ACF of $x[k]$. Note that the DTFT is performed with respect to $\kappa$. The ACF of a random signal of finite length $N$ can be expressed by way of a linear convolution
\begin{equation}
\varphi_{xx}[\kappa] = \frac{1}{N} \cdot x_N[k] * x_N[-k].
\end{equation}
Taking the DTFT of the left- and right-hand side results in
\begin{equation}
\Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = \frac{1}{N} \, X_N(\mathrm{e}^{\,\mathrm{j}\,\Omega})\, X_N(\mathrm{e}^{-\,\mathrm{j}\,\Omega}) =
\frac{1}{N} \, | X_N(\mathrm{e}^{\,\mathrm{j}\,\Omega}) |^2.
\end{equation}
The last equality results from the definition of the magnitude and the symmetry of the DTFT for real-valued signals. The spectrum $X_N(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ quantifies the amplitude density of the signal $x_N[k]$. It can be concluded from above result that the PSD quantifies the squared amplitude or power density of a random signal. This explains the term power spectral density.
### Properties
The properties of the PSD can be deduced from the properties of the ACF and the DTFT as:
1. From the link between the PSD $\Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ and the spectrum $X_N(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ derived above it can be concluded that the PSD is real valued
$$\Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) \in \mathbb{R}$$
2. From the even symmetry $\varphi_{xx}[\kappa] = \varphi_{xx}[-\kappa]$ of the ACF it follows that
$$ \Phi_{xx}(\mathrm{e}^{\,\mathrm{j} \, \Omega}) = \Phi_{xx}(\mathrm{e}^{\,-\mathrm{j}\, \Omega}) $$
3. The PSD of an uncorrelated random signal is given as
$$ \Phi_{xx}(\mathrm{e}^{\,\mathrm{j} \, \Omega}) = \sigma_x^2 + \mu_x^2 \cdot {\bot \!\! \bot \!\! \bot}\left( \frac{\Omega}{2 \pi} \right) ,$$
which can be deduced from the [ACF of an uncorrelated signal](correlation_functions.ipynb#Properties).
4. The quadratic mean of a random signal is given as
$$ E\{ x[k]^2 \} = \varphi_{xx}[\kappa=0] = \frac{1}{2\pi} \int\limits_{-\pi}^{\pi} \Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\, \Omega}) \,\mathrm{d} \Omega $$
The last relation can be found by expressing the ACF via the inverse DTFT of $\Phi_{xx}$ and considering that $\mathrm{e}^{\mathrm{j} \Omega \kappa} = 1$ when evaluating the integral for $\kappa=0$.
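This last property can be checked numerically for a finite-length signal, where the PSD estimate is taken as $|X_N[\mu]|^2/N$ and the integral over $\Omega$ turns into an average over the DFT bins (a small sketch with white noise; by Parseval's theorem the two numbers agree):
```
import numpy as np

np.random.seed(10)
N = 2**14
x = 2 + np.random.normal(size=N)          # white noise with non-zero mean

psd_est = np.abs(np.fft.fft(x))**2 / N    # PSD estimate (periodogram)

print(np.mean(x**2))                      # quadratic mean in the time domain
print(np.mean(psd_est))                   # average of the PSD over all frequency bins
```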
### Example - Power Spectral Density of a Speech Signal
In this example the PSD $\Phi_{xx}(\mathrm{e}^{\,\mathrm{j} \,\Omega})$ of a speech signal of length $N$ is estimated by applying a discrete Fourier transformation (DFT) to its ACF. For a better interpretation of the PSD, the frequency axis $f = \frac{\Omega}{2 \pi} \cdot f_s$ has been chosen for illustration, where $f_s$ denotes the sampling frequency of the signal. The speech signal constitutes a recording of the vowel 'o' spoken by a German male, loaded into variable `x`.
In Python the ACF is stored in a vector with indices $0, 1, \dots, 2N - 2$ corresponding to the lags $\kappa = (0, 1, \dots, 2N - 2)^\mathrm{T} - (N-1)$. When computing the discrete Fourier transform (DFT) of the ACF numerically by the fast Fourier transform (FFT) one has to take this shift into account. For instance, by multiplying the DFT $\Phi_{xx}[\mu]$ by $\mathrm{e}^{\mathrm{j} \mu \frac{2 \pi}{2N - 1} (N-1)}$.
```
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
# read audio file
fs, x = wavfile.read('../data/vocal_o_8k.wav')
x = np.asarray(x, dtype=float)
N = len(x)
# compute ACF
acf = 1/N * np.correlate(x, x, mode='full')
# compute PSD
psd = np.fft.fft(acf)
psd = psd * np.exp(1j*np.arange(2*N-1)*2*np.pi*(N-1)/(2*N-1))
f = np.fft.fftfreq(2*N-1, d=1/fs)
# plot PSD
plt.figure(figsize=(10, 4))
plt.plot(f, np.real(psd))
plt.title('Estimated power spectral density')
plt.ylabel(r'$\hat{\Phi}_{xx}(e^{j \Omega})$')
plt.xlabel(r'$f / Hz$')
plt.axis([0, 500, 0, 1.1*max(np.abs(psd))])
plt.grid()
```
**Exercise**
* What does the PSD tell you about the average spectral contents of a speech signal?
Solution: The speech signal exhibits a harmonic structure with the dominant fundamental frequency $f_0 \approx 100$ Hz and a number of harmonics $f_n \approx n \cdot f_0$ for $n > 0$. This is due to the fact that vowels generate random signals which are in good approximation periodic. To generate vowels, the sound produced by the periodically vibrating vocal folds is filtered by the resonance volumes and articulators above the voice box. The spectrum of periodic signals is a line spectrum.
## Cross-Power Spectral Density
The cross-power spectral density is defined as the Fourier transformation of the [cross-correlation function](correlation_functions.ipynb#Cross-Correlation-Function) (CCF).
### Definition
For two continuous-amplitude, real-valued, wide-sense stationary (WSS) random signals $x[k]$ and $y[k]$, the cross-power spectral density is given as
\begin{equation}
\Phi_{xy}(\mathrm{e}^{\,\mathrm{j} \, \Omega}) = \mathcal{F}_* \{ \varphi_{xy}[\kappa] \},
\end{equation}
where $\varphi_{xy}[\kappa]$ denotes the CCF of $x[k]$ and $y[k]$. Note again, that the DTFT is performed with respect to $\kappa$. The CCF of two random signals of finite length $N$ and $M$ can be expressed by way of a linear convolution
\begin{equation}
\varphi_{xy}[\kappa] = \frac{1}{N} \cdot x_N[k] * y_M[-k].
\end{equation}
Note the chosen $\frac{1}{N}$-averaging convention corresponds to the length of signal $x$. If $N \neq M$, care should be taken on the interpretation of this normalization. In case of $N=M$ the $\frac{1}{N}$-averaging yields a [biased estimator](https://en.wikipedia.org/wiki/Bias_of_an_estimator) of the CCF, which consistently should be denoted with $\hat{\varphi}_{xy,\mathrm{biased}}[\kappa]$.
Taking the DTFT of the left- and right-hand side from above cross-correlation results in
\begin{equation}
\Phi_{xy}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = \frac{1}{N} \, X_N(\mathrm{e}^{\,\mathrm{j}\,\Omega})\, Y_M(\mathrm{e}^{-\,\mathrm{j}\,\Omega}).
\end{equation}
### Properties
1. The symmetries of $\Phi_{xy}(\mathrm{e}^{\,\mathrm{j}\, \Omega})$ can be derived from the symmetries of the CCF and the DTFT as
$$ \underbrace {\Phi_{xy}(\mathrm{e}^{\,\mathrm{j}\, \Omega}) = \Phi_{xy}^*(\mathrm{e}^{-\,\mathrm{j}\, \Omega})}_{\varphi_{xy}[\kappa] \in \mathbb{R}} =
\underbrace {\Phi_{yx}(\mathrm{e}^{\,- \mathrm{j}\, \Omega}) = \Phi_{yx}^*(\mathrm{e}^{\,\mathrm{j}\, \Omega})}_{\varphi_{yx}[-\kappa] \in \mathbb{R}},$$
from which $|\Phi_{xy}(\mathrm{e}^{\,\mathrm{j}\, \Omega})| = |\Phi_{yx}(\mathrm{e}^{\,\mathrm{j}\, \Omega})|$ can be concluded.
2. The cross PSD of two uncorrelated random signals is given as
$$ \Phi_{xy}(\mathrm{e}^{\,\mathrm{j} \, \Omega}) = \mu_x^2 \mu_y^2 \cdot {\bot \!\! \bot \!\! \bot}\left( \frac{\Omega}{2 \pi} \right) $$
which can be deduced from the CCF of an uncorrelated signal.
### Example - Cross-Power Spectral Density
The following example estimates and plots the cross PSD $\Phi_{xy}(\mathrm{e}^{\,\mathrm{j}\, \Omega})$ of two random signals $x_N[k]$ and $y_M[k]$ of finite lengths $N = 64$ and $M = 512$.
```
N = 64 # length of x
M = 512 # length of y
# generate two uncorrelated random signals
np.random.seed(1)
x = 2 + np.random.normal(size=N)
y = 3 + np.random.normal(size=M)
N = len(x)
M = len(y)
# compute cross PSD via CCF
acf = 1/N * np.correlate(x, y, mode='full')
psd = np.fft.fft(acf)
psd = psd * np.exp(1j*np.arange(N+M-1)*2*np.pi*(M-1)/(2*M-1))
psd = np.fft.fftshift(psd)
Om = 2*np.pi * np.arange(0, N+M-1) / (N+M-1)
Om = Om - np.pi
# plot results
plt.figure(figsize=(10, 4))
plt.stem(Om, np.abs(psd), basefmt='C0:', use_line_collection=True)
plt.title('Biased estimator of cross power spectral density')
plt.ylabel(r'$|\hat{\Phi}_{xy}(e^{j \Omega})|$')
plt.xlabel(r'$\Omega$')
plt.grid()
```
**Exercise**
* What does the cross PSD $\Phi_{xy}(\mathrm{e}^{\,\mathrm{j} \, \Omega})$ tell you about the statistical properties of the two random signals?
Solution: The cross PSD $\Phi_{xy}(\mathrm{e}^{\,\mathrm{j} \, \Omega})$ is essentially non-zero only for $\Omega=0$. It can hence be concluded that the two random signals are not mean-free and are uncorrelated with each other.
**Copyright**
This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Sascha Spors, Digital Signal Processing - Lecture notes featuring computational examples*.
| github_jupyter |
# Table of Contents
<p>
```
#!python
"""
Find the brightest pixel coordinate of an image.
@author: Bhishan Poudel
@date: Oct 27, 2017
@email: [email protected]
"""
# Imports
import time
import numpy as np
from astropy.io import fits
import subprocess
from scipy.ndimage import measurements
def brightest_coord():
with open('centroids_f8.txt','w') as fo:
for i in range(201):
pre = '/Users/poudel/Research/a01_data/original_data/HST_ACS_WFC_f814w/'
infile = '{}/sect23_f814w_gal{}.fits'.format(pre,i)
dat = fits.getdata(infile)
x,y = np.unravel_index(np.argmax(dat), dat.shape)
x,y = int(y+1) , int(x+1)
print("{} {}".format(x, y), file=fo)
def find_centroid():
with open('centroids_f8_scipy.txt','w') as fo:
for i in range(201):
pre = '/Users/poudel/Research/a01_data/original_data/HST_ACS_WFC_f814w/'
infile = '{}/sect23_f814w_gal{}.fits'.format(pre,i)
dat = fits.getdata(infile)
x,y = measurements.center_of_mass(dat)
x,y = int(y+1) , int(x+1)
print("{} {}".format(x, y), file=fo)
def main():
"""Run main function."""
    # brightest_coord()
# find_centroid()
# # checking
# i = 0
# pre = '/Users/poudel/Research/a01_data/original_data/HST_ACS_WFC_f814w/'
# infile = '{}/sect23_f814w_gal{}.fits'.format(pre,i)
# ds9 = '/Applications/ds9.app/Contents/MacOS/ds9'
# subprocess.call('{} {}'.format(ds9, infile), shell=True)
# when zooming we can see brightest pixel is at 296, 307 image coord.
if __name__ == "__main__":
import time, os
# Beginning time
program_begin_time = time.time()
begin_ctime = time.ctime()
# Run the main program
main()
# Print the time taken
program_end_time = time.time()
end_ctime = time.ctime()
seconds = program_end_time - program_begin_time
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
print("\n\nBegin time: ", begin_ctime)
print("End time: ", end_ctime, "\n")
print("Time taken: {0: .0f} days, {1: .0f} hours, \
{2: .0f} minutes, {3: f} seconds.".format(d, h, m, s))
print("\n")
!head -n 5 centroids_f8.txt
!head -n 5 centroids_f8_scipy.txt
def find_max_coord(dat):
print("dat = \n{}".format(dat))
maxpos = np.unravel_index(np.argmax(dat), dat.shape)
print("maxpos = {}".format(maxpos))
with open('example_data.txt','w') as fo:
data = """0.1 0.5
0.0 0.0
4.0 3.0
0.0 0.0
1.0 1.0
"""
fo.write(data)
dat = np.genfromtxt('example_data.txt')
find_max_coord(dat)
x,y = measurements.center_of_mass(dat)
import matplotlib.pyplot as plt
%matplotlib inline
plt.imshow(dat) # default is RGB
plt.imshow(dat,cmap='gray', vmin=int(dat.min()), vmax=int(dat.max()))
# we can see brightest pixel is x=0 and y = 2
# or, if we count from 1, x = 1 and y =3
measurements.center_of_mass(dat)
x,y = measurements.center_of_mass(dat)
x,y = int(x), int(y)
x,y
dat
dat[2][0]
# Numpy index is dat[2][0]
# but image shows x=0 and y =2.
x,y = measurements.center_of_mass(dat)
x,y = int(y), int(x)
x,y
dat[2][0]
# Looking at mean
dat.mean(axis=0)
np.argmax(dat)
np.unravel_index(4,dat.shape)
```
| github_jupyter |
# Implementation of VGG16
> In this notebook I have implemented VGG16 on the CIFAR10 dataset using PyTorch
```
#importing libraries
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
import torch.optim as optim
import tqdm
import matplotlib.pyplot as plt
from torchvision.datasets import CIFAR10
from torch.utils.data import random_split
from torch.utils.data.dataloader import DataLoader
```
Load the data and do standard preprocessing steps, such as resizing and converting the images into tensors.
```
transform = transforms.Compose([transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485,0.456,0.406],
std=[0.229,0.224,0.225])])
train_ds = CIFAR10(root='data/',train = True,download=True,transform = transform)
val_ds = CIFAR10(root='data/',train = False,download=True,transform = transform)
batch_size = 128
train_loader = DataLoader(train_ds,batch_size,shuffle=True,num_workers=4,pin_memory=True)
val_loader = DataLoader(val_ds,batch_size,num_workers=4,pin_memory=True)
```
A custom utility class to print out the accuracy and losses during training and testing
```
def accuracy(outputs,labels):
_,preds = torch.max(outputs,dim=1)
return torch.tensor(torch.sum(preds==labels).item()/len(preds))
class ImageClassificationBase(nn.Module):
def training_step(self,batch):
images, labels = batch
out = self(images)
loss = F.cross_entropy(out,labels)
return loss
def validation_step(self,batch):
images, labels = batch
out = self(images)
loss = F.cross_entropy(out,labels)
acc = accuracy(out,labels)
return {'val_loss': loss.detach(),'val_acc': acc}
def validation_epoch_end(self,outputs):
batch_losses = [x['val_loss'] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean()
batch_accs = [x['val_acc'] for x in outputs]
epoch_acc = torch.stack(batch_accs).mean()
return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
def epoch_end(self, epoch, result):
print("Epoch [{}], train_loss: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}".format(
epoch, result['train_loss'], result['val_loss'], result['val_acc']))
```
### Creating a network
```
VGG_types = {
'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class VGG_net(ImageClassificationBase):
def __init__(self, in_channels=3, num_classes=1000):
super(VGG_net, self).__init__()
self.in_channels = in_channels
self.conv_layers = self.create_conv_layers(VGG_types['VGG16'])
self.fcs = nn.Sequential(
nn.Linear(512*7*7, 4096),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(4096, 4096),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(4096, num_classes)
)
def forward(self, x):
x = self.conv_layers(x)
x = x.reshape(x.shape[0], -1)
x = self.fcs(x)
return x
def create_conv_layers(self, architecture):
layers = []
in_channels = self.in_channels
for x in architecture:
if type(x) == int:
out_channels = x
layers += [nn.Conv2d(in_channels=in_channels,out_channels=out_channels,
kernel_size=(3,3), stride=(1,1), padding=(1,1)),
nn.BatchNorm2d(x),
nn.ReLU()]
in_channels = x
elif x == 'M':
layers += [nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))]
return nn.Sequential(*layers)
```
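As a quick sanity check (not in the original notebook), running a dummy batch through the network confirms that the five `'M'` pooling stages reduce a 224x224 input to 7x7 feature maps, which is exactly what the `512*7*7` classifier head expects.
```
# sketch: verify the expected output shape with random data
check_model = VGG_net(in_channels=3, num_classes=10)
dummy_batch = torch.randn(2, 3, 224, 224)
print(check_model(dummy_batch).shape)  # expected: torch.Size([2, 10])
```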
A custom function to pick a default device
```
def get_default_device():
"""Pick GPU if available else CPU"""
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
device = get_default_device()
device
def to_device(data,device):
"""Move tensors to chosen device"""
if isinstance(data,(list,tuple)):
return [to_device(x,device) for x in data]
return data.to(device,non_blocking=True)
for images, labels in train_loader:
print(images.shape)
images = to_device(images,device)
print(images.device)
break
class DeviceDataLoader():
"""Wrap a DataLoader to move data to a device"""
def __init__(self,dl,device):
self.dl = dl
self.device = device
def __iter__(self):
"""Yield a batch of data to a dataloader"""
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
"""Number of batches"""
return len(self.dl)
train_loader = DeviceDataLoader(train_loader,device)
val_loader = DeviceDataLoader(val_loader,device)
model = VGG_net(in_channels=3,num_classes=10)
to_device(model,device)
```
### Training the model
```
@torch.no_grad()
def evaluate(model, val_loader):
model.eval()
outputs = [model.validation_step(batch) for batch in val_loader]
return model.validation_epoch_end(outputs)
def fit(epochs, lr, model, train_loader, val_loader, opt_func=torch.optim.SGD):
history = []
train_losses =[]
optimizer = opt_func(model.parameters(), lr)
for epoch in range(epochs):
# Training Phase
model.train()
for batch in train_loader:
loss = model.training_step(batch)
train_losses.append(loss)
loss.backward()
optimizer.step()
optimizer.zero_grad()
# Validation phase
result = evaluate(model, val_loader)
result['train_loss'] = torch.stack(train_losses).mean().item()
model.epoch_end(epoch, result)
history.append(result)
return history
history = [evaluate(model, val_loader)]
history
#history = fit(2,0.1,model,train_loader,val_loader)
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
df_rides = pd.read_csv('cab_rides.csv')
df_weather = pd.read_csv('weather.csv')
df_rides['date'] = pd.to_datetime(df_rides['time_stamp']/ 1000, unit = 's')
df_weather['date'] = pd.to_datetime(df_weather['time_stamp'], unit = 's')
df_rides.head()
df_rides['merged_date'] = df_rides['source'].astype('str') + ' - ' + df_rides['date'].dt.strftime('%Y-%m-%d').astype('str') + ' - ' + df_rides['date'].dt.hour.astype('str')
df_weather['merged_date'] = df_weather['location'].astype('str') + ' - ' + df_weather['date'].dt.strftime('%Y-%m-%d').astype('str') + ' - ' + df_weather['date'].dt.hour.astype('str')
df_weather.index = df_weather['merged_date']
df_joined = df_rides.join(df_weather, on = ['merged_date'], rsuffix ='_w')
df_joined.head()
df_joined.info()
id_group = pd.DataFrame(df_joined.groupby('id')[['temp', 'clouds', 'pressure', 'rain', 'humidity', 'wind']].mean())
df_rides_weather = df_rides.join(id_group, on = ['id'])
df_rides_weather.tail()
# Creating the columns for Month, Hour and Weekdays
df_rides_weather['Month'] = df_rides_weather['date'].dt.month
df_rides_weather['Hour'] = df_rides_weather['date'].dt.hour
df_rides_weather['Day'] = df_rides_weather['date'].dt.strftime('%A')
uber_day_count = df_rides_weather[df_rides_weather['cab_type'] == 'Uber']['Day'].value_counts()
uber_day_count = uber_day_count.reindex(index = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday','Saturday','Sunday'])
lyft_day_count = df_rides_weather[df_rides_weather['cab_type'] == 'Lyft']['Day'].value_counts()
lyft_day_count = lyft_day_count.reindex(index = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday','Saturday','Sunday'])
fig , ax = plt.subplots(figsize = (12,12))
ax.plot(lyft_day_count.index, lyft_day_count, label = 'Lyft')
ax.plot(uber_day_count.index, uber_day_count, label = 'Uber')
ax.set(ylabel = 'Number of Rides', xlabel = 'Weekdays')
ax.legend()
plt.show()
# The ride distribution in one day
fig , ax = plt.subplots(figsize= (12,12))
ax.plot(df_rides_weather[df_rides_weather['cab_type'] == 'Lyft'].groupby('Hour').Hour.count().index, df_rides_weather[df_rides_weather['cab_type'] == 'Lyft'].groupby('Hour').Hour.count(), label = 'Lyft')
ax.plot(df_rides_weather[df_rides_weather['cab_type'] == 'Uber'].groupby('Hour').Hour.count().index, df_rides_weather[df_rides_weather['cab_type'] =='Uber'].groupby('Hour').Hour.count(), label = 'Uber')
ax.legend()
ax.set(xlabel = 'Hours', ylabel = 'Number of Rides')
plt.xticks(range(0,24,1))
plt.show()
order = ['Financial District', 'Theatre District', 'Back Bay', 'Haymarket Square', 'Boston University', 'Fenway', 'North End', 'Northeastern University', 'South Station', 'West End', 'Beacon Hill', 'North Station']
print('green - Lyft\norange - Uber')
f = plt.figure(figsize = (40, 25))
ax = f.add_subplot(2,3,1)
plt.xticks(rotation=45)
sns.barplot(x='source', y='price', data=df_rides_weather[df_rides_weather['cab_type']=='Lyft'], ax=ax, order = order, color = 'green')
sns.barplot(x='source', y='price', data=df_rides_weather[df_rides_weather['cab_type']=='Uber'], ax=ax, order = order, color = 'orange')
ax = f.add_subplot(2,3,2)
plt.xticks(rotation=45)
sns.barplot(x='destination', y='price', data=df_rides_weather[df_rides_weather['cab_type']=='Lyft'], ax=ax, order = order, color = 'green')
sns.barplot(x='destination', y='price', data=df_rides_weather[df_rides_weather['cab_type']=='Uber'], ax=ax, order = order, color = 'orange')
plt.show()
fig , ax = plt.subplots(figsize = (12,12))
ax.plot(df_rides_weather[df_rides_weather['cab_type'] == 'Lyft'].groupby('distance').price.mean().index, df_rides_weather[df_rides_weather['cab_type'] == 'Lyft'].groupby('distance')['price'].mean(), label = 'Lyft')
ax.plot(df_rides_weather[df_rides_weather['cab_type'] == 'Uber'].groupby('distance').price.mean().index, df_rides_weather[df_rides_weather['cab_type'] == 'Uber'].groupby('distance')['price'].mean(), label = 'Uber')
ax.set_title('The Average Price by distance', fontsize= 15)
ax.set(xlabel = 'Distance', ylabel = 'Price' )
ax.legend()
plt.show()
fig, ax = plt.subplots(2, 1, figsize = (20,5))
for i,col in enumerate(df_rides_weather[df_rides_weather['cab_type'] == 'Uber']['name'].unique()):
ax[0].plot(df_rides_weather[ df_rides_weather['name'] == col].groupby('distance').price.mean().index, df_rides_weather[ df_rides_weather['name'] == col].groupby('distance').price.mean(), label = col)
ax[0].set_title('Uber Average Prices by Distance')
ax[0].set(xlabel = 'Distance in Mile', ylabel = 'Average price in USD')
ax[0].legend()
for i,col in enumerate(df_rides_weather[df_rides_weather['cab_type'] == 'Lyft']['name'].unique()):
ax[1].plot(df_rides_weather[ df_rides_weather['name'] == col].groupby('distance').price.mean().index, df_rides_weather[ df_rides_weather['name'] == col].groupby('distance').price.mean(), label = col)
ax[1].set(xlabel = 'Distance in Mile', ylabel = 'Average price in USD')
ax[1].set_title('Lyft Average Prices by Distance')
ax[1].legend()
plt.show()
x = df_rides_weather['surge_multiplier'].value_counts()
x.plot.bar(x = 'multiplier', y = 'Number of times')
x = df_rides_weather[df_rides_weather['cab_type'] == 'Uber']['surge_multiplier'].value_counts()
x.plot.bar(x = 'multipler uber', y = 'Number of rides')
x = df_rides_weather[df_rides_weather['cab_type'] == 'Lyft']['surge_multiplier'].value_counts()
x.plot.bar(x = 'multipler lyft', y = 'Number of rides')
df_rides_weather['price/distance'] = (df_rides_weather['price'] / df_rides_weather['distance'])
high_rates = df_rides_weather[df_rides_weather['price/distance'] > 80]
high_rates['cab_type'].value_counts()
high_rates[high_rates['cab_type'] == 'Uber']['distance'].value_counts()
order = ['Financial District', 'Theatre District', 'Back Bay', 'Haymarket Square', 'Boston University', 'Fenway', 'North End', 'Northeastern University', 'South Station', 'West End', 'Beacon Hill', 'North Station']
print('source')
fig, ax = plt.subplots(1, 2, figsize = (20,10))
df_uber = df_rides_weather[df_rides_weather['cab_type'] == 'Uber']
for i, col in enumerate(order):
x = df_uber[df_uber['source'] == col].groupby('distance').price.mean().index
y = df_uber[df_uber['source'] == col].groupby('distance').price.mean()
ax[0].plot(x, y, label = col)
ax[0].set_title('Uber Average Prices by Distance')
ax[0].set(xlabel = 'Distance in Mile', ylabel = 'Average price in USD')
ax[0].legend()
df_lyft = df_rides_weather[df_rides_weather['cab_type'] == 'Lyft']
for i, col in enumerate(order):
x = df_lyft[df_lyft['source'] == col].groupby('distance').price.mean().index
y = df_lyft[df_lyft['source'] == col].groupby('distance').price.mean()
ax[1].plot(x, y, label = col)
ax[1].set(xlabel = 'Distance in Mile', ylabel = 'Average price in USD')
ax[1].set_title('Lyft Average Prices by Distance')
ax[1].legend()
plt.show()
order = ['Financial District', 'Theatre District', 'Back Bay', 'Haymarket Square', 'Boston University', 'Fenway', 'North End', 'Northeastern University', 'South Station', 'West End', 'Beacon Hill', 'North Station']
print('destination')
fig, ax = plt.subplots(1, 2, figsize = (20,10))
df_uber = df_rides_weather[df_rides_weather['cab_type'] == 'Uber']
for i, col in enumerate(order):
x = df_uber[df_uber['destination'] == col].groupby('distance').price.mean().index
y = df_uber[df_uber['destination'] == col].groupby('distance').price.mean()
ax[0].plot(x, y, label = col)
ax[0].set_title('Uber Average Prices by Distance')
ax[0].set(xlabel = 'Distance in Mile', ylabel = 'Average price in USD')
ax[0].legend()
df_lyft = df_rides_weather[df_rides_weather['cab_type'] == 'Lyft']
for i, col in enumerate(order):
x = df_lyft[df_lyft['destination'] == col].groupby('distance').price.mean().index
y = df_lyft[df_lyft['destination'] == col].groupby('distance').price.mean()
ax[1].plot(x, y, label = col)
ax[1].set(xlabel = 'Distance in Mile', ylabel = 'Average price in USD')
ax[1].set_title('Lyft Average Prices by Distance')
ax[1].legend()
plt.show()
```
| github_jupyter |
*Accompanying code examples of the book "Introduction to Artificial Neural Networks and Deep Learning: A Practical Guide with Applications in Python" by [Sebastian Raschka](https://sebastianraschka.com). All code examples are released under the [MIT license](https://github.com/rasbt/deep-learning-book/blob/master/LICENSE). If you find this content useful, please consider supporting the work by buying a [copy of the book](https://leanpub.com/ann-and-deeplearning).*
Other code examples and content are available on [GitHub](https://github.com/rasbt/deep-learning-book). The PDF and ebook versions of the book are available through [Leanpub](https://leanpub.com/ann-and-deeplearning).
```
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch
```
# Model Zoo -- CNN Gender Classifier (ResNet-50 Architecture, CelebA) with Data Parallelism
### Network Architecture
The network in this notebook is an implementation of the ResNet-50 [1] architecture on the CelebA face dataset [2] to train a gender classifier.
References
- [1] He, K., Zhang, X., Ren, S., & Sun, J. (2016). Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 770-778). ([CVPR Link](https://www.cv-foundation.org/openaccess/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html))
- [2] Zhang, K., Tan, L., Li, Z., & Qiao, Y. (2016). Gender and smile classification using deep convolutional neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops (pp. 34-38).
**Note that the CelebA images are 218 x 178, not 256 x 256. We resize to 128x128**
The following code implements the residual blocks with skip connections such that the input passed via the shortcut matches the dimensions of the main path's output, which allows the network to learn identity functions. Such a residual block is illustrated below:

The following code implements the residual blocks with skip connections such that the input passed via the shortcut is resized to match the dimensions of the main path's output. Such a residual block is illustrated below:

For a more detailed explanation see the other notebook, [resnet-ex-1.ipynb](resnet-ex-1.ipynb).
The image below illustrates the ResNet-34 architecture (from the He et al. paper):

While ResNet-34 has 34 layers as shown in the figure above, the 50-layer ResNet variant implemented in this notebook uses a "bottleneck" block design instead of the basic residual blocks. Figure 5 from the He et al. paper illustrates the difference between a basic residual block (as used in ResNet-34) and the bottleneck block used in ResNet-50:

## Imports
```
import os
import time
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
import matplotlib.pyplot as plt
from PIL import Image
```
## Dataset
### Downloading the Dataset
Note that the ~200,000-image CelebA face dataset is relatively large (~1.3 GB). The download links below are the ones provided by the authors on the official CelebA website at http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html.
1) Download and unzip the file `img_align_celeba.zip`, which contains the images in jpeg format.
2) Download the `list_attr_celeba.txt` file, which contains the class labels
3) Download the `list_eval_partition.txt` file, which contains training/validation/test partitioning info
### Preparing the Dataset
```
df1 = pd.read_csv('list_attr_celeba.txt', sep=r"\s+", skiprows=1, usecols=['Male'])
# Make 0 (female) & 1 (male) labels instead of -1 & 1
df1.loc[df1['Male'] == -1, 'Male'] = 0
df1.head()
df2 = pd.read_csv('list_eval_partition.txt', sep=r"\s+", skiprows=0, header=None)
df2.columns = ['Filename', 'Partition']
df2 = df2.set_index('Filename')
df2.head()
df3 = df1.merge(df2, left_index=True, right_index=True)
df3.head()
df3.to_csv('celeba-gender-partitions.csv')
df4 = pd.read_csv('celeba-gender-partitions.csv', index_col=0)
df4.head()
df4.loc[df4['Partition'] == 0].to_csv('celeba-gender-train.csv')
df4.loc[df4['Partition'] == 1].to_csv('celeba-gender-valid.csv')
df4.loc[df4['Partition'] == 2].to_csv('celeba-gender-test.csv')
img = Image.open('img_align_celeba/000001.jpg')
print(np.asarray(img, dtype=np.uint8).shape)
plt.imshow(img);
```
### Implementing a Custom DataLoader Class
```
class CelebaDataset(Dataset):
"""Custom Dataset for loading CelebA face images"""
def __init__(self, csv_path, img_dir, transform=None):
df = pd.read_csv(csv_path, index_col=0)
self.img_dir = img_dir
self.csv_path = csv_path
self.img_names = df.index.values
self.y = df['Male'].values
self.transform = transform
def __getitem__(self, index):
img = Image.open(os.path.join(self.img_dir,
self.img_names[index]))
if self.transform is not None:
img = self.transform(img)
label = self.y[index]
return img, label
def __len__(self):
return self.y.shape[0]
# Note that transforms.ToTensor()
# already divides pixels by 255. internally
custom_transform = transforms.Compose([transforms.CenterCrop((178, 178)),
transforms.Resize((128, 128)),
#transforms.Grayscale(),
#transforms.Lambda(lambda x: x/255.),
transforms.ToTensor()])
train_dataset = CelebaDataset(csv_path='celeba-gender-train.csv',
img_dir='img_align_celeba/',
transform=custom_transform)
valid_dataset = CelebaDataset(csv_path='celeba-gender-valid.csv',
img_dir='img_align_celeba/',
transform=custom_transform)
test_dataset = CelebaDataset(csv_path='celeba-gender-test.csv',
img_dir='img_align_celeba/',
transform=custom_transform)
BATCH_SIZE=256*torch.cuda.device_count()
train_loader = DataLoader(dataset=train_dataset,
batch_size=BATCH_SIZE,
shuffle=True,
num_workers=4)
valid_loader = DataLoader(dataset=valid_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=4)
test_loader = DataLoader(dataset=test_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=4)
device = torch.device("cuda:0")
torch.manual_seed(0)
for epoch in range(2):
for batch_idx, (x, y) in enumerate(train_loader):
print('Epoch:', epoch+1, end='')
print(' | Batch index:', batch_idx, end='')
print(' | Batch size:', y.size()[0])
x = x.to(device)
y = y.to(device)
break
```
## Model
```
##########################
### SETTINGS
##########################
# Hyperparameters
random_seed = 1
learning_rate = 0.001
num_epochs = 5
# Architecture
num_features = 128*128
num_classes = 2
```
The following code cell, which implements the ResNet-50 architecture, is a derivative of the code provided at https://pytorch.org/docs/0.4.0/_modules/torchvision/models/resnet.html.
```
##########################
### MODEL
##########################
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1, padding=2)
self.fc = nn.Linear(2048 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, (2. / n)**.5)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
logits = self.fc(x)
probas = F.softmax(logits, dim=1)
return logits, probas
def resnet50(num_classes):
"""Constructs a ResNet-34 model."""
model = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes)
return model
torch.manual_seed(random_seed)
##########################
### COST AND OPTIMIZER
##########################
#### DATA PARALLEL START ####
model = resnet50(num_classes)
if torch.cuda.device_count() > 1:
print("Using", torch.cuda.device_count(), "GPUs")
model = nn.DataParallel(model)
#### DATA PARALLEL END ####
model.to(device)
#### DATA PARALLEL START ####
cost_fn = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
```
## Training
```
def compute_accuracy(model, data_loader):
correct_pred, num_examples = 0, 0
for i, (features, targets) in enumerate(data_loader):
features = features.to(device)
targets = targets.to(device)
logits, probas = model(features)
_, predicted_labels = torch.max(probas, 1)
num_examples += targets.size(0)
correct_pred += (predicted_labels == targets).sum()
return correct_pred.float()/num_examples * 100
start_time = time.time()
for epoch in range(num_epochs):
model.train()
for batch_idx, (features, targets) in enumerate(train_loader):
features = features.to(device)
targets = targets.to(device)
### FORWARD AND BACK PROP
logits, probas = model(features)
cost = cost_fn(logits, targets)
optimizer.zero_grad()
cost.backward()
### UPDATE MODEL PARAMETERS
optimizer.step()
### LOGGING
if not batch_idx % 50:
print ('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'
%(epoch+1, num_epochs, batch_idx,
len(train_dataset)//BATCH_SIZE, cost))
model.eval()
with torch.set_grad_enabled(False): # save memory during inference
print('Epoch: %03d/%03d | Train: %.3f%% | Valid: %.3f%%' % (
epoch+1, num_epochs,
compute_accuracy(model, train_loader),
compute_accuracy(model, valid_loader)))
print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))
print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))
```
## Evaluation
```
with torch.set_grad_enabled(False): # save memory during inference
print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader)))
for batch_idx, (features, targets) in enumerate(test_loader):
features = features
targets = targets
break
plt.imshow(np.transpose(features[0], (1, 2, 0)))
model.eval()
logits, probas = model(features.to(device)[0, None])
print('Probability Female %.2f%%' % (probas[0][0]*100))
```
| github_jupyter |

<a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fshorts&branch=master&subPath=master.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# Master Index of Notebooks
The Callysto project is focused on creating Jupyter notebooks for the K-12 curriculum in Canadian schools, and to teach students and teachers how to create their own notebooks.
This notebook points to a collection of short demos explaining how to include basic elements in your own Jupyter notebooks: for instance, how to include images, music, videos, graphs, and even mathematical and geometric tools useful in science and technology classes.
More complete notebooks with these features are available on the Callysto website (https://callysto.ca). Here, we show the simplest versions to get you started.
## Note:
This repo is specifically designed to run under "mybinder" as well as Jupyter Books. This limits somewhat the tools we can demonstrate.
## Demos that I want to do
- [Including images](Images.ipynb)
- [Including GIFs](GIFs.ipynb)
- [Including YouTube videos](YouTube.ipynb)
- [Drawing figures in HTML, SVG](HTML_Drawing.ipynb)
- [Plotting in matplotlib](Matplotlib.ipynb)
- [3D Plotting in matplotlib](Matplot3D.ipynb)
- [Animation in matplotlib](MatplotAnimation.ipynb)
- [Plotting in Plotly](Plotly.ipynb)
- [3D Plotting in Plotly](Plotly3D.ipynb)
- [3D graphics](3D_graphics.ipynb)
- [Including WebGL animation](WebGL.ipynb)
- [Animation in D3](D3.ipynb)
- [Plotting in Pylab](Pylab.ipynb)
- [Music and Sounds](Sounds.ipynb)
- [Synthetic sounds](SynthSound.ipynb)
- [Including Callysto banners](Banners.ipynb)
- [Using widgets](Widgets.ipynb)
- [Creating a progress bar](ProgressBar.ipynb)
- [Including Geogebra apps](Geogebra.ipynb)
- [Creating a slideshow](Slideshow.ipynb)
- [A fancy slideshow](Slideshow2Callysto.ipynb)
- [Hiding code](Hiding.ipynb)
- [Saving your work on GitHub](Github.ipynb)
- [Using namespaces](Namespace.ipynb)
- [Importing Data](ImportingData.ipynb)
[](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| github_jupyter |
```
import pandas as pd
import numpy as np
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import ast
from gensim.corpora import Dictionary
from gensim.models.coherencemodel import CoherenceModel
import gensim
from sklearn.cluster import AgglomerativeClustering
from scipy.cluster.hierarchy import ward, dendrogram
from sklearn.decomposition import TruncatedSVD
from collections import Counter
from sklearn.manifold import TSNE
import matplotlib.cm as cm
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics.pairwise import cosine_similarity
from scipy.cluster.hierarchy import ward, dendrogram, fcluster, single, complete
from sklearn.metrics import silhouette_score
BIGRAMS = True #a parameter that specifies if unigrams (false) or bigrams (true) are used
def dummy_fun(doc):
return doc
cv = CountVectorizer(analyzer='word',
tokenizer=dummy_fun,
preprocessor=dummy_fun,
token_pattern=None)
def make_bigrams(bigram_mod, texts):
return [bigram_mod[doc] for doc in texts]
def read_and_vectorize(path, cv, bigrams):
df = pd.read_csv(path)
df['tokens'] = df['tokens'].apply(ast.literal_eval) #transforming string of tokens to list
if bigrams == True: #specify if bigrams or unigrams are used for future clustering
bigram = gensim.models.Phrases(df['tokens'], min_count=3, threshold=50) # higher threshold fewer phrases.
bigram_mod = gensim.models.phrases.Phraser(bigram)
df['bigrams'] = make_bigrams(bigram_mod, df['tokens'])
print('Bigrams are created.')
data = cv.fit_transform(df['bigrams'])
else:
data = cv.fit_transform(df['tokens'])
terms = cv.get_feature_names()
print(f'Len of terms: {len(terms)}')
tfidf_transformer = TfidfTransformer()
tfidf_matrix = tfidf_transformer.fit_transform(data)
print(f'Tfidf matrix is generated of shape {tfidf_matrix.shape}')
return df, tfidf_matrix, terms
df_feb, tfidf_matrix_feb, terms_feb = read_and_vectorize('preprocessed_results/mediacloud_parsed_corona_df_feb_sample.csv', cv, BIGRAMS)
df_may, tfidf_matrix_may, terms_may = read_and_vectorize('preprocessed_results/mediacloud_parsed_corona_df_may_sample.csv', cv, BIGRAMS)
df_sep, tfidf_matrix_sep, terms_sep = read_and_vectorize('preprocessed_results/mediacloud_parsed_corona_df_sep_sample.csv', cv, BIGRAMS)
def read_best_kmeans_model(path):
models_df = pd.read_csv(path)
best_model = models_df.iloc[models_df['Coherence'].idxmax()]
return best_model, models_df
best_model_feb, models_df_feb = read_best_kmeans_model('preprocessed_results/models_df_feb.csv')
best_model_may, models_df_may = read_best_kmeans_model('preprocessed_results/models_df_may.csv')
best_model_sep, models_df_sep = read_best_kmeans_model('preprocessed_results/models_df_sep.csv')
def transform(tfidf_matrix):
transformed_tokens = np.empty((tfidf_matrix.shape[0], 0)).tolist()
for i in range(tfidf_matrix.shape[0]):
transformed_tokens[i] = tfidf_matrix[i].toarray()[0]
print(f'Matrix is tranformed into array of len {len(transformed_tokens)}')
return np.array(transformed_tokens)
def plot_linkage(linkage_matrix, clusters):
fig, ax = plt.subplots(figsize=(15, 20)) # set size
ax = dendrogram(linkage_matrix, orientation="right", labels=clusters)
plt.tick_params(\
axis= 'x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off')
plt.tight_layout()
def hierarchical_clustering(best_model, tfidf_matrix, cluster):
random_state = 20
transformed_tokens = transform(tfidf_matrix)
model = KMeans(n_clusters=best_model['Num_Topics'], init='k-means++', max_iter=100, n_init=1, random_state = random_state)
clusters = model.fit_predict(transformed_tokens)
selected_features = [transformed_tokens[i] for i in range(len(transformed_tokens)) if clusters[i]==cluster]
svd = TruncatedSVD(n_components=100, random_state=random_state)
features = svd.fit_transform(selected_features)
print(features.shape)
linkage_matrix = ward(features)
plot_linkage(linkage_matrix, clusters)
return features, transformed_tokens, linkage_matrix, clusters
features_hierarchical_feb, transformed_tokens_feb, linkage_matrix_feb, clusters_feb = hierarchical_clustering(best_model_feb, tfidf_matrix_feb, 6)
def agglomerative_clustering(n_clusters, features, df, cluster, best_model, transformed_tokens, clusters):
random_state=20
model_hierarchical = AgglomerativeClustering(n_clusters=n_clusters, affinity='euclidean', linkage='ward')
model_hierarchical.fit_predict(features)
# model = KMeans(n_clusters=best_model['Num_Topics'], init='k-means++', max_iter=100, n_init=1, random_state = random_state)
# clusters = model.fit_predict(transformed_tokens)
df = df[clusters==cluster]
for label in range(model_hierarchical.n_clusters_):
print(label)
display(df[model_hierarchical.labels_==label]['title'])
agglomerative_clustering(23, features_hierarchical_feb, df_feb, 6, best_model_feb, transformed_tokens_feb, clusters_feb)
def silhouette_k(distance_matrix, linkage_matrix, max_k=20):
scores = []
for i in range(2, max_k+1):
clusters = fcluster(linkage_matrix, i, criterion='maxclust')
score = silhouette_score(distance_matrix, clusters, metric='precomputed')
print("Silhouette score with {} clusters:".format(i), score)
scores.append(score)
plt.title("Silhouette score vs. number of clusters")
plt.xlabel("# of clusters")
plt.ylabel("Score (higher is better)")
plt.plot(np.arange(2, max_k+1), scores)
plt.show()
return scores
def elbow_method(tfidf_matrix, linkage_matrix):
dist = 1 - cosine_similarity(tfidf_matrix)
dist = dist - dist.min()
silhouette_k(dist, linkage_matrix, max_k=30)
elbow_method(tfidf_matrix_feb[clusters_feb==6], linkage_matrix_feb)
```
## May
```
features_hierarchical_may, transformed_tokens_may, linkage_matrix_may, clusters_may = hierarchical_clustering(best_model_may, tfidf_matrix_may, 2)
agglomerative_clustering(6, features_hierarchical_may, df_may, 2, best_model_may, transformed_tokens_may, clusters_may)
elbow_method(tfidf_matrix_may[clusters_may==2], linkage_matrix_may)
```
## September
```
features_hierarchical_sep, transformed_tokens_sep, linkage_matrix_sep, clusters_sep = hierarchical_clustering(best_model_sep, tfidf_matrix_sep, 10)
agglomerative_clustering(2, features_hierarchical_sep, df_sep, 10, best_model_sep, transformed_tokens_sep, clusters_sep)
```
| github_jupyter |
# REINFORCE in PyTorch
Just like we did before for Q-learning, this time we'll design a PyTorch network to learn `CartPole-v0` via policy gradient (REINFORCE).
Most of the code in this notebook is taken from approximate Q-learning, so you'll find it more or less familiar and even simpler.
```
import sys, os
if 'google.colab' in sys.modules and not os.path.exists('.setup_complete'):
!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/master/setup_colab.sh -O- | bash
!touch .setup_complete
# This code creates a virtual display to draw game images on.
# It will have no effect if your machine has a monitor.
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
!bash ../xvfb start
os.environ['DISPLAY'] = ':1'
import gym
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
A caveat: with some versions of `pyglet`, the following cell may crash with `NameError: name 'base' is not defined`. The corresponding bug report is [here](https://github.com/pyglet/pyglet/issues/134). If you see this error, try restarting the kernel.
```
env = gym.make("CartPole-v0")
# gym compatibility: unwrap TimeLimit
if hasattr(env, '_max_episode_steps'):
env = env.env
env.reset()
n_actions = env.action_space.n
state_dim = env.observation_space.shape
plt.imshow(env.render("rgb_array"))
```
# Building the network for REINFORCE
For REINFORCE algorithm, we'll need a model that predicts action probabilities given states.
For numerical stability, please __do not include the softmax layer into your network architecture__.
We'll use softmax or log-softmax where appropriate.
```
import torch
import torch.nn as nn
# Build a simple neural network that predicts policy logits.
# Keep it simple: CartPole isn't worth deep architectures.
model = nn.Sequential(
<YOUR CODE: define a neural network that predicts policy logits>
)
```
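If a concrete starting point helps, a small two-layer MLP that returns raw logits (no softmax, as required above) could look like the sketch below; the hidden size of 64 is an arbitrary choice and not part of the assignment.
```
import torch.nn as nn

# sketch only: CartPole-v0 has state_dim == (4,) and n_actions == 2
example_model = nn.Sequential(
    nn.Linear(state_dim[0], 64),
    nn.ReLU(),
    nn.Linear(64, n_actions),   # raw logits, no softmax here
)
```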
#### Predict function
Note: output value of this function is not a torch tensor, it's a numpy array.
So, here gradient calculation is not needed.
<br>
Use [no_grad](https://pytorch.org/docs/stable/autograd.html#torch.autograd.no_grad)
to suppress gradient calculation.
<br>
Also, `.detach()` (or legacy `.data` property) can be used instead, but there is a difference:
<br>
With `.detach()` computational graph is built but then disconnected from a particular tensor,
so `.detach()` should be used if that graph is needed for backprop via some other (not detached) tensor;
<br>
In contrast, no graph is built by any operation in `no_grad()` context, thus it's preferable here.
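As a minimal illustration of the difference (not part of the exercise), compare the two approaches on a toy tensor:
```
import torch

w = torch.ones(3, requires_grad=True)

# .detach(): the multiplication is recorded in a graph, but y is then cut off from it
y = (w * 2).detach()
print(y.requires_grad)   # False

# no_grad(): nothing is recorded inside the context in the first place
with torch.no_grad():
    z = w * 2
print(z.requires_grad)   # False
```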
```
def predict_probs(states):
"""
Predict action probabilities given states.
:param states: numpy array of shape [batch, state_shape]
:returns: numpy array of shape [batch, n_actions]
"""
# convert states, compute logits, use softmax to get probability
<YOUR CODE>
return <YOUR CODE>
test_states = np.array([env.reset() for _ in range(5)])
test_probas = predict_probs(test_states)
assert isinstance(test_probas, np.ndarray), \
"you must return np array and not %s" % type(test_probas)
assert tuple(test_probas.shape) == (test_states.shape[0], env.action_space.n), \
"wrong output shape: %s" % np.shape(test_probas)
assert np.allclose(np.sum(test_probas, axis=1), 1), "probabilities do not sum to 1"
```
### Play the game
We can now use our newly built agent to play the game.
```
def generate_session(env, t_max=1000):
"""
Play a full session with REINFORCE agent.
Returns sequences of states, actions, and rewards.
"""
# arrays to record session
states, actions, rewards = [], [], []
s = env.reset()
for t in range(t_max):
# action probabilities array aka pi(a|s)
action_probs = predict_probs(np.array([s]))[0]
# Sample action with given probabilities.
a = <YOUR CODE>
new_s, r, done, info = env.step(a)
# record session history to train later
states.append(s)
actions.append(a)
rewards.append(r)
s = new_s
if done:
break
return states, actions, rewards
# test it
states, actions, rewards = generate_session(env)
```
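A common way to fill in the sampling step above is to draw from the categorical distribution defined by `action_probs`; the snippet below is only a hint with made-up probabilities.
```
import numpy as np

# sketch: sample an action index according to predicted probabilities
action_probs = np.array([0.3, 0.7])                       # example values
a = np.random.choice(len(action_probs), p=action_probs)   # 0 or 1
print(a)
```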
### Computing cumulative rewards
$$
\begin{align*}
G_t &= r_t + \gamma r_{t + 1} + \gamma^2 r_{t + 2} + \ldots \\
&= \sum_{i = t}^T \gamma^{i - t} r_i \\
&= r_t + \gamma * G_{t + 1}
\end{align*}
$$
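The recurrence in the last line suggests iterating over the rewards backwards; the following self-contained sketch is one way to do it (shown for illustration, independent of the exercise cell below).
```
# sketch: G_t = r_t + gamma * G_{t+1}, computed from the last step to the first
def cumulative_rewards_sketch(rewards, gamma=0.99):
    G = [0.0] * len(rewards)
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        G[t] = running
    return G

print(cumulative_rewards_sketch([0, 0, 1, 0, 0, 1, 0], gamma=0.9))
# agrees (up to rounding) with the allclose test below: [1.40049, 1.5561, 1.729, 0.81, 0.9, 1.0, 0.0]
```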
```
def get_cumulative_rewards(rewards, # rewards at each step
gamma=0.99 # discount for reward
):
"""
Take a list of immediate rewards r(s,a) for the whole session
and compute cumulative returns (a.k.a. G(s,a) in Sutton '16).
G_t = r_t + gamma*r_{t+1} + gamma^2*r_{t+2} + ...
A simple way to compute cumulative rewards is to iterate from the last
to the first timestep and compute G_t = r_t + gamma*G_{t+1} recurrently
You must return an array/list of cumulative rewards with as many elements as in the initial rewards.
"""
<YOUR CODE>
return <YOUR CODE: array of cumulative rewards>
get_cumulative_rewards(rewards)
assert len(get_cumulative_rewards(list(range(100)))) == 100
assert np.allclose(
get_cumulative_rewards([0, 0, 1, 0, 0, 1, 0], gamma=0.9),
[1.40049, 1.5561, 1.729, 0.81, 0.9, 1.0, 0.0])
assert np.allclose(
get_cumulative_rewards([0, 0, 1, -2, 3, -4, 0], gamma=0.5),
[0.0625, 0.125, 0.25, -1.5, 1.0, -4.0, 0.0])
assert np.allclose(
get_cumulative_rewards([0, 0, 1, 2, 3, 4, 0], gamma=0),
[0, 0, 1, 2, 3, 4, 0])
print("looks good!")
```
#### Loss function and updates
We now need to define objective and update over policy gradient.
Our objective function is
$$ J \approx { 1 \over N } \sum_{s_i,a_i} G(s_i,a_i) $$
REINFORCE defines a way to compute the gradient of the expected reward with respect to policy parameters. The formula is as follows:
$$ \nabla_\theta \hat J(\theta) \approx { 1 \over N } \sum_{s_i, a_i} \nabla_\theta \log \pi_\theta (a_i \mid s_i) \cdot G_t(s_i, a_i) $$
We can abuse PyTorch's capabilities for automatic differentiation by defining our objective function as follows:
$$ \hat J(\theta) \approx { 1 \over N } \sum_{s_i, a_i} \log \pi_\theta (a_i \mid s_i) \cdot G_t(s_i, a_i) $$
When you compute the gradient of that function with respect to network weights $\theta$, it will become exactly the policy gradient.
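In code, the surrogate objective usually reduces to a couple of lines. The toy example below (an illustration with made-up numbers, not the exercise solution) shows the pattern: take the log-probabilities of the chosen actions, weight them by the returns, add an entropy bonus, and minimize the negative.
```
import torch
import torch.nn.functional as F

# toy setup: 3 transitions, 2 actions
logits = torch.tensor([[1.0, 0.5], [0.2, 0.8], [0.0, 0.0]], requires_grad=True)
actions = torch.tensor([0, 1, 0])
returns = torch.tensor([1.729, 0.81, 0.9])
entropy_coef = 1e-2

log_probs = F.log_softmax(logits, dim=-1)
probs = F.softmax(logits, dim=-1)

log_p_actions = log_probs[torch.arange(len(actions)), actions]   # log pi(a_i|s_i)
entropy = -(probs * log_probs).sum(dim=1).mean()                 # exploration bonus
loss = -(log_p_actions * returns).mean() - entropy_coef * entropy

loss.backward()   # gradient of the surrogate is the policy-gradient estimate
```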
```
def to_one_hot(y_tensor, ndims):
""" helper: take an integer vector and convert it to 1-hot matrix. """
y_tensor = y_tensor.type(torch.LongTensor).view(-1, 1)
y_one_hot = torch.zeros(
y_tensor.size()[0], ndims).scatter_(1, y_tensor, 1)
return y_one_hot
# Your code: define optimizers
optimizer = torch.optim.Adam(model.parameters(), 1e-3)
def train_on_session(states, actions, rewards, gamma=0.99, entropy_coef=1e-2):
"""
Takes a sequence of states, actions and rewards produced by generate_session.
Updates agent's weights by following the policy gradient above.
Please use Adam optimizer with default parameters.
"""
# cast everything into torch tensors
states = torch.tensor(states, dtype=torch.float32)
actions = torch.tensor(actions, dtype=torch.int32)
cumulative_returns = np.array(get_cumulative_rewards(rewards, gamma))
cumulative_returns = torch.tensor(cumulative_returns, dtype=torch.float32)
# predict logits, probas and log-probas using an agent.
logits = model(states)
probs = nn.functional.softmax(logits, -1)
log_probs = nn.functional.log_softmax(logits, -1)
assert all(isinstance(v, torch.Tensor) for v in [logits, probs, log_probs]), \
"please use compute using torch tensors and don't use predict_probs function"
# select log-probabilities for chosen actions, log pi(a_i|s_i)
log_probs_for_actions = torch.sum(
log_probs * to_one_hot(actions, env.action_space.n), dim=1)
# Compute loss here. Don't forget entropy regularization with `entropy_coef`
entropy = <YOUR CODE>
loss = <YOUR CODE>
# Gradient descent step
<YOUR CODE>
# technical: return session rewards to print them later
return np.sum(rewards)
```
### The actual training
```
for i in range(100):
rewards = [train_on_session(*generate_session(env)) for _ in range(100)] # generate new sessions
print("mean reward:%.3f" % (np.mean(rewards)))
if np.mean(rewards) > 500:
print("You Win!") # but you can train even further
break
```
### Results & video
```
# Record sessions
import gym.wrappers
with gym.wrappers.Monitor(gym.make("CartPole-v0"), directory="videos", force=True) as env_monitor:
sessions = [generate_session(env_monitor) for _ in range(100)]
# Show video. This may not work in some setups. If it doesn't
# work for you, you can download the videos and view them locally.
from pathlib import Path
from base64 import b64encode
from IPython.display import HTML
video_paths = sorted([s for s in Path('videos').iterdir() if s.suffix == '.mp4'])
video_path = video_paths[-1] # You can also try other indices
if 'google.colab' in sys.modules:
# https://stackoverflow.com/a/57378660/1214547
with video_path.open('rb') as fp:
mp4 = fp.read()
data_url = 'data:video/mp4;base64,' + b64encode(mp4).decode()
else:
data_url = str(video_path)
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format(data_url))
```
| github_jupyter |
# Equivalent layer technique for estimating total magnetization direction: Analysis of the result
## Importing libraries
```
% matplotlib inline
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import cPickle as pickle
import datetime
import timeit
import string as st
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from fatiando.gridder import regular
notebook_name = 'airborne_EQL_magdirection_RM_analysis.ipynb'
```
## Plot style
```
plt.style.use('ggplot')
```
## Importing my package
```
dir_modules = '../../../mypackage'
sys.path.append(dir_modules)
import auxiliary_functions as fc
```
## Loading model
```
with open('data/model_multi.pickle') as f:
model_multi = pickle.load(f)
```
## Loading observation points
```
with open('data/airborne_survey.pickle') as f:
airborne = pickle.load(f)
```
## Loading data set
```
with open('data/data_set.pickle') as f:
data = pickle.load(f)
```
## Loading results
```
with open('data/result_RM_airb.pickle') as f:
results = pickle.load(f)
```
## List of saved files
```
saved_files = []
```
## Observation area
```
print 'Area limits: \n x_max = %.1f m \n x_min = %.1f m \n y_max = %.1f m \n y_min = %.1f m' % (airborne['area'][1],
airborne['area'][0],
airborne['area'][3],
airborne['area'][2])
```
## Airborne survey information
```
print 'Shape : (%.0f,%.0f)'% airborne['shape']
print 'Number of data: %.1f' % airborne['N']
print 'dx: %.1f m' % airborne['dx']
print 'dy: %.1f m ' % airborne['dy']
```
## Properties of the model
### Main field
```
inc_gf,dec_gf = model_multi['main_field']
print'Main field inclination: %.1f degree' % inc_gf
print'Main field declination: %.1f degree' % dec_gf
```
### Magnetization direction
```
print 'Inclination: %.1f degree' % model_multi['inc_R']
print 'Declination: %.1f degree' % model_multi['dec_R']
inc_R,dec_R = model_multi['inc_R'],model_multi['dec_R']
```
### Coordinates equivalent sources
```
h = results['layer_depth']
shape_layer = (airborne['shape'][0],airborne['shape'][1])
xs,ys,zs = regular(airborne['area'],shape_layer,h)
```
## The best solution using L-curve
```
m_LM = results['magnetic_moment'][4]
inc_est = results['inc_est'][4]
dec_est = results['dec_est'][4]
mu = results['reg_parameter'][4]
phi = results['phi'][4]
print mu
```
## Visualization of the convergence
```
phi = (np.array(phi)/airborne['x'].size)
title_font = 22
bottom_font = 20
saturation_factor = 1.
plt.close('all')
plt.figure(figsize=(10,10), tight_layout=True)
plt.plot(phi,'b-',linewidth=1.5)
plt.title('Convergence', fontsize=title_font)
plt.xlabel('iteration', fontsize = title_font)
plt.ylabel('Goal function ', fontsize = title_font)
plt.tick_params(axis='both', which='major', labelsize=15)
file_name = 'figs/airborne/convergence_LM_NNLS_magRM'
plt.savefig(file_name+'.png',dpi=300)
saved_files.append(file_name+'.png')
plt.show()
```
## Estimated magnetization direction
```
print (inc_est,dec_est)
print (inc_R,dec_R)
```
## Comparison between observed data and predicted data
```
pred = fc.tfa_layer(airborne['x'],airborne['y'],airborne['z'],
xs,ys,zs,inc_gf,dec_gf,m_LM,inc_est,dec_est)
res = pred - data['tfa_obs_RM_airb']
r_norm,r_mean,r_std = fc.residual(data['tfa_obs_RM_airb'],pred)
title_font = 22
bottom_font = 20
plt.figure(figsize=(28,11), tight_layout=True)
ranges = np.abs([data['tfa_obs_RM_airb'].max(),
data['tfa_obs_RM_airb'].min(),
pred.max(), pred.min()]).max()
ranges_r = np.abs([res.max(),res.min()]).max()
## Observed data plot
ax1=plt.subplot(1,4,1)
plt.title('Observed data', fontsize=title_font)
plt.xlabel('y (km)',fontsize = title_font)
plt.ylabel('x (km)',fontsize = title_font)
plt.contourf(1e-3*airborne['y'].reshape(airborne['shape']),
1e-3*airborne['x'].reshape(airborne['shape']),
data['tfa_obs_RM_airb'].reshape(airborne['shape']),
30, cmap='viridis',vmin=-ranges, vmax=ranges)
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
cb = plt.colorbar(pad=0.01, aspect=40, shrink=1.0)
cb.set_label('nT',size=bottom_font)
cb.ax.tick_params(labelsize=bottom_font)
## Predicted data plot
ax2=plt.subplot(1,4,2)
plt.title('Predicted data', fontsize=title_font)
plt.xlabel('y (km)',fontsize = title_font)
plt.ylabel('x (km)',fontsize = title_font)
plt.contourf(1e-3*airborne['y'].reshape(airborne['shape']),
1e-3*airborne['x'].reshape(airborne['shape']),
pred.reshape(airborne['shape']),
30, cmap='viridis', vmin=-ranges, vmax=ranges)
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
cb = plt.colorbar(pad=0.01, aspect=40, shrink=1.0)
cb.set_label('nT',size=bottom_font)
cb.ax.tick_params(labelsize=bottom_font)
## Residuals plot and histogram
ax3=plt.subplot(1,4,3)
plt.title('Residuals map', fontsize=title_font)
plt.xlabel('y (km)',fontsize = title_font)
plt.ylabel('x (km)',fontsize = title_font)
plt.contourf(1e-3*airborne['y'].reshape(airborne['shape']),
1e-3*airborne['x'].reshape(airborne['shape']),
res.reshape(airborne['shape']),
30, cmap='viridis', vmin=-ranges_r, vmax=ranges_r)
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
cb = plt.colorbar(pad=0.01, aspect=40, shrink=1.0)
cb.set_label('nT',size=bottom_font)
cb.ax.tick_params(labelsize=bottom_font)
ax4=plt.subplot(1,4,4)
plt.title('Histogram of residuals', fontsize =title_font)
plt.xlabel('Residuals (nT)', fontsize = title_font)
plt.ylabel('Frequency', fontsize = title_font)
plt.text(0.02, 0.97, "mean = {:.2f}\nstd = {:.2f} ".format(np.mean(res), np.std(res)),
horizontalalignment='left',
verticalalignment='top',
transform = ax4.transAxes, fontsize=bottom_font)
n, bins, patches = plt.hist(res,bins=30, normed=True, facecolor='black')
gauss = mlab.normpdf(bins, 0., 10.)
plt.plot(bins, gauss, 'r-', linewidth=4.)
ax4.set_xticks([-100.0,-50.,0.0,50.,100.0])
ax4.set_yticks([.0,.010,.020,.030,.040,.05,.06])
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
##
file_name = 'figs/airborne/data_fitting_LM_NNLS_magRM'
plt.savefig(file_name+'.png',dpi=300)
saved_files.append(file_name+'.png')
plt.show()
```
## Positive magnetic-moment distribution
```
title_font = 22
bottom_font = 20
plt.close('all')
plt.figure(figsize=(10,10), tight_layout=True)
plt.title('Magnetic moment distribution', fontsize=title_font)
plt.contourf(1e-3*ys.reshape(shape_layer),1e-3*xs.reshape(shape_layer),
m_LM.reshape(shape_layer), 40, cmap='inferno')
cb = plt.colorbar(pad=0.01, aspect=40, shrink=1.0)
cb.set_label('$A.m^2$',size=bottom_font)
cb.ax.tick_params(labelsize=bottom_font)
plt.xlabel('y (km)', fontsize = title_font)
plt.ylabel('x (km)', fontsize = title_font)
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
file_name = 'figs/airborne/magnetic_moment_positive_LM_NNLS_magRM'
plt.savefig(file_name+'.png',dpi=300)
saved_files.append(file_name+'.png')
plt.show()
```
## Figure for paper
```
#title_font = 17
title_font = 5
#bottom_font = 14
bottom_font = 4
hist_font = 5
height_per_width = 17./15.
plt.figure(figsize=(4.33,4.33*height_per_width), tight_layout=True)
ranges = np.abs([data['tfa_obs_RM_airb'].max(),
data['tfa_obs_RM_airb'].min(),
pred.max(), pred.min()]).max()
ranges_r = np.abs([res.max(),res.min()]).max()
## Observed data plot
ax1=plt.subplot(3,2,1)
plt.title('(a) Observed data', fontsize=title_font)
plt.xlabel('y (km)',fontsize = title_font)
plt.ylabel('x (km)',fontsize = title_font)
plt.contourf(1e-3*airborne['y'].reshape(airborne['shape']),
1e-3*airborne['x'].reshape(airborne['shape']),
data['tfa_obs_RM_airb'].reshape(airborne['shape']),
30, cmap='viridis',vmin=-ranges, vmax=ranges)
cbar = plt.colorbar(pad=0.01, aspect=20, shrink=1.0)
cbar.set_label('nT',size=title_font)
cbar.ax.tick_params(labelsize=bottom_font)
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
plt.title('(a) Observed data', fontsize=title_font)
plt.xlabel('y (km)',fontsize = title_font)
plt.ylabel('x (km)',fontsize = title_font)
## Predicted data plot
ax2=plt.subplot(3,2,2)
plt.contourf(1e-3*airborne['y'].reshape(airborne['shape']),
1e-3*airborne['x'].reshape(airborne['shape']),
pred.reshape(airborne['shape']),
30, cmap='viridis', vmin=-ranges, vmax=ranges)
cbar = plt.colorbar(pad=0.01, aspect=20, shrink=1.0)
cbar.set_label('nT',size=title_font)
cbar.ax.tick_params(labelsize=bottom_font)
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
plt.title('(b) Predicted data', fontsize=title_font)
plt.xlabel('y (km)',fontsize = title_font)
plt.ylabel('x (km)',fontsize = title_font)
## Residuals plot and histogram
ax3=plt.subplot(3,2,3)
plt.contourf(1e-3*airborne['y'].reshape(airborne['shape']),
1e-3*airborne['x'].reshape(airborne['shape']),
res.reshape(airborne['shape']),
30, cmap='viridis', vmin=-ranges_r, vmax=ranges_r)
cbar = plt.colorbar(pad=0.01, aspect=20, shrink=1.0)
cbar.set_label('nT',size=title_font)
cbar.ax.tick_params(labelsize=bottom_font)
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
plt.title('(c) Residuals', fontsize=title_font)
plt.xlabel('y (km)',fontsize = title_font)
plt.ylabel('x (km)',fontsize = title_font)
ax4= plt.subplot(3,2,4)
plt.text(0.02, 0.97, "mean = {:.2f}\nstd = {:.2f} ".format(np.mean(res), np.std(res)),
horizontalalignment='left',
verticalalignment='top',
transform = ax4.transAxes, fontsize=hist_font)
n, bins, patches = plt.hist(res,bins=20, normed=True, facecolor='black')
gauss = mlab.normpdf(bins, 0., 10.)
plt.plot(bins, gauss, 'r-', linewidth=1.)
ax4.set_xticks([-100.0,-50.,0.0,50.,100.0])
ax4.set_yticks([.0,.010,.020,.030,.040,.05,.06])
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
plt.title('(d) Histogram of residuals', fontsize =title_font)
plt.xlabel('Residuals (nT)', fontsize = title_font)
plt.ylabel('Frequency', fontsize = title_font)
ax5= plt.subplot(3,2,5)
plt.contourf(1e-3*ys.reshape(shape_layer),1e-3*xs.reshape(shape_layer),
m_LM.reshape(shape_layer)*1e-9, 30, cmap='inferno')
cbar = plt.colorbar(pad=0.01, aspect=20, shrink=1.0)
cbar.set_label('$10^{9}$ A$\cdot$m$^2$',size=title_font)
cbar.ax.tick_params(labelsize=bottom_font)
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
plt.title('(e) Magnetic moment distribution', fontsize=title_font)
plt.xlabel('y (km)', fontsize = title_font)
plt.ylabel('x (km)', fontsize = title_font)
ax6= plt.subplot(3,2,6)
plt.plot(phi, 'b-',linewidth=1.0)
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
plt.title('(f) Convergence', fontsize=title_font)
plt.xlabel('iteration', fontsize = title_font)
plt.ylabel('Goal function ', fontsize = title_font)
###########################################################################
#file_name = 'figs/airborne/results_compiled_LM_NNLS_magRM'
file_name = 'figs/airborne/Fig3'
plt.savefig(file_name+'.png',dpi=1200)
saved_files.append(file_name+'.png')
plt.savefig(file_name+'.eps',dpi=1200)
saved_files.append(file_name+'.eps')
plt.show()
```
| github_jupyter |
# Parsing Natural Language in Python
**(C) 2018 by [Damir Cavar](http://damir.cavar.me/)**
**License:** [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/) ([CA BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/))
This is a tutorial related to the discussion of parsing with Probabilistic Context Free Grammars (PCFG) in the class *Advanced Natural Language Processing* taught at Indiana University in Fall 2018.
This code and tutorial are based on various summer school courses and tutorials that I gave on different occasions in Europe and the US. This particular example uses code from **TDAParser.py** and other scripts developed since 2002. Most of this material was used in general introductory courses on algorithms in Natural Language Processing that I taught at Indiana University, the University of Konstanz, the University of Zadar, and the University of Nova Gorica.
```
import sys
```
## The Grammar Class
Let us assume that our Phrase Structure Grammar consists of rules with exactly one symbol on the left-hand side, followed by a production symbol (an arrow) and a right-hand side of at least one terminal or non-terminal symbol. Comments can be introduced using the *#* symbol. Every rule has to fit on a single line.
```
grammarText = """
# PSG1
# small English grammar
# (C) 2005 by Damir Cavar, Indiana University
# Grammar:
S -> NP VP
NP -> N
NP -> Adj N
NP -> Art Adj N
NP -> Art N
NP -> Art N PP
#NP -> Art N NP
VP -> V
VP -> V NP
VP -> Adv V NP
VP -> V PP
VP -> V NP PP
PP -> P NP
# Lexicon:
N -> John
N -> Mary
N -> bench
N -> cat
N -> mouse
Art -> the
Art -> a
Adj -> green
Adj -> yellow
Adj -> big
Adj -> small
Adv -> often
Adv -> yesterday
V -> kissed
V -> loves
V -> sees
V -> meets
V -> chases
P -> on
P -> in
P -> beside
P -> under
"""
```
We can parse this grammar into a representation that allows us to fetch the left- and the right-hand side of a rule for top- or bottom-up parsing.
```
class PSG:
def __init__(self, grammar):
self.LHS = {}
self.RHS = {}
self.__read__(grammar)
def __str__(self):
text = ""
for i in self.LHS.keys():
if len(text) > 0:
text += "\n"
for x in self.LHS[i]:
text += i + " -> " + " ".join(x) + "\n"
return text
def __read__(self, g):
for i in g.split("\n"):
i = i.split("#")[0].strip() # cut off comment string and strip
if len(i) == 0: continue
tokens = i.split("->")
if len(tokens) != 2: continue
lhs = tokens[0].split()
if len(lhs) != 1: continue
rhs = tuple(tokens[1].split())
value = self.LHS.get(lhs[0], [])
if rhs not in value: value.append(rhs)
self.LHS[lhs[0]] = value
value = self.RHS.get(rhs, [])
if lhs[0] not in value: value.append(lhs[0])
self.RHS[rhs] = value
def getRHS(self, left):
return self.LHS.get(left, [])
def getLHS(self, right):
return self.RHS.get(right, [])
```
The grammar text defined above plays the role of a separate grammar file; the `getRHS` and `getLHS` methods expose its rules to the parser, as the short example below shows.
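A quick usage sketch (not in the original notebook), assuming the `grammarText` and `PSG` definitions above:
```
g = PSG(grammarText)
print(g.getRHS("NP"))           # every right-hand side that NP can expand to
print(g.getLHS(("Art", "N")))   # non-terminals that produce the sequence "Art N"
```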
## The Top-Down Parser
Defining some parameters:
```
LIFO = -1
FIFO = 0
strategy = FIFO
def tdparse(inp, goal, grammar, agenda):
print("Got : %s\tinput: %s" % (goal, inp))
if goal == inp == []: print("Success")
elif goal == [] or inp == []:
if agenda == []: print("Fail: Agenda empty!")
else:
entry = agenda.pop(strategy)
print("Backing up to: %s with %s" % (entry[0], entry[1]))
tdparse(entry[1], entry[0], grammar, agenda)
else: # there is something in goal and input
if goal[0] == inp[0]: # if initial symbols match, reduce lists, parse
tdparse(inp[1:], goal[1:], grammar, agenda)
else:
for i in grammar.LHS.get(goal[0], []):
if [list(i) + goal[1:], inp] not in agenda:
agenda.append([list(i) + goal[1:], inp])
if len(agenda) > 0:
entry = agenda.pop(strategy)
tdparse(entry[1], entry[0], grammar, agenda)
else: print("Fail: Agenda empty!")
myGrammar = PSG(grammarText)
print(myGrammar)
tdparse( ('John', 'loves', 'Mary') , ["S"], myGrammar, [])
```
| github_jupyter |
```
# to require at least a given version we can use >= (quote it so the shell
# does not treat > as output redirection)
# pip install "plotly>=4.0.0"
!pip install "plotly>=4.1.1"
!pip uninstall -y cufflinks
import plotly
plotly.__version__
# !pip install "notebook>=5.3" "ipywidgets>=7.2"
import plotly.graph_objects as go
fig = go.Figure(data=go.Bar(x=[-3,-2,-1,0,1], y=[2, 3, 1, 5.3, -1], marker_color='rgb(226, 118, 155)'))
fig.show()
# https://plot.ly/python/line-and-scatter/
import plotly.express as px
iris = px.data.iris()
fig = px.scatter(iris, x="sepal_width", y="sepal_length")
fig.show()
iris.head()
iris.info()
# https://plot.ly/python/line-and-scatter/
import plotly.express as px
iris = px.data.iris()
fig = px.scatter(iris, x="sepal_width", y="sepal_length", color="species")
fig.show()
# https://plot.ly/python/line-and-scatter/
import plotly.express as px
iris = px.data.iris()
fig = px.scatter(iris, x="sepal_width",
y="sepal_length",
size="petal_length",
color="species",
hover_data=['petal_width', 'species_id']
)
fig.show()
# Line plot with plotly express
import plotly.express as px
gapminder = px.data.gapminder().query("continent == 'Oceania'")
fig = px.line(gapminder, x='year', y='lifeExp', color='country')
fig.show()
# Line plot with plotly express
import plotly.express as px
gapminder = px.data.gapminder().query("continent == 'Europe'")
fig = px.line(gapminder, x='year', y='lifeExp', color='country')
fig.show()
gapminder.head()
allcountries = px.data.gapminder()
allcountries.head()
finland = allcountries[allcountries['country'] == 'Finland']
finland.tail()
# https://stackoverflow.com/questions/12096252/use-a-list-of-values-to-select-rows-from-a-pandas-dataframe
nordic = allcountries[allcountries['country'].isin(['Finland', 'Norway', 'Denmark', 'Sweden'])]
nordic.head()
fig = px.line(nordic, x='year', y='pop', color='country')
fig.show()
fig = px.line(nordic, x='year', y='lifeExp', color='country')
fig.show()
import numpy as np
N = 1000
t = np.linspace(0, 10, 101)
y = np.sin(t)
t2 = np.arange(0, 10.01, 0.1)
t2
t
fig = go.Figure(data=go.Scatter(x=t, y=y, mode='markers'))
fig.show()
N = 100
random_x = np.linspace(0, 1, N)
random_y0 = np.random.randn(N) + 5
random_y1 = np.random.randn(N)
random_y2 = np.random.randn(N) - 5
fig = go.Figure()
# Add traces
fig.add_trace(go.Scatter(x=random_x, y=random_y0,
mode='markers',
name='markers'))
fig.add_trace(go.Scatter(x=random_x, y=random_y1,
mode='lines+markers',
name='lines+markers'))
# fig.add_trace(go.Scatter(x=random_x, y=random_y2,
# mode='lines',
# name='lines'))
fig.add_trace(go.Bar(x=random_x, y=random_y2))
fig.show()
N = 100
random_x = np.linspace(0, 1, N)
random_y0 = np.random.randn(N) + 5
random_y1 = np.random.randn(N)
random_y2 = np.random.randn(N) - 5
random_y3 = np.random.randn(N) + 10
fig = go.Figure()
# Add traces
fig.add_trace(go.Scatter(x=random_x, y=random_y0,
mode='markers',
name='markers'))
fig.add_trace(go.Scatter(x=random_x, y=random_y1,
mode='lines+markers',
name='lines+markers'))
fig.add_trace(go.Scatter(x=random_x, y=random_y2,
mode='lines',
name='lines'))
# using add_trace we can add different type of charts on the same plot figure
# https://plot.ly/python/bar-charts/
fig.add_trace(go.Bar(x=random_x, y=random_y3))
fig.show()
fig = px.bar(finland, x='year', y='lifeExp',
hover_data=['lifeExp', 'gdpPercap'], color='gdpPercap',
labels={'pop':'population of Finland'}, height=400)
fig.show()
nordic.columns
fig = px.bar(nordic, x='year', y='lifeExp',
hover_data=['lifeExp', 'gdpPercap'], color='country',
labels={'pop':'population of Nordic Countries'}, height=400, barmode="group")
fig.update_layout(barmode='group', xaxis_tickangle=-45)
fig.show()
# the default for px.bar is stacked bars; to get grouped bars we pass barmode='group' as above
fig = px.bar(nordic, x='year', y='lifeExp',
hover_data=['lifeExp', 'gdpPercap'], color='country',
labels={'pop':'population of Nordic Countries'}, height=400)
fig.update_layout(xaxis_tickangle=-45)
fig.show()
labels = ['Oxygen','Hydrogen','Carbon_Dioxide','Nitrogen']
values = [4500, 2500, 1053, 500]
fig = go.Figure(data=[go.Pie(labels=labels, values=values)])
colors = ['rgb(127,0,127)', 'mediumturquoise', 'darkorange', 'lightgreen']
fig.update_traces(hoverinfo='label+percent', textinfo='value', textfont_size=20,
marker=dict(colors=colors, line=dict(color='#000000', width=2)))
fig.update_traces(hole = 0.3)
fig.update_traces(title = "Elements in atmosphere")
fig.show()
labels = ['Oxygen','Hydrogen','Carbon_Dioxide','Nitrogen']
values = [4500, 2500, 1053, 500]
fig = go.Figure(data=[go.Pie(labels=labels, values=values)])
colors = ['rgb(127,0,127)', 'mediumturquoise', 'darkorange', 'lightgreen']
fig.update_traces(hoverinfo='label+percent', textinfo='value', textfont_size=20,
marker=dict(colors=colors, line=dict(color='#000000', width=2)))
# fig.update_traces(hole = 0.3)
fig.update_traces(title = "Elements in atmosphere")
fig.show()
df = px.data.gapminder()
fig = px.scatter(df.query("year==2007"), x="gdpPercap", y="lifeExp",
size="pop", color="continent",
hover_name="country", log_x=True, size_max=60)
fig.show()
```
# Mask R-CNN Demo
A quick intro to using the pre-trained model to detect and segment objects.
```
import os
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
import matplotlib.pyplot as plt
import cv2,time,json,glob
from IPython.display import clear_output
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
# Import COCO config
sys.path.append(os.path.join("/home/jchilders/coco/")) # To find local version
from coco import coco
import tensorflow as tf
print('tensorflow version: ',tf.__version__)
print('using gpu: ',tf.test.is_gpu_available())
%matplotlib inline
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
# Directory of images to run detection on
IMAGE_DIR = os.path.join(ROOT_DIR, "images")
```
## Configurations
We'll be using a model trained on the MS-COCO dataset. The configurations of this model are in the ```CocoConfig``` class in ```coco.py```.
For inferencing, modify the configurations a bit to fit the task. To do so, sub-class the ```CocoConfig``` class and override the attributes you need to change.
```
class InferenceConfig(coco.CocoConfig):
    # Batch size = GPU_COUNT * IMAGES_PER_GPU.
    # Here we run inference on batches of 10 images at a time.
GPU_COUNT = 1
IMAGES_PER_GPU = 10
BATCH_SIZE=10
config = InferenceConfig()
config.display()
```
## Create Model and Load Trained Weights
```
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
```
## Class Names
The model classifies objects and returns class IDs, which are integer values that identify each class. Some datasets assign integer values to their classes and some don't. For example, in the MS-COCO dataset, the 'person' class is 1 and 'teddy bear' is 88. The IDs are often sequential, but not always. The COCO dataset, for example, has classes associated with class IDs 70 and 72, but not 71.
To improve consistency, and to support training on data from multiple sources at the same time, our ```Dataset``` class assigns its own sequential integer IDs to each class. For example, if you load the COCO dataset using our ```Dataset``` class, the 'person' class would get class ID = 1 (just like COCO) and the 'teddy bear' class is 78 (different from COCO). Keep that in mind when mapping class IDs to class names.
To get the list of class names, you'd load the dataset and then use the ```class_names``` property like this.
```
# Load COCO dataset
dataset = coco.CocoDataset()
dataset.load_coco(COCO_DIR, "train")
dataset.prepare()
# Print class names
print(dataset.class_names)
```
We don't want to require you to download the COCO dataset just to run this demo, so we're including the list of class names below. The index of the class name in the list represents its ID (first class is 0, second is 1, third is 2, ...etc.)
```
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush']
```
## Run Object Detection
```
# Load a random image from the images folder
file_names = next(os.walk(IMAGE_DIR))[2]
image = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names)))
# Run detection
results = model.detect([image], verbose=1)
# Visualize results
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
class_names, r['scores'])
fn = "/home/jchilders/car_videos/10.07.17-10.07.40.mp4"
cap = cv2.VideoCapture(fn)
fps = cap.get(cv2.CAP_PROP_FPS)
print('frames per second: %d' % fps)
frames = []
ret, frame = cap.read()
timestamp = [cap.get(cv2.CAP_PROP_POS_MSEC)]
frames.append(frame)
data = []
while(ret):
if len(frames) == 10:
results = model.detect(frames)
for i in range(len(results)):
r = results[i]
rois = r['rois'].tolist()
masks = r['masks'] * 1
class_ids = r['class_ids']
size = []
position = []
pixel_size = []
class_name = []
            # use j here so the outer index i (used for timestamp[i] below) is not overwritten
            for j in range(len(rois)):
                size.append([ rois[j][2] - rois[j][0],
                              rois[j][3] - rois[j][1] ])
                position.append([ rois[j][0]+int(float(size[-1][0])/2.),
                                  rois[j][1]+int(float(size[-1][1])/2.) ] )
                # masks has shape [height, width, num_instances]
                pixel_size.append(int(masks[:, :, j].sum()))
                class_name.append(class_names[class_ids[j]])
data.append({'size': size,
'position': position,
'pixel_size': pixel_size,
'timestamp': timestamp[i],
'rois':rois,
'class_ids':r['class_ids'].tolist(),
'class_names':class_name,
'scores':r['scores'].tolist()})
# clear_output(wait=True)
# visualize.display_instances(frames[i], r['rois'], r['masks'], r['class_ids'],
# class_names, r['scores'])
# print(r['rois'])
# print(r['class_ids'])
# print(r['scores'])
json.dump(data,open('%s_fps%d.json' % (os.path.basename(fn),fps),'w'),indent=2, sort_keys=True)
frames = []
timestamp = []
ret, frame = cap.read()
timestamp.append(cap.get(cv2.CAP_PROP_POS_MSEC))
frames.append(frame)
fn = "/home/jchilders/car_videos/10.07.17-10.07.40.mp4"
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush']
def get_video_data(fn,model,batch_size,show_img=False):
cap = cv2.VideoCapture(fn)
fps = cap.get(cv2.CAP_PROP_FPS)
print('frames per second: %d' % fps)
frames = []
ret, frame = cap.read()
timestamp = [cap.get(cv2.CAP_PROP_POS_MSEC)]
frames.append(frame)
data = []
    output = {'filename': fn,
'fps': fps,
'timestamp': str(time.ctime(os.path.getmtime(fn))),
'data': data}
while(ret):
if len(frames) == batch_size:
results = model.detect(frames)
for i in range(len(results)):
r = results[i]
rois = r['rois'].tolist()
masks = r['masks'] * 1
class_ids = r['class_ids']
size = []
position = []
pixel_size = []
class_name = []
                # use j here so the outer index i (used for timestamp[i] below) is not overwritten
                for j in range(len(rois)):
                    size.append([ rois[j][2] - rois[j][0],
                                  rois[j][3] - rois[j][1] ])
                    position.append([ rois[j][0]+int(float(size[-1][0])/2.),
                                      rois[j][1]+int(float(size[-1][1])/2.) ] )
                    # masks has shape [height, width, num_instances]
                    pixel_size.append(int(masks[:, :, j].sum()))
                    class_name.append(class_names[class_ids[j]])
data.append({'size': size,
'position': position,
'pixel_size': pixel_size,
'frametime': timestamp[i],
'rois':rois,
'class_ids':r['class_ids'].tolist(),
'class_names':class_name,
'scores':r['scores'].tolist()})
if show_img:
clear_output(wait=True)
vr = results[0]
visualize.display_instances(frames[0], vr['rois'], vr['masks'], vr['class_ids'],
class_names, vr['scores'])
# print(r['rois'])
# print(r['class_ids'])
# print(r['scores'])
# json.dump(data,open('%s_fps%d.json' % (os.path.basename(fn),fps),'w'),indent=2, sort_keys=True)
frames = []
timestamp = []
ret, frame = cap.read()
timestamp.append(cap.get(cv2.CAP_PROP_POS_MSEC))
frames.append(frame)
return output
batch_size = 25
class InferenceConfig(coco.CocoConfig):
    # Batch size = GPU_COUNT * IMAGES_PER_GPU.
    # Here we run inference on batches of `batch_size` images at a time.
GPU_COUNT = 1
IMAGES_PER_GPU = batch_size
BATCH_SIZE = batch_size
config = InferenceConfig()
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
filelist = open('/home/jchilders/car_videos/filelist.txt').readlines()
print('files: %d' % len(filelist))
output = []
for i,line in enumerate(filelist):
print(' %s of %s' % (i,len(filelist)))
fn = line.strip()
fn_output = get_video_data(fn,model,batch_size,show_img=True)
print(fn_output)
clear_output(wait=True)
output.append(fn_output)
json.dump(output, open('full_data.json','w'), indent=2, sort_keys=True)
```
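Assuming the loop above runs to completion and writes `full_data.json`, the saved detections can be read back like this (a small sketch; the keys follow the dictionaries built in `get_video_data`):
```
import json

with open('full_data.json') as f:
    detections = json.load(f)                     # one entry per video file in filelist.txt

first_video = detections[0]
print(first_video['fps'], len(first_video['data']))
print(first_video['data'][0]['class_names'])      # classes detected in the first processed frame
print(first_video['data'][0]['position'])         # centers of the corresponding bounding boxes
```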
# The perceptron - Recognising the MNIST digits
<div>Table of contents</div>
<div id="toc"></div>
```
%matplotlib inline
from pylab import *
from utils import *
```
Let us implement a perceptron that categorizes the MNIST images as digits. As you will see below, the behaviour of the network is far from optimal. The network [learns the training set well](#Plotting-the-results-of-test), but its behaviour in a [test with new digits](#Spreading-of-the-network-during-test) is far from optimal. **The task we are asking the network to learn is too difficult!!**
### Training
#### Initializing data and parameters
First we initialize the dataset (see [The MNIST dataset](http://francesco-mannella.github.io/neunet-basics/mnist.html)), then we define a few parameters and initialize the main variables:
```
#-----------------------------------------------------------
# training
# Set the number of patterns
n_patterns = 500
# Take 'n_patterns' rows
indices = arange(training_length)
shuffle(indices)
indices = indices[:n_patterns]
# Get patterns
patterns = array(mndata.train_images)[indices]
# Rescale all patterns between 0 and 1
patterns = sign(patterns/255.0)
# Get the labels of the patterns
labels = array(mndata.train_labels)[indices]
# Constants
# Number of repetitions of
# the pattern series
epochs = 30
# Number of trials for learning
stime = epochs*n_patterns
# Create a list of pattern indices.
# We will reshuffle it at each
# repetition of the series
pattern_indices = arange(n_patterns)
# Learning rate
eta = 0.0001
# Number of output units
m = 10
# Number of input units (the input
# is given by a 28*28 pixel vector)
n = n_pixels
# Variables
# Init weights
w = zeros([m, n+1])
# Init input units
x = zeros(n)
# init net input
net = zeros(m)
# Init output units
y = zeros(m)
# Init desired output vector
y_target = zeros(m)
# We will store the input, output and error history
input_store = zeros([n,stime])
output_store = zeros([m,stime])
label_store = zeros([m,stime])
squared_errors = zeros(epochs)
```
Let us visualize the first 20 patterns of the training set:
```
for i in xrange(20):
# Create a new figure after each 10-th item
if i%10 == 0:
fig = figure(figsize = (20, 1))
# Plot current item (we use the
# function plot_img in our utils.py)
plot_img( to_mat(patterns[i]),
fig, (i%10)+1, windows = 20 )
    # show figure after all 10 items
# are plotted
if i%10 == 9:
show()
```
#### Spreading of the network during training
Here starts the core part, iterating the timesteps. We also divide the training phase in epochs. Each epoch is a single presentation of the whole input pattern series. The sum of squared errors will be grouped by epochs.
```
# counter of repetitions
# of the series of patterns
epoch = -1
# Iterate trials
for t in xrange(stime) :
# Reiterate the input pattern
# sequence through timesteps
# Reshuffle at the end
# of the series
if t%n_patterns == 0:
shuffle(pattern_indices)
epoch += 1
# Current pattern
k = pattern_indices[t%n_patterns]
# Aggregate inputs and the bias unit
x = hstack([ 1, patterns[k] ])
# Only the unit representing the desired
# category is set to 1
y_target *= 0
y_target[labels[k]] = 1
# !!!! The dot product becomes a matrix
# product with more than one output unit !!!!
net = dot(w,x)
# output function
y = step(net)
# Learning - outer product
w += eta*outer(y_target - y, x);
# Store data
input_store[:,t] = x[1:]
output_store[:,t] = y
label_store[:,t] = y_target
squared_errors[epoch] += 0.5*sum((y_target - y)**2)
```
#### Plotting the results of training
We plot the history of the squared errors through the epochs
```
fig = figure()
ax = fig.add_subplot(111)
ax.plot(squared_errors)
xlabel("Epochs")
ylabel("Sum of squared errors")
```
and a visualization of the weights to each output unit. Each set of weights seems to reproduce (in a very rough manner) a generalization of the target digit.
```
figure(figsize=(15,2))
for i in xrange(m) :
subplot(1,m,i+1)
title(i)
im = to_mat(w[i,1:])
imshow(im, cmap=cm.bone)
axis('off')
show()
```
### Testing
#### Initializing data and parameters
Now we create a new dataset to test the network and reset some variables:
```
#-----------------------------------------------------------
# test
# Set the number of patterns
n_patterns = 1000
# Take 'n_patterns' rows
indices = arange(test_length)
shuffle(indices)
indices = indices[:n_patterns]
# Get patterns
patterns = array(mndata.test_images)[indices]
# Binarize all patterns to 0/1 values
patterns = sign(patterns/255.0)
# Get the labels of the patterns
labels = array(mndata.test_labels)[indices]
# Constants
# Create a list of pattern indices.
# We will reshuffle it at each
# repetition of the series
pattern_indices = arange(n_patterns)
shuffle(pattern_indices)
# Clear variables
x *= 0
net *= 0
y *= 0
# We will store the input, output and error history
input_store = zeros([patterns.shape[1], n_patterns])
output_store = zeros([m, n_patterns])
target_store = zeros(n_patterns)
error_store = zeros(n_patterns)
```
#### Spreading of the network during test
The network reacts to each test pattern in each spreading timestep:
```
# Iterate trials
for p in xrange(n_patterns) :
# Aggregate inputs and the bias unit
x = hstack([ 1, patterns[p] ])
# !!!! The dot product becomes a matrix
# product with more than one output unit !!!!
net = dot(w,x)
# output function
y = step(net)
y_index = squeeze(find(y==1))
y_index_target = int(labels[p])
error = 0
if y_index.size < 2 :
if y_index == y_index_target :
error = 1
# store
input_store[:,p] = x[1:]
output_store[:,p] = y
target_store[p] = labels[p]
error_store[p] = error
```
Let us see what is the proportion of correct answers of the network:
```
print "Proportion of correct answers:{}" \
.format(sum(error_store)/float(n_patterns))
```
#### Plotting the results of test
Now we plot a few test samples to get a concrete idea. For each sample we plot the input digit on top, the answer of the network in the middle, and the target digit at the bottom. Square brackets indicate that the network gave zero or more than one answer.
```
import matplotlib.gridspec as gridspec
gs = gridspec.GridSpec(8, 4*10)
n_patterns = 20
for p in xrange(n_patterns) :
im = to_mat(input_store[:,p])
k = p%10
if k==0 :
fig = figure(figsize=(15,2))
ax1 = fig.add_subplot(gs[:4,(k*4):(k*4+4)])
ax1.imshow(im, cmap=cm.binary)
ax1.set_axis_off()
if error_store[p] == True :
color = "blue"
else :
color = "red"
y = squeeze(find(output_store[:,p]==1))
y_target = int(labels[p])
ax2 = fig.add_subplot(gs[4:6,(k*4):(k*4+4)])
ax2.text(0.5,0.5,"{}".format(y),
fontsize="16", color=color)
axis("off")
ax3 = fig.add_subplot(gs[6:,(k*4):(k*4+4)])
ax3.text(0.5,0.5,"{}".format(y_target),
fontsize = "16", color=color )
axis("off")
if k == 9:
show()
```
The next cell is just for styling
```
from IPython.core.display import HTML
def css_styling():
styles = open("../style/ipybn.css", "r").read()
return HTML(styles)
css_styling()
```
# BE 240 Lecture 4
# Sub-SBML
## Modeling diffusion, shared resources, and compartmentalized systems
## _Ayush Pandey_
```
# This notebook is designed to be converted to a HTML slide show
# To do this in the command prompt type (in the folder containing the notebook):
# jupyter nbconvert BE240_Lecture4_Sub-SBML.ipynb --to slides
```


# An example:
### Three different "subsystems" - each with its SBML model
### Another "signal in mixture" subsystem - models signal in the environment / mixture
### Using Sub-SBML we can obtain the combined model for such a system with
* transport across membrane
* shared resources : ATP, Ribosome etc
* resolve naming conflicts (Ribo, Ribosome, RNAP, RNAPolymerase etc.)

# Installing Sub-SBML
```
git clone https://github.com/BuildACell/subsbml.git
```
cd to the `subsbml` directory, then run the following command to install the package in your environment:
```
python setup.py install
```
# Dependencies:
1. python-libsbml : Run `pip install python-libsbml`, if you don't have it already. You probably already have this installed as it is also a dependency for bioscrape
1. A simulator: You will need a simulator of your choice to simulate the SBML models that Sub-SBML generates. Bioscrape is an example of a simulator and we will be using that for simulations.
# Update your bioscrape installation
From the bioscrape directory, run the following commands if you do not have a remote fork (that is, your own GitHub fork of the original bioscrape repository, `biocircuits/bioscrape`). To list all remote repositories that your bioscrape directory is connected to, run `git remote -v`. The `origin` in the next two commands corresponds to the `biocircuits/bioscrape` GitHub repository (change it if your remote has a different name).
```
git pull origin master
python setup.py install
```
Update your BioCRNpyler installation as well - if you plan to use your own BioCRNpyler models with Sub-SBML. Run the same commands as for bioscrape from the BioCRNpyler directory.
## Sub-SBML notes:
## On "name" and "identifier":
> SBML elements can have a name and an identifier argument. A `name` is supposed to be a human readable name of the particular element in the model. On the other hand, an `identifier` is what the software tool reads. Hence, `identifier` argument in an SBML model is mandatory whereas `name` argument is optional.
Sub-SBML works with `name` arguments of various model components to figure out what components interact/get combined/shared etc. Bioscrape/BioCRNpyler and other common software tools generate SBML models with `name` arguments added to various components such as species, parameters. As an example, to combine two species, Sub-SBML looks at the names of the two species and if they are the same - they are combined together and given a new identifier but the name remains the same.
## A simple Sub-SBML use case:
A simple example where we have two different models : transcription and translation. Using Sub-SBML, we can combine these two together and run simulations.
```
# Import statements
from subsbml.Subsystem import createNewSubsystem, createSubsystem
import numpy as np
import pylab as plt
```
## Transcription Model:
Consider the following simple transcription-only model where $G$ is a gene, $T$ is a transcript, and $S$ is the signaling molecule.
We can write the following reduced order dynamics:
1. $G \xrightarrow[]{\rho_{tx}(G, S)} G + T$;
\begin{align}
\rho_{tx}(G, S) = G K_{X}\frac{S^{2}}{K_{S}^{2}+S^{2}}
\\
\end{align}
Here, $S$ is the inducer signal that cooperatively activates the transcription of the gene $G$. Since this is a positive activation of the gene by the inducer, we have a positive proportional Hill function.
1. $T \xrightarrow[]{\delta} \varnothing$; mass-action kinetics at rate $\delta$.
## Translation model:
1. $T \xrightarrow[]{\rho_{tl}(T)} T+X$;
\begin{align}
\rho_{tl}(T) = K_{TR} \frac{T}{K_{R} + T}
\\
\end{align}
Here $X$ is the protein species.
The lumped parameters $K_{TR}$ and $K_R$ model effects due to ribosome saturation. This is the same kind of Hill function as derived in the enzymatic reaction example.
1. $X \xrightarrow[]{\delta} \varnothing$; mass-action kinetics at rate $\delta$.
```
# Import SBML models by creating Subsystem class objects
ss1 = createSubsystem('transcription_SBML_model.xml')
ss2 = createSubsystem('translation_SBML_model.xml')
ss1.renameSName('mRNA_T', 'T')
# Combine the two subsystems together
tx_tl_subsystem = ss1 + ss2
# The longer way to do the same thing:
# tx_tl_subsystem = createNewSubsystem()
# tx_tl_subsystem.combineSubsystems([ss1,ss2], verbose = True)
# Set signal concentration (input) - manually and get ID for protein X
X_id = tx_tl_subsystem.getSpeciesByName('X').getId()
# Writing a Subsystem to an SBML file (Export SBML)
_ = tx_tl_subsystem.writeSBML('txtl_ss.xml')
tx_tl_subsystem.setSpeciesAmount('S',10)
try:
# Simulate with Bioscrape and plot the result
timepoints = np.linspace(0,100,100)
results, _ = tx_tl_subsystem.simulateWithBioscrape(timepoints)
plt.plot(timepoints, results[X_id], linewidth = 3, label = 'S = 10')
tx_tl_subsystem.setSpeciesAmount('S',5)
results, _ = tx_tl_subsystem.simulateWithBioscrape(timepoints)
plt.plot(timepoints, results[X_id], linewidth = 3, label = 'S = 5')
plt.title('Protein X dynamics')
plt.ylabel('[X]')
plt.xlabel('Time')
plt.legend()
plt.show()
except:
print('Simulator not found')
# Viewing the change log for the changes that Sub-SBML made
# print(ss1.changeLog)
# print(ss2.changeLog)
print(tx_tl_subsystem.changeLog)
```
## Signal induction model:
1. $\varnothing \xrightarrow[]{\rho(I)} S$;
\begin{align}
\rho(I) = K_{0} \frac{I^2}{K_{I} + I^2}
\\
\end{align}
Here $S$ is the signal produced on induction by an inducer $I$.
The lumped parameters $K_{0}$ and $K_{I}$ model effects of cooperative production of the signal by the inducer. This is the same kind of Hill function as derived in the enzymatic reaction example.
```
ss3 = createSubsystem('signal_in_mixture.xml')
# Signal subsystem (production of signal molecule)
combined_ss = ss1 + ss2 + ss3
# Alternatively
combined_ss = createNewSubsystem()
combined_ss.combineSubsystems([ss1,ss2,ss3])
# Writing a Subsystem to an SBML file (Export SBML)
combined_ss.writeSBML('txtl_combined.xml')
# Set signal concentration (input) - manually and get ID for protein X
combined_ss.setSpeciesAmount('I',10)
X_id = combined_ss.getSpeciesByName('X').getId()
try:
# Simulate with Bioscrape and plot the result
timepoints = np.linspace(0,100,100)
results, _ = combined_ss.simulateWithBioscrape(timepoints)
plt.plot(timepoints, results[X_id], linewidth = 3, label = 'I = 10')
combined_ss.setSpeciesAmount('I',2)
results, _ = combined_ss.simulateWithBioscrape(timepoints)
    plt.plot(timepoints, results[X_id], linewidth = 3, label = 'I = 2')
plt.title('Protein X dynamics')
plt.ylabel('[X]')
plt.xlabel('Time')
plt.legend()
plt.show()
except:
print('Simulator not found')
combined_ss.changeLog
```
## What does Sub-SBML look for?
1. For compartments: if two compartments have the same `name` and the same `size` attributes => they are combined together.
1. For species: if two species have the same `name` attribute => they are combined together. If initial amount is not the same, the first amount is set. It is easy to set species amounts later.
1. For parameters: if two parameters have the same `name` attribute **and** the same `value` => they are combined together.
1. For reactions: if two reactions have the same `name` **and** the same reaction string (reactants -> products) => they are combined together.
1. Other SBML components are also merged.
# Utility functions for Subsystems
1. Set `verbose` keyword argument to `True` to get a list of detailed warning messages that describe the changes being made to the models. Helpful in debugging and creating clean models when combining multiple models.
1. Use `renameSName` method for a `Subsystem` to rename any species' names throughout a model and `renameSIdRefs` to rename identifiers.
1. Use `createBasicSubsystem()` function to get a basic "empty" subsystem model.
1. Use `getSpeciesByName` to get all species with a given name in a Subsystem model.
1. Use the `shareSubsystems` method (similar to `combineSubsystems`) if you are only interested in getting a model with shared resource species combined together; a short sketch follows this list.
1. Set `combineNames` keyword argument to `False` in `combineSubsystems` method to combine the Subsystem objects but treating the elements with the same `name` as different.
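A short sketch of a few of these utilities, assuming `ss1` and `ss2` from the transcription/translation cells above (the `shareSubsystems` call mirrors the `combineSubsystems` pattern shown earlier):
```
# Merge only the shared resource species of two models (sketch)
shared_ss = createNewSubsystem()
shared_ss.shareSubsystems([ss1, ss2])
print(shared_ss.changeLog)   # inspect what was renamed/combined
```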
# Modeling transport across membranes

## System 1 : TX-TL with IPTG reservoir and no membrane
```
from subsbml.System import System, combineSystems
cell_1 = System('cell_1')
ss1 = createSubsystem('txtl_ss.xml')
ss1.renameSName('S', 'IPTG')
ss2 = createSubsystem('IPTG_reservoir.xml')
IPTG_external_conc = ss2.getSpeciesByName('IPTG').getInitialConcentration()
cell_1.setInternal([ss1])
cell_1.setExternal([ss2])
# cell_1.setMembrane() # Membrane-less system
ss1.setSpeciesAmount('IPTG', IPTG_external_conc)
cell_1_model = cell_1.getModel() # Get a Subsystem object that represents the combined model for cell_1
cell_1_model.writeSBML('cell_1_model.xml')
```
## System 2 : TX-TL with IPTG reservoir and a simple membrane
### Membrane : IPTG external and internal diffusion in a one step reversible reaction
```
from subsbml import System, createSubsystem, combineSystems, createNewSubsystem
ss1 = createSubsystem('txtl_ss.xml')
ss1.renameSName('S','IPTG')
ss2 = createSubsystem('IPTG_reservoir.xml')
# Create a simple IPTG membrane where IPTG goes in an out of the membrane via a reversible reaction
mb2 = createSubsystem('membrane_IPTG.xml', membrane = True)
# cell_2 = System('cell_2',ListOfInternalSubsystems = [ss1],
# ListOfExternalSubsystems = [ss2],
# ListOfMembraneSubsystems = [mb2])
cell_2 = System('cell_2')
cell_2.setInternal(ss1)
cell_2.setExternal(ss2)
cell_2.setMembrane(mb2)
cell_2_model = cell_2.getModel()
cell_2_model.setSpeciesAmount('IPTG', 1e4, compartment = 'cell_2_external')
cell_2_model.writeSBML('cell_2_model.xml')
```
## System 3 : TX-TL with IPTG reservoir and a detailed membrane diffusion
### Membrane : external IPTG binds to a transport protein and forms a complex, which then transports IPTG into the interior of the cell.
```
# Create a more detailed IPTG membrane where IPTG binds to an intermediate transporter protein, forms a complex
# then transports out of the cell system to the external environment
mb3 = createSubsystem('membrane_IPTG_detailed.xml', membrane = True)
cell_3 = System('cell_3',ListOfInternalSubsystems = [ss1],
ListOfExternalSubsystems = [ss2],
ListOfMembraneSubsystems = [mb3])
cell_3_model = cell_3.getModel()
cell_3_model.setSpeciesAmount('IPTG', 1e4, compartment = 'cell_3_external')
cell_3_model.writeSBML('cell_3_model.xml')
combined_model = combineSystems([cell_1, cell_2, cell_3])
try:
import numpy as np
import matplotlib.pyplot as plt
timepoints = np.linspace(0,2,100)
results_1, _ = cell_1_model.simulateWithBioscrape(timepoints)
results_2, _ = cell_2_model.simulateWithBioscrape(timepoints)
results_3, _ = cell_3_model.simulateWithBioscrape(timepoints)
X_id1 = cell_1_model.getSpeciesByName('X').getId()
X_id2 = cell_2_model.getSpeciesByName('X', compartment = 'cell_2_internal').getId()
X_id3 = cell_3_model.getSpeciesByName('X', compartment = 'cell_3_internal').getId()
plt.plot(timepoints, results_1[X_id1], linewidth = 3, label = 'No membrane')
plt.plot(timepoints, results_2[X_id2], linewidth = 3, label = 'Simple membrane')
plt.plot(timepoints, results_3[X_id3], linewidth = 3, label = 'Advanced membrane')
plt.xlabel('Time')
plt.ylabel('[X]')
plt.legend()
plt.show()
timepoints = np.linspace(0,200,100)
results_1, _ = cell_1_model.simulateWithBioscrape(timepoints)
results_2, _ = cell_2_model.simulateWithBioscrape(timepoints)
results_3, _ = cell_3_model.simulateWithBioscrape(timepoints)
X_id1 = cell_1_model.getSpeciesByName('X').getId()
X_id2 = cell_2_model.getSpeciesByName('X', compartment = 'cell_2_internal').getId()
X_id3 = cell_3_model.getSpeciesByName('X', compartment = 'cell_3_internal').getId()
plt.plot(timepoints, results_1[X_id1], linewidth = 3, label = 'No membrane')
plt.plot(timepoints, results_2[X_id2], linewidth = 3, label = 'Simple membrane')
plt.plot(timepoints, results_3[X_id3], linewidth = 3, label = 'Advanced membrane')
plt.xlabel('Time')
plt.ylabel('[X]')
plt.legend()
plt.show()
except:
print('Simulator not found')
```
# Additional Sub-SBML Tools:
* Create SBML models directly using `SimpleModel` class
* Simulate directly using `bioscrape` or `libRoadRunner` with various simulation options
* Various utility functions to edit SBML models:
1. Change species names/identifiers throughout an SBML model.
1. Edit parameter values or species initial conditions easily (directly in an SBML model).
* `combineSystems` function can be used to combine multiple `System` objects together as shown in the previous cell. Also, a special use case interaction modeling function is available : `connectSubsystems`. Refer to the tutorial_interconnetion.ipynb notebook in the tutorials directory for more information about this.
# Things to Try:
1. Compartmentalize your own SBML model - generate more than 1 model each with a different compartment names. Using tools in this notebook, try to combine your models together and regenerate the expected simulation.
1. Implement a diffusion model and use it as a membrane model for a `System` of your choice.
1. Implement an even more complicated diffusion model for the above example and run the simulation.
1. **The package has not been tested extensively. So, it would be really great if you could raise [issues](https://github.com/BuildACell/subsbml/issues) on Github if you face any errors with your models. Also, feel free to send a message on Slack channel or DM.**
[Table of Contents](http://nbviewer.ipython.org/github/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/table_of_contents.ipynb)
# The Extended Kalman Filter
```
#format the book
%matplotlib inline
from __future__ import division, print_function
from book_format import load_style
load_style()
```
At this point in the book we have developed the theory for the linear Kalman filter. Then, in the last two chapters we broached the topic of using Kalman filters for nonlinear problems. In this chapter we will learn the Extended Kalman filter (EKF). The EKF handles nonlinearity by linearizing the system at the point of the current estimate, and then the linear Kalman filter is used to filter this linearized system. It was one of the very first techniques used for nonlinear problems, and it remains the most common technique.
The EKF provides significant mathematical challenges to the designer of the filter; this is the most challenging chapter of the book. To be honest, I do everything I can to avoid the EKF in favor of other techniques that have been developed to filter nonlinear problems. However, the topic is unavoidable; all classic papers and a majority of current papers in the field use the EKF. Even if you do not use the EKF in your own work you will need to be familiar with the topic to be able to read the literature.
## Linearizing the Kalman Filter
The Kalman filter uses linear equations, so it does not work with nonlinear problems. Problems can be nonlinear in two ways. First, the process model might be nonlinear. An object falling through the atmosphere encounters drag which reduces its acceleration. The amount of drag varies with the velocity of the object. The resulting behavior is nonlinear - it cannot be modeled with linear equations. Second, the measurements could be nonlinear. For example, a radar gives a range and bearing to a target. We use trigonometry, which is nonlinear, to compute the position of the target.
For the linear filter we have these equations for the process and measurement models:
$$\begin{aligned}\overline{\mathbf x} &= \mathbf{Ax} + \mathbf{Bu} + w_x\\
\mathbf z &= \mathbf{Hx} + w_z
\end{aligned}$$
For the nonlinear model these equations must be modified to read:
$$\begin{aligned}\overline{\mathbf x} &= f(\mathbf x, \mathbf u) + w_x\\
\mathbf z &= h(\mathbf x) + w_z
\end{aligned}$$
The linear expression $\mathbf{Ax} + \mathbf{Bu}$ is replaced by a nonlinear function $f(\mathbf x, \mathbf u)$, and the linear expression $\mathbf{Hx}$ is replaced by a nonlinear function $h(\mathbf x)$.
You might imagine that we proceed by finding a new set of Kalman filter equations that optimally solve these equations. But if you remember the charts in the **Nonlinear Filtering** chapter you'll recall that passing a Gaussian through a nonlinear function results in a probability distribution that is no longer Gaussian. So this will not work.
The EKF does not alter the Kalman filter's linear equations. Instead, it *linearizes* the nonlinear equations at the point of the current estimate, and uses this linearization in the linear Kalman filter.
*Linearize* means what it sounds like. We find a line that most closely matches the curve at a defined point. The graph below linearizes the parabola $f(x)=x^2−2x$ at $x=1.5$.
```
import ekf_internal
ekf_internal.show_linearization()
```
If the curve above is the process model, then the dotted lines shows the linearization of that curve for the estimate $x=1.5$.
We linearize systems by finding the slope of the curve at the given point:
$$\begin{aligned}
f(x) &= x^2 -2x \\
\frac{df}{dx} &= 2x - 2
\end{aligned}$$
and then finding its value at the evaluation point:
$$\begin{aligned}m &= f'(x=1.5) \\&= 2(1.5) - 2 \\&= 1\end{aligned}$$
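As a quick numerical check of this linearization (a small sketch, not part of the filter itself):
```
f  = lambda x: x**2 - 2*x                      # the nonlinear function
df = lambda x: 2*x - 2                         # its derivative

x0 = 1.5
tangent = lambda x: f(x0) + df(x0)*(x - x0)    # the linearization at x0

print(df(x0))                                  # slope m = 1.0
print(f(1.6), tangent(1.6))                    # -0.64 vs -0.65: close near the linearization point
```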
Our math will be more complicated because we are working with systems of differential equations. We linearize $f(\mathbf x, \mathbf u)$, and $h(\mathbf x)$ by taking the partial derivatives ($\frac{\partial}{\partial \mathbf x}$) of each to evaluate $\mathbf A$ and $\mathbf H$ at the point $\mathbf x_t$ and $\mathbf u_t$. This gives us the system dynamics matrix and measurement model matrix:
$$
\begin{aligned}
\mathbf A
&= {\frac{\partial{f(\mathbf x_t, \mathbf u_t)}}{\partial{\mathbf x}}}\biggr|_{{\mathbf x_t},{\mathbf u_t}} \\
\mathbf H &= \frac{\partial{h(\mathbf x_t)}}{\partial{\mathbf x}}\biggr|_{\mathbf x_t}
\end{aligned}
$$
Finally, we find the discrete state transition matrix $\mathbf F$ by using the approximation of the Taylor-series expansion of $e^{\mathbf A \Delta t}$:
$$\mathbf F = e^{\mathbf A\Delta t} = \mathbf{I} + \mathbf A\Delta t + \frac{(\mathbf A\Delta t)^2}{2!} + \frac{(\mathbf A\Delta t)^3}{3!} + ... $$
Alternatively, you can use one of the other techniques we learned in the **Kalman Math** chapter.
This leads to the following equations for the EKF. I placed them beside the equations for the linear Kalman filter, and put boxes around the only changes:
$$\begin{array}{l|l}
\text{linear Kalman filter} & \text{EKF} \\
\hline
& \boxed{\mathbf A = {\frac{\partial{f(\mathbf x_t, \mathbf u_t)}}{\partial{\mathbf x}}}\biggr|_{{\mathbf x_t},{\mathbf u_t}}} \\
& \boxed{\mathbf F = e^{\mathbf A \Delta t}} \\
\mathbf{\overline x} = \mathbf{Fx} + \mathbf{Bu} & \boxed{\mathbf{\overline x} = f(\mathbf x, \mathbf u)} \\
\mathbf{\overline P} = \mathbf{FPF}^\mathsf{T}+\mathbf Q & \mathbf{\overline P} = \mathbf{FPF}^\mathsf{T}+\mathbf Q \\
\hline
& \boxed{\mathbf H = \frac{\partial{h(\mathbf x_t)}}{\partial{\mathbf x}}\biggr|_{\mathbf x_t}} \\
\textbf{y} = \mathbf z - \mathbf{H \bar{x}} & \textbf{y} = \mathbf z -\mathbf{H \bar{x}}\\
\mathbf{K} = \mathbf{\bar{P}H}^\mathsf{T} (\mathbf{H\bar{P}H}^\mathsf{T} + \mathbf R)^{-1} & \mathbf{K} = \mathbf{\bar{P}H}^\mathsf{T} (\mathbf{H\bar{P}H}^\mathsf{T} + \mathbf R)^{-1} \\
\mathbf x=\mathbf{\bar{x}} +\mathbf{K\textbf{y}} & \mathbf x=\mathbf{\bar{x}} +\mathbf{K\textbf{y}} \\
\mathbf P= (\mathbf{I}-\mathbf{KH})\mathbf{\bar{P}} & \mathbf P= (\mathbf{I}-\mathbf{KH})\mathbf{\bar{P}}
\end{array}$$
We don't normally use $\mathbf{Fx}$ to propagate the state for the EKF as the linearization causes inaccuracies. It is typical to compute $\overline{\mathbf x}$ using a numerical integration technique such as Euler or Runge Kutta. Thus I wrote $\mathbf{\overline x} = f(\mathbf x, \mathbf u)$.
I think the easiest way to understand the EKF is to start off with an example. After we do a few examples you may want to come back and reread this section.
## Example: Tracking a Flying Airplane
We will start by simulating tracking an airplane by using ground based radar. We implemented a UKF for this problem in the last chapter. Now we will implement an EKF for the same problem so we can compare both the filter performance and the level of effort required to implement the filter.
Radars work by emitting a beam of radio waves and scanning for a return bounce. Anything in the beam's path reflects some of the signal back to the radar. By timing how long it takes for the reflected signal to get back to the radar the system can compute the *slant distance* - the straight line distance from the radar installation to the object.
For this example we want to take the slant range measurement from the radar and compute the horizontal position (distance of aircraft from the radar measured over the ground) and altitude of the aircraft, as in the diagram below.
```
import ekf_internal
ekf_internal.show_radar_chart()
```
This gives us the equality $x=\sqrt{slant^2 - altitude^2}$.
### Design the State Variables
We want to track the position of an aircraft assuming a constant velocity and altitude, and measurements of the slant distance to the aircraft. That means we need 3 state variables - horizontal distance, horizontal velocity, and altitude:
$$\mathbf x = \begin{bmatrix}\mathtt{distance} \\\mathtt{velocity}\\ \mathtt{altitude}\end{bmatrix}= \begin{bmatrix}x \\ \dot x\\ y\end{bmatrix}$$
### Design the Process Model
We assume a Newtonian, kinematic system for the aircraft. We've used this model in previous chapters, so by inspection you may recognize that we want
$$\mathbf F = \left[\begin{array}{cc|c} 1 & \Delta t & 0\\
0 & 1 & 0 \\ \hline
0 & 0 & 1\end{array}\right]$$
I've partitioned the matrix into blocks to show the upper left block is a constant velocity model for $x$, and the lower right block is a constant position model for $y$.
However, let's practice finding this matrix for a nonlinear system. We model nonlinear systems with a set of differential equations. We need an equation in the form
$$\dot{\mathbf x} = \mathbf{Ax} + \mathbf{w}$$
where $\mathbf{w}$ is the system noise.
The variables $x$ and $y$ are independent so we can compute them separately. The differential equations for motion in one dimension are:
$$\begin{aligned}v &= \dot x \\
a &= \ddot{x} = 0\end{aligned}$$
Now we put the differential equations into state-space form. If this was a second or greater order differential system we would have to first reduce them to an equivalent set of first degree equations. The equations are first order, so we put them in state space matrix form as
$$\begin{aligned}\begin{bmatrix}\dot x \\ \ddot{x}\end{bmatrix} &= \begin{bmatrix}0&1\\0&0\end{bmatrix} \begin{bmatrix}x \\
\dot x\end{bmatrix} \\ \dot{\mathbf x} &= \mathbf{Ax}\end{aligned}$$
where $\mathbf A=\begin{bmatrix}0&1\\0&0\end{bmatrix}$.
Recall that $\mathbf A$ is the *system dynamics matrix*. It describes a set of linear differential equations. From it we must compute the state transition matrix $\mathbf F$. $\mathbf F$ describes a discrete set of linear equations which compute $\mathbf x$ for a discrete time step $\Delta t$.
We find $\mathbf F$ by solving the following power series expansion of the matrix exponential for the time step $\Delta t$:
$$\mathbf F(\Delta t) = e^{\mathbf A\Delta t} = \mathbf{I} + \mathbf A\Delta t + \frac{(\mathbf A\Delta t)^2}{2!} + \frac{(\mathbf A \Delta t)^3}{3!} + ... $$
$\mathbf A^2 = \begin{bmatrix}0&0\\0&0\end{bmatrix}$, so all higher powers of $\mathbf A$ are also $\mathbf{0}$. Thus the power series expansion is:
$$
\begin{aligned}
\mathbf F(\Delta t) &=\mathbf{I} + \mathbf A\Delta t + \mathbf{0} \\
&= \begin{bmatrix}1&0\\0&1\end{bmatrix} + \begin{bmatrix}0&1\\0&0\end{bmatrix}\Delta t\\
&= \begin{bmatrix}1&\Delta t\\0&1\end{bmatrix}
\end{aligned}$$
This give us
$$
\begin{aligned}
\mathbf{\overline x} &=\mathbf{Fx} \\
\mathbf{\overline x} &=\begin{bmatrix}1&\Delta t\\0&1\end{bmatrix}\mathbf x
\end{aligned}$$
This is the same result used by the kinematic equations! This exercise was unnecessary other than to illustrate linearizing differential equations. Subsequent examples will require you to use these techniques.
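If you want to check a derivation like this numerically, SciPy's matrix exponential gives the same $\mathbf F$ (a quick sketch, not part of the filter design):
```
import numpy as np
from scipy.linalg import expm

dt = 0.05
A = np.array([[0., 1.],
              [0., 0.]])     # system dynamics matrix for the constant velocity model

F = expm(A * dt)             # discrete state transition matrix
print(F)                     # [[1.  0.05]
                             #  [0.  1.  ]] -- matches I + A*dt because A^2 = 0
```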
### Design the Measurement Model
The measurement function for our filter needs to take the filter state $\mathbf x$ and turn it into a measurement, which is the slant range distance. We use the Pythagorean theorem to derive
$$h(\mathbf x) = \sqrt{x^2 + y^2}$$
The relationship between the slant distance and the position on the ground is nonlinear due to the square root term. To use it in the EKF we must linearize it. As we discussed above, the best way to linearize an equation at a point is to find its slope, which we do by evaluating its partial derivative at a point:
$$
\mathbf H = \frac{\partial{h(\mathbf x)}}{\partial{\mathbf x}}\biggr|_{\mathbf x_t}
$$
The partial derivative of a matrix is called a Jacobian, and takes the form
$$\frac{\partial \mathbf H}{\partial \mathbf x} =
\begin{bmatrix}
\frac{\partial h_1}{\partial x_1} & \frac{\partial h_1}{\partial x_2} &\dots \\
\frac{\partial h_2}{\partial x_1} & \frac{\partial h_2}{\partial x_2} &\dots \\
\vdots & \vdots
\end{bmatrix}
$$
In other words, each element in the matrix is the partial derivative of the function $h$ with respect to the variables $x$. For our problem we have
$$\mathbf H = \begin{bmatrix}{\partial h}/{\partial x} & {\partial h}/{\partial \dot{x}} & {\partial h}/{\partial y}\end{bmatrix}$$
where $h(x) = \sqrt{x^2 + y^2}$.
Solving each in turn:
$$\begin{aligned}
\frac{\partial h}{\partial x} &= \frac{\partial}{\partial x} \sqrt{x^2 + y^2} \\
&= \frac{x}{\sqrt{x^2 + y^2}}
\end{aligned}$$
and
$$\begin{aligned}
\frac{\partial h}{\partial \dot{x}} &=
\frac{\partial}{\partial \dot{x}} \sqrt{x^2 + y^2} \\
&= 0
\end{aligned}$$
and
$$\begin{aligned}
\frac{\partial h}{\partial y} &= \frac{\partial}{\partial y} \sqrt{x^2 + y^2} \\
&= \frac{y}{\sqrt{x^2 + y^2}}
\end{aligned}$$
giving us
$$\mathbf H =
\begin{bmatrix}
\frac{x}{\sqrt{x^2 + y^2}} &
0 &
\frac{y}{\sqrt{x^2 + y^2}}
\end{bmatrix}$$
This may seem daunting, so step back and recognize that all of this math is doing something very simple. We have an equation for the slant range to the airplane which is nonlinear. The Kalman filter only works with linear equations, so we need to find a linear equation that approximates $\mathbf H$. As we discussed above, finding the slope of a nonlinear equation at a given point is a good approximation. For the Kalman filter, the 'given point' is the state variable $\mathbf x$ so we need to take the derivative of the slant range with respect to $\mathbf x$.
To make this more concrete, let's now write a Python function that computes the Jacobian of $\mathbf H$ for this problem. The `ExtendedKalmanFilter` class will be using this to generate `ExtendedKalmanFilter.H` at each step of the process.
```
from math import sqrt
def HJacobian_at(x):
""" compute Jacobian of H matrix at x """
horiz_dist = x[0]
altitude = x[2]
denom = sqrt(horiz_dist**2 + altitude**2)
return array ([[horiz_dist/denom, 0., altitude/denom]])
```
Finally, let's provide the code for $h(\mathbf x)$
```
def hx(x):
""" compute measurement for slant range that
would correspond to state x.
"""
return (x[0]**2 + x[2]**2) ** 0.5
```
Now lets write a simulation for our radar.
```
from numpy.random import randn
import math
class RadarSim(object):
""" Simulates the radar signal returns from an object
    flying at a constant altitude and velocity in 1D.
"""
def __init__(self, dt, pos, vel, alt):
self.pos = pos
self.vel = vel
self.alt = alt
self.dt = dt
def get_range(self):
""" Returns slant range to the object. Call once
for each new measurement at dt time from last call.
"""
# add some process noise to the system
self.vel = self.vel + .1*randn()
self.alt = self.alt + .1*randn()
self.pos = self.pos + self.vel*self.dt
# add measurement noise
err = self.pos * 0.05*randn()
slant_dist = math.sqrt(self.pos**2 + self.alt**2)
return slant_dist + err
```
### Design Process and Measurement Noise
The radar returns the range distance. A good radar can achieve accuracy of $\sigma_{range}= 5$ meters, so we will use that value. This gives us
$$\mathbf R = \begin{bmatrix}\sigma_{range}^2\end{bmatrix} = \begin{bmatrix}25\end{bmatrix}$$
The design of $\mathbf Q$ requires some discussion. The state $\mathbf x= \begin{bmatrix}x & \dot x & y\end{bmatrix}^\mathtt{T}$. The first two elements are position (down range distance) and velocity, so we can use `Q_discrete_white_noise` to compute the values for the upper left hand side of $\mathbf Q$. The third element of $\mathbf x$ is altitude, which we are assuming is independent of the down range distance. That leads us to a block design of $\mathbf Q$ of:
$$\mathbf Q = \begin{bmatrix}\mathbf Q_\mathtt{x} & 0 \\ 0 & \mathbf Q_\mathtt{y}\end{bmatrix}$$
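A sketch of this block construction (the variance values here are placeholders; the implementation below chooses its own):
```
import numpy as np
from scipy.linalg import block_diag
from filterpy.common import Q_discrete_white_noise

dt = 0.05
Q_x = Q_discrete_white_noise(dim=2, dt=dt, var=0.1)   # down range position/velocity block
Q_y = np.array([[0.1]])                               # independent altitude block
Q = block_diag(Q_x, Q_y)
print(Q.shape)                                        # (3, 3)
```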
### Implementation
The `FilterPy` library provides the class `ExtendedKalmanFilter`. It works very similarly to the `KalmanFilter` class we have been using, except that it allows you to provide functions that compute the Jacobian of $\mathbf H$ and the function $h(\mathbf x)$. We have already written the code for these two functions, so let's get going.
We start by importing the filter and creating it. There are 3 variables in `x` and only 1 measurement. At the same time we will create our radar simulator.
```python
from filterpy.kalman import ExtendedKalmanFilter
rk = ExtendedKalmanFilter(dim_x=3, dim_z=1)
radar = RadarSim(dt, pos=0., vel=100., alt=1000.)
```
We will initialize the filter near the airplane's actual position
```python
rk.x = array([radar.pos, radar.vel-10, radar.alt+100])
```
We assign the system matrix using the first term of the Taylor series expansion we computed above.
```python
dt = 0.05
rk.F = eye(3) + array([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]])*dt
```
After assigning reasonable values to $\mathbf R$, $\mathbf Q$, and $\mathbf P$ we can run the filter with a simple loop
```python
for i in range(int(20/dt)):
z = radar.get_range()
rk.update(array([z]), HJacobian_at, hx)
rk.predict()
```
Adding some boilerplate code to save and plot the results we get:
```
from filterpy.common import Q_discrete_white_noise
from filterpy.kalman import ExtendedKalmanFilter
from numpy import eye, array, asarray
import numpy as np
dt = 0.05
rk = ExtendedKalmanFilter(dim_x=3, dim_z=1)
radar = RadarSim(dt, pos=0., vel=100., alt=1000.)
# make an imperfect starting guess
rk.x = array([radar.pos-100, radar.vel+100, radar.alt+1000])
rk.F = eye(3) + array([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]) * dt
range_std = 5. # meters
rk.R = np.diag([range_std**2])
rk.Q[0:2, 0:2] = Q_discrete_white_noise(2, dt=dt, var=0.1)
rk.Q[2,2] = 0.1
rk.P *= 50
xs, track = [], []
for i in range(int(20/dt)):
z = radar.get_range()
track.append((radar.pos, radar.vel, radar.alt))
rk.update(array([z]), HJacobian_at, hx)
xs.append(rk.x)
rk.predict()
xs = asarray(xs)
track = asarray(track)
time = np.arange(0, len(xs)*dt, dt)
ekf_internal.plot_radar(xs, track, time)
```
## Using SymPy to compute Jacobians
Depending on your experience with derivatives you may have found the computation of the Jacobian difficult. Even if you found it easy, a slightly more difficult problem easily leads to very difficult computations.
As explained in Appendix A, we can use the SymPy package to compute the Jacobian for us.
```
import sympy
sympy.init_printing(use_latex=True)
x, x_vel, y = sympy.symbols('x, x_vel y')
H = sympy.Matrix([sympy.sqrt(x**2 + y**2)])
state = sympy.Matrix([x, x_vel, y])
H.jacobian(state)
```
This result is the same as the result we computed above, and with much less effort on our part!
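As an aside (a small sketch, reusing `x`, `x_vel`, `y`, `H`, and `state` from the cell above): `sympy.lambdify` turns the symbolic Jacobian into a numeric function, which is convenient if you want to reuse the symbolic result inside a filter.
```
HJ = sympy.lambdify((x, x_vel, y), H.jacobian(state), 'numpy')
print(HJ(1000., 100., 1000.))   # approximately [[0.7071  0.  0.7071]]
```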
## Robot Localization
So, time to try a real problem. I warn you that this is far from a simple problem. However, most books choose simple, textbook problems with simple answers, and you are left wondering how to implement a real world solution.
We will consider the problem of robot localization. We already implemented this in the **Unscented Kalman Filter** chapter, and I recommend you read that first. In this scenario we have a robot that is moving through a landscape with sensors that give range and bearings to various landmarks. This could be a self driving car using computer vision to identify trees, buildings, and other landmarks. It might be one of those small robots that vacuum your house, or a robot in a warehouse.
Our robot has 4 wheels configured the same as an automobile. It maneuvers by pivoting the front wheels. This causes the robot to pivot around the rear axle while moving forward. This is nonlinear behavior which we will have to model.
The robot has a sensor that gives it approximate range and bearing to known targets in the landscape. This is nonlinear because computing a position from a range and bearing requires square roots and trigonometry.
Both the process model and measurement models are nonlinear. The EKF accommodates both, so we provisionally conclude that the EKF is a viable choice for this problem.
### Robot Motion Model
At a first approximation an automobile steers by pivoting the front tires while moving forward. The front of the car moves in the direction that the wheels are pointing while pivoting around the rear tires. This simple description is complicated by issues such as slippage due to friction, the differing behavior of the rubber tires at different speeds, and the need for the outside tire to travel a different radius than the inner tire. Accurately modeling steering requires a complicated set of differential equations.
For Kalman filtering, especially for lower speed robotic applications a simpler *bicycle model* has been found to perform well. This is a depiction of the model:
```
ekf_internal.plot_bicycle()
```
In the **Unscented Kalman Filter** chapter we derived these equations describing for this model:
$$\begin{aligned} x &= x - R\sin(\theta) + R\sin(\theta + \beta) \\
y &= y + R\cos(\theta) - R\cos(\theta + \beta) \\
\theta &= \theta + \beta
\end{aligned}
$$
where $\theta$ is the robot's heading.
You do not need to understand this model in detail if you are not interested in steering models. The important thing to recognize is that our motion model is nonlinear, and we will need to deal with that with our Kalman filter.
### Design the State Variables
For our robot we will maintain the position and orientation of the robot:
$$\mathbf x = \begin{bmatrix}x \\ y \\ \theta\end{bmatrix}$$
Our control input $\mathbf u$ is the velocity $v$ and steering angle $\alpha$:
$$\mathbf u = \begin{bmatrix}v \\ \alpha\end{bmatrix}$$
### Design the System Model
In general we model our system as a nonlinear motion model plus noise.
$$\overline x = x + f(x, u) + \mathcal{N}(0, Q)$$
Using the motion model for a robot that we created above, we can expand this to
$$\overline{\begin{bmatrix}x\\y\\\theta\end{bmatrix}} = \begin{bmatrix}x\\y\\\theta\end{bmatrix} +
\begin{bmatrix}- R\sin(\theta) + R\sin(\theta + \beta) \\
R\cos(\theta) - R\cos(\theta + \beta) \\
\beta\end{bmatrix}$$
We linearize this with a Taylor expansion at $x$:
$$f(x, u) \approx \mathbf x + \frac{\partial f(\mathbf x, \mathbf u)}{\partial x}\biggr|_{\mathbf x, \mathbf u} $$
We replace $f(x, u)$ with our state estimate $\mathbf x$, and the derivative is the Jacobian of $f$.
The Jacobian $\mathbf F$ is
$$\mathbf F = \frac{\partial f(x, u)}{\partial x} =\begin{bmatrix}
\frac{\partial \dot x}{\partial x} &
\frac{\partial \dot x}{\partial y} &
\frac{\partial \dot x}{\partial \theta}\\
\frac{\partial \dot y}{\partial x} &
\frac{\partial \dot y}{\partial y} &
\frac{\partial \dot y}{\partial \theta} \\
\frac{\partial \dot{\theta}}{\partial x} &
\frac{\partial \dot{\theta}}{\partial y} &
\frac{\partial \dot{\theta}}{\partial \theta}
\end{bmatrix}
$$
When we calculate these we get
$$\mathbf F = \begin{bmatrix}
1 & 0 & -R\cos(\theta) + R\cos(\theta+\beta) \\
0 & 1 & -R\sin(\theta) + R\sin(\theta+\beta) \\
0 & 0 & 1
\end{bmatrix}$$
We can double check our work with SymPy.
```
import sympy
from sympy.abc import alpha, x, y, v, w, R, theta
from sympy import symbols, Matrix
sympy.init_printing(use_latex="mathjax", fontsize='16pt')
time = symbols('t')
d = v*time
beta = (d/w)*sympy.tan(alpha)
r = w/sympy.tan(alpha)
fxu = Matrix([[x-r*sympy.sin(theta) + r*sympy.sin(theta+beta)],
[y+r*sympy.cos(theta)- r*sympy.cos(theta+beta)],
[theta+beta]])
J = fxu.jacobian(Matrix([x, y, theta]))
J
```
That looks a bit complicated. We can use SymPy to substitute terms:
```
# reduce common expressions
B, R = symbols('beta, R')
J = J.subs((d/w)*sympy.tan(alpha), B)
J.subs(w/sympy.tan(alpha), R)
```
In that form we can see that our computation of the Jacobian is correct.
Now we can turn our attention to the noise. Here, the noise is in our control input, so it is in *control space*. In other words, we command a specific velocity and steering angle, but we need to convert that into errors in $x, y, \theta$. In a real system this might vary depending on velocity, so it will need to be recomputed for every prediction. I will choose this as the noise model; for a real robot you will need to choose a model that accurately depicts the error in your system.
$$\mathbf{M} = \begin{bmatrix}\sigma_{vel}^2 & 0 \\ 0 & \sigma_\alpha^2\end{bmatrix}$$
If this was a linear problem we would convert from control space to state space using the by now familiar $\mathbf{FMF}^\mathsf T$ form. Since our motion model is nonlinear we do not try to find a closed form solution to this, but instead linearize it with a Jacobian which we will name $\mathbf{V}$.
$$\mathbf{V} = \frac{\partial f(x, u)}{\partial u} = \begin{bmatrix}
\frac{\partial \dot x}{\partial v} & \frac{\partial \dot x}{\partial \alpha} \\
\frac{\partial \dot y}{\partial v} & \frac{\partial \dot y}{\partial \alpha} \\
\frac{\partial \dot{\theta}}{\partial v} & \frac{\partial \dot{\theta}}{\partial \alpha}
\end{bmatrix}$$
These partial derivatives become very difficult to work with. Let's compute them with SymPy.
```
V = fxu.jacobian(Matrix([v, alpha]))
V = V.subs(sympy.tan(alpha)/w, 1/R)
V = V.subs(time*v/R, B)
V = V.subs(time*v, 'd')
V
```
This should give you an appreciation of how quickly the EKF becomes mathematically intractable.
This gives us the final form of our prediction equations:
$$\begin{aligned}
\mathbf{\overline x} &= \mathbf x +
\begin{bmatrix}- R\sin(\theta) + R\sin(\theta + \beta) \\
R\cos(\theta) - R\cos(\theta + \beta) \\
\beta\end{bmatrix}\\
\mathbf{\overline P} &=\mathbf{FPF}^{\mathsf T} + \mathbf{VMV}^{\mathsf T}
\end{aligned}$$
One final point. This form of linearization is not the only way to predict $\mathbf x$. For example, we could use a numerical integration technique such as *Runge-Kutta* to compute the position of the robot in the future. In fact, if the time step is relatively large you will have to do that. As I am sure you are realizing, things are not as cut and dried with the EKF as they were for the KF. For a real problem you have to very carefully model your system with differential equations and then determine the most appropriate way to solve that system. The correct approach depends on the accuracy you require, how nonlinear the equations are, your processor budget, and numerical stability concerns. These are all topics beyond the scope of this book.
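If you are curious what that alternative looks like, here is a hedged sketch of a single fourth-order Runge-Kutta step applied to the continuous-time bicycle kinematics $\dot x = v\cos\theta$, $\dot y = v\sin\theta$, $\dot\theta = \frac{v}{w}\tan\alpha$. This is *not* what the filter below uses, and the function names are ours; it only illustrates what a numerical integrator would look like.
```python
import numpy as np
from math import sin, cos, tan

def bicycle_deriv(state, v, alpha, w):
    """Continuous-time bicycle kinematics: returns [dx/dt, dy/dt, dtheta/dt]."""
    x, y, theta = state
    return np.array([v*cos(theta), v*sin(theta), (v / w) * tan(alpha)])

def rk4_step(state, v, alpha, w, dt):
    """Advance the pose by dt using one classic RK4 step."""
    k1 = bicycle_deriv(state, v, alpha, w)
    k2 = bicycle_deriv(state + dt/2*k1, v, alpha, w)
    k3 = bicycle_deriv(state + dt/2*k2, v, alpha, w)
    k4 = bicycle_deriv(state + dt*k3, v, alpha, w)
    return state + dt/6 * (k1 + 2*k2 + 2*k3 + k4)

print(rk4_step(np.array([2., 6., .3]), v=1.1, alpha=0.01, w=0.5, dt=1.0))
```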
### Design the Measurement Model
Now we need to design our measurement model. For this problem we are assuming that we have a sensor that receives a noisy bearing and range to multiple known locations in the landscape. The measurement model must convert the state $\begin{bmatrix}x & y&\theta\end{bmatrix}^\mathsf T$ into a range and bearing to the landmark. With $p$ being the position of a landmark, the range $r$ is
$$r = \sqrt{(p_x - x)^2 + (p_y - y)^2}$$
We assume that the sensor provides bearing relative to the orientation of the robot, so we must subtract the robot's orientation from the bearing to get the sensor reading, like so:
$$\phi = \arctan(\frac{p_y - y}{p_x - x}) - \theta$$
Thus our function is
$$\begin{aligned}
\mathbf z& = h(\mathbf x, \mathbf p) &+ \mathcal{N}(0, R)\\
&= \begin{bmatrix}
\sqrt{(p_x - x)^2 + (p_y - y)^2} \\
\arctan(\frac{p_y - y}{p_x - x}) - \theta
\end{bmatrix} &+ \mathcal{N}(0, R)
\end{aligned}$$
This is clearly nonlinear, so we need to linearize $h(x, p)$ at $\mathbf x$ by taking its Jacobian. We compute that with SymPy below.
```
px, py = symbols('p_x, p_y')
z = Matrix([[sympy.sqrt((px-x)**2 + (py-y)**2)],
[sympy.atan2(py-y, px-x) - theta]])
z.jacobian(Matrix([x, y, theta]))
```
Now we need to write that as a Python function. For example we might write:
```
from math import sqrt
from numpy import array
def H_of(x, landmark_pos):
""" compute Jacobian of H matrix where h(x) computes
the range and bearing to a landmark for state x """
px = landmark_pos[0]
py = landmark_pos[1]
hyp = (px - x[0, 0])**2 + (py - x[1, 0])**2
dist = sqrt(hyp)
H = array(
[[-(px - x[0, 0]) / dist, -(py - x[1, 0]) / dist, 0],
[ (py - x[1, 0]) / hyp, -(px - x[0, 0]) / hyp, -1]])
return H
```
We also need to define a function that converts the system state into a measurement.
```
from math import sqrt, atan2
from numpy import array
def Hx(x, landmark_pos):
""" takes a state variable and returns the measurement
that would correspond to that state.
"""
px = landmark_pos[0]
py = landmark_pos[1]
dist = sqrt((px - x[0, 0])**2 + (py - x[1, 0])**2)
Hx = array([[dist],
[atan2(py - x[1, 0], px - x[0, 0]) - x[2, 0]]])
return Hx
```
### Design Measurement Noise
This is quite straightforward as we need to specify measurement noise in measurement space, hence it is linear. It is reasonable to assume that the range and bearing measurement noise is independent, hence
$$R=\begin{bmatrix}\sigma_{range}^2 & 0 \\ 0 & \sigma_{bearing}^2\end{bmatrix}$$
### Implementation
We will use `FilterPy`'s `ExtendedKalmanFilter` class to implement the filter. Its `predict()` method uses the standard linear equations. Our process model is nonlinear, so we will have to override `predict()` with our own version. I also want to use this class to simulate the robot, so I'll add a method `move()` that computes the position of the robot, which both `predict()` and my simulation can call.
The matrices for the prediction step are quite large. While writing this code I made several errors before I finally got it working. I only found my errors by using SymPy's `evalf` function, which allows you to evaluate a SymPy `Matrix` with specific values for the variables. I decided to demonstrate this technique, and to eliminate a possible source of bugs, by using SymPy in the Kalman filter. You'll need to understand a couple of points.
First, `evalf` uses a dictionary to pass in the values you want to use. For example, if your matrix contains an `x` and `y`, you can write
```python
M.evalf(subs={x:3, y:17})
```
to evaluate the matrix for `x=3` and `y=17`.
Second, `evalf` returns a `sympy.Matrix` object. Use `numpy.array(M).astype(float)` to convert it to a NumPy array. `numpy.array(M)` creates an array of type `object`, which is not what you want.
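Here is a tiny, self-contained sketch of both points using a toy matrix rather than the filter's Jacobians:
```python
import numpy as np
import sympy
from sympy.abc import x, y

M = sympy.Matrix([[x + y, x*y]])
M_eval = M.evalf(subs={x: 3, y: 17})   # still a sympy.Matrix
M_np = np.array(M_eval).astype(float)  # now a plain NumPy float array
print(type(M_eval), M_np)
```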
Here is the code for the EKF:
```
from filterpy.kalman import ExtendedKalmanFilter as EKF
from numpy import dot, array, sqrt
from math import sin, cos, tan
import numpy as np
class RobotEKF(EKF):
def __init__(self, dt, wheelbase, std_vel, std_steer):
EKF.__init__(self, 3, 2, 2)
self.dt = dt
self.wheelbase = wheelbase
self.std_vel = std_vel
self.std_steer = std_steer
a, x, y, v, w, theta, time = symbols(
'a, x, y, v, w, theta, t')
d = v*time
beta = (d/w)*sympy.tan(a)
r = w/sympy.tan(a)
self.fxu = Matrix(
[[x-r*sympy.sin(theta)+r*sympy.sin(theta+beta)],
[y+r*sympy.cos(theta)-r*sympy.cos(theta+beta)],
[theta+beta]])
self.F_j = self.fxu.jacobian(Matrix([x, y, theta]))
self.V_j = self.fxu.jacobian(Matrix([v, a]))
# save dictionary and its variables for later use
self.subs = {x: 0, y: 0, v:0, a:0,
time:dt, w:wheelbase, theta:0}
self.x_x, self.x_y, = x, y
self.v, self.a, self.theta = v, a, theta
def predict(self, u=0):
self.x = self.move(self.x, u, self.dt)
self.subs[self.theta] = self.x[2, 0]
self.subs[self.v] = u[0]
self.subs[self.a] = u[1]
F = array(self.F_j.evalf(subs=self.subs)).astype(float)
V = array(self.V_j.evalf(subs=self.subs)).astype(float)
# covariance of motion noise in control space
M = array([[self.std_vel*u[0]**2, 0],
[0, self.std_steer**2]])
self.P = dot(F, self.P).dot(F.T) + dot(V, M).dot(V.T)
def move(self, x, u, dt):
hdg = x[2, 0]
vel = u[0]
steering_angle = u[1]
dist = vel * dt
if abs(steering_angle) > 0.001: # is robot turning?
beta = (dist / self.wheelbase) * tan(steering_angle)
r = self.wheelbase / tan(steering_angle) # radius
dx = np.array([[-r*sin(hdg) + r*sin(hdg + beta)],
[r*cos(hdg) - r*cos(hdg + beta)],
[beta]])
else: # moving in straight line
dx = np.array([[dist*cos(hdg)],
[dist*sin(hdg)],
[0]])
return x + dx
```
Now we have another issue to handle. The residual is notionally computed as $y = z - h(x)$ but this will not work because our measurement contains an angle in it. Suppose z has a bearing of $1^\circ$ and $h(x)$ has a bearing of $359^\circ$. Naively subtracting them would yield a bearing difference of $-358^\circ$, which will throw off the computation of the Kalman gain. The correct angle difference in this case is $2^\circ$. So we will have to write code to correctly compute the bearing residual.
```
def residual(a, b):
""" compute residual (a-b) between measurements containing
[range, bearing]. Bearing is normalized to [-pi, pi)"""
y = a - b
y[1] = y[1] % (2 * np.pi) # force in range [0, 2 pi)
if y[1] > np.pi: # move to [-pi, pi)
y[1] -= 2 * np.pi
return y
```
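As a quick sanity check of the example above, a measured bearing of $1^\circ$ against a predicted bearing of $359^\circ$ produces a residual of roughly $+2^\circ$:
```python
a = np.array([0., np.radians(1)])     # [range, bearing] of the measurement z
b = np.array([0., np.radians(359)])   # [range, bearing] of h(x)
print(np.degrees(residual(a, b)[1]))  # ~2.0, not -358
```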
The rest of the code runs the simulation and plots the results, and shouldn't need much comment by now. I create a variable `landmarks` that contains the coordinates of the landmarks. I update the simulated robot position 10 times a second, but run the EKF only once for every 10 simulation steps. This is for two reasons. First, we are not using Runge-Kutta to integrate the differential equations of motion, so a smaller time step makes the simulation more accurate. Second, it is fairly normal in embedded systems to have limited processing speed, which forces you to run your Kalman filter only as frequently as absolutely needed.
```
from filterpy.stats import plot_covariance_ellipse
from math import sqrt, tan, cos, sin, atan2
from numpy.random import randn
import matplotlib.pyplot as plt
dt = 1.0
def z_landmark(lmark, sim_pos, std_rng, std_brg):
x, y = sim_pos[0, 0], sim_pos[1, 0]
d = np.sqrt((lmark[0] - x)**2 + (lmark[1] - y)**2)
a = atan2(lmark[1] - y, lmark[0] - x) - sim_pos[2, 0]
z = np.array([[d + randn()*std_rng],
[a + randn()*std_brg]])
return z
def ekf_update(ekf, z, landmark):
ekf.update(z, HJacobian=H_of, Hx=Hx,
residual=residual,
args=(landmark), hx_args=(landmark))
def run_localization(landmarks, std_vel, std_steer,
std_range, std_bearing,
step=10, ellipse_step=20, ylim=None):
ekf = RobotEKF(dt, wheelbase=0.5, std_vel=std_vel,
std_steer=std_steer)
ekf.x = array([[2, 6, .3]]).T # x, y, heading (theta)
ekf.P = np.diag([.1, .1, .1])
ekf.R = np.diag([std_range**2, std_bearing**2])
sim_pos = ekf.x.copy() # simulated position
# steering command (vel, steering angle radians)
u = array([1.1, .01])
plt.scatter(landmarks[:, 0], landmarks[:, 1],
marker='s', s=60)
track = []
for i in range(200):
sim_pos = ekf.move(sim_pos, u, dt/10.) # simulate robot
track.append(sim_pos)
if i % step == 0:
ekf.predict(u=u)
if i % ellipse_step == 0:
plot_covariance_ellipse(
(ekf.x[0,0], ekf.x[1,0]), ekf.P[0:2, 0:2],
std=6, facecolor='k', alpha=0.3)
x, y = sim_pos[0, 0], sim_pos[1, 0]
for lmark in landmarks:
z = z_landmark(lmark, sim_pos,
std_range, std_bearing)
ekf_update(ekf, z, lmark)
if i % ellipse_step == 0:
plot_covariance_ellipse(
(ekf.x[0,0], ekf.x[1,0]), ekf.P[0:2, 0:2],
std=6, facecolor='g', alpha=0.8)
track = np.array(track)
plt.plot(track[:, 0], track[:,1], color='k', lw=2)
plt.axis('equal')
plt.title("EKF Robot localization")
if ylim is not None: plt.ylim(*ylim)
plt.show()
return ekf
landmarks = array([[5, 10], [10, 5], [15, 15]])
ekf = run_localization(
landmarks, std_vel=0.1, std_steer=np.radians(1),
std_range=0.3, std_bearing=0.1)
print('Final P:', ekf.P.diagonal())
```
I have plotted the landmarks as solid squares. The path of the robot is drawn with a black line. The covariance ellipses for the predict step are shown in light gray, and the covariances of the update are shown in green. To make them visible at this scale I have set the ellipse boundary at 6$\sigma$.
From this we can see that there is a lot of uncertainty added by our motion model, and that most of the error is in the direction of motion. We can see that from the shape of the gray (predict) ellipses. After a few steps we can see that the filter incorporates the landmark measurements and the uncertainty shrinks.
I used the same initial conditions and landmark locations as in the UKF chapter. You can see both in the plot and in the printed final value for $\mathbf P$ that the UKF achieves much better accuracy in terms of the error ellipse. The black solid line denotes the robot's actual path. Both filters perform roughly equally well as far as their estimate for $\mathbf x$ is concerned.
Now let's add another landmark.
```
landmarks = array([[5, 10], [10, 5], [15, 15], [20, 5]])
ekf = run_localization(
landmarks, std_vel=0.1, std_steer=np.radians(1),
std_range=0.3, std_bearing=0.1)
plt.show()
print('Final P:', ekf.P.diagonal())
```
The uncertainty in the estimates near the end of the track is smaller with the additional landmark. We can see the fantastic effect that multiple landmarks have on our uncertainty by using only the first two landmarks.
```
ekf = run_localization(
landmarks[0:2], std_vel=1.e-10, std_steer=1.e-10,
std_range=1.4, std_bearing=.05)
print('Final P:', ekf.P.diagonal())
```
The estimate quickly diverges from the robot's path after passing the landmarks. The covariance also grows quickly. Let's see what happens with only one landmark:
```
ekf = run_localization(
landmarks[0:1], std_vel=1.e-10, std_steer=1.e-10,
std_range=1.4, std_bearing=.05)
print('Final P:', ekf.P.diagonal())
```
As you probably suspected, only one landmark produces a very bad result. Conversely, a large number of landmarks allows us to make very accurate estimates.
```
landmarks = array([[5, 10], [10, 5], [15, 15], [20, 5], [15, 10],
[10,14], [23, 14], [25, 20], [10, 20]])
ekf = run_localization(
landmarks, std_vel=0.1, std_steer=np.radians(1),
std_range=0.3, std_bearing=0.1, ylim=(0, 21))
print('Final P:', ekf.P.diagonal())
```
### Discussion
I said that this was a 'real' problem, and in some ways it is. I've seen alternative presentations that used robot motion models that led to much easier Jacobians. On the other hand, my model of an automobile's movement is itself simplistic in several ways. First, it uses a bicycle model. A real car has two sets of tires, and each travels on a different radius. The wheels do not grip the surface perfectly. I also assumed that the robot responds instantaneously to the control input. Sebastian Thrun writes in *Probabilistic Robotics* that simplified models are justified because the filters perform well when used to track real vehicles. The lesson here is that while you have to have a reasonably accurate nonlinear model, it does not need to be perfect to operate well. As a designer you will need to balance the fidelity of your model with the difficulty of the math and the computation required to implement the equations.
Another way in which this problem was simplistic is that we assumed that we knew the correspondence between the landmarks and measurements. But suppose we are using radar - how would we know that a specific signal return corresponded to a specific building in the local scene? This question hints at SLAM algorithms - simultaneous localization and mapping. SLAM is not the point of this book, so I will not elaborate on this topic.
## UKF vs EKF
I implemented this tracking problem using the UKF in the previous chapter. The difference in implementation should be very clear. Computing the Jacobians for the state and measurement models was not trivial despite a rudimentary motion model. I am justified in using this model because the research resulting from the DARPA car challenges has shown that it works well in practice. A different problem could result in a Jacobian which is difficult or impossible to derive analytically. In contrast, the UKF only requires you to provide a function that computes the system motion model and another for the measurement model.
There are many cases where the Jacobian cannot be found analytically. The details are beyond the scope of this book, but you will have to use numerical methods to compute the Jacobian. That is a very nontrivial undertaking, and you will spend a significant portion of a master's degree at a STEM school learning techniques to handle such situations. Even then you'll likely only be able to solve problems related to your field - an aeronautical engineer learns a lot about Navier Stokes equations, but not much about modelling chemical reaction rates.
So, UKFs are easy. Are they accurate? In practice they often perform better than the EKF. You can find plenty of research papers that prove that the UKF outperforms the EKF in various problem domains. It's not hard to understand why this would be true. The EKF works by linearizing the system model and measurement model at a single point, and the UKF uses $2n+1$ points.
Let's look at a specific example. Take $f(x) = x^3$ and pass a Gaussian distribution through it. I will compute an accurate answer using a Monte Carlo simulation. I generate 50,000 points randomly distributed according to the Gaussian, pass each through $f(x)$, then compute the mean and variance of the result.
First, let's see how the EKF fares. The EKF linearizes the function by taking the derivative and evaluating it at the mean $x$ to get the slope tangent to the function at that point. This slope becomes the linear function that we use to transform the Gaussian. Here is a plot of that.
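(Before looking at the plot, here is a minimal numeric sketch of the same comparison. The prior mean and standard deviation are arbitrary values chosen for illustration, not the ones used by the plotting helper below.)
```python
import numpy as np

mean, std = 1.0, 0.5                   # an arbitrary prior Gaussian, for illustration
samples = mean + std*np.random.randn(50000)

# Monte Carlo "ground truth": pass every sample through f(x) = x**3
ys = samples**3
print('MC :', ys.mean(), ys.std())

# EKF-style linearization: y ~ f(mean) + f'(mean)*(x - mean), with f'(x) = 3x**2
slope = 3*mean**2
print('EKF:', mean**3, abs(slope)*std)
```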
```
import nonlinear_plots
nonlinear_plots.plot_ekf_vs_mc()
```
We can see from both the graph and the print out at the bottom that the EKF has introduced quite a bit of error.
In contrast, here is the performance of the UKF:
```
nonlinear_plots.plot_ukf_vs_mc(alpha=0.001, beta=3., kappa=1.)
```
Here we can see that the computation of the UKF's mean is accurate to 2 decimal places. The standard deviation is slightly off, but you can also fine tune how the UKF computes the distribution by using the $\alpha$, $\beta$, and $\kappa$ parameters for generating the sigma points. Here I used $\alpha=0.001$, $\beta=3$, and $\kappa=1$. Feel free to modify them in the function call to see the result. You should be able to get better results than I did. However, avoid over-tuning the UKF for a specific test. It may perform better for your test case, but worse in general.
This is a contrived example, but as I said the literature is filled with detailed studies of real world problems that exhibit similar performance differences between the two filters.
# Examples of usage of Gate Angle Placeholder
The word "Placeholder" is used in Qubiter (we are in good company, Tensorflow uses this word in the same way) to mean a variable for which we delay/postpone assigning a numerical value (evaluating it) until a later time. In the case of Qubiter, it is useful to define gates with placeholders standing for angles. One can postpone evaluating those placeholders until one is ready to call the circuit simulator, and then pass the values of the placeholders as an argument to the simulator’s constructor. Placeholders of this type can be useful, for example, with quantum neural nets (QNNs). In some QNN algorithms, the circuit gate structure is fixed but the angles of the gates are varied many times, gradually, trying to lower a cost function each time.
> In Qubiter, legal variable names must be of form `#3` or `-#3` or `#3*.5` or
`-#3*.5` where 3 can be replaced by any non-negative int, and .5 can
be replaced by anything that can be an argument of float() without
throwing an exception. In this example, the 3 that follows the hash
character is called the variable number
>NEW! (functional placeholder variables)
Now legal variable names can ALSO be of the form `my_fun#1#2` or
`-my_fun#1#2`, where
* the 1 and 2 can be replaced by any non-negative integers and there
might be any number > 0 of hash variables. Thus, there need not
always be precisely 2 hash variables as in the example.
* `my_fun` can be replaced by the name of any function with one or
more input floats (2 inputs in the example), as long as the first
character of the function's name is a lower case letter.
>The strings `my_fun#1#2` or `-my_fun#1#2` indicate that one wants to
use for the angle being replaced, the values of `my_fun(#1, #2)` or
`-my_fun(#1, #2)`, respectively, where the inputs #1 and #2 are
floats standing for radians and the output is also a float standing
for radians.
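As a purely illustrative check of these naming rules, here is a rough transcription of them as regular expressions. These are our own approximation, not Qubiter's actual parser, so treat them only as a reading aid.
```
import re

simple = re.compile(r'^-?#\d+(\*-?\d*\.?\d+)?$')  # e.g. #3, -#3, #3*.5, -#3*.5
functional = re.compile(r'^-?[a-z]\w*(#\d+)+$')   # e.g. my_fun#1#2, -my_fun#1#2

for name in ['#3', '-#3*.5', 'my_fun#1#2', '-my_fun#1#2', 'MyFun#1', '#']:
    ok = bool(simple.match(name) or functional.match(name))
    print(name, '->', ok)
```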
```
import os
import sys
print(os.getcwd())
os.chdir('../../')
print(os.getcwd())
sys.path.insert(0,os.getcwd())
```
We begin by writing a simple circuit with 4 qubits. As usual, the following code will
write an English and a Picture file in the `io_folder` directory. Note that some
angles have been entered into the write() Python functions as legal
variable names instead of floats. In the English file, you will see those legal
names where the numerical values of those angles would have been.
```
from qubiter.SEO_writer import *
from qubiter.SEO_reader import *
from qubiter.EchoingSEO_reader import *
from qubiter.SEO_simulator import *
num_bits = 4
file_prefix = 'placeholder_test'
emb = CktEmbedder(num_bits, num_bits)
wr = SEO_writer(file_prefix, emb)
wr.write_Rx(2, rads=np.pi/7)
wr.write_Rx(1, rads='#2*.5')
wr.write_Rx(1, rads='my_fun1#2')
wr.write_Rn(3, rads_list=['#1', '-#1*3', '#3'])
wr.write_Rx(1, rads='-my_fun2#2#1')
wr.write_cnot(2, 3)
wr.close_files()
```
The following 2 files were just written:
1. <a href='../io_folder/placeholder_test_4_eng.txt'>../io_folder/placeholder_test_4_eng.txt</a>
2. <a href='../io_folder/placeholder_test_4_ZLpic.txt'>../io_folder/placeholder_test_4_ZLpic.txt</a>
Simply by creating an object of the class SEO_reader with the flag `write_log` set equal to True, you can create a log file which contains
* a list of distinct variable numbers
* a list of distinct function names
encountered in the English file
```
rdr = SEO_reader(file_prefix, num_bits, write_log=True)
```
The following log file was just written:
<a href='../io_folder/placeholder_test_4_log.txt'>../io_folder/placeholder_test_4_log.txt</a>
Next, let us create two functions that will be used for the functional placeholders
```
def my_fun1(x):
return x*.5
def my_fun2(x, y):
return x + y
```
**Partial Substitution**
This creates new files with `#1` set to 30° (π/6 radians), `#2` set to 60° (π/3 radians), and `'my_fun1'` bound to `my_fun1`, while `#3` and `'my_fun2'` are left undecided.
```
vman = PlaceholderManager(eval_all_vars=False,
var_num_to_rads={1: np.pi/6, 2: np.pi/3},
fun_name_to_fun={'my_fun1': my_fun1})
wr = SEO_writer(file_prefix + '_eval01', emb)
EchoingSEO_reader(file_prefix, num_bits, wr,
vars_manager=vman)
```
The following 2 files were just written:
1. <a href='../io_folder/placeholder_test_eval01_4_eng.txt'>../io_folder/placeholder_test_eval01_4_eng.txt</a>
2. <a href='../io_folder/placeholder_test_eval01_4_ZLpic.txt'>../io_folder/placeholder_test_eval01_4_ZLpic.txt</a>
The following code runs the simulator after substituting
`#1=30°` (π/6), `#2=60°` (π/3), `#3=90°` (π/2), `'my_fun1'->my_fun1`, `'my_fun2'->my_fun2`
```
vman = PlaceholderManager(
var_num_to_rads={1: np.pi/6, 2: np.pi/3, 3: np.pi/2},
fun_name_to_fun={'my_fun1': my_fun1, 'my_fun2': my_fun2}
)
sim = SEO_simulator(file_prefix, num_bits, verbose=False,
vars_manager=vman)
StateVec.describe_st_vec_dict(sim.cur_st_vec_dict)
```
# The art of using pipelines
Pipelines are a natural way to think about a machine learning system. Indeed with some practice a data scientist can visualise data "flowing" through a series of steps. The input is typically some raw data which has to be processed in some manner. The goal is to represent the data in such a way that it can be ingested by a machine learning algorithm. Along the way some steps will extract features, while others will normalize the data and remove undesirable elements. Pipelines are simple, and yet they are a powerful way of designing sophisticated machine learning systems.
Both [scikit-learn](https://stackoverflow.com/questions/33091376/python-what-is-exactly-sklearn-pipeline-pipeline) and [pandas](https://tomaugspurger.github.io/method-chaining) make it possible to use pipelines. However it's quite rare to see pipelines being used in practice (at least on Kaggle). Sometimes you get to see people using scikit-learn's `pipeline` module, however the `pipe` method from `pandas` is sadly underappreciated. A big reason why pipelines are not given much love is that it's easier to think of batch learning in terms of a script or a notebook. Indeed many people doing data science seem to prefer a procedural style to a declarative style. Moreover in practice pipelines can be a bit rigid if one wishes to do non-orthodox operations.
Although pipelines may be a bit of an odd fit for batch learning, they make complete sense when they are used for online learning. Indeed the UNIX philosophy has advocated the use of pipelines for data processing for many decades. If you can visualise data as a stream of observations then using pipelines should make a lot of sense to you. We'll attempt to convince you by writing a machine learning algorithm in a procedural way and then converting it to a declarative pipeline in small steps. Hopefully by the end you'll be convinced, or not!
In this notebook we'll manipulate data from the [Kaggle Recruit Restaurants Visitor Forecasting competition](https://www.kaggle.com/c/recruit-restaurant-visitor-forecasting). The data is directly available through `river`'s `datasets` module.
```
from pprint import pprint
from river import datasets
for x, y in datasets.Restaurants():
pprint(x)
pprint(y)
break
```
We'll start by building and running a model using a procedural coding style. The performance of the model doesn't matter; we're simply interested in the design of the model.
```
from river import feature_extraction
from river import linear_model
from river import metrics
from river import preprocessing
from river import stats
means = (
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(7)),
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(14)),
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(21))
)
scaler = preprocessing.StandardScaler()
lin_reg = linear_model.LinearRegression()
metric = metrics.MAE()
for x, y in datasets.Restaurants():
# Derive date features
x['weekday'] = x['date'].weekday()
x['is_weekend'] = x['date'].weekday() in (5, 6)
# Process the rolling means of the target
for mean in means:
x = {**x, **mean.transform_one(x)}
mean.learn_one(x, y)
# Remove the key/value pairs that aren't features
for key in ['store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude']:
x.pop(key)
# Rescale the data
x = scaler.learn_one(x).transform_one(x)
# Fit the linear regression
y_pred = lin_reg.predict_one(x)
lin_reg.learn_one(x, y)
# Update the metric using the out-of-fold prediction
metric.update(y, y_pred)
print(metric)
```
We're not using many features. We can print the last `x` to get an idea of the features (don't forget they've been scaled!)
```
pprint(x)
```
The above chunk of code is quite explicit but it's a bit verbose. The whole point of libraries such as `river` is to make life easier for users. Moreover there's too much space for users to mess up the order in which things are done, which increases the chance of there being target leakage. We'll now rewrite our model in a declarative fashion using a pipeline *à la sklearn*.
```
from river import compose
def get_date_features(x):
weekday = x['date'].weekday()
return {'weekday': weekday, 'is_weekend': weekday in (5, 6)}
model = compose.Pipeline(
('features', compose.TransformerUnion(
('date_features', compose.FuncTransformer(get_date_features)),
('last_7_mean', feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(7))),
('last_14_mean', feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(14))),
('last_21_mean', feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(21)))
)),
('drop_non_features', compose.Discard('store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude')),
('scale', preprocessing.StandardScaler()),
('lin_reg', linear_model.LinearRegression())
)
metric = metrics.MAE()
for x, y in datasets.Restaurants():
# Make a prediction without using the target
y_pred = model.predict_one(x)
# Update the model using the target
model.learn_one(x, y)
# Update the metric using the out-of-fold prediction
metric.update(y, y_pred)
print(metric)
```
We use a `Pipeline` to arrange each step in a sequential order. A `TransformerUnion` is used to merge multiple feature extractors into a single transformer. The `for` loop is now much shorter and is thus easier to grok: we get the out-of-fold prediction, we fit the model, and finally we update the metric. This way of evaluating a model is typical of online learning, and so it is wrapped inside a function called `progressive_val_score`, which is part of the `evaluate` module. We can use it to replace the `for` loop.
```
from river import evaluate
model = compose.Pipeline(
('features', compose.TransformerUnion(
('date_features', compose.FuncTransformer(get_date_features)),
('last_7_mean', feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(7))),
('last_14_mean', feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(14))),
('last_21_mean', feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(21)))
)),
('drop_non_features', compose.Discard('store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude')),
('scale', preprocessing.StandardScaler()),
('lin_reg', linear_model.LinearRegression())
)
evaluate.progressive_val_score(dataset=datasets.Restaurants(), model=model, metric=metrics.MAE())
```
Notice that you couldn't have used the `progressive_val_score` method if you wrote the model in a procedural manner.
Our code is getting shorter, but it's still a bit difficult on the eyes. Indeed there is a lot of boilerplate code associated with pipelines that can get tedious to write. However `river` has some special tricks up its sleeve to save you from a lot of pain.
The first trick is that the name of each step in the pipeline can be omitted. If no name is given for a step then `river` automatically infers one.
```
model = compose.Pipeline(
compose.TransformerUnion(
compose.FuncTransformer(get_date_features),
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(7)),
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(14)),
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(21))
),
compose.Discard('store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude'),
preprocessing.StandardScaler(),
linear_model.LinearRegression()
)
evaluate.progressive_val_score(datasets.Restaurants(), model, metrics.MAE())
```
Under the hood a `Pipeline` inherits from `collections.OrderedDict`. Indeed this makes sense because if you think about it a `Pipeline` is simply a sequence of steps where each step has a name. The reason we mention this is because it means you can manipulate a `Pipeline` the same way you would manipulate an ordinary `dict`. For instance we can print the name of each step by iterating over `model.steps`.
```
for name in model.steps:
print(name)
```
The first step is a `TransformerUnion` and its string representation contains the string representation of each of its elements. Not having to write names saves some time and space and is certainly less tedious.
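Since the steps live in a dict-like structure, individual steps can also be pulled out programmatically. This is a small sketch that only assumes `model.steps` supports the usual dict methods such as `items()` and `values()`:
```
# model.steps is keyed by the auto-inferred names printed above
for name, step in model.steps.items():
    print(name, '->', step.__class__.__name__)

# for example, the final estimator (the linear regression) is the last value
final_step = list(model.steps.values())[-1]
print(final_step)
```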
The next trick is that we can use mathematical operators to compose our pipeline. For example we can use the `+` operator to merge `Transformer`s into a `TransformerUnion`.
```
model = compose.Pipeline(
compose.FuncTransformer(get_date_features) + \
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(7)) + \
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(14)) + \
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(21)),
compose.Discard('store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude'),
preprocessing.StandardScaler(),
linear_model.LinearRegression()
)
evaluate.progressive_val_score(datasets.Restaurants(), model, metrics.MAE())
```
Likewise we can use the `|` operator to assemble steps into a `Pipeline`.
```
model = (
compose.FuncTransformer(get_date_features) +
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(7)) +
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(14)) +
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(21))
)
to_discard = ['store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude']
model = model | compose.Discard(*to_discard) | preprocessing.StandardScaler()
model |= linear_model.LinearRegression()
evaluate.progressive_val_score(datasets.Restaurants(), model, metrics.MAE())
```
Hopefully you'll agree that this is a powerful way to express machine learning pipelines. For some people this should be quite reminiscent of the UNIX pipe operator. One final trick we want to mention is that functions are automatically wrapped with a `FuncTransformer`, which can be quite handy.
```
model = get_date_features
for n in [7, 14, 21]:
model += feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(n))
model |= compose.Discard(*to_discard)
model |= preprocessing.StandardScaler()
model |= linear_model.LinearRegression()
evaluate.progressive_val_score(datasets.Restaurants(), model, metrics.MAE())
```
Naturally some may prefer the procedural style we first used because they find it easier to work with. It all depends on your style and you should use what you feel comfortable with. However we encourage you to use operators because we believe that this will increase the readability of your code, which is very important. To each their own!
Before finishing we can take an interactive look at our pipeline.
```
model
```